From 30c3981c2caa6d063026bb3af58dc05bf46d4d26 Mon Sep 17 00:00:00 2001
From: "verwaest@chromium.org"
Date: Fri, 22 Aug 2014 11:43:39 +0000
Subject: [PATCH] Move IC code into a subdir and move ic-compilation related
 code from stub-cache into ic-compiler

BUG=
R=bmeurer@chromium.org

Review URL: https://codereview.chromium.org/483683005

git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@23306 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
---
 BUILD.gn                                   |  30 +-
 src/arm/builtins-arm.cc                    |   1 -
 src/arm/code-stubs-arm.cc                  |   6 +-
 src/arm/code-stubs-arm.h                   |   2 +-
 src/arm/codegen-arm.h                      |   2 +-
 src/arm/full-codegen-arm.cc                |   1 -
 src/arm/lithium-codegen-arm.cc             |   1 -
 src/arm64/builtins-arm64.cc                |   1 -
 src/arm64/code-stubs-arm64.cc              |   6 +-
 src/arm64/code-stubs-arm64.h               |   2 +-
 src/arm64/codegen-arm64.h                  |   2 +-
 src/arm64/full-codegen-arm64.cc            |   1 -
 src/arm64/lithium-codegen-arm64.cc         |   1 -
 src/assembler.cc                           |   5 +-
 src/builtins.cc                            |   4 +-
 src/code-stubs.cc                          |   2 +-
 src/code-stubs.h                           |   1 +
 src/codegen.cc                             |   1 -
 src/debug.cc                               |   3 -
 src/full-codegen.cc                        |   1 -
 src/heap/mark-compact.cc                   |   3 +-
 src/heap/objects-visiting.cc               |   1 -
 src/hydrogen.cc                            |   5 +-
 src/ia32/builtins-ia32.cc                  |   1 -
 src/ia32/code-stubs-ia32.cc                |   2 +-
 src/ia32/code-stubs-ia32.h                 |   3 +-
 src/ia32/codegen-ia32.h                    |   2 +-
 src/ia32/full-codegen-ia32.cc              |   1 -
 src/ia32/lithium-codegen-ia32.cc           |   2 -
 src/{ => ic}/arm/ic-arm.cc                 | 246 +++----
 .../arm/ic-compiler-arm.cc}                | 271 +-------
 src/ic/arm/stub-cache-arm.cc               | 173 +++++
 src/{ => ic}/arm64/ic-arm64.cc             | 306 ++++-----
 .../arm64/ic-compiler-arm64.cc}            | 234 ++-----
 src/ic/arm64/stub-cache-arm64.cc           | 146 ++++
 .../ia32/ic-compiler-ia32.cc}              | 275 ++------
 src/{ => ic}/ia32/ic-ia32.cc               | 224 +++----
 src/ic/ia32/stub-cache-ia32.cc             | 183 +++++
 src/{stub-cache.cc => ic/ic-compiler.cc}   | 392 ++---------
 src/{stub-cache.h => ic/ic-compiler.h}     | 224 +------
 src/{ => ic}/ic-inl.h                      |  56 +-
 src/{ => ic}/ic.cc                         | 626 ++++++++++--------
 src/{ => ic}/ic.h                          | 265 +++-----
 src/ic/stub-cache.cc                       | 146 ++++
 src/ic/stub-cache.h                        | 168 +++++
 .../x64/ic-compiler-x64.cc}                | 241 ++-----
 src/{ => ic}/x64/ic-x64.cc                 | 281 +++-----
 src/ic/x64/stub-cache-x64.cc               | 149 +++++
 src/isolate.cc                             |   2 +-
 src/runtime.cc                             |   1 -
 src/serialize.cc                           |   4 +-
 src/type-info.cc                           |   4 +-
 src/x64/builtins-x64.cc                    |   1 -
 src/x64/code-stubs-x64.cc                  |   5 +-
 src/x64/code-stubs-x64.h                   |   2 +-
 src/x64/codegen-x64.h                      |   2 +-
 src/x64/full-codegen-x64.cc                |   1 -
 src/x64/lithium-codegen-x64.cc             |   1 -
 test/cctest/test-debug.cc                  |   1 -
 test/cctest/test-deoptimization.cc         |   1 -
 test/cctest/test-disasm-ia32.cc            |   2 +-
 test/cctest/test-disasm-x64.cc             |   2 +-
 test/cctest/test-heap.cc                   |   2 +-
 test/cctest/test-serialize.cc              |   1 -
 tools/gyp/v8.gyp                           |  32 +-
 65 files changed, 2143 insertions(+), 2620 deletions(-)
 rename src/{ => ic}/arm/ic-arm.cc (83%)
 rename src/{arm/stub-cache-arm.cc => ic/arm/ic-compiler-arm.cc} (78%)
 create mode 100644 src/ic/arm/stub-cache-arm.cc
 rename src/{ => ic}/arm64/ic-arm64.cc (80%)
 rename src/{arm64/stub-cache-arm64.cc => ic/arm64/ic-compiler-arm64.cc} (80%)
 create mode 100644 src/ic/arm64/stub-cache-arm64.cc
 rename src/{ia32/stub-cache-ia32.cc => ic/ia32/ic-compiler-ia32.cc} (78%)
 rename src/{ => ic}/ia32/ic-ia32.cc (84%)
 create mode 100644 src/ic/ia32/stub-cache-ia32.cc
 rename src/{stub-cache.cc => ic/ic-compiler.cc} (73%)
 rename src/{stub-cache.h => ic/ic-compiler.h} (72%)
 rename src/{ => ic}/ic-inl.h (82%)
 rename src/{ => ic}/ic.cc (86%)
 rename src/{ => ic}/ic.h (81%)
 create mode 100644 src/ic/stub-cache.cc
 create mode 100644 src/ic/stub-cache.h
 rename src/{x64/stub-cache-x64.cc => ic/x64/ic-compiler-x64.cc} (79%)
 rename src/{ => ic}/x64/ic-x64.cc (81%)
 create mode 100644 src/ic/x64/stub-cache-x64.cc

diff --git a/BUILD.gn b/BUILD.gn
index 5ea07bca7..b5bea372f 100644
--- a/BUILD.gn
+++ b/BUILD.gn
@@ -714,9 +714,11 @@ source_set("v8_base") {
     "src/i18n.h",
     "src/icu_util.cc",
     "src/icu_util.h",
-    "src/ic-inl.h",
-    "src/ic.cc",
-    "src/ic.h",
+    "src/ic/ic-inl.h",
+    "src/ic/ic.cc",
+    "src/ic/ic.h",
+    "src/ic/stub-cache.cc",
+    "src/ic/stub-cache.h",
     "src/interface.cc",
     "src/interface.h",
     "src/interpreter-irregexp.cc",
@@ -819,8 +821,6 @@ source_set("v8_base") {
     "src/string-stream.h",
     "src/strtod.cc",
     "src/strtod.h",
-    "src/stub-cache.cc",
-    "src/stub-cache.h",
     "src/token.cc",
     "src/token.h",
     "src/transitions-inl.h",
@@ -878,7 +878,6 @@ source_set("v8_base") {
       "src/ia32/frames-ia32.cc",
      "src/ia32/frames-ia32.h",
       "src/ia32/full-codegen-ia32.cc",
-      "src/ia32/ic-ia32.cc",
       "src/ia32/lithium-codegen-ia32.cc",
       "src/ia32/lithium-codegen-ia32.h",
       "src/ia32/lithium-gap-resolver-ia32.cc",
@@ -889,11 +888,13 @@ source_set("v8_base") {
       "src/ia32/macro-assembler-ia32.h",
       "src/ia32/regexp-macro-assembler-ia32.cc",
       "src/ia32/regexp-macro-assembler-ia32.h",
-      "src/ia32/stub-cache-ia32.cc",
       "src/compiler/ia32/code-generator-ia32.cc",
       "src/compiler/ia32/instruction-codes-ia32.h",
       "src/compiler/ia32/instruction-selector-ia32.cc",
       "src/compiler/ia32/linkage-ia32.cc",
+      "src/ic/ia32/ic-ia32.cc",
+      "src/ic/ia32/ic-compiler-ia32.cc",
+      "src/ic/ia32/stub-cache-ia32.cc",
     ]
   } else if (v8_target_arch == "x64") {
     sources += [
@@ -912,7 +913,6 @@ source_set("v8_base") {
       "src/x64/frames-x64.cc",
       "src/x64/frames-x64.h",
       "src/x64/full-codegen-x64.cc",
-      "src/x64/ic-x64.cc",
       "src/x64/lithium-codegen-x64.cc",
       "src/x64/lithium-codegen-x64.h",
       "src/x64/lithium-gap-resolver-x64.cc",
@@ -923,11 +923,13 @@ source_set("v8_base") {
       "src/x64/macro-assembler-x64.h",
       "src/x64/regexp-macro-assembler-x64.cc",
       "src/x64/regexp-macro-assembler-x64.h",
-      "src/x64/stub-cache-x64.cc",
       "src/compiler/x64/code-generator-x64.cc",
       "src/compiler/x64/instruction-codes-x64.h",
       "src/compiler/x64/instruction-selector-x64.cc",
       "src/compiler/x64/linkage-x64.cc",
+      "src/ic/x64/ic-x64.cc",
+      "src/ic/x64/ic-compiler-x64.cc",
+      "src/ic/x64/stub-cache-x64.cc",
     ]
   } else if (v8_target_arch == "arm") {
     sources += [
@@ -948,7 +950,6 @@ source_set("v8_base") {
       "src/arm/frames-arm.cc",
       "src/arm/frames-arm.h",
       "src/arm/full-codegen-arm.cc",
-      "src/arm/ic-arm.cc",
       "src/arm/lithium-arm.cc",
       "src/arm/lithium-arm.h",
       "src/arm/lithium-codegen-arm.cc",
@@ -960,11 +961,13 @@ source_set("v8_base") {
       "src/arm/regexp-macro-assembler-arm.cc",
       "src/arm/regexp-macro-assembler-arm.h",
       "src/arm/simulator-arm.cc",
-      "src/arm/stub-cache-arm.cc",
       "src/compiler/arm/code-generator-arm.cc",
       "src/compiler/arm/instruction-codes-arm.h",
       "src/compiler/arm/instruction-selector-arm.cc",
       "src/compiler/arm/linkage-arm.cc",
+      "src/ic/arm/ic-arm.cc",
+      "src/ic/arm/ic-compiler-arm.cc",
+      "src/ic/arm/stub-cache-arm.cc",
     ]
   } else if (v8_target_arch == "arm64") {
     sources += [
@@ -988,7 +991,6 @@ source_set("v8_base") {
       "src/arm64/frames-arm64.cc",
       "src/arm64/frames-arm64.h",
       "src/arm64/full-codegen-arm64.cc",
-      "src/arm64/ic-arm64.cc",
       "src/arm64/instructions-arm64.cc",
       "src/arm64/instructions-arm64.h",
       "src/arm64/instrument-arm64.cc",
@@ -1006,13 +1008,15 @@ source_set("v8_base") {
       "src/arm64/regexp-macro-assembler-arm64.h",
       "src/arm64/simulator-arm64.cc",
       "src/arm64/simulator-arm64.h",
-      "src/arm64/stub-cache-arm64.cc",
       "src/arm64/utils-arm64.cc",
       "src/arm64/utils-arm64.h",
"src/compiler/arm64/code-generator-arm64.cc", "src/compiler/arm64/instruction-codes-arm64.h", "src/compiler/arm64/instruction-selector-arm64.cc", "src/compiler/arm64/linkage-arm64.cc", + "src/ic/arm64/ic-arm64.cc", + "src/ic/arm64/ic-compiler-arm64.cc", + "src/ic/arm64/stub-cache-arm64.cc", ] } else if (v8_target_arch == "mipsel") { sources += [ diff --git a/src/arm/builtins-arm.cc b/src/arm/builtins-arm.cc index 357137f13..3a4c07a04 100644 --- a/src/arm/builtins-arm.cc +++ b/src/arm/builtins-arm.cc @@ -11,7 +11,6 @@ #include "src/deoptimizer.h" #include "src/full-codegen.h" #include "src/runtime.h" -#include "src/stub-cache.h" namespace v8 { namespace internal { diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc index a728d58fb..a04bb86f9 100644 --- a/src/arm/code-stubs-arm.cc +++ b/src/arm/code-stubs-arm.cc @@ -8,8 +8,12 @@ #include "src/bootstrapper.h" #include "src/code-stubs.h" +#include "src/codegen.h" +#include "src/ic/ic-compiler.h" +#include "src/isolate.h" +#include "src/jsregexp.h" #include "src/regexp-macro-assembler.h" -#include "src/stub-cache.h" +#include "src/runtime.h" namespace v8 { namespace internal { diff --git a/src/arm/code-stubs-arm.h b/src/arm/code-stubs-arm.h index ff2a80e67..08efecb8a 100644 --- a/src/arm/code-stubs-arm.h +++ b/src/arm/code-stubs-arm.h @@ -5,7 +5,7 @@ #ifndef V8_ARM_CODE_STUBS_ARM_H_ #define V8_ARM_CODE_STUBS_ARM_H_ -#include "src/ic-inl.h" +#include "src/code-stubs.h" namespace v8 { namespace internal { diff --git a/src/arm/codegen-arm.h b/src/arm/codegen-arm.h index 9ec09583d..4c7c7688f 100644 --- a/src/arm/codegen-arm.h +++ b/src/arm/codegen-arm.h @@ -6,7 +6,7 @@ #define V8_ARM_CODEGEN_ARM_H_ #include "src/ast.h" -#include "src/ic-inl.h" +#include "src/macro-assembler.h" namespace v8 { namespace internal { diff --git a/src/arm/full-codegen-arm.cc b/src/arm/full-codegen-arm.cc index 141571539..c61e21f06 100644 --- a/src/arm/full-codegen-arm.cc +++ b/src/arm/full-codegen-arm.cc @@ -14,7 +14,6 @@ #include "src/isolate-inl.h" #include "src/parser.h" #include "src/scopes.h" -#include "src/stub-cache.h" #include "src/arm/code-stubs-arm.h" #include "src/arm/macro-assembler-arm.h" diff --git a/src/arm/lithium-codegen-arm.cc b/src/arm/lithium-codegen-arm.cc index 627aad79d..e2658f3c4 100644 --- a/src/arm/lithium-codegen-arm.cc +++ b/src/arm/lithium-codegen-arm.cc @@ -8,7 +8,6 @@ #include "src/arm/lithium-gap-resolver-arm.h" #include "src/code-stubs.h" #include "src/hydrogen-osr.h" -#include "src/stub-cache.h" namespace v8 { namespace internal { diff --git a/src/arm64/builtins-arm64.cc b/src/arm64/builtins-arm64.cc index 2e0aed77a..836ffe1ff 100644 --- a/src/arm64/builtins-arm64.cc +++ b/src/arm64/builtins-arm64.cc @@ -11,7 +11,6 @@ #include "src/deoptimizer.h" #include "src/full-codegen.h" #include "src/runtime.h" -#include "src/stub-cache.h" namespace v8 { namespace internal { diff --git a/src/arm64/code-stubs-arm64.cc b/src/arm64/code-stubs-arm64.cc index 3ef118aae..23a57374b 100644 --- a/src/arm64/code-stubs-arm64.cc +++ b/src/arm64/code-stubs-arm64.cc @@ -8,8 +8,12 @@ #include "src/bootstrapper.h" #include "src/code-stubs.h" +#include "src/codegen.h" +#include "src/ic/ic-compiler.h" +#include "src/isolate.h" +#include "src/jsregexp.h" #include "src/regexp-macro-assembler.h" -#include "src/stub-cache.h" +#include "src/runtime.h" namespace v8 { namespace internal { diff --git a/src/arm64/code-stubs-arm64.h b/src/arm64/code-stubs-arm64.h index 75a945299..6ba2f5725 100644 --- a/src/arm64/code-stubs-arm64.h +++ 
b/src/arm64/code-stubs-arm64.h @@ -5,7 +5,7 @@ #ifndef V8_ARM64_CODE_STUBS_ARM64_H_ #define V8_ARM64_CODE_STUBS_ARM64_H_ -#include "src/ic-inl.h" +#include "src/code-stubs.h" namespace v8 { namespace internal { diff --git a/src/arm64/codegen-arm64.h b/src/arm64/codegen-arm64.h index 9ef148cc4..2f01c510d 100644 --- a/src/arm64/codegen-arm64.h +++ b/src/arm64/codegen-arm64.h @@ -6,7 +6,7 @@ #define V8_ARM64_CODEGEN_ARM64_H_ #include "src/ast.h" -#include "src/ic-inl.h" +#include "src/macro-assembler.h" namespace v8 { namespace internal { diff --git a/src/arm64/full-codegen-arm64.cc b/src/arm64/full-codegen-arm64.cc index 840634feb..b44f1ff65 100644 --- a/src/arm64/full-codegen-arm64.cc +++ b/src/arm64/full-codegen-arm64.cc @@ -14,7 +14,6 @@ #include "src/isolate-inl.h" #include "src/parser.h" #include "src/scopes.h" -#include "src/stub-cache.h" #include "src/arm64/code-stubs-arm64.h" #include "src/arm64/macro-assembler-arm64.h" diff --git a/src/arm64/lithium-codegen-arm64.cc b/src/arm64/lithium-codegen-arm64.cc index 1962c98bd..d6c9ef6b7 100644 --- a/src/arm64/lithium-codegen-arm64.cc +++ b/src/arm64/lithium-codegen-arm64.cc @@ -8,7 +8,6 @@ #include "src/arm64/lithium-gap-resolver-arm64.h" #include "src/code-stubs.h" #include "src/hydrogen-osr.h" -#include "src/stub-cache.h" namespace v8 { namespace internal { diff --git a/src/assembler.cc b/src/assembler.cc index e6dc4eb14..4187675f1 100644 --- a/src/assembler.cc +++ b/src/assembler.cc @@ -40,19 +40,20 @@ #include "src/base/lazy-instance.h" #include "src/base/platform/platform.h" #include "src/builtins.h" +#include "src/codegen.h" #include "src/counters.h" #include "src/cpu-profiler.h" #include "src/debug.h" #include "src/deoptimizer.h" #include "src/execution.h" -#include "src/ic.h" +#include "src/ic/ic.h" +#include "src/ic/stub-cache.h" #include "src/isolate-inl.h" #include "src/jsregexp.h" #include "src/regexp-macro-assembler.h" #include "src/regexp-stack.h" #include "src/runtime.h" #include "src/serialize.h" -#include "src/stub-cache.h" #include "src/token.h" #if V8_TARGET_ARCH_IA32 diff --git a/src/builtins.cc b/src/builtins.cc index 498387353..0f7bc2029 100644 --- a/src/builtins.cc +++ b/src/builtins.cc @@ -13,9 +13,9 @@ #include "src/gdb-jit.h" #include "src/heap/mark-compact.h" #include "src/heap-profiler.h" -#include "src/ic-inl.h" +#include "src/ic/ic.h" +#include "src/ic/ic-compiler.h" #include "src/prototype.h" -#include "src/stub-cache.h" #include "src/vm-state-inl.h" namespace v8 { diff --git a/src/code-stubs.cc b/src/code-stubs.cc index 0e68ab8a5..b190321cd 100644 --- a/src/code-stubs.cc +++ b/src/code-stubs.cc @@ -9,8 +9,8 @@ #include "src/cpu-profiler.h" #include "src/factory.h" #include "src/gdb-jit.h" +#include "src/ic/ic-compiler.h" #include "src/macro-assembler.h" -#include "src/stub-cache.h" namespace v8 { namespace internal { diff --git a/src/code-stubs.h b/src/code-stubs.h index c1d051b3d..4d021c2dc 100644 --- a/src/code-stubs.h +++ b/src/code-stubs.h @@ -9,6 +9,7 @@ #include "src/assembler.h" #include "src/codegen.h" #include "src/globals.h" +#include "src/ic/ic.h" #include "src/macro-assembler.h" #include "src/ostreams.h" diff --git a/src/codegen.cc b/src/codegen.cc index 6b12d6456..8b54ff58f 100644 --- a/src/codegen.cc +++ b/src/codegen.cc @@ -12,7 +12,6 @@ #include "src/prettyprinter.h" #include "src/rewriter.h" #include "src/runtime.h" -#include "src/stub-cache.h" namespace v8 { namespace internal { diff --git a/src/debug.cc b/src/debug.cc index d974cd700..6b983cef3 100644 --- a/src/debug.cc +++ b/src/debug.cc 
@@ -16,14 +16,11 @@ #include "src/execution.h" #include "src/full-codegen.h" #include "src/global-handles.h" -#include "src/ic.h" -#include "src/ic-inl.h" #include "src/isolate-inl.h" #include "src/list.h" #include "src/log.h" #include "src/messages.h" #include "src/natives.h" -#include "src/stub-cache.h" #include "include/v8-debug.h" diff --git a/src/full-codegen.cc b/src/full-codegen.cc index 1b6ab3762..98ddae8d4 100644 --- a/src/full-codegen.cc +++ b/src/full-codegen.cc @@ -14,7 +14,6 @@ #include "src/scopeinfo.h" #include "src/scopes.h" #include "src/snapshot.h" -#include "src/stub-cache.h" namespace v8 { namespace internal { diff --git a/src/heap/mark-compact.cc b/src/heap/mark-compact.cc index e4057ff4b..9139c6024 100644 --- a/src/heap/mark-compact.cc +++ b/src/heap/mark-compact.cc @@ -20,8 +20,7 @@ #include "src/heap/spaces-inl.h" #include "src/heap/sweeper-thread.h" #include "src/heap-profiler.h" -#include "src/ic-inl.h" -#include "src/stub-cache.h" +#include "src/ic/stub-cache.h" namespace v8 { namespace internal { diff --git a/src/heap/objects-visiting.cc b/src/heap/objects-visiting.cc index a316d12dc..a0fc231d0 100644 --- a/src/heap/objects-visiting.cc +++ b/src/heap/objects-visiting.cc @@ -5,7 +5,6 @@ #include "src/v8.h" #include "src/heap/objects-visiting.h" -#include "src/ic-inl.h" namespace v8 { namespace internal { diff --git a/src/hydrogen.cc b/src/hydrogen.cc index 87de7b6ff..bd646f197 100644 --- a/src/hydrogen.cc +++ b/src/hydrogen.cc @@ -39,8 +39,11 @@ #include "src/runtime.h" #include "src/scopeinfo.h" #include "src/scopes.h" -#include "src/stub-cache.h" #include "src/typing.h" +// CallOptimization +#include "src/ic/ic-compiler.h" +// GetRootConstructor +#include "src/ic/ic-inl.h" #if V8_TARGET_ARCH_IA32 #include "src/ia32/lithium-codegen-ia32.h" // NOLINT diff --git a/src/ia32/builtins-ia32.cc b/src/ia32/builtins-ia32.cc index cca65f471..03b67fdd0 100644 --- a/src/ia32/builtins-ia32.cc +++ b/src/ia32/builtins-ia32.cc @@ -9,7 +9,6 @@ #include "src/codegen.h" #include "src/deoptimizer.h" #include "src/full-codegen.h" -#include "src/stub-cache.h" namespace v8 { namespace internal { diff --git a/src/ia32/code-stubs-ia32.cc b/src/ia32/code-stubs-ia32.cc index 104576e64..6a5f99efe 100644 --- a/src/ia32/code-stubs-ia32.cc +++ b/src/ia32/code-stubs-ia32.cc @@ -9,11 +9,11 @@ #include "src/bootstrapper.h" #include "src/code-stubs.h" #include "src/codegen.h" +#include "src/ic/ic-compiler.h" #include "src/isolate.h" #include "src/jsregexp.h" #include "src/regexp-macro-assembler.h" #include "src/runtime.h" -#include "src/stub-cache.h" namespace v8 { namespace internal { diff --git a/src/ia32/code-stubs-ia32.h b/src/ia32/code-stubs-ia32.h index b72b6dd08..d0d521621 100644 --- a/src/ia32/code-stubs-ia32.h +++ b/src/ia32/code-stubs-ia32.h @@ -5,8 +5,7 @@ #ifndef V8_IA32_CODE_STUBS_IA32_H_ #define V8_IA32_CODE_STUBS_IA32_H_ -#include "src/ic-inl.h" -#include "src/macro-assembler.h" +#include "src/code-stubs.h" namespace v8 { namespace internal { diff --git a/src/ia32/codegen-ia32.h b/src/ia32/codegen-ia32.h index 3f59c2cb2..2382388be 100644 --- a/src/ia32/codegen-ia32.h +++ b/src/ia32/codegen-ia32.h @@ -6,7 +6,7 @@ #define V8_IA32_CODEGEN_IA32_H_ #include "src/ast.h" -#include "src/ic-inl.h" +#include "src/macro-assembler.h" namespace v8 { namespace internal { diff --git a/src/ia32/full-codegen-ia32.cc b/src/ia32/full-codegen-ia32.cc index 04fb4830c..2443f46f5 100644 --- a/src/ia32/full-codegen-ia32.cc +++ b/src/ia32/full-codegen-ia32.cc @@ -14,7 +14,6 @@ #include "src/isolate-inl.h" 
#include "src/parser.h" #include "src/scopes.h" -#include "src/stub-cache.h" namespace v8 { namespace internal { diff --git a/src/ia32/lithium-codegen-ia32.cc b/src/ia32/lithium-codegen-ia32.cc index d216581ba..395ae39c3 100644 --- a/src/ia32/lithium-codegen-ia32.cc +++ b/src/ia32/lithium-codegen-ia32.cc @@ -11,8 +11,6 @@ #include "src/deoptimizer.h" #include "src/hydrogen-osr.h" #include "src/ia32/lithium-codegen-ia32.h" -#include "src/ic.h" -#include "src/stub-cache.h" namespace v8 { namespace internal { diff --git a/src/arm/ic-arm.cc b/src/ic/arm/ic-arm.cc similarity index 83% rename from src/arm/ic-arm.cc rename to src/ic/arm/ic-arm.cc index d1add6d2f..68b49c7fd 100644 --- a/src/arm/ic-arm.cc +++ b/src/ic/arm/ic-arm.cc @@ -6,13 +6,9 @@ #if V8_TARGET_ARCH_ARM -#include "src/arm/assembler-arm.h" -#include "src/code-stubs.h" #include "src/codegen.h" -#include "src/disasm.h" -#include "src/ic-inl.h" -#include "src/runtime.h" -#include "src/stub-cache.h" +#include "src/ic/ic.h" +#include "src/ic/stub-cache.h" namespace v8 { namespace internal { @@ -25,8 +21,7 @@ namespace internal { #define __ ACCESS_MASM(masm) -static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, - Register type, +static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type, Label* global_object) { // Register usage: // type: holds the receiver instance type on entry. @@ -52,12 +47,9 @@ static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, // result. // The generated code assumes that the receiver has slow properties, // is not a global object and does not have interceptors. -static void GenerateDictionaryLoad(MacroAssembler* masm, - Label* miss, - Register elements, - Register name, - Register result, - Register scratch1, +static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss, + Register elements, Register name, + Register result, Register scratch1, Register scratch2) { // Main use of the scratch registers. // scratch1: Used as temporary and to hold the capacity of the property @@ -66,18 +58,14 @@ static void GenerateDictionaryLoad(MacroAssembler* masm, Label done; // Probe the dictionary. - NameDictionaryLookupStub::GeneratePositiveLookup(masm, - miss, - &done, - elements, - name, - scratch1, - scratch2); + NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements, + name, scratch1, scratch2); // If probing finds an entry check that the value is a normal // property. __ bind(&done); // scratch2 == elements + 4 * index - const int kElementsStartOffset = NameDictionary::kHeaderSize + + const int kElementsStartOffset = + NameDictionary::kHeaderSize + NameDictionary::kElementsStartIndex * kPointerSize; const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize; __ ldr(scratch1, FieldMemOperand(scratch2, kDetailsOffset)); @@ -101,12 +89,9 @@ static void GenerateDictionaryLoad(MacroAssembler* masm, // result. // The generated code assumes that the receiver has slow properties, // is not a global object and does not have interceptors. -static void GenerateDictionaryStore(MacroAssembler* masm, - Label* miss, - Register elements, - Register name, - Register value, - Register scratch1, +static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss, + Register elements, Register name, + Register value, Register scratch1, Register scratch2) { // Main use of the scratch registers. 
// scratch1: Used as temporary and to hold the capacity of the property @@ -115,23 +100,20 @@ static void GenerateDictionaryStore(MacroAssembler* masm, Label done; // Probe the dictionary. - NameDictionaryLookupStub::GeneratePositiveLookup(masm, - miss, - &done, - elements, - name, - scratch1, - scratch2); + NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements, + name, scratch1, scratch2); // If probing finds an entry in the dictionary check that the value // is a normal property that is not read only. __ bind(&done); // scratch2 == elements + 4 * index - const int kElementsStartOffset = NameDictionary::kHeaderSize + + const int kElementsStartOffset = + NameDictionary::kHeaderSize + NameDictionary::kElementsStartIndex * kPointerSize; const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize; const int kTypeAndReadOnlyMask = (PropertyDetails::TypeField::kMask | - PropertyDetails::AttributesField::encode(READ_ONLY)) << kSmiTagSize; + PropertyDetails::AttributesField::encode(READ_ONLY)) + << kSmiTagSize; __ ldr(scratch1, FieldMemOperand(scratch2, kDetailsOffset)); __ tst(scratch1, Operand(kTypeAndReadOnlyMask)); __ b(ne, miss); @@ -143,19 +125,17 @@ static void GenerateDictionaryStore(MacroAssembler* masm, // Update the write barrier. Make sure not to clobber the value. __ mov(scratch1, value); - __ RecordWrite( - elements, scratch2, scratch1, kLRHasNotBeenSaved, kDontSaveFPRegs); + __ RecordWrite(elements, scratch2, scratch1, kLRHasNotBeenSaved, + kDontSaveFPRegs); } // Checks the receiver for special cases (value type, slow case bits). // Falls through for regular JS object. static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm, - Register receiver, - Register map, + Register receiver, Register map, Register scratch, - int interceptor_bit, - Label* slow) { + int interceptor_bit, Label* slow) { // Check that the object isn't a smi. __ JumpIfSmi(receiver, slow); // Get the map of the receiver. @@ -178,14 +158,10 @@ static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm, // Loads an indexed element from a fast case array. // If not_fast_array is NULL, doesn't perform the elements map check. -static void GenerateFastArrayLoad(MacroAssembler* masm, - Register receiver, - Register key, - Register elements, - Register scratch1, - Register scratch2, - Register result, - Label* not_fast_array, +static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver, + Register key, Register elements, + Register scratch1, Register scratch2, + Register result, Label* not_fast_array, Label* out_of_range) { // Register use: // @@ -237,12 +213,9 @@ static void GenerateFastArrayLoad(MacroAssembler* masm, // Checks whether a key is an array index string or a unique name. // Falls through if a key is a unique name. -static void GenerateKeyNameCheck(MacroAssembler* masm, - Register key, - Register map, - Register hash, - Label* index_string, - Label* not_unique) { +static void GenerateKeyNameCheck(MacroAssembler* masm, Register key, + Register map, Register hash, + Label* index_string, Label* not_unique) { // The key is not a smi. Label unique; // Is it a name? @@ -278,8 +251,8 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm) { // Probe the stub cache. 
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags( Code::ComputeHandlerFlags(Code::LOAD_IC)); - masm->isolate()->stub_cache()->GenerateProbe( - masm, flags, receiver, name, r3, r4, r5, r6); + masm->isolate()->stub_cache()->GenerateProbe(masm, flags, receiver, name, r3, + r4, r5, r6); // Cache miss: Jump to runtime. GenerateMiss(masm); @@ -318,8 +291,7 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) { __ Push(LoadIC_TempRegister(), NameRegister()); // Perform tail call to the entry. - ExternalReference ref = - ExternalReference(IC_Utility(kLoadIC_Miss), isolate); + ExternalReference ref = ExternalReference(IC_Utility(kLoadIC_Miss), isolate); __ TailCallExternalReference(ref, 2, 1); } @@ -334,14 +306,10 @@ void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) { } -static MemOperand GenerateMappedArgumentsLookup(MacroAssembler* masm, - Register object, - Register key, - Register scratch1, - Register scratch2, - Register scratch3, - Label* unmapped_case, - Label* slow_case) { +static MemOperand GenerateMappedArgumentsLookup( + MacroAssembler* masm, Register object, Register key, Register scratch1, + Register scratch2, Register scratch3, Label* unmapped_case, + Label* slow_case) { Heap* heap = masm->isolate()->heap(); // Check that the receiver is a JSObject. Because of the map check @@ -412,9 +380,7 @@ static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm, __ b(cs, slow_case); __ mov(scratch, Operand(kPointerSize >> 1)); __ mul(scratch, key, scratch); - __ add(scratch, - scratch, - Operand(FixedArray::kHeaderSize - kHeapObjectTag)); + __ add(scratch, scratch, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); return MemOperand(backing_store, scratch); } @@ -427,9 +393,8 @@ void KeyedLoadIC::GenerateSloppyArguments(MacroAssembler* masm) { DCHECK(key.is(r2)); Label slow, notin; - MemOperand mapped_location = - GenerateMappedArgumentsLookup( - masm, receiver, key, r0, r3, r4, ¬in, &slow); + MemOperand mapped_location = GenerateMappedArgumentsLookup( + masm, receiver, key, r0, r3, r4, ¬in, &slow); __ ldr(r0, mapped_location); __ Ret(); __ bind(¬in); @@ -514,9 +479,7 @@ const Register StoreIC::NameRegister() { return r2; } const Register StoreIC::ValueRegister() { return r0; } -const Register KeyedStoreIC::MapRegister() { - return r3; -} +const Register KeyedStoreIC::MapRegister() { return r3; } void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) { @@ -546,14 +509,13 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { // Now the key is known to be a smi. This place is also jumped to from below // where a numeric string is converted to a smi. - GenerateKeyedLoadReceiverCheck( - masm, receiver, r0, r3, Map::kHasIndexedInterceptor, &slow); + GenerateKeyedLoadReceiverCheck(masm, receiver, r0, r3, + Map::kHasIndexedInterceptor, &slow); // Check the receiver's map to see if it has fast elements. __ CheckFastElements(r0, r3, &check_number_dictionary); - GenerateFastArrayLoad( - masm, receiver, key, r0, r3, r4, r0, NULL, &slow); + GenerateFastArrayLoad(masm, receiver, key, r0, r3, r4, r0, NULL, &slow); __ IncrementCounter(isolate->counters()->keyed_load_generic_smi(), 1, r4, r3); __ Ret(); @@ -573,15 +535,15 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { // Slow case, key and receiver still in r2 and r1. 
__ bind(&slow); - __ IncrementCounter(isolate->counters()->keyed_load_generic_slow(), - 1, r4, r3); + __ IncrementCounter(isolate->counters()->keyed_load_generic_slow(), 1, r4, + r3); GenerateRuntimeGetProperty(masm); __ bind(&check_name); GenerateKeyNameCheck(masm, key, r0, r3, &index_name, &slow); - GenerateKeyedLoadReceiverCheck( - masm, receiver, r0, r3, Map::kHasNamedInterceptor, &slow); + GenerateKeyedLoadReceiverCheck(masm, receiver, r0, r3, + Map::kHasNamedInterceptor, &slow); // If the receiver is a fast-case object, check the keyed lookup // cache. Otherwise probe the dictionary. @@ -659,8 +621,8 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { __ add(r6, r6, r5); // Index from start of object. __ sub(receiver, receiver, Operand(kHeapObjectTag)); // Remove the heap tag. __ ldr(r0, MemOperand(receiver, r6, LSL, kPointerSizeLog2)); - __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(), - 1, r4, r3); + __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(), 1, + r4, r3); __ Ret(); // Load property array property. @@ -668,8 +630,8 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { __ ldr(receiver, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); __ add(receiver, receiver, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); __ ldr(r0, MemOperand(receiver, r5, LSL, kPointerSizeLog2)); - __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(), - 1, r4, r3); + __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(), 1, + r4, r3); __ Ret(); // Do a quick inline probe of the receiver's dictionary, if it @@ -681,8 +643,8 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { GenerateGlobalInstanceTypeCheck(masm, r0, &slow); // Load the property to r0. GenerateDictionaryLoad(masm, &slow, r3, key, r0, r5, r4); - __ IncrementCounter( - isolate->counters()->keyed_load_generic_symbol(), 1, r4, r3); + __ IncrementCounter(isolate->counters()->keyed_load_generic_symbol(), 1, r4, + r3); __ Ret(); __ bind(&index_name); @@ -702,10 +664,7 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) { Register result = r0; DCHECK(!scratch.is(receiver) && !scratch.is(index)); - StringCharAtGenerator char_at_generator(receiver, - index, - scratch, - result, + StringCharAtGenerator char_at_generator(receiver, index, scratch, result, &miss, // When not a string. &miss, // When not a number. &miss, // When index out of range. @@ -802,7 +761,7 @@ void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm, // Push receiver, key and value for runtime call. __ Push(ReceiverRegister(), NameRegister(), ValueRegister()); - __ mov(r0, Operand(Smi::FromInt(strict_mode))); // Strict mode. + __ mov(r0, Operand(Smi::FromInt(strict_mode))); // Strict mode. 
__ Push(r0); __ TailCallRuntime(Runtime::kSetProperty, 4, 1); @@ -810,18 +769,10 @@ void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm, static void KeyedStoreGenerateGenericHelper( - MacroAssembler* masm, - Label* fast_object, - Label* fast_double, - Label* slow, - KeyedStoreCheckMap check_map, - KeyedStoreIncrementLength increment_length, - Register value, - Register key, - Register receiver, - Register receiver_map, - Register elements_map, - Register elements) { + MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow, + KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length, + Register value, Register key, Register receiver, Register receiver_map, + Register elements_map, Register elements) { Label transition_smi_elements; Label finish_object_store, non_double_value, transition_double_elements; Label fast_double_without_map_check; @@ -882,13 +833,8 @@ static void KeyedStoreGenerateGenericHelper( __ str(value, MemOperand(address)); // Update write barrier for the elements array address. __ mov(scratch_value, value); // Preserve the value which is returned. - __ RecordWrite(elements, - address, - scratch_value, - kLRHasNotBeenSaved, - kDontSaveFPRegs, - EMIT_REMEMBERED_SET, - OMIT_SMI_CHECK); + __ RecordWrite(elements, address, scratch_value, kLRHasNotBeenSaved, + kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK); __ Ret(); __ bind(fast_double); @@ -903,8 +849,8 @@ static void KeyedStoreGenerateGenericHelper( // We have to see if the double version of the hole is present. If so // go to the runtime. __ add(address, elements, - Operand((FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32)) - - kHeapObjectTag)); + Operand((FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32)) - + kHeapObjectTag)); __ ldr(scratch_value, MemOperand(address, key, LSL, kPointerSizeLog2, PreIndex)); __ cmp(scratch_value, Operand(kHoleNanUpper32)); @@ -930,25 +876,19 @@ static void KeyedStoreGenerateGenericHelper( // Value is a double. Transition FAST_SMI_ELEMENTS -> // FAST_DOUBLE_ELEMENTS and complete the store. - __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, - FAST_DOUBLE_ELEMENTS, - receiver_map, - r4, - slow); - AllocationSiteMode mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, - FAST_DOUBLE_ELEMENTS); - ElementsTransitionGenerator::GenerateSmiToDouble( - masm, receiver, key, value, receiver_map, mode, slow); + __ LoadTransitionedArrayMapConditional( + FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, r4, slow); + AllocationSiteMode mode = + AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS); + ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value, + receiver_map, mode, slow); __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); __ jmp(&fast_double_without_map_check); __ bind(&non_double_value); // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS - __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, - FAST_ELEMENTS, - receiver_map, - r4, - slow); + __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS, + receiver_map, r4, slow); mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS); ElementsTransitionGenerator::GenerateMapChangeElementsTransition( masm, receiver, key, value, receiver_map, mode, slow); @@ -959,11 +899,8 @@ static void KeyedStoreGenerateGenericHelper( // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a // HeapNumber. 
Make sure that the receiver is a Array with FAST_ELEMENTS and // transition array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS - __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, - FAST_ELEMENTS, - receiver_map, - r4, - slow); + __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS, + receiver_map, r4, slow); mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS); ElementsTransitionGenerator::GenerateDoubleToObject( masm, receiver, key, value, receiver_map, mode, slow); @@ -1042,8 +979,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm, __ cmp(key, Operand(ip)); __ b(hs, &slow); __ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset)); - __ cmp(elements_map, - Operand(masm->isolate()->factory()->fixed_array_map())); + __ cmp(elements_map, Operand(masm->isolate()->factory()->fixed_array_map())); __ b(ne, &check_if_double_array); __ jmp(&fast_object_grow); @@ -1064,14 +1000,13 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm, __ cmp(key, Operand(ip)); __ b(hs, &extra); - KeyedStoreGenerateGenericHelper(masm, &fast_object, &fast_double, - &slow, kCheckMap, kDontIncrementLength, - value, key, receiver, receiver_map, - elements_map, elements); + KeyedStoreGenerateGenericHelper( + masm, &fast_object, &fast_double, &slow, kCheckMap, kDontIncrementLength, + value, key, receiver, receiver_map, elements_map, elements); KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow, - &slow, kDontCheckMap, kIncrementLength, - value, key, receiver, receiver_map, - elements_map, elements); + &slow, kDontCheckMap, kIncrementLength, value, + key, receiver, receiver_map, elements_map, + elements); } @@ -1086,8 +1021,8 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm) { Code::Flags flags = Code::RemoveTypeAndHolderFromFlags( Code::ComputeHandlerFlags(Code::STORE_IC)); - masm->isolate()->stub_cache()->GenerateProbe( - masm, flags, receiver, name, r3, r4, r5, r6); + masm->isolate()->stub_cache()->GenerateProbe(masm, flags, receiver, name, r3, + r4, r5, r6); // Cache miss: Jump to runtime. GenerateMiss(masm); @@ -1118,8 +1053,7 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) { GenerateDictionaryStore(masm, &miss, dictionary, name, value, r4, r5); Counters* counters = masm->isolate()->counters(); - __ IncrementCounter(counters->store_normal_hit(), - 1, r4, r5); + __ IncrementCounter(counters->store_normal_hit(), 1, r4, r5); __ Ret(); __ bind(&miss); @@ -1189,8 +1123,7 @@ void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) { // The delta to the start of the map check instruction and the // condition code uses at the patched jump. int delta = Assembler::GetCmpImmediateRawImmediate(instr); - delta += - Assembler::GetCmpImmediateRegister(instr).code() * kOff12Mask; + delta += Assembler::GetCmpImmediateRegister(instr).code() * kOff12Mask; // If the delta is 0 the instruction is cmp r0, #0 which also signals that // nothing was inlined. 
   if (delta == 0) {
@@ -1198,8 +1131,8 @@ void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
   }
 
   if (FLAG_trace_ic) {
-    PrintF("[ patching ic at %p, cmp=%p, delta=%d\n",
-           address, cmp_instruction_address, delta);
+    PrintF("[ patching ic at %p, cmp=%p, delta=%d\n", address,
+           cmp_instruction_address, delta);
   }
 
   Address patch_address =
@@ -1235,8 +1168,7 @@ void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
     patcher.EmitCondition(eq);
   }
 }
-
-
-} }  // namespace v8::internal
+}
+}  // namespace v8::internal
 
 #endif  // V8_TARGET_ARCH_ARM
diff --git a/src/arm/stub-cache-arm.cc b/src/ic/arm/ic-compiler-arm.cc
similarity index 78%
rename from src/arm/stub-cache-arm.cc
rename to src/ic/arm/ic-compiler-arm.cc
index b87040378..2c6df44ba 100644
--- a/src/arm/stub-cache-arm.cc
+++ b/src/ic/arm/ic-compiler-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2014 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
@@ -6,9 +6,7 @@
 
 #if V8_TARGET_ARCH_ARM
 
-#include "src/codegen.h"
-#include "src/ic-inl.h"
-#include "src/stub-cache.h"
+#include "src/ic/ic-compiler.h"
 
 namespace v8 {
 namespace internal {
@@ -16,88 +14,6 @@ namespace internal {
 
 #define __ ACCESS_MASM(masm)
 
-static void ProbeTable(Isolate* isolate,
-                       MacroAssembler* masm,
-                       Code::Flags flags,
-                       StubCache::Table table,
-                       Register receiver,
-                       Register name,
-                       // Number of the cache entry, not scaled.
-                       Register offset,
-                       Register scratch,
-                       Register scratch2,
-                       Register offset_scratch) {
-  ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
-  ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
-  ExternalReference map_offset(isolate->stub_cache()->map_reference(table));
-
-  uint32_t key_off_addr = reinterpret_cast<uint32_t>(key_offset.address());
-  uint32_t value_off_addr = reinterpret_cast<uint32_t>(value_offset.address());
-  uint32_t map_off_addr = reinterpret_cast<uint32_t>(map_offset.address());
-
-  // Check the relative positions of the address fields.
-  DCHECK(value_off_addr > key_off_addr);
-  DCHECK((value_off_addr - key_off_addr) % 4 == 0);
-  DCHECK((value_off_addr - key_off_addr) < (256 * 4));
-  DCHECK(map_off_addr > key_off_addr);
-  DCHECK((map_off_addr - key_off_addr) % 4 == 0);
-  DCHECK((map_off_addr - key_off_addr) < (256 * 4));
-
-  Label miss;
-  Register base_addr = scratch;
-  scratch = no_reg;
-
-  // Multiply by 3 because there are 3 fields per entry (name, code, map).
-  __ add(offset_scratch, offset, Operand(offset, LSL, 1));
-
-  // Calculate the base address of the entry.
-  __ mov(base_addr, Operand(key_offset));
-  __ add(base_addr, base_addr, Operand(offset_scratch, LSL, kPointerSizeLog2));
-
-  // Check that the key in the entry matches the name.
-  __ ldr(ip, MemOperand(base_addr, 0));
-  __ cmp(name, ip);
-  __ b(ne, &miss);
-
-  // Check the map matches.
-  __ ldr(ip, MemOperand(base_addr, map_off_addr - key_off_addr));
-  __ ldr(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  __ cmp(ip, scratch2);
-  __ b(ne, &miss);
-
-  // Get the code entry from the cache.
-  Register code = scratch2;
-  scratch2 = no_reg;
-  __ ldr(code, MemOperand(base_addr, value_off_addr - key_off_addr));
-
-  // Check that the flags match what we're looking for.
-  Register flags_reg = base_addr;
-  base_addr = no_reg;
-  __ ldr(flags_reg, FieldMemOperand(code, Code::kFlagsOffset));
-  // It's a nice optimization if this constant is encodable in the bic insn.
-
-  uint32_t mask = Code::kFlagsNotUsedInLookup;
-  DCHECK(__ ImmediateFitsAddrMode1Instruction(mask));
-  __ bic(flags_reg, flags_reg, Operand(mask));
-  __ cmp(flags_reg, Operand(flags));
-  __ b(ne, &miss);
-
-#ifdef DEBUG
-  if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
-    __ jmp(&miss);
-  } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
-    __ jmp(&miss);
-  }
-#endif
-
-  // Jump to the first instruction in the code stub.
-  __ add(pc, code, Operand(Code::kHeaderSize - kHeapObjectTag));
-
-  // Miss: fall through.
-  __ bind(&miss);
-}
-
-
 void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
     MacroAssembler* masm, Label* miss_label, Register receiver,
     Handle<Name> name, Register scratch0, Register scratch1) {
@@ -138,112 +54,13 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
 
   __ ldr(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
-  NameDictionaryLookupStub::GenerateNegativeLookup(masm,
-                                                   miss_label,
-                                                   &done,
-                                                   receiver,
-                                                   properties,
-                                                   name,
-                                                   scratch1);
+  NameDictionaryLookupStub::GenerateNegativeLookup(
+      masm, miss_label, &done, receiver, properties, name, scratch1);
   __ bind(&done);
   __ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
 }
 
 
-void StubCache::GenerateProbe(MacroAssembler* masm,
-                              Code::Flags flags,
-                              Register receiver,
-                              Register name,
-                              Register scratch,
-                              Register extra,
-                              Register extra2,
-                              Register extra3) {
-  Isolate* isolate = masm->isolate();
-  Label miss;
-
-  // Make sure that code is valid. The multiplying code relies on the
-  // entry size being 12.
-  DCHECK(sizeof(Entry) == 12);
-
-  // Make sure the flags does not name a specific type.
-  DCHECK(Code::ExtractTypeFromFlags(flags) == 0);
-
-  // Make sure that there are no register conflicts.
-  DCHECK(!scratch.is(receiver));
-  DCHECK(!scratch.is(name));
-  DCHECK(!extra.is(receiver));
-  DCHECK(!extra.is(name));
-  DCHECK(!extra.is(scratch));
-  DCHECK(!extra2.is(receiver));
-  DCHECK(!extra2.is(name));
-  DCHECK(!extra2.is(scratch));
-  DCHECK(!extra2.is(extra));
-
-  // Check scratch, extra and extra2 registers are valid.
-  DCHECK(!scratch.is(no_reg));
-  DCHECK(!extra.is(no_reg));
-  DCHECK(!extra2.is(no_reg));
-  DCHECK(!extra3.is(no_reg));
-
-  Counters* counters = masm->isolate()->counters();
-  __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1,
-                      extra2, extra3);
-
-  // Check that the receiver isn't a smi.
-  __ JumpIfSmi(receiver, &miss);
-
-  // Get the map of the receiver and compute the hash.
-  __ ldr(scratch, FieldMemOperand(name, Name::kHashFieldOffset));
-  __ ldr(ip, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  __ add(scratch, scratch, Operand(ip));
-  uint32_t mask = kPrimaryTableSize - 1;
-  // We shift out the last two bits because they are not part of the hash and
-  // they are always 01 for maps.
-  __ mov(scratch, Operand(scratch, LSR, kCacheIndexShift));
-  // Mask down the eor argument to the minimum to keep the immediate
-  // ARM-encodable.
-  __ eor(scratch, scratch, Operand((flags >> kCacheIndexShift) & mask));
-  // Prefer and_ to ubfx here because ubfx takes 2 cycles.
-  __ and_(scratch, scratch, Operand(mask));
-
-  // Probe the primary table.
-  ProbeTable(isolate,
-             masm,
-             flags,
-             kPrimary,
-             receiver,
-             name,
-             scratch,
-             extra,
-             extra2,
-             extra3);
-
-  // Primary miss: Compute hash for secondary probe.
- __ sub(scratch, scratch, Operand(name, LSR, kCacheIndexShift)); - uint32_t mask2 = kSecondaryTableSize - 1; - __ add(scratch, scratch, Operand((flags >> kCacheIndexShift) & mask2)); - __ and_(scratch, scratch, Operand(mask2)); - - // Probe the secondary table. - ProbeTable(isolate, - masm, - flags, - kSecondary, - receiver, - name, - scratch, - extra, - extra2, - extra3); - - // Cache miss: Fall-through and let caller handle the miss by - // entering the runtime system. - __ bind(&miss); - __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1, - extra2, extra3); -} - - void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype( MacroAssembler* masm, int index, Register prototype, Label* miss) { Isolate* isolate = masm->isolate(); @@ -466,8 +283,8 @@ void NamedStoreHandlerCompiler::GenerateStoreTransition( __ jmp(&do_store); __ bind(&heap_number); - __ CheckMap(value_reg, scratch1, Heap::kHeapNumberMapRootIndex, - miss_label, DONT_DO_SMI_CHECK); + __ CheckMap(value_reg, scratch1, Heap::kHeapNumberMapRootIndex, miss_label, + DONT_DO_SMI_CHECK); __ vldr(d0, FieldMemOperand(value_reg, HeapNumber::kValueOffset)); __ bind(&do_store); @@ -497,13 +314,8 @@ void NamedStoreHandlerCompiler::GenerateStoreTransition( __ str(scratch1, FieldMemOperand(receiver_reg, HeapObject::kMapOffset)); // Update the write barrier for the map field. - __ RecordWriteField(receiver_reg, - HeapObject::kMapOffset, - scratch1, - scratch2, - kLRHasNotBeenSaved, - kDontSaveFPRegs, - OMIT_REMEMBERED_SET, + __ RecordWriteField(receiver_reg, HeapObject::kMapOffset, scratch1, scratch2, + kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK); if (details.type() == CONSTANT) { @@ -521,8 +333,8 @@ void NamedStoreHandlerCompiler::GenerateStoreTransition( index -= transition->inobject_properties(); // TODO(verwaest): Share this code as a code stub. - SmiCheck smi_check = representation.IsTagged() - ? INLINE_SMI_CHECK : OMIT_SMI_CHECK; + SmiCheck smi_check = + representation.IsTagged() ? INLINE_SMI_CHECK : OMIT_SMI_CHECK; if (index < 0) { // Set the property straight into the object. int offset = transition->instance_size() + (index * kPointerSize); @@ -537,14 +349,9 @@ void NamedStoreHandlerCompiler::GenerateStoreTransition( if (!representation.IsDouble()) { __ mov(storage_reg, value_reg); } - __ RecordWriteField(receiver_reg, - offset, - storage_reg, - scratch1, - kLRHasNotBeenSaved, - kDontSaveFPRegs, - EMIT_REMEMBERED_SET, - smi_check); + __ RecordWriteField(receiver_reg, offset, storage_reg, scratch1, + kLRHasNotBeenSaved, kDontSaveFPRegs, + EMIT_REMEMBERED_SET, smi_check); } } else { // Write to the properties array. @@ -563,14 +370,9 @@ void NamedStoreHandlerCompiler::GenerateStoreTransition( if (!representation.IsDouble()) { __ mov(storage_reg, value_reg); } - __ RecordWriteField(scratch1, - offset, - storage_reg, - receiver_reg, - kLRHasNotBeenSaved, - kDontSaveFPRegs, - EMIT_REMEMBERED_SET, - smi_check); + __ RecordWriteField(scratch1, offset, storage_reg, receiver_reg, + kLRHasNotBeenSaved, kDontSaveFPRegs, + EMIT_REMEMBERED_SET, smi_check); } } @@ -614,8 +416,8 @@ Register PropertyHandlerCompiler::CheckPrototypes( // Make sure there's no overlap between holder and object registers. DCHECK(!scratch1.is(object_reg) && !scratch1.is(holder_reg)); - DCHECK(!scratch2.is(object_reg) && !scratch2.is(holder_reg) - && !scratch2.is(scratch1)); + DCHECK(!scratch2.is(object_reg) && !scratch2.is(holder_reg) && + !scratch2.is(scratch1)); // Keep track of the current object in register reg. 
   Register reg = object_reg;
@@ -648,10 +450,10 @@ Register PropertyHandlerCompiler::CheckPrototypes(
       }
       DCHECK(current.is_null() ||
              current->property_dictionary()->FindEntry(name) ==
-             NameDictionary::kNotFound);
+                 NameDictionary::kNotFound);
 
-      GenerateDictionaryNegativeLookup(masm(), miss, reg, name,
-                                       scratch1, scratch2);
+      GenerateDictionaryNegativeLookup(masm(), miss, reg, name, scratch1,
+                                       scratch2);
 
       __ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
       reg = holder_reg;  // From now on the object will be in holder_reg.
@@ -674,9 +476,8 @@ Register PropertyHandlerCompiler::CheckPrototypes(
     if (current_map->IsJSGlobalProxyMap()) {
       __ CheckAccessGlobalProxy(reg, scratch2, miss);
     } else if (current_map->IsJSGlobalObjectMap()) {
-      GenerateCheckPropertyCell(
-          masm(), Handle<JSGlobalObject>::cast(current), name,
-          scratch2, miss);
+      GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
+                                name, scratch2, miss);
     }
 
     reg = holder_reg;  // From now on the object will be in holder_reg.
@@ -765,8 +566,8 @@ void NamedLoadHandlerCompiler::GenerateLoadCallback(
   __ push(receiver());
   if (heap()->InNewSpace(callback->data())) {
     __ Move(scratch3(), callback);
-    __ ldr(scratch3(), FieldMemOperand(scratch3(),
-                                       ExecutableAccessorInfo::kDataOffset));
+    __ ldr(scratch3(),
+           FieldMemOperand(scratch3(), ExecutableAccessorInfo::kDataOffset));
   } else {
     __ Move(scratch3(), Handle<Object>(callback->data(), isolate()));
   }
   __ push(scratch3());
   __ LoadRoot(scratch3(), Heap::kUndefinedValueRootIndex);
   __ mov(scratch4(), scratch3());
   __ Push(scratch3(), scratch4());
-  __ mov(scratch4(),
-         Operand(ExternalReference::isolate_address(isolate())));
+  __ mov(scratch4(), Operand(ExternalReference::isolate_address(isolate())));
   __ Push(scratch4(), reg);
   __ mov(scratch2(), sp);  // scratch2 = PropertyAccessorInfo::args_
   __ push(name());
@@ -915,8 +715,8 @@ void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
       __ Push(receiver, value());
       ParameterCount actual(1);
       ParameterCount expected(setter);
-      __ InvokeFunction(setter, expected, actual,
-                        CALL_FUNCTION, NullCallWrapper());
+      __ InvokeFunction(setter, expected, actual, CALL_FUNCTION,
+                        NullCallWrapper());
     } else {
       // If we generate a global code snippet for deoptimization only, remember
       // the place to continue after deoptimization.
@@ -955,7 +755,7 @@
 Register* PropertyAccessCompiler::load_calling_convention() {
   // receiver, name, scratch1, scratch2, scratch3, scratch4.
   Register receiver = LoadIC::ReceiverRegister();
   Register name = LoadIC::NameRegister();
-  static Register registers[] = { receiver, name, r3, r0, r4, r5 };
+  static Register registers[] = {receiver, name, r3, r0, r4, r5};
   return registers;
 }
@@ -965,7 +765,7 @@
 Register* PropertyAccessCompiler::store_calling_convention() {
   Register receiver = StoreIC::ReceiverRegister();
   Register name = StoreIC::NameRegister();
   DCHECK(r3.is(KeyedStoreIC::MapRegister()));
-  static Register registers[] = { receiver, name, r3, r4, r5 };
+  static Register registers[] = {receiver, name, r3, r4, r5};
   return registers;
 }
@@ -998,8 +798,8 @@ void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
     __ push(receiver);
     ParameterCount actual(0);
     ParameterCount expected(getter);
-    __ InvokeFunction(getter, expected, actual,
-                      CALL_FUNCTION, NullCallWrapper());
+    __ InvokeFunction(getter, expected, actual, CALL_FUNCTION,
+                      NullCallWrapper());
   } else {
     // If we generate a global code snippet for deoptimization only, remember
     // the place to continue after deoptimization.
@@ -1153,8 +953,7 @@ void ElementHandlerCompiler::GenerateLoadDictionaryElement(
 
   __ bind(&slow);
   __ IncrementCounter(
-      masm->isolate()->counters()->keyed_load_external_array_slow(),
-      1, r2, r3);
+      masm->isolate()->counters()->keyed_load_external_array_slow(), 1, r2, r3);
 
   TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Slow);
 
@@ -1166,7 +965,7 @@
 
 #undef __
 
-
-} }  // namespace v8::internal
+}
+}  // namespace v8::internal
 
 #endif  // V8_TARGET_ARCH_ARM
diff --git a/src/ic/arm/stub-cache-arm.cc b/src/ic/arm/stub-cache-arm.cc
new file mode 100644
index 000000000..a8b650bee
--- /dev/null
+++ b/src/ic/arm/stub-cache-arm.cc
@@ -0,0 +1,173 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_ARM
+
+#include "src/codegen.h"
+#include "src/ic/stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+
+static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
+                       Code::Flags flags, StubCache::Table table,
+                       Register receiver, Register name,
+                       // Number of the cache entry, not scaled.
+                       Register offset, Register scratch, Register scratch2,
+                       Register offset_scratch) {
+  ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
+  ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
+  ExternalReference map_offset(isolate->stub_cache()->map_reference(table));
+
+  uint32_t key_off_addr = reinterpret_cast<uint32_t>(key_offset.address());
+  uint32_t value_off_addr = reinterpret_cast<uint32_t>(value_offset.address());
+  uint32_t map_off_addr = reinterpret_cast<uint32_t>(map_offset.address());
+
+  // Check the relative positions of the address fields.
+  DCHECK(value_off_addr > key_off_addr);
+  DCHECK((value_off_addr - key_off_addr) % 4 == 0);
+  DCHECK((value_off_addr - key_off_addr) < (256 * 4));
+  DCHECK(map_off_addr > key_off_addr);
+  DCHECK((map_off_addr - key_off_addr) % 4 == 0);
+  DCHECK((map_off_addr - key_off_addr) < (256 * 4));
+
+  Label miss;
+  Register base_addr = scratch;
+  scratch = no_reg;
+
+  // Multiply by 3 because there are 3 fields per entry (name, code, map).
+  __ add(offset_scratch, offset, Operand(offset, LSL, 1));
+
+  // Calculate the base address of the entry.
+  __ mov(base_addr, Operand(key_offset));
+  __ add(base_addr, base_addr, Operand(offset_scratch, LSL, kPointerSizeLog2));
+
+  // Check that the key in the entry matches the name.
+  __ ldr(ip, MemOperand(base_addr, 0));
+  __ cmp(name, ip);
+  __ b(ne, &miss);
+
+  // Check the map matches.
+  __ ldr(ip, MemOperand(base_addr, map_off_addr - key_off_addr));
+  __ ldr(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  __ cmp(ip, scratch2);
+  __ b(ne, &miss);
+
+  // Get the code entry from the cache.
+  Register code = scratch2;
+  scratch2 = no_reg;
+  __ ldr(code, MemOperand(base_addr, value_off_addr - key_off_addr));
+
+  // Check that the flags match what we're looking for.
+  Register flags_reg = base_addr;
+  base_addr = no_reg;
+  __ ldr(flags_reg, FieldMemOperand(code, Code::kFlagsOffset));
+  // It's a nice optimization if this constant is encodable in the bic insn.
+ + uint32_t mask = Code::kFlagsNotUsedInLookup; + DCHECK(__ ImmediateFitsAddrMode1Instruction(mask)); + __ bic(flags_reg, flags_reg, Operand(mask)); + __ cmp(flags_reg, Operand(flags)); + __ b(ne, &miss); + +#ifdef DEBUG + if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) { + __ jmp(&miss); + } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) { + __ jmp(&miss); + } +#endif + + // Jump to the first instruction in the code stub. + __ add(pc, code, Operand(Code::kHeaderSize - kHeapObjectTag)); + + // Miss: fall through. + __ bind(&miss); +} + + +void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags, + Register receiver, Register name, + Register scratch, Register extra, Register extra2, + Register extra3) { + Isolate* isolate = masm->isolate(); + Label miss; + + // Make sure that code is valid. The multiplying code relies on the + // entry size being 12. + DCHECK(sizeof(Entry) == 12); + + // Make sure the flags does not name a specific type. + DCHECK(Code::ExtractTypeFromFlags(flags) == 0); + + // Make sure that there are no register conflicts. + DCHECK(!scratch.is(receiver)); + DCHECK(!scratch.is(name)); + DCHECK(!extra.is(receiver)); + DCHECK(!extra.is(name)); + DCHECK(!extra.is(scratch)); + DCHECK(!extra2.is(receiver)); + DCHECK(!extra2.is(name)); + DCHECK(!extra2.is(scratch)); + DCHECK(!extra2.is(extra)); + + // Check scratch, extra and extra2 registers are valid. + DCHECK(!scratch.is(no_reg)); + DCHECK(!extra.is(no_reg)); + DCHECK(!extra2.is(no_reg)); + DCHECK(!extra3.is(no_reg)); + + Counters* counters = masm->isolate()->counters(); + __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1, extra2, + extra3); + + // Check that the receiver isn't a smi. + __ JumpIfSmi(receiver, &miss); + + // Get the map of the receiver and compute the hash. + __ ldr(scratch, FieldMemOperand(name, Name::kHashFieldOffset)); + __ ldr(ip, FieldMemOperand(receiver, HeapObject::kMapOffset)); + __ add(scratch, scratch, Operand(ip)); + uint32_t mask = kPrimaryTableSize - 1; + // We shift out the last two bits because they are not part of the hash and + // they are always 01 for maps. + __ mov(scratch, Operand(scratch, LSR, kCacheIndexShift)); + // Mask down the eor argument to the minimum to keep the immediate + // ARM-encodable. + __ eor(scratch, scratch, Operand((flags >> kCacheIndexShift) & mask)); + // Prefer and_ to ubfx here because ubfx takes 2 cycles. + __ and_(scratch, scratch, Operand(mask)); + + // Probe the primary table. + ProbeTable(isolate, masm, flags, kPrimary, receiver, name, scratch, extra, + extra2, extra3); + + // Primary miss: Compute hash for secondary probe. + __ sub(scratch, scratch, Operand(name, LSR, kCacheIndexShift)); + uint32_t mask2 = kSecondaryTableSize - 1; + __ add(scratch, scratch, Operand((flags >> kCacheIndexShift) & mask2)); + __ and_(scratch, scratch, Operand(mask2)); + + // Probe the secondary table. + ProbeTable(isolate, masm, flags, kSecondary, receiver, name, scratch, extra, + extra2, extra3); + + // Cache miss: Fall-through and let caller handle the miss by + // entering the runtime system. 
+ __ bind(&miss); + __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1, extra2, + extra3); +} + + +#undef __ +} +} // namespace v8::internal + +#endif // V8_TARGET_ARCH_ARM diff --git a/src/arm64/ic-arm64.cc b/src/ic/arm64/ic-arm64.cc similarity index 80% rename from src/arm64/ic-arm64.cc rename to src/ic/arm64/ic-arm64.cc index e08fcfd88..3f80465de 100644 --- a/src/arm64/ic-arm64.cc +++ b/src/ic/arm64/ic-arm64.cc @@ -6,13 +6,9 @@ #if V8_TARGET_ARCH_ARM64 -#include "src/arm64/assembler-arm64.h" -#include "src/code-stubs.h" #include "src/codegen.h" -#include "src/disasm.h" -#include "src/ic-inl.h" -#include "src/runtime.h" -#include "src/stub-cache.h" +#include "src/ic/ic.h" +#include "src/ic/stub-cache.h" namespace v8 { namespace internal { @@ -24,8 +20,7 @@ namespace internal { // "type" holds an instance type on entry and is not clobbered. // Generated code branch on "global_object" if type is any kind of global // JS object. -static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, - Register type, +static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type, Label* global_object) { __ Cmp(type, JS_GLOBAL_OBJECT_TYPE); __ Ccmp(type, JS_BUILTINS_OBJECT_TYPE, ZFlag, ne); @@ -45,12 +40,9 @@ static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, // The scratch registers need to be different from elements, name and result. // The generated code assumes that the receiver has slow properties, // is not a global object and does not have interceptors. -static void GenerateDictionaryLoad(MacroAssembler* masm, - Label* miss, - Register elements, - Register name, - Register result, - Register scratch1, +static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss, + Register elements, Register name, + Register result, Register scratch1, Register scratch2) { DCHECK(!AreAliased(elements, name, scratch1, scratch2)); DCHECK(!AreAliased(result, scratch1, scratch2)); @@ -58,18 +50,14 @@ static void GenerateDictionaryLoad(MacroAssembler* masm, Label done; // Probe the dictionary. - NameDictionaryLookupStub::GeneratePositiveLookup(masm, - miss, - &done, - elements, - name, - scratch1, - scratch2); + NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements, + name, scratch1, scratch2); // If probing finds an entry check that the value is a normal property. __ Bind(&done); - static const int kElementsStartOffset = NameDictionary::kHeaderSize + + static const int kElementsStartOffset = + NameDictionary::kHeaderSize + NameDictionary::kElementsStartIndex * kPointerSize; static const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize; __ Ldr(scratch1, FieldMemOperand(scratch2, kDetailsOffset)); @@ -92,31 +80,24 @@ static void GenerateDictionaryLoad(MacroAssembler* masm, // // The generated code assumes that the receiver has slow properties, // is not a global object and does not have interceptors. -static void GenerateDictionaryStore(MacroAssembler* masm, - Label* miss, - Register elements, - Register name, - Register value, - Register scratch1, +static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss, + Register elements, Register name, + Register value, Register scratch1, Register scratch2) { DCHECK(!AreAliased(elements, name, value, scratch1, scratch2)); Label done; // Probe the dictionary. 
- NameDictionaryLookupStub::GeneratePositiveLookup(masm, - miss, - &done, - elements, - name, - scratch1, - scratch2); + NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements, + name, scratch1, scratch2); // If probing finds an entry in the dictionary check that the value // is a normal property that is not read only. __ Bind(&done); - static const int kElementsStartOffset = NameDictionary::kHeaderSize + + static const int kElementsStartOffset = + NameDictionary::kHeaderSize + NameDictionary::kElementsStartIndex * kPointerSize; static const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize; static const int kTypeAndReadOnlyMask = @@ -133,8 +114,8 @@ static void GenerateDictionaryStore(MacroAssembler* masm, // Update the write barrier. Make sure not to clobber the value. __ Mov(scratch1, value); - __ RecordWrite( - elements, scratch2, scratch1, kLRHasNotBeenSaved, kDontSaveFPRegs); + __ RecordWrite(elements, scratch2, scratch1, kLRHasNotBeenSaved, + kDontSaveFPRegs); } @@ -145,8 +126,7 @@ static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm, Register receiver, Register map_scratch, Register scratch, - int interceptor_bit, - Label* slow) { + int interceptor_bit, Label* slow) { DCHECK(!AreAliased(map_scratch, scratch)); // Check that the object isn't a smi. @@ -187,14 +167,10 @@ static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm, // Allowed to be the the same as 'receiver' or 'key'. // Unchanged on bailout so 'receiver' and 'key' can be safely // used by further computation. -static void GenerateFastArrayLoad(MacroAssembler* masm, - Register receiver, - Register key, - Register elements, - Register elements_map, - Register scratch2, - Register result, - Label* not_fast_array, +static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver, + Register key, Register elements, + Register elements_map, Register scratch2, + Register result, Label* not_fast_array, Label* slow) { DCHECK(!AreAliased(receiver, key, elements, elements_map, scratch2)); @@ -239,12 +215,9 @@ static void GenerateFastArrayLoad(MacroAssembler* masm, // The map of the key is returned in 'map_scratch'. // If the jump to 'index_string' is done the hash of the key is left // in 'hash_scratch'. -static void GenerateKeyNameCheck(MacroAssembler* masm, - Register key, - Register map_scratch, - Register hash_scratch, - Label* index_string, - Label* not_unique) { +static void GenerateKeyNameCheck(MacroAssembler* masm, Register key, + Register map_scratch, Register hash_scratch, + Label* index_string, Label* not_unique) { DCHECK(!AreAliased(key, map_scratch, hash_scratch)); // Is the key a name? @@ -256,8 +229,7 @@ static void GenerateKeyNameCheck(MacroAssembler* masm, // Is the string an array index with cached numeric value? __ Ldr(hash_scratch.W(), FieldMemOperand(key, Name::kHashFieldOffset)); - __ TestAndBranchIfAllClear(hash_scratch, - Name::kContainsCachedArrayIndexMask, + __ TestAndBranchIfAllClear(hash_scratch, Name::kContainsCachedArrayIndexMask, index_string); // Is the string internalized? We know it's a string, so a single bit test is @@ -277,10 +249,8 @@ static void GenerateKeyNameCheck(MacroAssembler* masm, // left with the object's elements map. Otherwise, it is used as a scratch // register. 
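// On success the returned MemOperand addresses the slot that backs the // mapped argument, so the caller can load from it or store to it directly.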
static MemOperand GenerateMappedArgumentsLookup(MacroAssembler* masm, - Register object, - Register key, - Register map, - Register scratch1, + Register object, Register key, + Register map, Register scratch1, Register scratch2, Label* unmapped_case, Label* slow_case) { @@ -293,8 +263,8 @@ static MemOperand GenerateMappedArgumentsLookup(MacroAssembler* masm, // whether it requires access checks. __ JumpIfSmi(object, slow_case); // Check that the object is some kind of JSObject. - __ JumpIfObjectType(object, map, scratch1, FIRST_JS_RECEIVER_TYPE, - slow_case, lt); + __ JumpIfObjectType(object, map, scratch1, FIRST_JS_RECEIVER_TYPE, slow_case, + lt); // Check that the key is a positive smi. __ JumpIfNotSmi(key, slow_case); @@ -347,14 +317,13 @@ static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm, Register backing_store = parameter_map; __ Ldr(backing_store, FieldMemOperand(parameter_map, kBackingStoreOffset)); Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map()); - __ CheckMap( - backing_store, scratch, fixed_array_map, slow_case, DONT_DO_SMI_CHECK); + __ CheckMap(backing_store, scratch, fixed_array_map, slow_case, + DONT_DO_SMI_CHECK); __ Ldr(scratch, FieldMemOperand(backing_store, FixedArray::kLengthOffset)); __ Cmp(key, scratch); __ B(hs, slow_case); - __ Add(backing_store, - backing_store, + __ Add(backing_store, backing_store, FixedArray::kHeaderSize - kHeapObjectTag); __ SmiUntag(scratch, key); return MemOperand(backing_store, scratch, LSL, kPointerSizeLog2); @@ -371,8 +340,8 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm) { // Probe the stub cache. Code::Flags flags = Code::RemoveTypeAndHolderFromFlags( Code::ComputeHandlerFlags(Code::LOAD_IC)); - masm->isolate()->stub_cache()->GenerateProbe( - masm, flags, receiver, name, x3, x4, x5, x6); + masm->isolate()->stub_cache()->GenerateProbe(masm, flags, receiver, name, x3, + x4, x5, x6); // Cache miss: Jump to runtime. GenerateMiss(masm); @@ -405,8 +374,7 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) { // Perform tail call to the entry. __ Push(ReceiverRegister(), NameRegister()); - ExternalReference ref = - ExternalReference(IC_Utility(kLoadIC_Miss), isolate); + ExternalReference ref = ExternalReference(IC_Utility(kLoadIC_Miss), isolate); __ TailCallExternalReference(ref, 2, 1); } @@ -464,10 +432,8 @@ void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) { Register mapped1 = x4; Register mapped2 = x5; - MemOperand mapped = - GenerateMappedArgumentsLookup(masm, receiver, key, map, - mapped1, mapped2, - &notin, &slow); + MemOperand mapped = GenerateMappedArgumentsLookup( + masm, receiver, key, map, mapped1, mapped2, &notin, &slow); Operand mapped_offset = mapped.OffsetAsOperand(); __ Str(value, mapped); __ Add(x10, mapped.base(), mapped_offset); @@ -479,7 +445,7 @@ void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) { // These registers are used by GenerateMappedArgumentsLookup to build a // MemOperand. They are live for as long as the MemOperand is live. - Register unmapped1 = map; // This is assumed to alias 'map'. + Register unmapped1 = map; // This is assumed to alias 'map'.
Register unmapped2 = x4; MemOperand unmapped = GenerateUnmappedArgumentsLookup(masm, key, unmapped1, unmapped2, &slow); @@ -487,8 +453,8 @@ void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) { __ Str(value, unmapped); __ Add(x10, unmapped.base(), unmapped_offset); __ Mov(x11, value); - __ RecordWrite(unmapped.base(), x10, x11, - kLRHasNotBeenSaved, kDontSaveFPRegs); + __ RecordWrite(unmapped.base(), x10, x11, kLRHasNotBeenSaved, + kDontSaveFPRegs); __ Ret(); __ Bind(&slow); GenerateMiss(masm); @@ -532,9 +498,7 @@ const Register StoreIC::NameRegister() { return x2; } const Register StoreIC::ValueRegister() { return x0; } -const Register KeyedStoreIC::MapRegister() { - return x3; -} +const Register KeyedStoreIC::MapRegister() { return x3; } void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) { @@ -544,33 +508,29 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) { } -static void GenerateKeyedLoadWithSmiKey(MacroAssembler* masm, - Register key, - Register receiver, - Register scratch1, - Register scratch2, - Register scratch3, - Register scratch4, - Register scratch5, - Label *slow) { - DCHECK(!AreAliased( - key, receiver, scratch1, scratch2, scratch3, scratch4, scratch5)); +static void GenerateKeyedLoadWithSmiKey(MacroAssembler* masm, Register key, + Register receiver, Register scratch1, + Register scratch2, Register scratch3, + Register scratch4, Register scratch5, + Label* slow) { + DCHECK(!AreAliased(key, receiver, scratch1, scratch2, scratch3, scratch4, + scratch5)); Isolate* isolate = masm->isolate(); Label check_number_dictionary; // If we can load the value, it should be returned in x0. Register result = x0; - GenerateKeyedLoadReceiverCheck( - masm, receiver, scratch1, scratch2, Map::kHasIndexedInterceptor, slow); + GenerateKeyedLoadReceiverCheck(masm, receiver, scratch1, scratch2, + Map::kHasIndexedInterceptor, slow); // Check the receiver's map to see if it has fast elements. __ CheckFastElements(scratch1, scratch2, &check_number_dictionary); - GenerateFastArrayLoad( - masm, receiver, key, scratch3, scratch2, scratch1, result, NULL, slow); - __ IncrementCounter( - isolate->counters()->keyed_load_generic_smi(), 1, scratch1, scratch2); + GenerateFastArrayLoad(masm, receiver, key, scratch3, scratch2, scratch1, + result, NULL, slow); + __ IncrementCounter(isolate->counters()->keyed_load_generic_smi(), 1, + scratch1, scratch2); __ Ret(); __ Bind(&check_number_dictionary); @@ -580,30 +540,26 @@ static void GenerateKeyedLoadWithSmiKey(MacroAssembler* masm, // Check whether we have a number dictionary. 
__ JumpIfNotRoot(scratch2, Heap::kHashTableMapRootIndex, slow); - __ LoadFromNumberDictionary( - slow, scratch3, key, result, scratch1, scratch2, scratch4, scratch5); + __ LoadFromNumberDictionary(slow, scratch3, key, result, scratch1, scratch2, + scratch4, scratch5); __ Ret(); } -static void GenerateKeyedLoadWithNameKey(MacroAssembler* masm, - Register key, - Register receiver, - Register scratch1, - Register scratch2, - Register scratch3, - Register scratch4, - Register scratch5, - Label *slow) { - DCHECK(!AreAliased( - key, receiver, scratch1, scratch2, scratch3, scratch4, scratch5)); +static void GenerateKeyedLoadWithNameKey(MacroAssembler* masm, Register key, + Register receiver, Register scratch1, + Register scratch2, Register scratch3, + Register scratch4, Register scratch5, + Label* slow) { + DCHECK(!AreAliased(key, receiver, scratch1, scratch2, scratch3, scratch4, + scratch5)); Isolate* isolate = masm->isolate(); Label probe_dictionary, property_array_property; // If we can load the value, it should be returned in x0. Register result = x0; - GenerateKeyedLoadReceiverCheck( - masm, receiver, scratch1, scratch2, Map::kHasNamedInterceptor, slow); + GenerateKeyedLoadReceiverCheck(masm, receiver, scratch1, scratch2, + Map::kHasNamedInterceptor, slow); // If the receiver is a fast-case object, check the keyed lookup cache. // Otherwise probe the dictionary. @@ -678,11 +634,11 @@ static void GenerateKeyedLoadWithNameKey(MacroAssembler* masm, // Load in-object property. __ Bind(&load_in_object_property); __ Ldrb(scratch5, FieldMemOperand(receiver_map, Map::kInstanceSizeOffset)); - __ Add(scratch5, scratch5, scratch4); // Index from start of object. + __ Add(scratch5, scratch5, scratch4); // Index from start of object. __ Sub(receiver, receiver, kHeapObjectTag); // Remove the heap tag. __ Ldr(result, MemOperand(receiver, scratch5, LSL, kPointerSizeLog2)); - __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(), - 1, scratch1, scratch2); + __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(), 1, + scratch1, scratch2); __ Ret(); // Load property array property. @@ -690,8 +646,8 @@ static void GenerateKeyedLoadWithNameKey(MacroAssembler* masm, __ Ldr(scratch1, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); __ Add(scratch1, scratch1, FixedArray::kHeaderSize - kHeapObjectTag); __ Ldr(result, MemOperand(scratch1, scratch4, LSL, kPointerSizeLog2)); - __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(), - 1, scratch1, scratch2); + __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(), 1, + scratch1, scratch2); __ Ret(); // Do a quick inline probe of the receiver's dictionary, if it exists. @@ -701,8 +657,8 @@ static void GenerateKeyedLoadWithNameKey(MacroAssembler* masm, GenerateGlobalInstanceTypeCheck(masm, scratch1, slow); // Load the property. GenerateDictionaryLoad(masm, slow, scratch2, key, result, scratch1, scratch3); - __ IncrementCounter(isolate->counters()->keyed_load_generic_symbol(), - 1, scratch1, scratch2); + __ IncrementCounter(isolate->counters()->keyed_load_generic_symbol(), 1, + scratch1, scratch2); __ Ret(); } @@ -724,8 +680,8 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { // Slow case. 
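// The slow path bumps a counter and then falls back to the runtime via // GenerateRuntimeGetProperty below.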
__ Bind(&slow); - __ IncrementCounter( - masm->isolate()->counters()->keyed_load_generic_slow(), 1, x4, x3); + __ IncrementCounter(masm->isolate()->counters()->keyed_load_generic_slow(), 1, + x4, x3); GenerateRuntimeGetProperty(masm); __ Bind(&check_name); @@ -750,10 +706,7 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) { Register scratch = x3; DCHECK(!scratch.is(receiver) && !scratch.is(index)); - StringCharAtGenerator char_at_generator(receiver, - index, - scratch, - result, + StringCharAtGenerator char_at_generator(receiver, index, scratch, result, &miss, // When not a string. &miss, // When not a number. &miss, // When index out of range. @@ -792,8 +745,8 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) { // Check that it has indexed interceptor and access checks // are not enabled for this object. __ Ldrb(scratch2, FieldMemOperand(map, Map::kBitFieldOffset)); - DCHECK(kSlowCaseBitFieldMask == - ((1 << Map::kIsAccessCheckNeeded) | (1 << Map::kHasIndexedInterceptor))); + DCHECK(kSlowCaseBitFieldMask == ((1 << Map::kIsAccessCheckNeeded) | + (1 << Map::kHasIndexedInterceptor))); __ Tbnz(scratch2, Map::kIsAccessCheckNeeded, &slow); __ Tbz(scratch2, Map::kHasIndexedInterceptor, &slow); @@ -851,20 +804,12 @@ void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm, static void KeyedStoreGenerateGenericHelper( - MacroAssembler* masm, - Label* fast_object, - Label* fast_double, - Label* slow, - KeyedStoreCheckMap check_map, - KeyedStoreIncrementLength increment_length, - Register value, - Register key, - Register receiver, - Register receiver_map, - Register elements_map, - Register elements) { - DCHECK(!AreAliased( - value, key, receiver, receiver_map, elements_map, elements, x10, x11)); + MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow, + KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length, + Register value, Register key, Register receiver, Register receiver_map, + Register elements_map, Register elements) { + DCHECK(!AreAliased(value, key, receiver, receiver_map, elements_map, elements, + x10, x11)); Label transition_smi_elements; Label transition_double_elements; @@ -914,13 +859,8 @@ static void KeyedStoreGenerateGenericHelper( // Update write barrier for the elements array address. __ Mov(x10, value); // Preserve the value which is returned. - __ RecordWrite(elements, - address, - x10, - kLRHasNotBeenSaved, - kDontSaveFPRegs, - EMIT_REMEMBERED_SET, - OMIT_SMI_CHECK); + __ RecordWrite(elements, address, x10, kLRHasNotBeenSaved, kDontSaveFPRegs, + EMIT_REMEMBERED_SET, OMIT_SMI_CHECK); __ Bind(&dont_record_write); __ Ret(); @@ -943,11 +883,7 @@ static void KeyedStoreGenerateGenericHelper( __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, x10, slow); __ Bind(&fast_double_without_map_check); - __ StoreNumberToDoubleElements(value, - key, - elements, - x10, - d0, + __ StoreNumberToDoubleElements(value, key, elements, x10, d0, &transition_double_elements); if (increment_length == kIncrementLength) { // Add 1 to receiver->length. @@ -964,27 +900,19 @@ static void KeyedStoreGenerateGenericHelper( // Value is a double. Transition FAST_SMI_ELEMENTS -> // FAST_DOUBLE_ELEMENTS and complete the store. 
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, - FAST_DOUBLE_ELEMENTS, - receiver_map, - x10, - x11, - slow); - AllocationSiteMode mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, - FAST_DOUBLE_ELEMENTS); - ElementsTransitionGenerator::GenerateSmiToDouble( - masm, receiver, key, value, receiver_map, mode, slow); + __ LoadTransitionedArrayMapConditional( + FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, x10, x11, slow); + AllocationSiteMode mode = + AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS); + ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value, + receiver_map, mode, slow); __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); __ B(&fast_double_without_map_check); __ Bind(&non_double_value); // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS. - __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, - FAST_ELEMENTS, - receiver_map, - x10, - x11, - slow); + __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS, + receiver_map, x10, x11, slow); mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS); ElementsTransitionGenerator::GenerateMapChangeElementsTransition( @@ -997,12 +925,8 @@ static void KeyedStoreGenerateGenericHelper( // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a // HeapNumber. Make sure that the receiver is a Array with FAST_ELEMENTS and // transition array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS - __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, - FAST_ELEMENTS, - receiver_map, - x10, - x11, - slow); + __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS, + receiver_map, x10, x11, slow); mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS); ElementsTransitionGenerator::GenerateDoubleToObject( masm, receiver, key, value, receiver_map, mode, slow); @@ -1101,14 +1025,13 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm, __ B(eq, &extra); // We can handle the case where we are appending 1 element. __ B(lo, &slow); - KeyedStoreGenerateGenericHelper(masm, &fast_object, &fast_double, - &slow, kCheckMap, kDontIncrementLength, - value, key, receiver, receiver_map, - elements_map, elements); + KeyedStoreGenerateGenericHelper( + masm, &fast_object, &fast_double, &slow, kCheckMap, kDontIncrementLength, + value, key, receiver, receiver_map, elements_map, elements); KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow, - &slow, kDontCheckMap, kIncrementLength, - value, key, receiver, receiver_map, - elements_map, elements); + &slow, kDontCheckMap, kIncrementLength, value, + key, receiver, receiver_map, elements_map, + elements); } @@ -1120,8 +1043,8 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm) { // Probe the stub cache. Code::Flags flags = Code::RemoveTypeAndHolderFromFlags( Code::ComputeHandlerFlags(Code::STORE_IC)); - masm->isolate()->stub_cache()->GenerateProbe( - masm, flags, receiver, name, x3, x4, x5, x6); + masm->isolate()->stub_cache()->GenerateProbe(masm, flags, receiver, name, x3, + x4, x5, x6); // Cache miss: Jump to runtime. GenerateMiss(masm); @@ -1215,8 +1138,7 @@ Condition CompareIC::ComputeCondition(Token::Value op) { bool CompareIC::HasInlinedSmiCode(Address address) { // The address of the instruction following the call. 
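// The inline data is emitted there by JumpPatchSite (see the patching in // PatchInlinedSmiCode below).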
- Address info_address = - Assembler::return_address_from_call_start(address); + Address info_address = Assembler::return_address_from_call_start(address); InstructionSequence* patch_info = InstructionSequence::At(info_address); return patch_info->IsInlineData(); @@ -1231,8 +1153,7 @@ void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) { // instructions which have no side effects, so we can safely execute them. // The patch information is encoded directly after the call to the helper // function which is requesting this patch operation. - Address info_address = - Assembler::return_address_from_call_start(address); + Address info_address = Assembler::return_address_from_call_start(address); InlineSmiCheckInfo info(info_address); // Check and decode the patch information instruction. @@ -1241,8 +1162,8 @@ void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) { } if (FLAG_trace_ic) { - PrintF("[ Patching ic at %p, marker=%p, SMI check=%p\n", - address, info_address, reinterpret_cast<void*>(info.SmiCheck())); + PrintF("[ Patching ic at %p, marker=%p, SMI check=%p\n", address, + info_address, reinterpret_cast<void*>(info.SmiCheck())); } // Patch and activate code generated by JumpPatchSite::EmitJumpIfNotSmi() @@ -1280,8 +1201,7 @@ void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) { patcher.tbz(smi_reg, 0, branch_imm); } } - - -} } // namespace v8::internal +} +} // namespace v8::internal #endif // V8_TARGET_ARCH_ARM64 diff --git a/src/arm64/stub-cache-arm64.cc b/src/ic/arm64/ic-compiler-arm64.cc similarity index 80% rename from src/arm64/stub-cache-arm64.cc rename to src/ic/arm64/ic-compiler-arm64.cc index b6f4c8a2b..d2ad8339b 100644 --- a/src/arm64/stub-cache-arm64.cc +++ b/src/ic/arm64/ic-compiler-arm64.cc @@ -1,4 +1,4 @@ -// Copyright 2013 the V8 project authors. All rights reserved. +// Copyright 2014 the V8 project authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. @@ -6,14 +6,11 @@ #if V8_TARGET_ARCH_ARM64 -#include "src/codegen.h" -#include "src/ic-inl.h" -#include "src/stub-cache.h" +#include "src/ic/ic-compiler.h" namespace v8 { namespace internal { - #define __ ACCESS_MASM(masm) @@ -50,154 +47,13 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup( __ Ldr(map, FieldMemOperand(properties, HeapObject::kMapOffset)); __ JumpIfNotRoot(map, Heap::kHashTableMapRootIndex, miss_label); - NameDictionaryLookupStub::GenerateNegativeLookup(masm, - miss_label, - &done, - receiver, - properties, - name, - scratch1); + NameDictionaryLookupStub::GenerateNegativeLookup( + masm, miss_label, &done, receiver, properties, name, scratch1); __ Bind(&done); __ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1); } -// Probe primary or secondary table. -// If the entry is found in the cache, the generated code jump to the first -// instruction of the stub in the cache. -// If there is a miss the code fall trough. -// -// 'receiver', 'name' and 'offset' registers are preserved on miss. -static void ProbeTable(Isolate* isolate, - MacroAssembler* masm, - Code::Flags flags, - StubCache::Table table, - Register receiver, - Register name, - Register offset, - Register scratch, - Register scratch2, - Register scratch3) { - // Some code below relies on the fact that the Entry struct contains - // 3 pointers (name, code, map).
- STATIC_ASSERT(sizeof(StubCache::Entry) == (3 * kPointerSize)); - - ExternalReference key_offset(isolate->stub_cache()->key_reference(table)); - ExternalReference value_offset(isolate->stub_cache()->value_reference(table)); - ExternalReference map_offset(isolate->stub_cache()->map_reference(table)); - - uintptr_t key_off_addr = reinterpret_cast<uintptr_t>(key_offset.address()); - uintptr_t value_off_addr = - reinterpret_cast<uintptr_t>(value_offset.address()); - uintptr_t map_off_addr = reinterpret_cast<uintptr_t>(map_offset.address()); - - Label miss; - - DCHECK(!AreAliased(name, offset, scratch, scratch2, scratch3)); - - // Multiply by 3 because there are 3 fields per entry. - __ Add(scratch3, offset, Operand(offset, LSL, 1)); - - // Calculate the base address of the entry. - __ Mov(scratch, key_offset); - __ Add(scratch, scratch, Operand(scratch3, LSL, kPointerSizeLog2)); - - // Check that the key in the entry matches the name. - __ Ldr(scratch2, MemOperand(scratch)); - __ Cmp(name, scratch2); - __ B(ne, &miss); - - // Check the map matches. - __ Ldr(scratch2, MemOperand(scratch, map_off_addr - key_off_addr)); - __ Ldr(scratch3, FieldMemOperand(receiver, HeapObject::kMapOffset)); - __ Cmp(scratch2, scratch3); - __ B(ne, &miss); - - // Get the code entry from the cache. - __ Ldr(scratch, MemOperand(scratch, value_off_addr - key_off_addr)); - - // Check that the flags match what we're looking for. - __ Ldr(scratch2.W(), FieldMemOperand(scratch, Code::kFlagsOffset)); - __ Bic(scratch2.W(), scratch2.W(), Code::kFlagsNotUsedInLookup); - __ Cmp(scratch2.W(), flags); - __ B(ne, &miss); - -#ifdef DEBUG - if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) { - __ B(&miss); - } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) { - __ B(&miss); - } -#endif - - // Jump to the first instruction in the code stub. - __ Add(scratch, scratch, Code::kHeaderSize - kHeapObjectTag); - __ Br(scratch); - - // Miss: fall through. - __ Bind(&miss); -} - - -void StubCache::GenerateProbe(MacroAssembler* masm, - Code::Flags flags, - Register receiver, - Register name, - Register scratch, - Register extra, - Register extra2, - Register extra3) { - Isolate* isolate = masm->isolate(); - Label miss; - - // Make sure the flags does not name a specific type. - DCHECK(Code::ExtractTypeFromFlags(flags) == 0); - - // Make sure that there are no register conflicts. - DCHECK(!AreAliased(receiver, name, scratch, extra, extra2, extra3)); - - // Make sure extra and extra2 registers are valid. - DCHECK(!extra.is(no_reg)); - DCHECK(!extra2.is(no_reg)); - DCHECK(!extra3.is(no_reg)); - - Counters* counters = masm->isolate()->counters(); - __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1, - extra2, extra3); - - // Check that the receiver isn't a smi. - __ JumpIfSmi(receiver, &miss); - - // Compute the hash for primary table. - __ Ldr(scratch, FieldMemOperand(name, Name::kHashFieldOffset)); - __ Ldr(extra, FieldMemOperand(receiver, HeapObject::kMapOffset)); - __ Add(scratch, scratch, extra); - __ Eor(scratch, scratch, flags); - // We shift out the last two bits because they are not part of the hash. - __ Ubfx(scratch, scratch, kCacheIndexShift, - CountTrailingZeros(kPrimaryTableSize, 64)); - - // Probe the primary table. - ProbeTable(isolate, masm, flags, kPrimary, receiver, name, - scratch, extra, extra2, extra3); - - // Primary miss: Compute hash for secondary table.
- __ Sub(scratch, scratch, Operand(name, LSR, kCacheIndexShift)); - __ Add(scratch, scratch, flags >> kCacheIndexShift); - __ And(scratch, scratch, kSecondaryTableSize - 1); - - // Probe the secondary table. - ProbeTable(isolate, masm, flags, kSecondary, receiver, name, - scratch, extra, extra2, extra3); - - // Cache miss: Fall-through and let caller handle the miss by - // entering the runtime system. - __ Bind(&miss); - __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1, - extra2, extra3); -} - - void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype( MacroAssembler* masm, int index, Register prototype, Label* miss) { Isolate* isolate = masm->isolate(); @@ -378,8 +234,8 @@ void NamedStoreHandlerCompiler::GenerateStoreTransition( Register scratch2, Register scratch3, Label* miss_label, Label* slow) { Label exit; - DCHECK(!AreAliased(receiver_reg, storage_reg, value_reg, - scratch1, scratch2, scratch3)); + DCHECK(!AreAliased(receiver_reg, storage_reg, value_reg, scratch1, scratch2, + scratch3)); // We don't need scratch3. scratch3 = NoReg; @@ -423,8 +279,8 @@ void NamedStoreHandlerCompiler::GenerateStoreTransition( Label do_store; __ JumpIfSmi(value_reg, &do_store); - __ CheckMap(value_reg, scratch1, Heap::kHeapNumberMapRootIndex, - miss_label, DONT_DO_SMI_CHECK); + __ CheckMap(value_reg, scratch1, Heap::kHeapNumberMapRootIndex, miss_label, + DONT_DO_SMI_CHECK); __ Ldr(temp_double, FieldMemOperand(value_reg, HeapNumber::kValueOffset)); __ Bind(&do_store); @@ -454,13 +310,8 @@ void NamedStoreHandlerCompiler::GenerateStoreTransition( __ Str(scratch1, FieldMemOperand(receiver_reg, HeapObject::kMapOffset)); // Update the write barrier for the map field. - __ RecordWriteField(receiver_reg, - HeapObject::kMapOffset, - scratch1, - scratch2, - kLRHasNotBeenSaved, - kDontSaveFPRegs, - OMIT_REMEMBERED_SET, + __ RecordWriteField(receiver_reg, HeapObject::kMapOffset, scratch1, scratch2, + kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK); if (details.type() == CONSTANT) { @@ -478,8 +329,8 @@ void NamedStoreHandlerCompiler::GenerateStoreTransition( index -= transition->inobject_properties(); // TODO(verwaest): Share this code as a code stub. - SmiCheck smi_check = representation.IsTagged() - ? INLINE_SMI_CHECK : OMIT_SMI_CHECK; + SmiCheck smi_check = + representation.IsTagged() ? INLINE_SMI_CHECK : OMIT_SMI_CHECK; Register prop_reg = representation.IsDouble() ? storage_reg : value_reg; if (index < 0) { // Set the property straight into the object. @@ -491,14 +342,9 @@ void NamedStoreHandlerCompiler::GenerateStoreTransition( if (!representation.IsDouble()) { __ Mov(storage_reg, value_reg); } - __ RecordWriteField(receiver_reg, - offset, - storage_reg, - scratch1, - kLRHasNotBeenSaved, - kDontSaveFPRegs, - EMIT_REMEMBERED_SET, - smi_check); + __ RecordWriteField(receiver_reg, offset, storage_reg, scratch1, + kLRHasNotBeenSaved, kDontSaveFPRegs, + EMIT_REMEMBERED_SET, smi_check); } } else { // Write to the properties array. 
@@ -513,14 +359,9 @@ void NamedStoreHandlerCompiler::GenerateStoreTransition( if (!representation.IsDouble()) { __ Mov(storage_reg, value_reg); } - __ RecordWriteField(scratch1, - offset, - storage_reg, - receiver_reg, - kLRHasNotBeenSaved, - kDontSaveFPRegs, - EMIT_REMEMBERED_SET, - smi_check); + __ RecordWriteField(scratch1, offset, storage_reg, receiver_reg, + kLRHasNotBeenSaved, kDontSaveFPRegs, + EMIT_REMEMBERED_SET, smi_check); } } @@ -595,12 +436,11 @@ Register PropertyHandlerCompiler::CheckPrototypes( DCHECK(name->IsString()); name = factory()->InternalizeString(Handle<String>::cast(name)); } - DCHECK(current.is_null() || - (current->property_dictionary()->FindEntry(name) == - NameDictionary::kNotFound)); + DCHECK(current.is_null() || (current->property_dictionary()->FindEntry( name) == NameDictionary::kNotFound)); - GenerateDictionaryNegativeLookup(masm(), miss, reg, name, - scratch1, scratch2); + GenerateDictionaryNegativeLookup(masm(), miss, reg, name, scratch1, + scratch2); __ Ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset)); reg = holder_reg; // From now on the object will be in holder_reg. @@ -629,9 +469,8 @@ Register PropertyHandlerCompiler::CheckPrototypes( UseScratchRegisterScope temps(masm()); __ CheckAccessGlobalProxy(reg, scratch2, temps.AcquireX(), miss); } else if (current_map->IsJSGlobalObjectMap()) { - GenerateCheckPropertyCell( - masm(), Handle<JSGlobalObject>::cast(current), name, - scratch2, miss); + GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current), + name, scratch2, miss); } reg = holder_reg; // From now on the object will be in holder_reg. @@ -721,8 +560,8 @@ void NamedLoadHandlerCompiler::GenerateLoadCallback( if (heap()->InNewSpace(callback->data())) { __ Mov(scratch3(), Operand(callback)); - __ Ldr(scratch3(), FieldMemOperand(scratch3(), - ExecutableAccessorInfo::kDataOffset)); + __ Ldr(scratch3(), + FieldMemOperand(scratch3(), ExecutableAccessorInfo::kDataOffset)); } else { __ Mov(scratch3(), Operand(Handle<Object>(callback->data(), isolate()))); } @@ -758,8 +597,8 @@ void NamedLoadHandlerCompiler::GenerateLoadCallback( void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup( LookupIterator* it, Register holder_reg) { - DCHECK(!AreAliased(receiver(), this->name(), - scratch1(), scratch2(), scratch3())); + DCHECK(!AreAliased(receiver(), this->name(), scratch1(), scratch2(), + scratch3())); DCHECK(holder()->HasNamedInterceptor()); DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined()); @@ -884,8 +723,8 @@ void NamedStoreHandlerCompiler::GenerateStoreViaSetter( __ Push(receiver, value()); ParameterCount actual(1); ParameterCount expected(setter); - __ InvokeFunction(setter, expected, actual, - CALL_FUNCTION, NullCallWrapper()); + __ InvokeFunction(setter, expected, actual, CALL_FUNCTION, + NullCallWrapper()); } else { // If we generate a global code snippet for deoptimization only, remember // the place to continue after deoptimization. @@ -935,7 +774,7 @@ Register* PropertyAccessCompiler::load_calling_convention() { // receiver, name, scratch1, scratch2, scratch3, scratch4.
Register receiver = LoadIC::ReceiverRegister(); Register name = LoadIC::NameRegister(); - static Register registers[] = { receiver, name, x3, x0, x4, x5 }; + static Register registers[] = {receiver, name, x3, x0, x4, x5}; return registers; } @@ -945,7 +784,7 @@ Register* PropertyAccessCompiler::store_calling_convention() { Register receiver = StoreIC::ReceiverRegister(); Register name = StoreIC::NameRegister(); DCHECK(x3.is(KeyedStoreIC::MapRegister())); - static Register registers[] = { receiver, name, x3, x4, x5 }; + static Register registers[] = {receiver, name, x3, x4, x5}; return registers; } @@ -972,8 +811,8 @@ void NamedLoadHandlerCompiler::GenerateLoadViaGetter( __ Push(receiver); ParameterCount actual(0); ParameterCount expected(getter); - __ InvokeFunction(getter, expected, actual, - CALL_FUNCTION, NullCallWrapper()); + __ InvokeFunction(getter, expected, actual, CALL_FUNCTION, + NullCallWrapper()); } else { // If we generate a global code snippet for deoptimization only, remember // the place to continue after deoptimization. @@ -1135,7 +974,8 @@ void ElementHandlerCompiler::GenerateLoadDictionaryElement( TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Miss); } - -} } // namespace v8::internal +#undef __ +} +} // namespace v8::internal #endif // V8_TARGET_ARCH_ARM64 diff --git a/src/ic/arm64/stub-cache-arm64.cc b/src/ic/arm64/stub-cache-arm64.cc new file mode 100644 index 000000000..f16ffafc0 --- /dev/null +++ b/src/ic/arm64/stub-cache-arm64.cc @@ -0,0 +1,146 @@ +// Copyright 2013 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/v8.h" + +#if V8_TARGET_ARCH_ARM64 + +#include "src/codegen.h" +#include "src/ic/stub-cache.h" + +namespace v8 { +namespace internal { + + +#define __ ACCESS_MASM(masm) + + +// Probe primary or secondary table. +// If the entry is found in the cache, the generated code jumps to the first +// instruction of the stub in the cache. +// If there is a miss, the code falls through. +// +// 'receiver', 'name' and 'offset' registers are preserved on miss. +static void ProbeTable(Isolate* isolate, MacroAssembler* masm, + Code::Flags flags, StubCache::Table table, + Register receiver, Register name, Register offset, + Register scratch, Register scratch2, Register scratch3) { + // Some code below relies on the fact that the Entry struct contains + // 3 pointers (name, code, map). + STATIC_ASSERT(sizeof(StubCache::Entry) == (3 * kPointerSize)); + + ExternalReference key_offset(isolate->stub_cache()->key_reference(table)); + ExternalReference value_offset(isolate->stub_cache()->value_reference(table)); + ExternalReference map_offset(isolate->stub_cache()->map_reference(table)); + + uintptr_t key_off_addr = reinterpret_cast<uintptr_t>(key_offset.address()); + uintptr_t value_off_addr = + reinterpret_cast<uintptr_t>(value_offset.address()); + uintptr_t map_off_addr = reinterpret_cast<uintptr_t>(map_offset.address()); + + Label miss; + + DCHECK(!AreAliased(name, offset, scratch, scratch2, scratch3)); + + // Multiply by 3 because there are 3 fields per entry. + __ Add(scratch3, offset, Operand(offset, LSL, 1)); + + // Calculate the base address of the entry. + __ Mov(scratch, key_offset); + __ Add(scratch, scratch, Operand(scratch3, LSL, kPointerSizeLog2)); + + // Check that the key in the entry matches the name. + __ Ldr(scratch2, MemOperand(scratch)); + __ Cmp(name, scratch2); + __ B(ne, &miss); + + // Check the map matches.
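+ // Each entry holds (name, code, map) at fixed offsets, so the map and code + // slots are reached by offsetting the address of the matching key.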
+ __ Ldr(scratch2, MemOperand(scratch, map_off_addr - key_off_addr)); + __ Ldr(scratch3, FieldMemOperand(receiver, HeapObject::kMapOffset)); + __ Cmp(scratch2, scratch3); + __ B(ne, &miss); + + // Get the code entry from the cache. + __ Ldr(scratch, MemOperand(scratch, value_off_addr - key_off_addr)); + + // Check that the flags match what we're looking for. + __ Ldr(scratch2.W(), FieldMemOperand(scratch, Code::kFlagsOffset)); + __ Bic(scratch2.W(), scratch2.W(), Code::kFlagsNotUsedInLookup); + __ Cmp(scratch2.W(), flags); + __ B(ne, &miss); + +#ifdef DEBUG + if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) { + __ B(&miss); + } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) { + __ B(&miss); + } +#endif + + // Jump to the first instruction in the code stub. + __ Add(scratch, scratch, Code::kHeaderSize - kHeapObjectTag); + __ Br(scratch); + + // Miss: fall through. + __ Bind(&miss); +} + + +void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags, + Register receiver, Register name, + Register scratch, Register extra, Register extra2, + Register extra3) { + Isolate* isolate = masm->isolate(); + Label miss; + + // Make sure the flags do not name a specific type. + DCHECK(Code::ExtractTypeFromFlags(flags) == 0); + + // Make sure that there are no register conflicts. + DCHECK(!AreAliased(receiver, name, scratch, extra, extra2, extra3)); + + // Make sure extra and extra2 registers are valid. + DCHECK(!extra.is(no_reg)); + DCHECK(!extra2.is(no_reg)); + DCHECK(!extra3.is(no_reg)); + + Counters* counters = masm->isolate()->counters(); + __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1, extra2, + extra3); + + // Check that the receiver isn't a smi. + __ JumpIfSmi(receiver, &miss); + + // Compute the hash for primary table. + __ Ldr(scratch, FieldMemOperand(name, Name::kHashFieldOffset)); + __ Ldr(extra, FieldMemOperand(receiver, HeapObject::kMapOffset)); + __ Add(scratch, scratch, extra); + __ Eor(scratch, scratch, flags); + // We shift out the last two bits because they are not part of the hash. + __ Ubfx(scratch, scratch, kCacheIndexShift, + CountTrailingZeros(kPrimaryTableSize, 64)); + + // Probe the primary table. + ProbeTable(isolate, masm, flags, kPrimary, receiver, name, scratch, extra, + extra2, extra3); + + // Primary miss: Compute hash for secondary table. + __ Sub(scratch, scratch, Operand(name, LSR, kCacheIndexShift)); + __ Add(scratch, scratch, flags >> kCacheIndexShift); + __ And(scratch, scratch, kSecondaryTableSize - 1); + + // Probe the secondary table. + ProbeTable(isolate, masm, flags, kSecondary, receiver, name, scratch, extra, + extra2, extra3); + + // Cache miss: Fall-through and let caller handle the miss by + // entering the runtime system. + __ Bind(&miss); + __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1, extra2, + extra3); +} +} +} // namespace v8::internal + +#endif // V8_TARGET_ARCH_ARM64 diff --git a/src/ia32/stub-cache-ia32.cc b/src/ic/ia32/ic-compiler-ia32.cc similarity index 78% rename from src/ia32/stub-cache-ia32.cc rename to src/ic/ia32/ic-compiler-ia32.cc index da003598d..1a01a8334 100644 --- a/src/ia32/stub-cache-ia32.cc +++ b/src/ic/ia32/ic-compiler-ia32.cc @@ -1,4 +1,4 @@ -// Copyright 2012 the V8 project authors. All rights reserved. +// Copyright 2014 the V8 project authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file.
@@ -6,114 +6,13 @@ #if V8_TARGET_ARCH_IA32 -#include "src/codegen.h" -#include "src/ic-inl.h" -#include "src/stub-cache.h" +#include "src/ic/ic-compiler.h" namespace v8 { namespace internal { #define __ ACCESS_MASM(masm) - -static void ProbeTable(Isolate* isolate, - MacroAssembler* masm, - Code::Flags flags, - StubCache::Table table, - Register name, - Register receiver, - // Number of the cache entry pointer-size scaled. - Register offset, - Register extra) { - ExternalReference key_offset(isolate->stub_cache()->key_reference(table)); - ExternalReference value_offset(isolate->stub_cache()->value_reference(table)); - ExternalReference map_offset(isolate->stub_cache()->map_reference(table)); - - Label miss; - - // Multiply by 3 because there are 3 fields per entry (name, code, map). - __ lea(offset, Operand(offset, offset, times_2, 0)); - - if (extra.is_valid()) { - // Get the code entry from the cache. - __ mov(extra, Operand::StaticArray(offset, times_1, value_offset)); - - // Check that the key in the entry matches the name. - __ cmp(name, Operand::StaticArray(offset, times_1, key_offset)); - __ j(not_equal, &miss); - - // Check the map matches. - __ mov(offset, Operand::StaticArray(offset, times_1, map_offset)); - __ cmp(offset, FieldOperand(receiver, HeapObject::kMapOffset)); - __ j(not_equal, &miss); - - // Check that the flags match what we're looking for. - __ mov(offset, FieldOperand(extra, Code::kFlagsOffset)); - __ and_(offset, ~Code::kFlagsNotUsedInLookup); - __ cmp(offset, flags); - __ j(not_equal, &miss); - -#ifdef DEBUG - if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) { - __ jmp(&miss); - } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) { - __ jmp(&miss); - } -#endif - - // Jump to the first instruction in the code stub. - __ add(extra, Immediate(Code::kHeaderSize - kHeapObjectTag)); - __ jmp(extra); - - __ bind(&miss); - } else { - // Save the offset on the stack. - __ push(offset); - - // Check that the key in the entry matches the name. - __ cmp(name, Operand::StaticArray(offset, times_1, key_offset)); - __ j(not_equal, &miss); - - // Check the map matches. - __ mov(offset, Operand::StaticArray(offset, times_1, map_offset)); - __ cmp(offset, FieldOperand(receiver, HeapObject::kMapOffset)); - __ j(not_equal, &miss); - - // Restore offset register. - __ mov(offset, Operand(esp, 0)); - - // Get the code entry from the cache. - __ mov(offset, Operand::StaticArray(offset, times_1, value_offset)); - - // Check that the flags match what we're looking for. - __ mov(offset, FieldOperand(offset, Code::kFlagsOffset)); - __ and_(offset, ~Code::kFlagsNotUsedInLookup); - __ cmp(offset, flags); - __ j(not_equal, &miss); - -#ifdef DEBUG - if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) { - __ jmp(&miss); - } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) { - __ jmp(&miss); - } -#endif - - // Restore offset and re-load code entry from cache. - __ pop(offset); - __ mov(offset, Operand::StaticArray(offset, times_1, value_offset)); - - // Jump to the first instruction in the code stub. - __ add(offset, Immediate(Code::kHeaderSize - kHeapObjectTag)); - __ jmp(offset); - - // Pop at miss. 
- __ bind(&miss); - __ pop(offset); - } -} - - void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup( MacroAssembler* masm, Label* miss_label, Register receiver, Handle<Name> name, Register scratch0, Register scratch1) { @@ -147,89 +46,13 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup( __ j(not_equal, miss_label); Label done; - NameDictionaryLookupStub::GenerateNegativeLookup(masm, - miss_label, - &done, - properties, - name, - scratch1); + NameDictionaryLookupStub::GenerateNegativeLookup(masm, miss_label, &done, + properties, name, scratch1); __ bind(&done); __ DecrementCounter(counters->negative_lookups_miss(), 1); } -void StubCache::GenerateProbe(MacroAssembler* masm, - Code::Flags flags, - Register receiver, - Register name, - Register scratch, - Register extra, - Register extra2, - Register extra3) { - Label miss; - - // Assert that code is valid. The multiplying code relies on the entry size - // being 12. - DCHECK(sizeof(Entry) == 12); - - // Assert the flags do not name a specific type. - DCHECK(Code::ExtractTypeFromFlags(flags) == 0); - - // Assert that there are no register conflicts. - DCHECK(!scratch.is(receiver)); - DCHECK(!scratch.is(name)); - DCHECK(!extra.is(receiver)); - DCHECK(!extra.is(name)); - DCHECK(!extra.is(scratch)); - - // Assert scratch and extra registers are valid, and extra2/3 are unused. - DCHECK(!scratch.is(no_reg)); - DCHECK(extra2.is(no_reg)); - DCHECK(extra3.is(no_reg)); - - Register offset = scratch; - scratch = no_reg; - - Counters* counters = masm->isolate()->counters(); - __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1); - - // Check that the receiver isn't a smi. - __ JumpIfSmi(receiver, &miss); - - // Get the map of the receiver and compute the hash. - __ mov(offset, FieldOperand(name, Name::kHashFieldOffset)); - __ add(offset, FieldOperand(receiver, HeapObject::kMapOffset)); - __ xor_(offset, flags); - // We mask out the last two bits because they are not part of the hash and - // they are always 01 for maps. Also in the two 'and' instructions below. - __ and_(offset, (kPrimaryTableSize - 1) << kCacheIndexShift); - // ProbeTable expects the offset to be pointer scaled, which it is, because - // the heap object tag size is 2 and the pointer size log 2 is also 2. - DCHECK(kCacheIndexShift == kPointerSizeLog2); - - // Probe the primary table. - ProbeTable(isolate(), masm, flags, kPrimary, name, receiver, offset, extra); - - // Primary miss: Compute hash for secondary probe. - __ mov(offset, FieldOperand(name, Name::kHashFieldOffset)); - __ add(offset, FieldOperand(receiver, HeapObject::kMapOffset)); - __ xor_(offset, flags); - __ and_(offset, (kPrimaryTableSize - 1) << kCacheIndexShift); - __ sub(offset, name); - __ add(offset, Immediate(flags)); - __ and_(offset, (kSecondaryTableSize - 1) << kCacheIndexShift); - - // Probe the secondary table. - ProbeTable( - isolate(), masm, flags, kSecondary, name, receiver, offset, extra); - - // Cache miss: Fall-through and let caller handle the miss by - // entering the runtime system. - __ bind(&miss); - __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1); -} - - void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype( MacroAssembler* masm, int index, Register prototype, Label* miss) { // Get the global function with the given index.
@@ -259,10 +82,8 @@ void NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype( } -static void PushInterceptorArguments(MacroAssembler* masm, - Register receiver, - Register holder, - Register name, +static void PushInterceptorArguments(MacroAssembler* masm, Register receiver, + Register holder, Register name, Handle<JSObject> holder_obj) { STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0); STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsInfoIndex == 1); @@ -281,12 +102,8 @@ static void PushInterceptorArguments(MacroAssembler* masm, static void CompileCallLoadPropertyWithInterceptor( - MacroAssembler* masm, - Register receiver, - Register holder, - Register name, - Handle<JSObject> holder_obj, - IC::UtilityId id) { + MacroAssembler* masm, Register receiver, Register holder, Register name, + Handle<JSObject> holder_obj, IC::UtilityId id) { PushInterceptorArguments(masm, receiver, holder, name, holder_obj); __ CallExternalReference(ExternalReference(IC_Utility(id), masm->isolate()), NamedLoadHandlerCompiler::kInterceptorArgsLength); @@ -307,7 +124,7 @@ void PropertyHandlerCompiler::GenerateFastApiCall( __ push(receiver); // Write the arguments to stack frame. for (int i = 0; i < argc; i++) { - Register arg = values[argc-1-i]; + Register arg = values[argc - 1 - i]; DCHECK(!receiver.is(arg)); DCHECK(!scratch_in.is(arg)); __ push(arg); @@ -325,16 +142,15 @@ void PropertyHandlerCompiler::GenerateFastApiCall( // Put holder in place. CallOptimization::HolderLookup holder_lookup; - Handle<JSObject> api_holder = optimization.LookupHolderOfExpectedType( - receiver_map, - &holder_lookup); + Handle<JSObject> api_holder = + optimization.LookupHolderOfExpectedType(receiver_map, &holder_lookup); switch (holder_lookup) { case CallOptimization::kHolderIsReceiver: __ Move(holder, receiver); break; case CallOptimization::kHolderFound: __ LoadHeapObject(holder, api_holder); - break; + break; case CallOptimization::kHolderNotFound: UNREACHABLE(); break; @@ -376,8 +192,7 @@ void PropertyHandlerCompiler::GenerateFastApiCall( void PropertyHandlerCompiler::GenerateCheckPropertyCell( MacroAssembler* masm, Handle<JSGlobalObject> global, Handle<Name> name, Register scratch, Label* miss) { - Handle<PropertyCell> cell = - JSGlobalObject::EnsurePropertyCell(global, name); + Handle<PropertyCell> cell = JSGlobalObject::EnsurePropertyCell(global, name); DCHECK(cell->value()->IsTheHole()); Handle<Oddball> the_hole = masm->isolate()->factory()->the_hole_value(); if (masm->serializer_enabled()) { @@ -427,7 +242,7 @@ void NamedStoreHandlerCompiler::GenerateStoreTransition( __ CmpObject(value_reg, constant); __ j(not_equal, miss_label); } else if (representation.IsSmi()) { - __ JumpIfNotSmi(value_reg, miss_label); + __ JumpIfNotSmi(value_reg, miss_label); } else if (representation.IsHeapObject()) { __ JumpIfSmi(value_reg, miss_label); HeapType* field_type = descriptors->GetFieldType(descriptor); @@ -489,13 +304,8 @@ void NamedStoreHandlerCompiler::GenerateStoreTransition( __ mov(FieldOperand(receiver_reg, HeapObject::kMapOffset), scratch1); // Update the write barrier for the map field. - __ RecordWriteField(receiver_reg, - HeapObject::kMapOffset, - scratch1, - scratch2, - kDontSaveFPRegs, - OMIT_REMEMBERED_SET, - OMIT_SMI_CHECK); + __ RecordWriteField(receiver_reg, HeapObject::kMapOffset, scratch1, scratch2, + kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK); if (details.type() == CONSTANT) { DCHECK(value_reg.is(eax)); @@ -511,8 +321,8 @@ void NamedStoreHandlerCompiler::GenerateStoreTransition( // object and the number of in-object properties is not going to change.
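// After this adjustment a negative index selects an in-object field, while // a non-negative one indexes the out-of-object properties array.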
index -= transition->inobject_properties(); - SmiCheck smi_check = representation.IsTagged() - ? INLINE_SMI_CHECK : OMIT_SMI_CHECK; + SmiCheck smi_check = + representation.IsTagged() ? INLINE_SMI_CHECK : OMIT_SMI_CHECK; // TODO(verwaest): Share this code as a code stub. if (index < 0) { // Set the property straight into the object. @@ -528,13 +338,8 @@ void NamedStoreHandlerCompiler::GenerateStoreTransition( if (!representation.IsDouble()) { __ mov(storage_reg, value_reg); } - __ RecordWriteField(receiver_reg, - offset, - storage_reg, - scratch1, - kDontSaveFPRegs, - EMIT_REMEMBERED_SET, - smi_check); + __ RecordWriteField(receiver_reg, offset, storage_reg, scratch1, + kDontSaveFPRegs, EMIT_REMEMBERED_SET, smi_check); } } else { // Write to the properties array. @@ -552,13 +357,8 @@ void NamedStoreHandlerCompiler::GenerateStoreTransition( if (!representation.IsDouble()) { __ mov(storage_reg, value_reg); } - __ RecordWriteField(scratch1, - offset, - storage_reg, - receiver_reg, - kDontSaveFPRegs, - EMIT_REMEMBERED_SET, - smi_check); + __ RecordWriteField(scratch1, offset, storage_reg, receiver_reg, + kDontSaveFPRegs, EMIT_REMEMBERED_SET, smi_check); } } @@ -600,8 +400,8 @@ Register PropertyHandlerCompiler::CheckPrototypes( // Make sure there's no overlap between holder and object registers. DCHECK(!scratch1.is(object_reg) && !scratch1.is(holder_reg)); - DCHECK(!scratch2.is(object_reg) && !scratch2.is(holder_reg) - && !scratch2.is(scratch1)); + DCHECK(!scratch2.is(object_reg) && !scratch2.is(holder_reg) && + !scratch2.is(scratch1)); // Keep track of the current object in register reg. Register reg = object_reg; @@ -633,10 +433,10 @@ Register PropertyHandlerCompiler::CheckPrototypes( } DCHECK(current.is_null() || current->property_dictionary()->FindEntry(name) == - NameDictionary::kNotFound); + NameDictionary::kNotFound); - GenerateDictionaryNegativeLookup(masm(), miss, reg, name, - scratch1, scratch2); + GenerateDictionaryNegativeLookup(masm(), miss, reg, name, scratch1, + scratch2); __ mov(scratch1, FieldOperand(reg, HeapObject::kMapOffset)); reg = holder_reg; // From now on the object will be in holder_reg. @@ -661,9 +461,8 @@ Register PropertyHandlerCompiler::CheckPrototypes( if (current_map->IsJSGlobalProxyMap()) { __ CheckAccessGlobalProxy(reg, scratch1, scratch2, miss); } else if (current_map->IsJSGlobalObjectMap()) { - GenerateCheckPropertyCell( - masm(), Handle<JSGlobalObject>::cast(current), name, - scratch2, miss); + GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current), + name, scratch2, miss); } if (load_prototype_from_map) { @@ -913,8 +712,8 @@ void NamedStoreHandlerCompiler::GenerateStoreViaSetter( __ push(value()); ParameterCount actual(1); ParameterCount expected(setter); - __ InvokeFunction(setter, expected, actual, - CALL_FUNCTION, NullCallWrapper()); + __ InvokeFunction(setter, expected, actual, CALL_FUNCTION, + NullCallWrapper()); } else { // If we generate a global code snippet for deoptimization only, remember // the place to continue after deoptimization. @@ -983,7 +782,7 @@ Register* PropertyAccessCompiler::load_calling_convention() { // receiver, name, scratch1, scratch2, scratch3, scratch4.
Register receiver = LoadIC::ReceiverRegister(); Register name = LoadIC::NameRegister(); - static Register registers[] = { receiver, name, ebx, eax, edi, no_reg }; + static Register registers[] = {receiver, name, ebx, eax, edi, no_reg}; return registers; } @@ -993,7 +792,7 @@ Register* PropertyAccessCompiler::store_calling_convention() { Register receiver = StoreIC::ReceiverRegister(); Register name = StoreIC::NameRegister(); DCHECK(ebx.is(KeyedStoreIC::MapRegister())); - static Register registers[] = { receiver, name, ebx, edi, no_reg }; + static Register registers[] = {receiver, name, ebx, edi, no_reg}; return registers; } @@ -1021,8 +820,8 @@ void NamedLoadHandlerCompiler::GenerateLoadViaGetter( __ push(receiver); ParameterCount actual(0); ParameterCount expected(getter); - __ InvokeFunction(getter, expected, actual, - CALL_FUNCTION, NullCallWrapper()); + __ InvokeFunction(getter, expected, actual, CALL_FUNCTION, + NullCallWrapper()); } else { // If we generate a global code snippet for deoptimization only, remember // the place to continue after deoptimization. @@ -1181,7 +980,7 @@ void ElementHandlerCompiler::GenerateLoadDictionaryElement( #undef __ - -} } // namespace v8::internal +} +} // namespace v8::internal #endif // V8_TARGET_ARCH_IA32 diff --git a/src/ia32/ic-ia32.cc b/src/ic/ia32/ic-ia32.cc similarity index 84% rename from src/ia32/ic-ia32.cc rename to src/ic/ia32/ic-ia32.cc index 62e845eb2..c7dad0f25 100644 --- a/src/ia32/ic-ia32.cc +++ b/src/ic/ia32/ic-ia32.cc @@ -7,9 +7,8 @@ #if V8_TARGET_ARCH_IA32 #include "src/codegen.h" -#include "src/ic-inl.h" -#include "src/runtime.h" -#include "src/stub-cache.h" +#include "src/ic/ic.h" +#include "src/ic/stub-cache.h" namespace v8 { namespace internal { @@ -21,8 +20,7 @@ namespace internal { #define __ ACCESS_MASM(masm) -static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, - Register type, +static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type, Label* global_object) { // Register usage: // type: holds the receiver instance type on entry. @@ -42,13 +40,9 @@ static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, // name is not internalized, and will jump to the miss_label in that // case. The generated code assumes that the receiver has slow // properties, is not a global object and does not have interceptors. -static void GenerateDictionaryLoad(MacroAssembler* masm, - Label* miss_label, - Register elements, - Register name, - Register r0, - Register r1, - Register result) { +static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss_label, + Register elements, Register name, + Register r0, Register r1, Register result) { // Register use: // // elements - holds the property dictionary on entry and is unchanged. @@ -66,13 +60,8 @@ static void GenerateDictionaryLoad(MacroAssembler* masm, Label done; // Probe the dictionary. - NameDictionaryLookupStub::GeneratePositiveLookup(masm, - miss_label, - &done, - elements, - name, - r0, - r1); + NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss_label, &done, + elements, name, r0, r1); // If probing finds an entry in the dictionary, r0 contains the // index into the dictionary. Check that the value is a normal @@ -99,13 +88,9 @@ static void GenerateDictionaryLoad(MacroAssembler* masm, // call if name is not internalized, and will jump to the miss_label in // that case. The generated code assumes that the receiver has slow // properties, is not a global object and does not have interceptors. 
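// This is the store counterpart of GenerateDictionaryLoad above; in addition // it bails out on properties that are read-only or not normal.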
-static void GenerateDictionaryStore(MacroAssembler* masm, - Label* miss_label, - Register elements, - Register name, - Register value, - Register r0, - Register r1) { +static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss_label, + Register elements, Register name, + Register value, Register r0, Register r1) { // Register use: // // elements - holds the property dictionary on entry and is clobbered. @@ -121,13 +106,8 @@ static void GenerateDictionaryStore(MacroAssembler* masm, // Probe the dictionary. - NameDictionaryLookupStub::GeneratePositiveLookup(masm, - miss_label, - &done, - elements, - name, - r0, - r1); + NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss_label, &done, + elements, name, r0, r1); // If probing finds an entry in the dictionary, r0 contains the // index into the dictionary. Check that the value is a normal @@ -139,7 +119,8 @@ static void GenerateDictionaryStore(MacroAssembler* masm, const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize; const int kTypeAndReadOnlyMask = (PropertyDetails::TypeField::kMask | - PropertyDetails::AttributesField::encode(READ_ONLY)) << kSmiTagSize; + PropertyDetails::AttributesField::encode(READ_ONLY)) + << kSmiTagSize; __ test(Operand(elements, r0, times_4, kDetailsOffset - kHeapObjectTag), Immediate(kTypeAndReadOnlyMask)); __ j(not_zero, miss_label); @@ -158,10 +139,8 @@ static void GenerateDictionaryStore(MacroAssembler* masm, // Checks the receiver for special cases (value type, slow case bits). // Falls through for regular JS object. static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm, - Register receiver, - Register map, - int interceptor_bit, - Label* slow) { + Register receiver, Register map, + int interceptor_bit, Label* slow) { // Register use: // receiver - holds the receiver and is unchanged. // Scratch registers: @@ -190,12 +169,9 @@ static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm, // Loads an indexed element from a fast case array. // If not_fast_array is NULL, doesn't perform the elements map check. -static void GenerateFastArrayLoad(MacroAssembler* masm, - Register receiver, - Register key, - Register scratch, - Register result, - Label* not_fast_array, +static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver, + Register key, Register scratch, + Register result, Label* not_fast_array, Label* out_of_range) { // Register use: // receiver - holds the receiver and is unchanged. @@ -208,10 +184,8 @@ static void GenerateFastArrayLoad(MacroAssembler* masm, __ mov(scratch, FieldOperand(receiver, JSObject::kElementsOffset)); if (not_fast_array != NULL) { // Check that the object is in fast mode and writable. - __ CheckMap(scratch, - masm->isolate()->factory()->fixed_array_map(), - not_fast_array, - DONT_DO_SMI_CHECK); + __ CheckMap(scratch, masm->isolate()->factory()->fixed_array_map(), + not_fast_array, DONT_DO_SMI_CHECK); } else { __ AssertFastElements(scratch); } @@ -233,12 +207,9 @@ static void GenerateFastArrayLoad(MacroAssembler* masm, // Checks whether a key is an array index string or a unique name. // Falls through if the key is a unique name. -static void GenerateKeyNameCheck(MacroAssembler* masm, - Register key, - Register map, - Register hash, - Label* index_string, - Label* not_unique) { +static void GenerateKeyNameCheck(MacroAssembler* masm, Register key, + Register map, Register hash, + Label* index_string, Label* not_unique) { // Register use: // key - holds the key and is unchanged. Assumed to be non-smi. 
// Scratch registers: @@ -266,13 +237,9 @@ static void GenerateKeyNameCheck(MacroAssembler* masm, } -static Operand GenerateMappedArgumentsLookup(MacroAssembler* masm, - Register object, - Register key, - Register scratch1, - Register scratch2, - Label* unmapped_case, - Label* slow_case) { +static Operand GenerateMappedArgumentsLookup( + MacroAssembler* masm, Register object, Register key, Register scratch1, + Register scratch2, Label* unmapped_case, Label* slow_case) { Heap* heap = masm->isolate()->heap(); Factory* factory = masm->isolate()->factory(); @@ -302,10 +269,8 @@ static Operand GenerateMappedArgumentsLookup(MacroAssembler* masm, // Load element index and check whether it is the hole. const int kHeaderSize = FixedArray::kHeaderSize + 2 * kPointerSize; - __ mov(scratch2, FieldOperand(scratch1, - key, - times_half_pointer_size, - kHeaderSize)); + __ mov(scratch2, + FieldOperand(scratch1, key, times_half_pointer_size, kHeaderSize)); __ cmp(scratch2, factory->the_hole_value()); __ j(equal, unmapped_case); @@ -314,9 +279,7 @@ static Operand GenerateMappedArgumentsLookup(MacroAssembler* masm, // map in scratch1). const int kContextOffset = FixedArray::kHeaderSize; __ mov(scratch1, FieldOperand(scratch1, kContextOffset)); - return FieldOperand(scratch1, - scratch2, - times_half_pointer_size, + return FieldOperand(scratch1, scratch2, times_half_pointer_size, Context::kHeaderSize); } @@ -336,9 +299,7 @@ static Operand GenerateUnmappedArgumentsLookup(MacroAssembler* masm, __ mov(scratch, FieldOperand(backing_store, FixedArray::kLengthOffset)); __ cmp(key, scratch); __ j(greater_equal, slow_case); - return FieldOperand(backing_store, - key, - times_half_pointer_size, + return FieldOperand(backing_store, key, times_half_pointer_size, FixedArray::kHeaderSize); } @@ -359,8 +320,8 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { // Now the key is known to be a smi. This place is also jumped to from // where a numeric string is converted to a smi. - GenerateKeyedLoadReceiverCheck( - masm, receiver, eax, Map::kHasIndexedInterceptor, &slow); + GenerateKeyedLoadReceiverCheck(masm, receiver, eax, + Map::kHasIndexedInterceptor, &slow); // Check the receiver's map to see if it has fast elements. __ CheckFastElements(eax, &check_number_dictionary); @@ -379,9 +340,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { // Check whether the elements is a number dictionary. // ebx: untagged index // eax: elements - __ CheckMap(eax, - isolate->factory()->hash_table_map(), - &slow, + __ CheckMap(eax, isolate->factory()->hash_table_map(), &slow, DONT_DO_SMI_CHECK); Label slow_pop_receiver; // Push receiver on the stack to free up a register for the dictionary @@ -404,8 +363,8 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { __ bind(&check_name); GenerateKeyNameCheck(masm, key, eax, ebx, &index_name, &slow); - GenerateKeyedLoadReceiverCheck( - masm, receiver, eax, Map::kHasNamedInterceptor, &slow); + GenerateKeyedLoadReceiverCheck(masm, receiver, eax, Map::kHasNamedInterceptor, + &slow); // If the receiver is a fast-case object, check the keyed lookup // cache. Otherwise probe the dictionary. @@ -492,8 +451,8 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { // Load property array property. 
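GenerateMappedArgumentsLookup above encodes the sloppy-arguments two-level scheme: slot 0 of the parameter map holds the context, slot 1 the unmapped backing store, and the remaining slots hold either a context index (the parameter is aliased into the context) or the hole. A hedged sketch of the lookup it implements; the types are stand-ins, not V8's:

    #include <cstddef>
    #include <vector>

    constexpr long kHole = -1;  // stand-in for the heap's the_hole_value

    struct SloppyArguments {
      std::vector<void*>* context;        // parameter map slot 0
      std::vector<void*>* backing_store;  // parameter map slot 1
      std::vector<long> mapped;           // slots 2..: context index or kHole
    };

    void* LookupArgument(const SloppyArguments& args, size_t key) {
      // Mapped case: the argument lives in a context slot.
      if (key < args.mapped.size() && args.mapped[key] != kHole)
        return (*args.context)[args.mapped[key]];
      // Unmapped case: plain element of the backing store.
      return (*args.backing_store)[key];
    }
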
__ bind(&property_array_property); __ mov(eax, FieldOperand(receiver, JSObject::kPropertiesOffset)); - __ mov(eax, FieldOperand(eax, edi, times_pointer_size, - FixedArray::kHeaderSize)); + __ mov(eax, + FieldOperand(eax, edi, times_pointer_size, FixedArray::kHeaderSize)); __ IncrementCounter(counters->keyed_load_generic_lookup_cache(), 1); __ ret(0); @@ -527,10 +486,7 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) { Register result = eax; DCHECK(!result.is(scratch)); - StringCharAtGenerator char_at_generator(receiver, - index, - scratch, - result, + StringCharAtGenerator char_at_generator(receiver, index, scratch, result, &miss, // When not a string. &miss, // When not a number. &miss, // When index out of range. @@ -597,9 +553,8 @@ void KeyedLoadIC::GenerateSloppyArguments(MacroAssembler* masm) { Label slow, notin; Factory* factory = masm->isolate()->factory(); - Operand mapped_location = - GenerateMappedArgumentsLookup( - masm, receiver, key, ebx, eax, ¬in, &slow); + Operand mapped_location = GenerateMappedArgumentsLookup( + masm, receiver, key, ebx, eax, ¬in, &slow); __ mov(eax, mapped_location); __ Ret(); __ bind(¬in); @@ -625,9 +580,8 @@ void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) { DCHECK(name.is(ecx)); DCHECK(value.is(eax)); - Operand mapped_location = - GenerateMappedArgumentsLookup(masm, receiver, name, ebx, edi, ¬in, - &slow); + Operand mapped_location = GenerateMappedArgumentsLookup( + masm, receiver, name, ebx, edi, ¬in, &slow); __ mov(mapped_location, value); __ lea(ecx, mapped_location); __ mov(edx, value); @@ -648,12 +602,8 @@ void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) { static void KeyedStoreGenerateGenericHelper( - MacroAssembler* masm, - Label* fast_object, - Label* fast_double, - Label* slow, - KeyedStoreCheckMap check_map, - KeyedStoreIncrementLength increment_length) { + MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow, + KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length) { Label transition_smi_elements; Label finish_object_store, non_double_value, transition_double_elements; Label fast_double_without_map_check; @@ -713,8 +663,8 @@ static void KeyedStoreGenerateGenericHelper( __ mov(FixedArrayElementOperand(ebx, key), value); // Update write barrier for the elements array address. __ mov(edx, value); // Preserve the value which is returned. - __ RecordWriteArray( - ebx, edx, key, kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK); + __ RecordWriteArray(ebx, edx, key, kDontSaveFPRegs, EMIT_REMEMBERED_SET, + OMIT_SMI_CHECK); __ ret(0); __ bind(fast_double); @@ -750,32 +700,24 @@ static void KeyedStoreGenerateGenericHelper( __ mov(ebx, FieldOperand(receiver, HeapObject::kMapOffset)); // Transition the array appropriately depending on the value type. - __ CheckMap(value, - masm->isolate()->factory()->heap_number_map(), - &non_double_value, - DONT_DO_SMI_CHECK); + __ CheckMap(value, masm->isolate()->factory()->heap_number_map(), + &non_double_value, DONT_DO_SMI_CHECK); // Value is a double. Transition FAST_SMI_ELEMENTS -> FAST_DOUBLE_ELEMENTS // and complete the store. 
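The transitions that follow only ever generalize the elements kind as far as the incoming value demands: a heap number stored into a smi array yields FAST_DOUBLE_ELEMENTS, any other heap object yields FAST_ELEMENTS, and a non-number stored into a double array also forces FAST_ELEMENTS. A hedged rendering of that decision as plain C++, with an illustrative value classification:

    enum ElementsKind { FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS };
    enum ValueKind { kSmi, kHeapNumber, kOtherHeapObject };

    // Mirrors the transition choices in KeyedStoreGenerateGenericHelper.
    ElementsKind TransitionFor(ElementsKind current, ValueKind value) {
      switch (current) {
        case FAST_SMI_ELEMENTS:
          if (value == kSmi) return FAST_SMI_ELEMENTS;
          return value == kHeapNumber ? FAST_DOUBLE_ELEMENTS : FAST_ELEMENTS;
        case FAST_DOUBLE_ELEMENTS:
          // Smis and heap numbers store as unboxed doubles; anything else
          // sends the array back to tagged FAST_ELEMENTS.
          return value == kOtherHeapObject ? FAST_ELEMENTS
                                           : FAST_DOUBLE_ELEMENTS;
        case FAST_ELEMENTS:
          return FAST_ELEMENTS;  // already fully general
      }
      return FAST_ELEMENTS;
    }
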
__ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, - FAST_DOUBLE_ELEMENTS, - ebx, - edi, - slow); - AllocationSiteMode mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, - FAST_DOUBLE_ELEMENTS); - ElementsTransitionGenerator::GenerateSmiToDouble( - masm, receiver, key, value, ebx, mode, slow); + FAST_DOUBLE_ELEMENTS, ebx, edi, slow); + AllocationSiteMode mode = + AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS); + ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value, + ebx, mode, slow); __ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset)); __ jmp(&fast_double_without_map_check); __ bind(&non_double_value); // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS - __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, - FAST_ELEMENTS, - ebx, - edi, - slow); + __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS, ebx, + edi, slow); mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS); ElementsTransitionGenerator::GenerateMapChangeElementsTransition( masm, receiver, key, value, ebx, mode, slow); @@ -787,14 +729,11 @@ static void KeyedStoreGenerateGenericHelper( // HeapNumber. Make sure that the receiver is a Array with FAST_ELEMENTS and // transition array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS __ mov(ebx, FieldOperand(receiver, HeapObject::kMapOffset)); - __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, - FAST_ELEMENTS, - ebx, - edi, - slow); + __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS, + ebx, edi, slow); mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS); - ElementsTransitionGenerator::GenerateDoubleToObject( - masm, receiver, key, value, ebx, mode, slow); + ElementsTransitionGenerator::GenerateDoubleToObject(masm, receiver, key, + value, ebx, mode, slow); __ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset)); __ jmp(&finish_object_store); } @@ -877,8 +816,8 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm, __ cmp(key, FieldOperand(receiver, JSArray::kLengthOffset)); // Compare smis. __ j(above_equal, &extra); - KeyedStoreGenerateGenericHelper(masm, &fast_object, &fast_double, - &slow, kCheckMap, kDontIncrementLength); + KeyedStoreGenerateGenericHelper(masm, &fast_object, &fast_double, &slow, + kCheckMap, kDontIncrementLength); KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow, &slow, kDontCheckMap, kIncrementLength); } @@ -894,8 +833,8 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm) { // Probe the stub cache. Code::Flags flags = Code::RemoveTypeAndHolderFromFlags( Code::ComputeHandlerFlags(Code::LOAD_IC)); - masm->isolate()->stub_cache()->GenerateProbe( - masm, flags, receiver, name, ebx, eax); + masm->isolate()->stub_cache()->GenerateProbe(masm, flags, receiver, name, ebx, + eax); // Cache miss: Jump to runtime. GenerateMiss(masm); @@ -990,9 +929,7 @@ const Register StoreIC::NameRegister() { return ecx; } const Register StoreIC::ValueRegister() { return eax; } -const Register KeyedStoreIC::MapRegister() { - return ebx; -} +const Register KeyedStoreIC::MapRegister() { return ebx; } void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) { @@ -1008,9 +945,8 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm) { // Return address is on the stack. 
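GenerateMegamorphic above (and its StoreIC counterpart that starts here) inlines a probe of the isolate's stub cache before giving up and jumping to the miss handler. A hedged outline of the lookup the emitted code performs; the entry type below is a stand-in for the table layout this patch moves into src/ic/stub-cache.h, and the two indices come from the hash functions emitted by GenerateProbe in the new stub-cache-ia32.cc further down:

    struct CacheEntry { const void* name; const void* map; void* handler; };

    // Try the primary table, then the secondary table (the two use
    // different hash functions so a collision in one is unlikely to
    // repeat in the other), then fall through to the runtime on a miss.
    void* MegamorphicLookup(CacheEntry* primary, int primary_index,
                            CacheEntry* secondary, int secondary_index,
                            const void* name, const void* map) {
      CacheEntry* p = &primary[primary_index];
      if (p->name == name && p->map == map) return p->handler;
      CacheEntry* s = &secondary[secondary_index];
      if (s->name == name && s->map == map) return s->handler;
      return nullptr;  // miss: jump to GenerateMiss
    }
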
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags( Code::ComputeHandlerFlags(Code::STORE_IC)); - masm->isolate()->stub_cache()->GenerateProbe( - masm, flags, ReceiverRegister(), NameRegister(), - ebx, no_reg); + masm->isolate()->stub_cache()->GenerateProbe(masm, flags, ReceiverRegister(), + NameRegister(), ebx, no_reg); // Cache miss: Jump to runtime. GenerateMiss(masm); @@ -1186,8 +1122,8 @@ void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) { // condition code uses at the patched jump. uint8_t delta = *reinterpret_cast(delta_address); if (FLAG_trace_ic) { - PrintF("[ patching ic at %p, test=%p, delta=%d\n", - address, test_instruction_address, delta); + PrintF("[ patching ic at %p, test=%p, delta=%d\n", address, + test_instruction_address, delta); } // Patch with a short conditional jump. Enabling means switching from a short @@ -1195,17 +1131,17 @@ void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) { // reverse operation of that. Address jmp_address = test_instruction_address - delta; DCHECK((check == ENABLE_INLINED_SMI_CHECK) - ? (*jmp_address == Assembler::kJncShortOpcode || - *jmp_address == Assembler::kJcShortOpcode) - : (*jmp_address == Assembler::kJnzShortOpcode || - *jmp_address == Assembler::kJzShortOpcode)); - Condition cc = (check == ENABLE_INLINED_SMI_CHECK) - ? (*jmp_address == Assembler::kJncShortOpcode ? not_zero : zero) - : (*jmp_address == Assembler::kJnzShortOpcode ? not_carry : carry); + ? (*jmp_address == Assembler::kJncShortOpcode || + *jmp_address == Assembler::kJcShortOpcode) + : (*jmp_address == Assembler::kJnzShortOpcode || + *jmp_address == Assembler::kJzShortOpcode)); + Condition cc = + (check == ENABLE_INLINED_SMI_CHECK) + ? (*jmp_address == Assembler::kJncShortOpcode ? not_zero : zero) + : (*jmp_address == Assembler::kJnzShortOpcode ? not_carry : carry); *jmp_address = static_cast(Assembler::kJccShortPrefix | cc); } - - -} } // namespace v8::internal +} +} // namespace v8::internal #endif // V8_TARGET_ARCH_IA32 diff --git a/src/ic/ia32/stub-cache-ia32.cc b/src/ic/ia32/stub-cache-ia32.cc new file mode 100644 index 000000000..1babf71a6 --- /dev/null +++ b/src/ic/ia32/stub-cache-ia32.cc @@ -0,0 +1,183 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/v8.h" + +#if V8_TARGET_ARCH_IA32 + +#include "src/codegen.h" +#include "src/ic/stub-cache.h" + +namespace v8 { +namespace internal { + +#define __ ACCESS_MASM(masm) + + +static void ProbeTable(Isolate* isolate, MacroAssembler* masm, + Code::Flags flags, StubCache::Table table, Register name, + Register receiver, + // Number of the cache entry pointer-size scaled. + Register offset, Register extra) { + ExternalReference key_offset(isolate->stub_cache()->key_reference(table)); + ExternalReference value_offset(isolate->stub_cache()->value_reference(table)); + ExternalReference map_offset(isolate->stub_cache()->map_reference(table)); + + Label miss; + + // Multiply by 3 because there are 3 fields per entry (name, code, map). + __ lea(offset, Operand(offset, offset, times_2, 0)); + + if (extra.is_valid()) { + // Get the code entry from the cache. + __ mov(extra, Operand::StaticArray(offset, times_1, value_offset)); + + // Check that the key in the entry matches the name. + __ cmp(name, Operand::StaticArray(offset, times_1, key_offset)); + __ j(not_equal, &miss); + + // Check the map matches. 
+ __ mov(offset, Operand::StaticArray(offset, times_1, map_offset)); + __ cmp(offset, FieldOperand(receiver, HeapObject::kMapOffset)); + __ j(not_equal, &miss); + + // Check that the flags match what we're looking for. + __ mov(offset, FieldOperand(extra, Code::kFlagsOffset)); + __ and_(offset, ~Code::kFlagsNotUsedInLookup); + __ cmp(offset, flags); + __ j(not_equal, &miss); + +#ifdef DEBUG + if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) { + __ jmp(&miss); + } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) { + __ jmp(&miss); + } +#endif + + // Jump to the first instruction in the code stub. + __ add(extra, Immediate(Code::kHeaderSize - kHeapObjectTag)); + __ jmp(extra); + + __ bind(&miss); + } else { + // Save the offset on the stack. + __ push(offset); + + // Check that the key in the entry matches the name. + __ cmp(name, Operand::StaticArray(offset, times_1, key_offset)); + __ j(not_equal, &miss); + + // Check the map matches. + __ mov(offset, Operand::StaticArray(offset, times_1, map_offset)); + __ cmp(offset, FieldOperand(receiver, HeapObject::kMapOffset)); + __ j(not_equal, &miss); + + // Restore offset register. + __ mov(offset, Operand(esp, 0)); + + // Get the code entry from the cache. + __ mov(offset, Operand::StaticArray(offset, times_1, value_offset)); + + // Check that the flags match what we're looking for. + __ mov(offset, FieldOperand(offset, Code::kFlagsOffset)); + __ and_(offset, ~Code::kFlagsNotUsedInLookup); + __ cmp(offset, flags); + __ j(not_equal, &miss); + +#ifdef DEBUG + if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) { + __ jmp(&miss); + } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) { + __ jmp(&miss); + } +#endif + + // Restore offset and re-load code entry from cache. + __ pop(offset); + __ mov(offset, Operand::StaticArray(offset, times_1, value_offset)); + + // Jump to the first instruction in the code stub. + __ add(offset, Immediate(Code::kHeaderSize - kHeapObjectTag)); + __ jmp(offset); + + // Pop at miss. + __ bind(&miss); + __ pop(offset); + } +} + + +void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags, + Register receiver, Register name, + Register scratch, Register extra, Register extra2, + Register extra3) { + Label miss; + + // Assert that code is valid. The multiplying code relies on the entry size + // being 12. + DCHECK(sizeof(Entry) == 12); + + // Assert the flags do not name a specific type. + DCHECK(Code::ExtractTypeFromFlags(flags) == 0); + + // Assert that there are no register conflicts. + DCHECK(!scratch.is(receiver)); + DCHECK(!scratch.is(name)); + DCHECK(!extra.is(receiver)); + DCHECK(!extra.is(name)); + DCHECK(!extra.is(scratch)); + + // Assert scratch and extra registers are valid, and extra2/3 are unused. + DCHECK(!scratch.is(no_reg)); + DCHECK(extra2.is(no_reg)); + DCHECK(extra3.is(no_reg)); + + Register offset = scratch; + scratch = no_reg; + + Counters* counters = masm->isolate()->counters(); + __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1); + + // Check that the receiver isn't a smi. + __ JumpIfSmi(receiver, &miss); + + // Get the map of the receiver and compute the hash. + __ mov(offset, FieldOperand(name, Name::kHashFieldOffset)); + __ add(offset, FieldOperand(receiver, HeapObject::kMapOffset)); + __ xor_(offset, flags); + // We mask out the last two bits because they are not part of the hash and + // they are always 01 for maps. Also in the two 'and' instructions below. 
+ __ and_(offset, (kPrimaryTableSize - 1) << kCacheIndexShift); + // ProbeTable expects the offset to be pointer scaled, which it is, because + // the heap object tag size is 2 and the pointer size log 2 is also 2. + DCHECK(kCacheIndexShift == kPointerSizeLog2); + + // Probe the primary table. + ProbeTable(isolate(), masm, flags, kPrimary, name, receiver, offset, extra); + + // Primary miss: Compute hash for secondary probe. + __ mov(offset, FieldOperand(name, Name::kHashFieldOffset)); + __ add(offset, FieldOperand(receiver, HeapObject::kMapOffset)); + __ xor_(offset, flags); + __ and_(offset, (kPrimaryTableSize - 1) << kCacheIndexShift); + __ sub(offset, name); + __ add(offset, Immediate(flags)); + __ and_(offset, (kSecondaryTableSize - 1) << kCacheIndexShift); + + // Probe the secondary table. + ProbeTable(isolate(), masm, flags, kSecondary, name, receiver, offset, extra); + + // Cache miss: Fall-through and let caller handle the miss by + // entering the runtime system. + __ bind(&miss); + __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1); +} + + +#undef __ +} +} // namespace v8::internal + +#endif // V8_TARGET_ARCH_IA32 diff --git a/src/stub-cache.cc b/src/ic/ic-compiler.cc similarity index 73% rename from src/stub-cache.cc rename to src/ic/ic-compiler.cc index 32ff8d146..05a0a6ee9 100644 --- a/src/stub-cache.cc +++ b/src/ic/ic-compiler.cc @@ -1,112 +1,23 @@ -// Copyright 2012 the V8 project authors. All rights reserved. +// Copyright 2014 the V8 project authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "src/v8.h" -#include "src/api.h" -#include "src/arguments.h" -#include "src/ast.h" -#include "src/code-stubs.h" -#include "src/cpu-profiler.h" -#include "src/gdb-jit.h" -#include "src/ic-inl.h" -#include "src/stub-cache.h" -#include "src/type-info.h" -#include "src/vm-state-inl.h" +#include "src/ic/ic-inl.h" +#include "src/ic/ic-compiler.h" + namespace v8 { namespace internal { -// ----------------------------------------------------------------------- -// StubCache implementation. - - -StubCache::StubCache(Isolate* isolate) - : isolate_(isolate) { } - - -void StubCache::Initialize() { - DCHECK(IsPowerOf2(kPrimaryTableSize)); - DCHECK(IsPowerOf2(kSecondaryTableSize)); - Clear(); -} - - -static Code::Flags CommonStubCacheChecks(Name* name, Map* map, - Code::Flags flags) { - flags = Code::RemoveTypeAndHolderFromFlags(flags); - - // Validate that the name does not move on scavenge, and that we - // can use identity checks instead of structural equality checks. - DCHECK(!name->GetHeap()->InNewSpace(name)); - DCHECK(name->IsUniqueName()); - - // The state bits are not important to the hash function because the stub - // cache only contains handlers. Make sure that the bits are the least - // significant so they will be the ones masked out. - DCHECK_EQ(Code::HANDLER, Code::ExtractKindFromFlags(flags)); - STATIC_ASSERT((Code::ICStateField::kMask & 1) == 1); - - // Make sure that the code type and cache holder are not included in the hash. - DCHECK(Code::ExtractTypeFromFlags(flags) == 0); - DCHECK(Code::ExtractCacheHolderFromFlags(flags) == 0); - - return flags; -} - - -Code* StubCache::Set(Name* name, Map* map, Code* code) { - Code::Flags flags = CommonStubCacheChecks(name, map, code->flags()); - - // Compute the primary entry. 
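The deleted StubCache::Set that resumes below calls PrimaryOffset and SecondaryOffset, the C++ twins of the masking GenerateProbe just emitted; their bodies appear verbatim in the deleted stub-cache.h later in this patch. A standalone rendering of the pair, assuming flags have already been masked with ~Code::kFlagsNotUsedInLookup, and using the table sizes and shift from the deleted header (kCacheIndexShift is Name::kHashShift, 2 on ia32 per the DCHECK above):

    #include <cstdint>

    constexpr int kCacheIndexShift = 2;  // stand-in for Name::kHashShift
    constexpr int kPrimaryTableSize = 1 << 11;
    constexpr int kSecondaryTableSize = 1 << 9;

    // Primary table: combine the name's hash field with the map address
    // and the code flags, then mask to a scaled table index.
    int PrimaryOffset(uint32_t name_hash_field, uintptr_t map, uint32_t flags) {
      uint32_t key = (static_cast<uint32_t>(map) + name_hash_field) ^ flags;
      return key & ((kPrimaryTableSize - 1) << kCacheIndexShift);
    }

    // Secondary table: reuse the primary offset as a seed and mix in the
    // name's address instead of the map's, so the two tables collide on
    // different inputs.
    int SecondaryOffset(uintptr_t name, uint32_t flags, int seed) {
      uint32_t key = (seed - static_cast<uint32_t>(name)) + flags;
      return key & ((kSecondaryTableSize - 1) << kCacheIndexShift);
    }
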
- int primary_offset = PrimaryOffset(name, flags, map); - Entry* primary = entry(primary_, primary_offset); - Code* old_code = primary->value; - - // If the primary entry has useful data in it, we retire it to the - // secondary cache before overwriting it. - if (old_code != isolate_->builtins()->builtin(Builtins::kIllegal)) { - Map* old_map = primary->map; - Code::Flags old_flags = - Code::RemoveTypeAndHolderFromFlags(old_code->flags()); - int seed = PrimaryOffset(primary->key, old_flags, old_map); - int secondary_offset = SecondaryOffset(primary->key, old_flags, seed); - Entry* secondary = entry(secondary_, secondary_offset); - *secondary = *primary; - } - - // Update primary cache. - primary->key = name; - primary->value = code; - primary->map = map; - isolate()->counters()->megamorphic_stub_cache_updates()->Increment(); - return code; -} - - -Code* StubCache::Get(Name* name, Map* map, Code::Flags flags) { - flags = CommonStubCacheChecks(name, map, flags); - int primary_offset = PrimaryOffset(name, flags, map); - Entry* primary = entry(primary_, primary_offset); - if (primary->key == name && primary->map == map) { - return primary->value; - } - int secondary_offset = SecondaryOffset(name, flags, primary_offset); - Entry* secondary = entry(secondary_, secondary_offset); - if (secondary->key == name && secondary->map == map) { - return secondary->value; - } - return NULL; -} - Handle PropertyICCompiler::Find(Handle name, Handle stub_holder, Code::Kind kind, ExtraICState extra_state, CacheHolderFlag cache_holder) { - Code::Flags flags = Code::ComputeMonomorphicFlags( - kind, extra_state, cache_holder); + Code::Flags flags = + Code::ComputeMonomorphicFlags(kind, extra_state, cache_holder); Object* probe = stub_holder->FindInCodeCache(*name, flags); if (probe->IsCode()) return handle(Code::cast(probe)); return Handle::null(); @@ -261,22 +172,14 @@ Handle PropertyICCompiler::ComputeKeyedStoreMonomorphic( compiler.CompileKeyedStoreMonomorphic(receiver_map, store_mode); Map::UpdateCodeCache(receiver_map, name, code); - DCHECK(KeyedStoreIC::GetKeyedAccessStoreMode(code->extra_ic_state()) - == store_mode); + DCHECK(KeyedStoreIC::GetKeyedAccessStoreMode(code->extra_ic_state()) == + store_mode); return code; } #define CALL_LOGGER_TAG(kind, type) (Logger::KEYED_##type) -static void FillCache(Isolate* isolate, Handle code) { - Handle dictionary = - UnseededNumberDictionary::Set(isolate->factory()->non_monomorphic_cache(), - code->flags(), - code); - isolate->heap()->public_set_non_monomorphic_cache(*dictionary); -} - Code* PropertyICCompiler::FindPreMonomorphic(Isolate* isolate, Code::Kind kind, ExtraICState state) { @@ -292,6 +195,13 @@ Code* PropertyICCompiler::FindPreMonomorphic(Isolate* isolate, Code::Kind kind, } +static void FillCache(Isolate* isolate, Handle code) { + Handle dictionary = UnseededNumberDictionary::Set( + isolate->factory()->non_monomorphic_cache(), code->flags(), code); + isolate->heap()->public_set_non_monomorphic_cache(*dictionary); +} + + Handle PropertyICCompiler::ComputeLoad(Isolate* isolate, InlineCacheState ic_state, ExtraICState extra_state) { @@ -417,8 +327,8 @@ Handle PropertyICCompiler::ComputeKeyedStorePolymorphic( store_mode == STORE_NO_TRANSITION_HANDLE_COW); Handle cache = isolate->factory()->polymorphic_code_cache(); - ExtraICState extra_state = KeyedStoreIC::ComputeExtraICState( - strict_mode, store_mode); + ExtraICState extra_state = + KeyedStoreIC::ComputeExtraICState(strict_mode, store_mode); Code::Flags flags = Code::ComputeFlags(Code::KEYED_STORE_IC, 
POLYMORPHIC, extra_state); Handle probe = cache->Lookup(receiver_maps, flags); @@ -432,227 +342,10 @@ Handle PropertyICCompiler::ComputeKeyedStorePolymorphic( } -void StubCache::Clear() { - Code* empty = isolate_->builtins()->builtin(Builtins::kIllegal); - for (int i = 0; i < kPrimaryTableSize; i++) { - primary_[i].key = isolate()->heap()->empty_string(); - primary_[i].map = NULL; - primary_[i].value = empty; - } - for (int j = 0; j < kSecondaryTableSize; j++) { - secondary_[j].key = isolate()->heap()->empty_string(); - secondary_[j].map = NULL; - secondary_[j].value = empty; - } -} - - -void StubCache::CollectMatchingMaps(SmallMapList* types, - Handle name, - Code::Flags flags, - Handle native_context, - Zone* zone) { - for (int i = 0; i < kPrimaryTableSize; i++) { - if (primary_[i].key == *name) { - Map* map = primary_[i].map; - // Map can be NULL, if the stub is constant function call - // with a primitive receiver. - if (map == NULL) continue; - - int offset = PrimaryOffset(*name, flags, map); - if (entry(primary_, offset) == &primary_[i] && - !TypeFeedbackOracle::CanRetainOtherContext(map, *native_context)) { - types->AddMapIfMissing(Handle(map), zone); - } - } - } - - for (int i = 0; i < kSecondaryTableSize; i++) { - if (secondary_[i].key == *name) { - Map* map = secondary_[i].map; - // Map can be NULL, if the stub is constant function call - // with a primitive receiver. - if (map == NULL) continue; - - // Lookup in primary table and skip duplicates. - int primary_offset = PrimaryOffset(*name, flags, map); - - // Lookup in secondary table and add matches. - int offset = SecondaryOffset(*name, flags, primary_offset); - if (entry(secondary_, offset) == &secondary_[i] && - !TypeFeedbackOracle::CanRetainOtherContext(map, *native_context)) { - types->AddMapIfMissing(Handle(map), zone); - } - } - } -} - - -// ------------------------------------------------------------------------ -// StubCompiler implementation. - - -RUNTIME_FUNCTION(StoreCallbackProperty) { - Handle receiver = args.at(0); - Handle holder = args.at(1); - Handle callback = args.at(2); - Handle name = args.at(3); - Handle value = args.at(4); - HandleScope scope(isolate); - - DCHECK(callback->IsCompatibleReceiver(*receiver)); - - Address setter_address = v8::ToCData
<Address>(callback->setter()); - v8::AccessorNameSetterCallback fun = - FUNCTION_CAST<v8::AccessorNameSetterCallback>(setter_address); - DCHECK(fun != NULL); - - LOG(isolate, ApiNamedPropertyAccess("store", *receiver, *name)); - PropertyCallbackArguments custom_args(isolate, callback->data(), *receiver, - *holder); - custom_args.Call(fun, v8::Utils::ToLocal(name), v8::Utils::ToLocal(value)); - RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate); - return *value; -} - - -/** - * Attempts to load a property with an interceptor (which must be present), - * but doesn't search the prototype chain. - * - * Returns |Heap::no_interceptor_result_sentinel()| if interceptor doesn't - * provide any value for the given name. - */ -RUNTIME_FUNCTION(LoadPropertyWithInterceptorOnly) { - DCHECK(args.length() == NamedLoadHandlerCompiler::kInterceptorArgsLength); - Handle<Name> name_handle = - args.at<Name>(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex); - Handle<InterceptorInfo> interceptor_info = args.at<InterceptorInfo>( - NamedLoadHandlerCompiler::kInterceptorArgsInfoIndex); - - // TODO(rossberg): Support symbols in the API. - if (name_handle->IsSymbol()) - return isolate->heap()->no_interceptor_result_sentinel(); - Handle<String> name = Handle<String>::cast(name_handle); - - Address getter_address = v8::ToCData<Address>
(interceptor_info->getter()); - v8::NamedPropertyGetterCallback getter = - FUNCTION_CAST<v8::NamedPropertyGetterCallback>(getter_address); - DCHECK(getter != NULL); - - Handle<JSObject> receiver = - args.at<JSObject>(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex); - Handle<JSObject> holder = - args.at<JSObject>(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex); - PropertyCallbackArguments callback_args( - isolate, interceptor_info->data(), *receiver, *holder); - { - // Use the interceptor getter. - HandleScope scope(isolate); - v8::Handle<v8::Value> r = - callback_args.Call(getter, v8::Utils::ToLocal(name)); - RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate); - if (!r.IsEmpty()) { - Handle<Object> result = v8::Utils::OpenHandle(*r); - result->VerifyApiCallResultType(); - return *v8::Utils::OpenHandle(*r); - } - } - - return isolate->heap()->no_interceptor_result_sentinel(); -} - - -static Object* ThrowReferenceError(Isolate* isolate, Name* name) { - // If the load is non-contextual, just return the undefined result. - // Note that both keyed and non-keyed loads may end up here. - HandleScope scope(isolate); - LoadIC ic(IC::NO_EXTRA_FRAME, isolate); - if (ic.contextual_mode() != CONTEXTUAL) { - return isolate->heap()->undefined_value(); - } - - // Throw a reference error. - Handle<Name> name_handle(name); - Handle<Object> error = - isolate->factory()->NewReferenceError("not_defined", - HandleVector(&name_handle, 1)); - return isolate->Throw(*error); -} - - -/** - * Loads a property with an interceptor performing post interceptor - * lookup if interceptor failed. - */ -RUNTIME_FUNCTION(LoadPropertyWithInterceptor) { - HandleScope scope(isolate); - DCHECK(args.length() == NamedLoadHandlerCompiler::kInterceptorArgsLength); - Handle<Name> name = - args.at<Name>(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex); - Handle<JSObject> receiver = - args.at<JSObject>(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex); - Handle<JSObject> holder = - args.at<JSObject>(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex); - - Handle<Object> result; - LookupIterator it(receiver, name, holder); - ASSIGN_RETURN_FAILURE_ON_EXCEPTION( - isolate, result, JSObject::GetProperty(&it)); - - if (it.IsFound()) return *result; - - return ThrowReferenceError(isolate, Name::cast(args[0])); -} - - -RUNTIME_FUNCTION(StorePropertyWithInterceptor) { - HandleScope scope(isolate); - DCHECK(args.length() == 3); - StoreIC ic(IC::NO_EXTRA_FRAME, isolate); - Handle<JSObject> receiver = args.at<JSObject>(0); - Handle<Name> name = args.at<Name>(1); - Handle<Object> value = args.at<Object>(2); -#ifdef DEBUG - PrototypeIterator iter(isolate, receiver, - PrototypeIterator::START_AT_RECEIVER); - bool found = false; - while (!iter.IsAtEnd(PrototypeIterator::END_AT_NON_HIDDEN)) { - Handle<Object> current = PrototypeIterator::GetCurrent(iter); - if (current->IsJSObject() && - Handle<JSObject>::cast(current)->HasNamedInterceptor()) { - found = true; - break; - } - } - DCHECK(found); -#endif - Handle<Object> result; - ASSIGN_RETURN_FAILURE_ON_EXCEPTION( - isolate, result, - JSObject::SetProperty(receiver, name, value, ic.strict_mode())); - return *result; -} - - -RUNTIME_FUNCTION(LoadElementWithInterceptor) { - HandleScope scope(isolate); - Handle<JSObject> receiver = args.at<JSObject>(0); - DCHECK(args.smi_at(1) >= 0); - uint32_t index = args.smi_at(1); - Handle<Object> result; - ASSIGN_RETURN_FAILURE_ON_EXCEPTION( - isolate, result, - JSObject::GetElementWithInterceptor(receiver, receiver, index)); - return *result; -} - - Handle<Code> PropertyICCompiler::CompileLoadInitialize(Code::Flags flags) { LoadIC::GenerateInitialize(masm()); Handle<Code> code = GetCodeWithFlags(flags, "CompileLoadInitialize"); - PROFILE(isolate(), - CodeCreateEvent(Logger::LOAD_INITIALIZE_TAG, *code, 0)); +
PROFILE(isolate(), CodeCreateEvent(Logger::LOAD_INITIALIZE_TAG, *code, 0)); return code; } @@ -669,8 +362,7 @@ Handle PropertyICCompiler::CompileLoadPreMonomorphic(Code::Flags flags) { Handle PropertyICCompiler::CompileLoadMegamorphic(Code::Flags flags) { LoadIC::GenerateMegamorphic(masm()); Handle code = GetCodeWithFlags(flags, "CompileLoadMegamorphic"); - PROFILE(isolate(), - CodeCreateEvent(Logger::LOAD_MEGAMORPHIC_TAG, *code, 0)); + PROFILE(isolate(), CodeCreateEvent(Logger::LOAD_MEGAMORPHIC_TAG, *code, 0)); return code; } @@ -678,8 +370,7 @@ Handle PropertyICCompiler::CompileLoadMegamorphic(Code::Flags flags) { Handle PropertyICCompiler::CompileStoreInitialize(Code::Flags flags) { StoreIC::GenerateInitialize(masm()); Handle code = GetCodeWithFlags(flags, "CompileStoreInitialize"); - PROFILE(isolate(), - CodeCreateEvent(Logger::STORE_INITIALIZE_TAG, *code, 0)); + PROFILE(isolate(), CodeCreateEvent(Logger::STORE_INITIALIZE_TAG, *code, 0)); return code; } @@ -698,8 +389,7 @@ Handle PropertyICCompiler::CompileStoreGeneric(Code::Flags flags) { StrictMode strict_mode = StoreIC::GetStrictMode(extra_state); StoreIC::GenerateRuntimeSetProperty(masm(), strict_mode); Handle code = GetCodeWithFlags(flags, "CompileStoreGeneric"); - PROFILE(isolate(), - CodeCreateEvent(Logger::STORE_GENERIC_TAG, *code, 0)); + PROFILE(isolate(), CodeCreateEvent(Logger::STORE_GENERIC_TAG, *code, 0)); return code; } @@ -707,8 +397,7 @@ Handle PropertyICCompiler::CompileStoreGeneric(Code::Flags flags) { Handle PropertyICCompiler::CompileStoreMegamorphic(Code::Flags flags) { StoreIC::GenerateMegamorphic(masm()); Handle code = GetCodeWithFlags(flags, "CompileStoreMegamorphic"); - PROFILE(isolate(), - CodeCreateEvent(Logger::STORE_MEGAMORPHIC_TAG, *code, 0)); + PROFILE(isolate(), CodeCreateEvent(Logger::STORE_MEGAMORPHIC_TAG, *code, 0)); return code; } @@ -736,8 +425,9 @@ Handle PropertyAccessCompiler::GetCodeWithFlags(Code::Flags flags, Handle PropertyAccessCompiler::GetCodeWithFlags(Code::Flags flags, Handle name) { return (FLAG_print_code_stubs && !name.is_null() && name->IsString()) - ? GetCodeWithFlags(flags, Handle::cast(name)->ToCString().get()) - : GetCodeWithFlags(flags, NULL); + ? 
GetCodeWithFlags(flags, + Handle::cast(name)->ToCString().get()) + : GetCodeWithFlags(flags, NULL); } @@ -762,8 +452,8 @@ Register NamedLoadHandlerCompiler::FrontendHeader(Register object_reg, } if (check_type == CHECK_ALL_MAPS) { - GenerateDirectLoadGlobalFunctionPrototype( - masm(), function_index, scratch1(), miss); + GenerateDirectLoadGlobalFunctionPrototype(masm(), function_index, + scratch1(), miss); Object* function = isolate()->native_context()->get(function_index); Object* prototype = JSFunction::cast(function)->instance_prototype(); set_type_for_object(handle(prototype, isolate())); @@ -885,9 +575,8 @@ Handle NamedLoadHandlerCompiler::CompileLoadCallback( DCHECK(call_optimization.is_simple_api_call()); Frontend(receiver(), name); Handle receiver_map = IC::TypeToMap(*type(), isolate()); - GenerateFastApiCall( - masm(), call_optimization, receiver_map, - receiver(), scratch1(), false, 0, NULL); + GenerateFastApiCall(masm(), call_optimization, receiver_map, receiver(), + scratch1(), false, 0, NULL); return GetCode(kind(), Code::FAST, name); } @@ -1033,10 +722,9 @@ Handle NamedStoreHandlerCompiler::CompileStoreCallback( Handle object, Handle name, const CallOptimization& call_optimization) { Frontend(receiver(), name); - Register values[] = { value() }; - GenerateFastApiCall( - masm(), call_optimization, handle(object->map()), - receiver(), scratch1(), true, 1, values); + Register values[] = {value()}; + GenerateFastApiCall(masm(), call_optimization, handle(object->map()), + receiver(), scratch1(), true, 1, values); return GetCode(kind(), Code::FAST, name); } @@ -1198,8 +886,7 @@ CallOptimization::CallOptimization(Handle function) { Handle CallOptimization::LookupHolderOfExpectedType( - Handle object_map, - HolderLookup* holder_lookup) const { + Handle object_map, HolderLookup* holder_lookup) const { DCHECK(is_simple_api_call()); if (!object_map->IsJSObjectMap()) { *holder_lookup = kHolderNotFound; @@ -1231,8 +918,7 @@ bool CallOptimization::IsCompatibleReceiver(Handle receiver, if (!receiver->IsJSObject()) return false; Handle map(JSObject::cast(*receiver)->map()); HolderLookup holder_lookup; - Handle api_holder = - LookupHolderOfExpectedType(map, &holder_lookup); + Handle api_holder = LookupHolderOfExpectedType(map, &holder_lookup); switch (holder_lookup) { case kHolderNotFound: return false; @@ -1286,14 +972,12 @@ void CallOptimization::AnalyzePossibleApiFunction(Handle function) { Handle(SignatureInfo::cast(info->signature())); if (!signature->args()->IsUndefined()) return; if (!signature->receiver()->IsUndefined()) { - expected_receiver_type_ = - Handle( - FunctionTemplateInfo::cast(signature->receiver())); + expected_receiver_type_ = Handle( + FunctionTemplateInfo::cast(signature->receiver())); } } is_simple_api_call_ = true; } - - -} } // namespace v8::internal +} +} // namespace v8::internal diff --git a/src/stub-cache.h b/src/ic/ic-compiler.h similarity index 72% rename from src/stub-cache.h rename to src/ic/ic-compiler.h index a734c8a6b..8570ab172 100644 --- a/src/stub-cache.h +++ b/src/ic/ic-compiler.h @@ -1,202 +1,23 @@ -// Copyright 2012 the V8 project authors. All rights reserved. +// Copyright 2014 the V8 project authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. 
-#ifndef V8_STUB_CACHE_H_ -#define V8_STUB_CACHE_H_ +#ifndef V8_IC_IC_COMPILER_H_ +#define V8_IC_IC_COMPILER_H_ -#include "src/allocation.h" -#include "src/arguments.h" #include "src/code-stubs.h" -#include "src/ic-inl.h" #include "src/macro-assembler.h" #include "src/objects.h" -#include "src/zone-inl.h" namespace v8 { namespace internal { -// The stub cache is used for megamorphic property accesses. -// It maps (map, name, type) to property access handlers. The cache does not -// need explicit invalidation when a prototype chain is modified, since the -// handlers verify the chain. - - class CallOptimization; class SmallMapList; class StubCache; -class SCTableReference { - public: - Address address() const { return address_; } - - private: - explicit SCTableReference(Address address) : address_(address) {} - - Address address_; - - friend class StubCache; -}; - - -class StubCache { - public: - struct Entry { - Name* key; - Code* value; - Map* map; - }; - - void Initialize(); - // Access cache for entry hash(name, map). - Code* Set(Name* name, Map* map, Code* code); - Code* Get(Name* name, Map* map, Code::Flags flags); - // Clear the lookup table (@ mark compact collection). - void Clear(); - // Collect all maps that match the name and flags. - void CollectMatchingMaps(SmallMapList* types, - Handle name, - Code::Flags flags, - Handle native_context, - Zone* zone); - // Generate code for probing the stub cache table. - // Arguments extra, extra2 and extra3 may be used to pass additional scratch - // registers. Set to no_reg if not needed. - void GenerateProbe(MacroAssembler* masm, - Code::Flags flags, - Register receiver, - Register name, - Register scratch, - Register extra, - Register extra2 = no_reg, - Register extra3 = no_reg); - - enum Table { - kPrimary, - kSecondary - }; - - SCTableReference key_reference(StubCache::Table table) { - return SCTableReference( - reinterpret_cast
<Address>(&first_entry(table)->key)); - } - - SCTableReference map_reference(StubCache::Table table) { - return SCTableReference( - reinterpret_cast<Address>
(&first_entry(table)->map)); - } - - SCTableReference value_reference(StubCache::Table table) { - return SCTableReference( - reinterpret_cast<Address>
(&first_entry(table)->value)); - } - - StubCache::Entry* first_entry(StubCache::Table table) { - switch (table) { - case StubCache::kPrimary: return StubCache::primary_; - case StubCache::kSecondary: return StubCache::secondary_; - } - UNREACHABLE(); - return NULL; - } - - Isolate* isolate() { return isolate_; } - - // Setting the entry size such that the index is shifted by Name::kHashShift - // is convenient; shifting down the length field (to extract the hash code) - // automatically discards the hash bit field. - static const int kCacheIndexShift = Name::kHashShift; - - private: - explicit StubCache(Isolate* isolate); - - // The stub cache has a primary and secondary level. The two levels have - // different hashing algorithms in order to avoid simultaneous collisions - // in both caches. Unlike a probing strategy (quadratic or otherwise) the - // update strategy on updates is fairly clear and simple: Any existing entry - // in the primary cache is moved to the secondary cache, and secondary cache - // entries are overwritten. - - // Hash algorithm for the primary table. This algorithm is replicated in - // assembler for every architecture. Returns an index into the table that - // is scaled by 1 << kCacheIndexShift. - static int PrimaryOffset(Name* name, Code::Flags flags, Map* map) { - STATIC_ASSERT(kCacheIndexShift == Name::kHashShift); - // Compute the hash of the name (use entire hash field). - DCHECK(name->HasHashCode()); - uint32_t field = name->hash_field(); - // Using only the low bits in 64-bit mode is unlikely to increase the - // risk of collision even if the heap is spread over an area larger than - // 4Gb (and not at all if it isn't). - uint32_t map_low32bits = - static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map)); - // We always set the in_loop bit to zero when generating the lookup code - // so do it here too so the hash codes match. - uint32_t iflags = - (static_cast<uint32_t>(flags) & ~Code::kFlagsNotUsedInLookup); - // Base the offset on a simple combination of name, flags, and map. - uint32_t key = (map_low32bits + field) ^ iflags; - return key & ((kPrimaryTableSize - 1) << kCacheIndexShift); - } - - // Hash algorithm for the secondary table. This algorithm is replicated in - // assembler for every architecture. Returns an index into the table that - // is scaled by 1 << kCacheIndexShift. - static int SecondaryOffset(Name* name, Code::Flags flags, int seed) { - // Use the seed from the primary cache in the secondary cache. - uint32_t name_low32bits = - static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name)); - // We always set the in_loop bit to zero when generating the lookup code - // so do it here too so the hash codes match. - uint32_t iflags = - (static_cast<uint32_t>(flags) & ~Code::kFlagsNotUsedInLookup); - uint32_t key = (seed - name_low32bits) + iflags; - return key & ((kSecondaryTableSize - 1) << kCacheIndexShift); - } - - // Compute the entry for a given offset in exactly the same way as - // we do in generated code. We generate an hash code that already - // ends in Name::kHashShift 0s. Then we multiply it so it is a multiple - // of sizeof(Entry). This makes it easier to avoid making mistakes - // in the hashed offset computations. - static Entry* entry(Entry* table, int offset) { - const int multiplier = sizeof(*table) >> Name::kHashShift; - return reinterpret_cast<Entry*>( - reinterpret_cast<Address>
(table) + offset * multiplier); - } - - static const int kPrimaryTableBits = 11; - static const int kPrimaryTableSize = (1 << kPrimaryTableBits); - static const int kSecondaryTableBits = 9; - static const int kSecondaryTableSize = (1 << kSecondaryTableBits); - - Entry primary_[kPrimaryTableSize]; - Entry secondary_[kSecondaryTableSize]; - Isolate* isolate_; - - friend class Isolate; - friend class SCTableReference; - - DISALLOW_COPY_AND_ASSIGN(StubCache); -}; - - -// ------------------------------------------------------------------------ - - -// Support functions for IC stubs for callbacks. -DECLARE_RUNTIME_FUNCTION(StoreCallbackProperty); - - -// Support functions for IC stubs for interceptors. -DECLARE_RUNTIME_FUNCTION(LoadPropertyWithInterceptorOnly); -DECLARE_RUNTIME_FUNCTION(LoadPropertyWithInterceptor); -DECLARE_RUNTIME_FUNCTION(LoadElementWithInterceptor); -DECLARE_RUNTIME_FUNCTION(StorePropertyWithInterceptor); - - enum PrototypeCheckType { CHECK_ALL_MAPS, SKIP_RECEIVER }; enum IcCheckType { ELEMENT, PROPERTY }; @@ -409,8 +230,7 @@ class PropertyHandlerCompiler : public PropertyAccessCompiler { static void GenerateDictionaryNegativeLookup(MacroAssembler* masm, Label* miss_label, Register receiver, - Handle name, - Register r0, + Handle name, Register r0, Register r1); // Generate code to check that a global property cell is empty. Create @@ -418,8 +238,7 @@ class PropertyHandlerCompiler : public PropertyAccessCompiler { // property. static void GenerateCheckPropertyCell(MacroAssembler* masm, Handle global, - Handle name, - Register scratch, + Handle name, Register scratch, Label* miss); // Generates code that verifies that the property holder has not changed @@ -595,9 +414,12 @@ class NamedStoreHandlerCompiler : public PropertyHandlerCompiler { static Builtins::Name SlowBuiltin(Code::Kind kind) { switch (kind) { - case Code::STORE_IC: return Builtins::kStoreIC_Slow; - case Code::KEYED_STORE_IC: return Builtins::kKeyedStoreIC_Slow; - default: UNREACHABLE(); + case Code::STORE_IC: + return Builtins::kStoreIC_Slow; + case Code::KEYED_STORE_IC: + return Builtins::kKeyedStoreIC_Slow; + default: + UNREACHABLE(); } return Builtins::kStoreIC_Slow; } @@ -628,18 +450,14 @@ class CallOptimization BASE_EMBEDDED { public: explicit CallOptimization(Handle function); - bool is_constant_call() const { - return !constant_function_.is_null(); - } + bool is_constant_call() const { return !constant_function_.is_null(); } Handle constant_function() const { DCHECK(is_constant_call()); return constant_function_; } - bool is_simple_api_call() const { - return is_simple_api_call_; - } + bool is_simple_api_call() const { return is_simple_api_call_; } Handle expected_receiver_type() const { DCHECK(is_simple_api_call()); @@ -651,14 +469,9 @@ class CallOptimization BASE_EMBEDDED { return api_call_info_; } - enum HolderLookup { - kHolderNotFound, - kHolderIsReceiver, - kHolderFound - }; + enum HolderLookup { kHolderNotFound, kHolderIsReceiver, kHolderFound }; Handle LookupHolderOfExpectedType( - Handle receiver_map, - HolderLookup* holder_lookup) const; + Handle receiver_map, HolderLookup* holder_lookup) const; // Check if the api holder is between the receiver and the holder. 
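CallOptimization resolves fast API calls through this three-way HolderLookup result. A hedged sketch of the classification LookupHolderOfExpectedType performs, using integers as stand-ins for receiver types:

    #include <vector>

    enum HolderLookup { kHolderNotFound, kHolderIsReceiver, kHolderFound };

    // chain[0] is the receiver's own type, chain[1..] its prototype chain.
    HolderLookup LookupHolder(int expected_type, const std::vector<int>& chain) {
      if (chain.empty()) return kHolderNotFound;
      if (chain[0] == expected_type) return kHolderIsReceiver;
      for (size_t i = 1; i < chain.size(); ++i) {
        if (chain[i] == expected_type) return kHolderFound;
      }
      return kHolderNotFound;
    }
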
bool IsCompatibleReceiver(Handle receiver, @@ -676,8 +489,7 @@ class CallOptimization BASE_EMBEDDED { Handle expected_receiver_type_; Handle api_call_info_; }; +} +} // namespace v8::internal - -} } // namespace v8::internal - -#endif // V8_STUB_CACHE_H_ +#endif // V8_IC_IC_COMPILER_H_ diff --git a/src/ic-inl.h b/src/ic/ic-inl.h similarity index 82% rename from src/ic-inl.h rename to src/ic/ic-inl.h index c7954ce13..f984cee6f 100644 --- a/src/ic-inl.h +++ b/src/ic/ic-inl.h @@ -5,7 +5,7 @@ #ifndef V8_IC_INL_H_ #define V8_IC_INL_H_ -#include "src/ic.h" +#include "src/ic/ic.h" #include "src/compiler.h" #include "src/debug.h" @@ -27,8 +27,8 @@ Address IC::address() const { // At least one break point is active perform additional test to ensure that // break point locations are updated correctly. - if (debug->IsDebugBreak(Assembler::target_address_at(result, - raw_constant_pool()))) { + if (debug->IsDebugBreak( + Assembler::target_address_at(result, raw_constant_pool()))) { // If the call site is a call to debug break then return the address in // the original code instead of the address in the running code. This will // cause the original code to be updated and keeps the breakpoint active in @@ -93,8 +93,7 @@ Code* IC::GetTargetAtAddress(Address address, } -void IC::SetTargetAtAddress(Address address, - Code* target, +void IC::SetTargetAtAddress(Address address, Code* target, ConstantPoolArray* constant_pool) { DCHECK(target->is_inline_cache_stub() || target->is_compare_ic_stub()); Heap* heap = target->GetHeap(); @@ -108,8 +107,8 @@ void IC::SetTargetAtAddress(Address address, StoreIC::GetStrictMode(target->extra_ic_state())); } #endif - Assembler::set_target_address_at( - address, constant_pool, target->instruction_start()); + Assembler::set_target_address_at(address, constant_pool, + target->instruction_start()); if (heap->gc_state() == Heap::MARK_COMPACT) { heap->mark_compact_collector()->RecordCodeTargetPatch(address, target); } else { @@ -119,6 +118,46 @@ void IC::SetTargetAtAddress(Address address, } +void IC::set_target(Code* code) { +#ifdef VERIFY_HEAP + code->VerifyEmbeddedObjectsDependency(); +#endif + SetTargetAtAddress(address(), code, constant_pool()); + target_set_ = true; +} + + +void LoadIC::set_target(Code* code) { + // The contextual mode must be preserved across IC patching. + DCHECK(GetContextualMode(code->extra_ic_state()) == + GetContextualMode(target()->extra_ic_state())); + + IC::set_target(code); +} + + +void StoreIC::set_target(Code* code) { + // Strict mode must be preserved across IC patching. + DCHECK(GetStrictMode(code->extra_ic_state()) == + GetStrictMode(target()->extra_ic_state())); + IC::set_target(code); +} + + +void KeyedStoreIC::set_target(Code* code) { + // Strict mode must be preserved across IC patching. 
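Each of the set_target overrides being added here asserts that mode bits carried in the code object's extra IC state survive patching: an IC may change shape (monomorphic, polymorphic, and so on) but never its language-mode semantics. A minimal sketch of the invariant, with an illustrative one-bit layout:

    #include <cassert>
    #include <cstdint>

    constexpr uint32_t kStrictModeBit = 1u << 0;  // illustrative layout

    struct StubLikeCode { uint32_t extra_ic_state; };

    // Patching swaps the code object behind an IC call site but must not
    // silently flip strict mode, which is what the DCHECKs here enforce.
    void SetTarget(StubLikeCode** slot, StubLikeCode* replacement) {
      assert(((*slot)->extra_ic_state & kStrictModeBit) ==
             (replacement->extra_ic_state & kStrictModeBit));
      *slot = replacement;
    }
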
+ DCHECK(GetStrictMode(code->extra_ic_state()) == strict_mode()); + IC::set_target(code); +} + + +Code* IC::raw_target() const { + return GetTargetAtAddress(address(), constant_pool()); +} + +void IC::UpdateTarget() { target_ = handle(raw_target(), isolate_); } + + template JSFunction* IC::GetRootConstructor(TypeClass* type, Context* native_context) { if (type->Is(TypeClass::Boolean())) { @@ -184,6 +223,7 @@ IC::State CallIC::FeedbackToState(Handle vector, return state; } -} } // namespace v8::internal +} +} // namespace v8::internal #endif // V8_IC_INL_H_ diff --git a/src/ic.cc b/src/ic/ic.cc similarity index 86% rename from src/ic.cc rename to src/ic/ic.cc index 4d0b24715..2240abd27 100644 --- a/src/ic.cc +++ b/src/ic/ic.cc @@ -10,29 +10,37 @@ #include "src/codegen.h" #include "src/conversions.h" #include "src/execution.h" -#include "src/ic-inl.h" +#include "src/ic/ic-inl.h" +#include "src/ic/ic-compiler.h" +#include "src/ic/stub-cache.h" #include "src/prototype.h" #include "src/runtime.h" -#include "src/stub-cache.h" namespace v8 { namespace internal { char IC::TransitionMarkFromState(IC::State state) { switch (state) { - case UNINITIALIZED: return '0'; - case PREMONOMORPHIC: return '.'; - case MONOMORPHIC: return '1'; + case UNINITIALIZED: + return '0'; + case PREMONOMORPHIC: + return '.'; + case MONOMORPHIC: + return '1'; case PROTOTYPE_FAILURE: return '^'; - case POLYMORPHIC: return 'P'; - case MEGAMORPHIC: return 'N'; - case GENERIC: return 'G'; + case POLYMORPHIC: + return 'P'; + case MEGAMORPHIC: + return 'N'; + case GENERIC: + return 'G'; // We never see the debugger states here, because the state is // computed from the original code - not the patched code. Let // these cases fall through to the unreachable code below. - case DEBUG_STUB: break; + case DEBUG_STUB: + break; // Type-vector-based ICs resolve state to one of the above. case DEFAULT: break; @@ -122,18 +130,15 @@ void IC::TraceIC(const char* type, Handle name, State old_state, TraceIC(type, name, old_state, new_state) IC::IC(FrameDepth depth, Isolate* isolate) - : isolate_(isolate), - target_set_(false), - target_maps_set_(false) { + : isolate_(isolate), target_set_(false), target_maps_set_(false) { // To improve the performance of the (much used) IC code, we unfold a few // levels of the stack frame iteration code. This yields a ~35% speedup when // running DeltaBlue and a ~25% speedup of gbemu with the '--nouse-ic' flag. - const Address entry = - Isolate::c_entry_fp(isolate->thread_local_top()); + const Address entry = Isolate::c_entry_fp(isolate->thread_local_top()); Address constant_pool = NULL; if (FLAG_enable_ool_constant_pool) { - constant_pool = Memory::Address_at( - entry + ExitFrameConstants::kConstantPoolOffset); + constant_pool = + Memory::Address_at(entry + ExitFrameConstants::kConstantPoolOffset); } Address* pc_address = reinterpret_cast(entry + ExitFrameConstants::kCallerPCOffset); @@ -143,8 +148,8 @@ IC::IC(FrameDepth depth, Isolate* isolate) // find the frame pointer and the return address stack slot. 
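As the constructor's own comment notes, it unrolls the stack walk by hand instead of using a frame iterator, since this code runs on every IC miss. A heavily simplified sketch of the slot arithmetic, assuming a conventional layout where the return address sits at a fixed offset from the frame pointer; the offset is a placeholder for the ExitFrameConstants and StandardFrameConstants used in the real code:

    #include <cstdint>

    using Address = uint8_t*;

    // The recovered slot holds the IC call's return address; the IC uses
    // it to locate, and later patch, its own call site.
    Address* CallerPCSlot(Address fp, int caller_pc_offset) {
      return reinterpret_cast<Address*>(fp + caller_pc_offset);
    }
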
if (depth == EXTRA_CALL_FRAME) { if (FLAG_enable_ool_constant_pool) { - constant_pool = Memory::Address_at( - fp + StandardFrameConstants::kConstantPoolOffset); + constant_pool = + Memory::Address_at(fp + StandardFrameConstants::kConstantPoolOffset); } const int kCallerPCOffset = StandardFrameConstants::kCallerPCOffset; pc_address = reinterpret_cast(fp + kCallerPCOffset); @@ -322,21 +327,20 @@ void IC::UpdateState(Handle receiver, Handle name) { } -MaybeHandle IC::TypeError(const char* type, - Handle object, +MaybeHandle IC::TypeError(const char* type, Handle object, Handle key) { HandleScope scope(isolate()); - Handle args[2] = { key, object }; - Handle error = isolate()->factory()->NewTypeError( - type, HandleVector(args, 2)); + Handle args[2] = {key, object}; + Handle error = + isolate()->factory()->NewTypeError(type, HandleVector(args, 2)); return isolate()->Throw(error); } MaybeHandle IC::ReferenceError(const char* type, Handle name) { HandleScope scope(isolate()); - Handle error = isolate()->factory()->NewReferenceError( - type, HandleVector(&name, 1)); + Handle error = + isolate()->factory()->NewReferenceError(type, HandleVector(&name, 1)); return isolate()->Throw(error); } @@ -381,8 +385,8 @@ static void ComputeTypeInfoCountDelta(IC::State old_state, IC::State new_state, void IC::OnTypeFeedbackChanged(Isolate* isolate, Address address, State old_state, State new_state, bool target_remains_ic_stub) { - Code* host = isolate-> - inner_pointer_to_code_cache()->GetCacheEntry(address)->code; + Code* host = + isolate->inner_pointer_to_code_cache()->GetCacheEntry(address)->code; if (host->kind() != Code::FUNCTION) return; if (FLAG_type_info_threshold > 0 && target_remains_ic_stub && @@ -397,8 +401,7 @@ void IC::OnTypeFeedbackChanged(Isolate* isolate, Address address, info->change_ic_generic_count(generic_delta); } if (host->type_feedback_info()->IsTypeFeedbackInfo()) { - TypeFeedbackInfo* info = - TypeFeedbackInfo::cast(host->type_feedback_info()); + TypeFeedbackInfo* info = TypeFeedbackInfo::cast(host->type_feedback_info()); info->change_own_type_change_checksum(); } host->set_profiler_ticks(0); @@ -466,7 +469,7 @@ void IC::InvalidateMaps(Code* stub) { void IC::Clear(Isolate* isolate, Address address, - ConstantPoolArray* constant_pool) { + ConstantPoolArray* constant_pool) { Code* target = GetTargetAtAddress(address, constant_pool); // Don't clear debug break inline cache as it will remove the break point. @@ -492,14 +495,13 @@ void IC::Clear(Isolate* isolate, Address address, // Clearing these is tricky and does not // make any performance difference. return; - default: UNREACHABLE(); + default: + UNREACHABLE(); } } -void KeyedLoadIC::Clear(Isolate* isolate, - Address address, - Code* target, +void KeyedLoadIC::Clear(Isolate* isolate, Address address, Code* target, ConstantPoolArray* constant_pool) { if (IsCleared(target)) return; // Make sure to also clear the map used in inline fast cases. If we @@ -509,17 +511,13 @@ void KeyedLoadIC::Clear(Isolate* isolate, } -void CallIC::Clear(Isolate* isolate, - Address address, - Code* target, +void CallIC::Clear(Isolate* isolate, Address address, Code* target, ConstantPoolArray* constant_pool) { // Currently, CallIC doesn't have state changes. 
} -void LoadIC::Clear(Isolate* isolate, - Address address, - Code* target, +void LoadIC::Clear(Isolate* isolate, Address address, Code* target, ConstantPoolArray* constant_pool) { if (IsCleared(target)) return; Code* code = PropertyICCompiler::FindPreMonomorphic(isolate, Code::LOAD_IC, @@ -528,9 +526,7 @@ void LoadIC::Clear(Isolate* isolate, } -void StoreIC::Clear(Isolate* isolate, - Address address, - Code* target, +void StoreIC::Clear(Isolate* isolate, Address address, Code* target, ConstantPoolArray* constant_pool) { if (IsCleared(target)) return; Code* code = PropertyICCompiler::FindPreMonomorphic(isolate, Code::STORE_IC, @@ -539,21 +535,17 @@ void StoreIC::Clear(Isolate* isolate, } -void KeyedStoreIC::Clear(Isolate* isolate, - Address address, - Code* target, +void KeyedStoreIC::Clear(Isolate* isolate, Address address, Code* target, ConstantPoolArray* constant_pool) { if (IsCleared(target)) return; - SetTargetAtAddress(address, - *pre_monomorphic_stub( - isolate, StoreIC::GetStrictMode(target->extra_ic_state())), + SetTargetAtAddress( + address, *pre_monomorphic_stub( + isolate, StoreIC::GetStrictMode(target->extra_ic_state())), constant_pool); } -void CompareIC::Clear(Isolate* isolate, - Address address, - Code* target, +void CompareIC::Clear(Isolate* isolate, Address address, Code* target, ConstantPoolArray* constant_pool) { DCHECK(CodeStub::GetMajorKey(target) == CodeStub::CompareIC); CompareIC::State handler_state; @@ -604,10 +596,8 @@ MaybeHandle LoadIC::Load(Handle object, Handle name) { } Handle result; ASSIGN_RETURN_ON_EXCEPTION( - isolate(), - result, - Runtime::GetElementOrCharAt(isolate(), object, index), - Object); + isolate(), result, + Runtime::GetElementOrCharAt(isolate(), object, index), Object); return result; } @@ -673,8 +663,7 @@ bool IC::UpdatePolymorphicIC(Handle name, Handle code) { // there was a prototoype chain failure. In that case, just overwrite the // handler. handler_to_overwrite = i; - } else if (handler_to_overwrite == -1 && - current_type->IsClass() && + } else if (handler_to_overwrite == -1 && current_type->IsClass() && type->IsClass() && IsTransitionOfMonomorphicTarget(*current_type->AsClass()->Map(), *type->AsClass()->Map())) { @@ -683,7 +672,7 @@ bool IC::UpdatePolymorphicIC(Handle name, Handle code) { } int number_of_valid_types = - number_of_types - deprecated_types - (handler_to_overwrite != -1); + number_of_types - deprecated_types - (handler_to_overwrite != -1); if (number_of_valid_types >= 4) return false; if (number_of_types == 0) return false; @@ -716,8 +705,8 @@ bool IC::UpdatePolymorphicIC(Handle name, Handle code) { Handle IC::CurrentTypeOf(Handle object, Isolate* isolate) { return object->IsJSGlobalObject() - ? HeapType::Constant(Handle::cast(object), isolate) - : HeapType::NowOf(object, isolate); + ? 
HeapType::Constant(Handle::cast(object), isolate) + : HeapType::NowOf(object, isolate); } @@ -748,12 +737,11 @@ typename T::TypeHandle IC::MapToType(Handle map, } -template -Type* IC::MapToType(Handle map, Zone* zone); +template Type* IC::MapToType(Handle map, Zone* zone); -template -Handle IC::MapToType(Handle map, Isolate* region); +template Handle IC::MapToType(Handle map, + Isolate* region); void IC::UpdateMonomorphicIC(Handle handler, Handle name) { @@ -779,12 +767,12 @@ bool IC::IsTransitionOfMonomorphicTarget(Map* source_map, Map* target_map) { if (source_map == NULL) return true; if (target_map == NULL) return false; ElementsKind target_elements_kind = target_map->elements_kind(); - bool more_general_transition = - IsMoreGeneralElementsKindTransition( - source_map->elements_kind(), target_elements_kind); - Map* transitioned_map = more_general_transition - ? source_map->LookupElementsTransitionMap(target_elements_kind) - : NULL; + bool more_general_transition = IsMoreGeneralElementsKindTransition( + source_map->elements_kind(), target_elements_kind); + Map* transitioned_map = + more_general_transition + ? source_map->LookupElementsTransitionMap(target_elements_kind) + : NULL; return transitioned_map == target_map; } @@ -804,7 +792,7 @@ void IC::PatchCache(Handle name, Handle code) { CopyICToMegamorphicCache(name); } set_target(*megamorphic_stub()); - // Fall through. + // Fall through. case MEGAMORPHIC: UpdateMegamorphicCache(*receiver_type(), *name, *code); break; @@ -1140,10 +1128,9 @@ Handle KeyedLoadIC::LoadElementStub(Handle receiver) { // monomorphic. If this optimistic assumption is not true, the IC will // miss again and it will become polymorphic and support both the // untransitioned and transitioned maps. - if (state() == MONOMORPHIC && - IsMoreGeneralElementsKindTransition( - target_receiver_maps.at(0)->elements_kind(), - receiver->GetElementsKind())) { + if (state() == MONOMORPHIC && IsMoreGeneralElementsKindTransition( + target_receiver_maps.at(0)->elements_kind(), + receiver->GetElementsKind())) { return PropertyICCompiler::ComputeKeyedLoadMonomorphic(receiver_map); } @@ -1174,9 +1161,7 @@ MaybeHandle KeyedLoadIC::Load(Handle object, if (MigrateDeprecated(object)) { Handle result; ASSIGN_RETURN_ON_EXCEPTION( - isolate(), - result, - Runtime::GetObjectProperty(isolate(), object, key), + isolate(), result, Runtime::GetObjectProperty(isolate(), object, key), Object); return result; } @@ -1189,11 +1174,9 @@ MaybeHandle KeyedLoadIC::Load(Handle object, key = TryConvertKey(key, isolate()); if (key->IsInternalizedString() || key->IsSymbol()) { - ASSIGN_RETURN_ON_EXCEPTION( - isolate(), - load_handle, - LoadIC::Load(object, Handle::cast(key)), - Object); + ASSIGN_RETURN_ON_EXCEPTION(isolate(), load_handle, + LoadIC::Load(object, Handle::cast(key)), + Object); } else if (FLAG_use_ic && !object->IsAccessCheckNeeded()) { if (object->IsString() && key->IsNumber()) { if (state() == UNINITIALIZED) stub = string_stub(); @@ -1222,11 +1205,9 @@ MaybeHandle KeyedLoadIC::Load(Handle object, if (!load_handle.is_null()) return load_handle; Handle result; - ASSIGN_RETURN_ON_EXCEPTION( - isolate(), - result, - Runtime::GetObjectProperty(isolate(), object, key), - Object); + ASSIGN_RETURN_ON_EXCEPTION(isolate(), result, + Runtime::GetObjectProperty(isolate(), object, key), + Object); return result; } @@ -1288,8 +1269,7 @@ bool StoreIC::LookupForWrite(LookupIterator* it, Handle value, } -MaybeHandle StoreIC::Store(Handle object, - Handle name, +MaybeHandle StoreIC::Store(Handle object, Handle 
name, Handle value, JSReceiver::StoreFromKeyed store_mode) { // TODO(verwaest): Let SetProperty do the migration, since storing a property @@ -1318,8 +1298,7 @@ MaybeHandle StoreIC::Store(Handle object, Handle result; ASSIGN_RETURN_ON_EXCEPTION( - isolate(), - result, + isolate(), result, JSObject::SetElement(receiver, index, value, NONE, strict_mode()), Object); return value; @@ -1355,8 +1334,7 @@ OStream& operator<<(OStream& os, const CallIC::State& s) { } -Handle CallIC::initialize_stub(Isolate* isolate, - int argc, +Handle CallIC::initialize_stub(Isolate* isolate, int argc, CallType call_type) { CallICStub stub(isolate, State(argc, call_type)); Handle code = stub.GetCode(); @@ -1625,7 +1603,7 @@ Handle KeyedStoreIC::StoreElementStub(Handle receiver, if (external_arrays != 0 && external_arrays != target_receiver_maps.length()) { TRACE_GENERIC_IC(isolate(), "KeyedIC", - "unsupported combination of external and normal arrays"); + "unsupported combination of external and normal arrays"); return generic_stub(); } } @@ -1636,8 +1614,7 @@ Handle KeyedStoreIC::StoreElementStub(Handle receiver, Handle KeyedStoreIC::ComputeTransitionedMap( - Handle map, - KeyedAccessStoreMode store_mode) { + Handle map, KeyedAccessStoreMode store_mode) { switch (store_mode) { case STORE_TRANSITION_SMI_TO_OBJECT: case STORE_TRANSITION_DOUBLE_TO_OBJECT: @@ -1657,7 +1634,7 @@ Handle KeyedStoreIC::ComputeTransitionedMap( return Map::TransitionElementsTo(map, FAST_HOLEY_DOUBLE_ELEMENTS); case STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS: DCHECK(map->has_external_array_elements()); - // Fall through + // Fall through case STORE_NO_TRANSITION_HANDLE_COW: case STANDARD_STORE: case STORE_AND_GROW_NO_TRANSITION: @@ -1668,11 +1645,10 @@ Handle KeyedStoreIC::ComputeTransitionedMap( } -bool IsOutOfBoundsAccess(Handle receiver, - int index) { +bool IsOutOfBoundsAccess(Handle receiver, int index) { if (receiver->IsJSArray()) { return JSArray::cast(*receiver)->length()->IsSmi() && - index >= Smi::cast(JSArray::cast(*receiver)->length())->value(); + index >= Smi::cast(JSArray::cast(*receiver)->length())->value(); } return index >= receiver->elements()->length(); } @@ -1687,7 +1663,7 @@ KeyedAccessStoreMode KeyedStoreIC::GetStoreMode(Handle receiver, // Don't consider this a growing store if the store would send the receiver to // dictionary mode. bool allow_growth = receiver->IsJSArray() && oob_access && - !receiver->WouldConvertToSlowElements(key); + !receiver->WouldConvertToSlowElements(key); if (allow_growth) { // Handle growing array in stub if necessary. 
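// Worked example: a store to index 3 of a fast JSArray whose length == 3 is
// an out-of-bounds append exactly one past the end. IsOutOfBoundsAccess()
// above returns true for it, and as long as the write would not push the
// backing store into dictionary mode (WouldConvertToSlowElements), the
// allow_growth path below selects one of the STORE_AND_GROW_* modes rather
// than STANDARD_STORE.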
if (receiver->HasFastSmiElements()) { @@ -1762,10 +1738,8 @@ MaybeHandle KeyedStoreIC::Store(Handle object, if (MigrateDeprecated(object)) { Handle result; ASSIGN_RETURN_ON_EXCEPTION( - isolate(), - result, - Runtime::SetObjectProperty( - isolate(), object, key, value, strict_mode()), + isolate(), result, Runtime::SetObjectProperty(isolate(), object, key, + value, strict_mode()), Object); return result; } @@ -1779,11 +1753,8 @@ MaybeHandle KeyedStoreIC::Store(Handle object, if (key->IsInternalizedString()) { ASSIGN_RETURN_ON_EXCEPTION( - isolate(), - store_handle, - StoreIC::Store(object, - Handle::cast(key), - value, + isolate(), store_handle, + StoreIC::Store(object, Handle::cast(key), value, JSReceiver::MAY_BE_STORE_FROM_KEYED), Object); TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "set generic"); @@ -1830,10 +1801,9 @@ MaybeHandle KeyedStoreIC::Store(Handle object, if (store_handle.is_null()) { ASSIGN_RETURN_ON_EXCEPTION( - isolate(), - store_handle, - Runtime::SetObjectProperty( - isolate(), object, key, value, strict_mode()), + isolate(), store_handle, + Runtime::SetObjectProperty(isolate(), object, key, value, + strict_mode()), Object); } @@ -1852,28 +1822,24 @@ MaybeHandle KeyedStoreIC::Store(Handle object, CallIC::State::State(ExtraICState extra_ic_state) : argc_(ArgcBits::decode(extra_ic_state)), - call_type_(CallTypeBits::decode(extra_ic_state)) { -} + call_type_(CallTypeBits::decode(extra_ic_state)) {} ExtraICState CallIC::State::GetExtraICState() const { ExtraICState extra_ic_state = - ArgcBits::encode(argc_) | - CallTypeBits::encode(call_type_); + ArgcBits::encode(argc_) | CallTypeBits::encode(call_type_); return extra_ic_state; } -bool CallIC::DoCustomHandler(Handle receiver, - Handle function, - Handle vector, - Handle slot, +bool CallIC::DoCustomHandler(Handle receiver, Handle function, + Handle vector, Handle slot, const State& state) { DCHECK(FLAG_use_ic && function->IsJSFunction()); // Are we the array function? - Handle array_function = Handle( - isolate()->native_context()->array_function()); + Handle array_function = + Handle(isolate()->native_context()->array_function()); if (array_function.is_identical_to(Handle::cast(function))) { // Alter the slot. IC::State old_state = FeedbackToState(vector, slot); @@ -1907,8 +1873,7 @@ void CallIC::PatchMegamorphic(Handle function, IC::State old_state = FeedbackToState(vector, slot); // We are going generic. 
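// Note: storing the megamorphic sentinel into the feedback slot (below) is
// what commits this call site to the generic case; SKIP_WRITE_BARRIER is
// used on the assumption that the sentinel is an immortal root object, so
// no incremental-marking barrier is needed for the write.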
- vector->set(slot->value(), - *TypeFeedbackInfo::MegamorphicSentinel(isolate()), + vector->set(slot->value(), *TypeFeedbackInfo::MegamorphicSentinel(isolate()), SKIP_WRITE_BARRIER); CallICStub stub(isolate(), state); @@ -1927,10 +1892,8 @@ void CallIC::PatchMegamorphic(Handle function, } -void CallIC::HandleMiss(Handle receiver, - Handle function, - Handle vector, - Handle slot) { +void CallIC::HandleMiss(Handle receiver, Handle function, + Handle vector, Handle slot) { State state(target()->extra_ic_state()); IC::State old_state = FeedbackToState(vector, slot); Handle name = isolate()->factory()->empty_string(); @@ -2065,9 +2028,7 @@ RUNTIME_FUNCTION(StoreIC_Miss) { ic.UpdateState(receiver, key); Handle result; ASSIGN_RETURN_FAILURE_ON_EXCEPTION( - isolate, - result, - ic.Store(receiver, key, args.at(2))); + isolate, result, ic.Store(receiver, key, args.at(2))); return *result; } @@ -2082,9 +2043,7 @@ RUNTIME_FUNCTION(StoreIC_MissFromStubFailure) { ic.UpdateState(receiver, key); Handle result; ASSIGN_RETURN_FAILURE_ON_EXCEPTION( - isolate, - result, - ic.Store(receiver, key, args.at(2))); + isolate, result, ic.Store(receiver, key, args.at(2))); return *result; } @@ -2124,9 +2083,7 @@ RUNTIME_FUNCTION(KeyedStoreIC_Miss) { ic.UpdateState(receiver, key); Handle result; ASSIGN_RETURN_FAILURE_ON_EXCEPTION( - isolate, - result, - ic.Store(receiver, key, args.at(2))); + isolate, result, ic.Store(receiver, key, args.at(2))); return *result; } @@ -2141,9 +2098,7 @@ RUNTIME_FUNCTION(KeyedStoreIC_MissFromStubFailure) { ic.UpdateState(receiver, key); Handle result; ASSIGN_RETURN_FAILURE_ON_EXCEPTION( - isolate, - result, - ic.Store(receiver, key, args.at(2))); + isolate, result, ic.Store(receiver, key, args.at(2))); return *result; } @@ -2159,8 +2114,7 @@ RUNTIME_FUNCTION(StoreIC_Slow) { Handle result; ASSIGN_RETURN_FAILURE_ON_EXCEPTION( isolate, result, - Runtime::SetObjectProperty( - isolate, object, key, value, strict_mode)); + Runtime::SetObjectProperty(isolate, object, key, value, strict_mode)); return *result; } @@ -2176,8 +2130,7 @@ RUNTIME_FUNCTION(KeyedStoreIC_Slow) { Handle result; ASSIGN_RETURN_FAILURE_ON_EXCEPTION( isolate, result, - Runtime::SetObjectProperty( - isolate, object, key, value, strict_mode)); + Runtime::SetObjectProperty(isolate, object, key, value, strict_mode)); return *result; } @@ -2199,20 +2152,19 @@ RUNTIME_FUNCTION(ElementsTransitionAndStoreIC_Miss) { Handle result; ASSIGN_RETURN_FAILURE_ON_EXCEPTION( isolate, result, - Runtime::SetObjectProperty( - isolate, object, key, value, strict_mode)); + Runtime::SetObjectProperty(isolate, object, key, value, strict_mode)); return *result; } BinaryOpIC::State::State(Isolate* isolate, ExtraICState extra_ic_state) : isolate_(isolate) { - op_ = static_cast( - FIRST_TOKEN + OpField::decode(extra_ic_state)); + op_ = + static_cast(FIRST_TOKEN + OpField::decode(extra_ic_state)); mode_ = OverwriteModeField::decode(extra_ic_state); - fixed_right_arg_ = Maybe( - HasFixedRightArgField::decode(extra_ic_state), - 1 << FixedRightArgValueField::decode(extra_ic_state)); + fixed_right_arg_ = + Maybe(HasFixedRightArgField::decode(extra_ic_state), + 1 << FixedRightArgValueField::decode(extra_ic_state)); left_kind_ = LeftKindField::decode(extra_ic_state); if (fixed_right_arg_.has_value) { right_kind_ = Smi::IsValid(fixed_right_arg_.value) ? 
SMI : INT32; @@ -2227,8 +2179,7 @@ BinaryOpIC::State::State(Isolate* isolate, ExtraICState extra_ic_state) ExtraICState BinaryOpIC::State::GetExtraICState() const { ExtraICState extra_ic_state = - OpField::encode(op_ - FIRST_TOKEN) | - OverwriteModeField::encode(mode_) | + OpField::encode(op_ - FIRST_TOKEN) | OverwriteModeField::encode(mode_) | LeftKindField::encode(left_kind_) | ResultKindField::encode(result_kind_) | HasFixedRightArgField::encode(fixed_right_arg_.has_value); @@ -2243,20 +2194,21 @@ ExtraICState BinaryOpIC::State::GetExtraICState() const { // static -void BinaryOpIC::State::GenerateAheadOfTime( - Isolate* isolate, void (*Generate)(Isolate*, const State&)) { - // TODO(olivf) We should investigate why adding stubs to the snapshot is so - // expensive at runtime. When solved we should be able to add most binops to - // the snapshot instead of hand-picking them. - // Generated list of commonly used stubs -#define GENERATE(op, left_kind, right_kind, result_kind, mode) \ - do { \ - State state(isolate, op, mode); \ - state.left_kind_ = left_kind; \ - state.fixed_right_arg_.has_value = false; \ - state.right_kind_ = right_kind; \ - state.result_kind_ = result_kind; \ - Generate(isolate, state); \ +void BinaryOpIC::State::GenerateAheadOfTime(Isolate* isolate, + void (*Generate)(Isolate*, + const State&)) { +// TODO(olivf) We should investigate why adding stubs to the snapshot is so +// expensive at runtime. When solved we should be able to add most binops to +// the snapshot instead of hand-picking them. +// Generated list of commonly used stubs +#define GENERATE(op, left_kind, right_kind, result_kind, mode) \ + do { \ + State state(isolate, op, mode); \ + state.left_kind_ = left_kind; \ + state.fixed_right_arg_.has_value = false; \ + state.right_kind_ = right_kind; \ + state.result_kind_ = result_kind; \ + Generate(isolate, state); \ } while (false) GENERATE(Token::ADD, INT32, INT32, INT32, NO_OVERWRITE); GENERATE(Token::ADD, INT32, INT32, INT32, OVERWRITE_LEFT); @@ -2496,8 +2448,7 @@ OStream& operator<<(OStream& os, const BinaryOpIC::State& s) { } -void BinaryOpIC::State::Update(Handle left, - Handle right, +void BinaryOpIC::State::Update(Handle left, Handle right, Handle result) { ExtraICState old_extra_ic_state = GetExtraICState(); @@ -2506,15 +2457,12 @@ void BinaryOpIC::State::Update(Handle left, int32_t fixed_right_arg_value = 0; bool has_fixed_right_arg = - op_ == Token::MOD && - right->ToInt32(&fixed_right_arg_value) && - fixed_right_arg_value > 0 && - IsPowerOf2(fixed_right_arg_value) && + op_ == Token::MOD && right->ToInt32(&fixed_right_arg_value) && + fixed_right_arg_value > 0 && IsPowerOf2(fixed_right_arg_value) && FixedRightArgValueField::is_valid(WhichPowerOf2(fixed_right_arg_value)) && (left_kind_ == SMI || left_kind_ == INT32) && (result_kind_ == NONE || !fixed_right_arg_.has_value); - fixed_right_arg_ = Maybe(has_fixed_right_arg, - fixed_right_arg_value); + fixed_right_arg_ = Maybe(has_fixed_right_arg, fixed_right_arg_value); result_kind_ = UpdateKind(result, result_kind_); @@ -2578,9 +2526,8 @@ BinaryOpIC::State::Kind BinaryOpIC::State::UpdateKind(Handle object, if (new_kind == INT32 && SmiValuesAre32Bits()) { new_kind = NUMBER; } - if (kind != NONE && - ((new_kind <= NUMBER && kind > NUMBER) || - (new_kind > NUMBER && kind <= NUMBER))) { + if (kind != NONE && ((new_kind <= NUMBER && kind > NUMBER) || + (new_kind > NUMBER && kind <= NUMBER))) { new_kind = GENERIC; } return Max(kind, new_kind); @@ -2590,12 +2537,18 @@ BinaryOpIC::State::Kind 
BinaryOpIC::State::UpdateKind(Handle object, // static const char* BinaryOpIC::State::KindToString(Kind kind) { switch (kind) { - case NONE: return "None"; - case SMI: return "Smi"; - case INT32: return "Int32"; - case NUMBER: return "Number"; - case STRING: return "String"; - case GENERIC: return "Generic"; + case NONE: + return "None"; + case SMI: + return "Smi"; + case INT32: + return "Int32"; + case NUMBER: + return "Number"; + case STRING: + return "String"; + case GENERIC: + return "Generic"; } UNREACHABLE(); return NULL; @@ -2605,12 +2558,18 @@ const char* BinaryOpIC::State::KindToString(Kind kind) { // static Type* BinaryOpIC::State::KindToType(Kind kind, Zone* zone) { switch (kind) { - case NONE: return Type::None(zone); - case SMI: return Type::SignedSmall(zone); - case INT32: return Type::Signed32(zone); - case NUMBER: return Type::Number(zone); - case STRING: return Type::String(zone); - case GENERIC: return Type::Any(zone); + case NONE: + return Type::None(zone); + case SMI: + return Type::SignedSmall(zone); + case INT32: + return Type::Signed32(zone); + case NUMBER: + return Type::Number(zone); + case STRING: + return Type::String(zone); + case GENERIC: + return Type::Any(zone); } UNREACHABLE(); return NULL; @@ -2618,8 +2577,7 @@ Type* BinaryOpIC::State::KindToType(Kind kind, Zone* zone) { MaybeHandle BinaryOpIC::Transition( - Handle allocation_site, - Handle left, + Handle allocation_site, Handle left, Handle right) { State state(isolate(), target()->extra_ic_state()); @@ -2629,9 +2587,7 @@ MaybeHandle BinaryOpIC::Transition( Handle function = handle(JSFunction::cast(builtin), isolate()); Handle result; ASSIGN_RETURN_ON_EXCEPTION( - isolate(), - result, - Execution::Call(isolate(), function, left, 1, &right), + isolate(), result, Execution::Call(isolate(), function, left, 1, &right), Object); // Execution::Call can execute arbitrary JavaScript, hence potentially @@ -2696,8 +2652,7 @@ RUNTIME_FUNCTION(BinaryOpIC_Miss) { BinaryOpIC ic(isolate); Handle result; ASSIGN_RETURN_FAILURE_ON_EXCEPTION( - isolate, - result, + isolate, result, ic.Transition(Handle::null(), left, right)); return *result; } @@ -2707,18 +2662,15 @@ RUNTIME_FUNCTION(BinaryOpIC_MissWithAllocationSite) { TimerEventScope timer(isolate); HandleScope scope(isolate); DCHECK_EQ(3, args.length()); - Handle allocation_site = args.at( - BinaryOpWithAllocationSiteStub::kAllocationSite); - Handle left = args.at( - BinaryOpWithAllocationSiteStub::kLeft); - Handle right = args.at( - BinaryOpWithAllocationSiteStub::kRight); + Handle allocation_site = + args.at(BinaryOpWithAllocationSiteStub::kAllocationSite); + Handle left = args.at(BinaryOpWithAllocationSiteStub::kLeft); + Handle right = + args.at(BinaryOpWithAllocationSiteStub::kRight); BinaryOpIC ic(isolate); Handle result; ASSIGN_RETURN_FAILURE_ON_EXCEPTION( - isolate, - result, - ic.Transition(allocation_site, left, right)); + isolate, result, ic.Transition(allocation_site, left, right)); return *result; } @@ -2739,36 +2691,51 @@ Handle CompareIC::GetUninitialized(Isolate* isolate, Token::Value op) { const char* CompareIC::GetStateName(State state) { switch (state) { - case UNINITIALIZED: return "UNINITIALIZED"; - case SMI: return "SMI"; - case NUMBER: return "NUMBER"; - case INTERNALIZED_STRING: return "INTERNALIZED_STRING"; - case STRING: return "STRING"; - case UNIQUE_NAME: return "UNIQUE_NAME"; - case OBJECT: return "OBJECT"; - case KNOWN_OBJECT: return "KNOWN_OBJECT"; - case GENERIC: return "GENERIC"; + case UNINITIALIZED: + return "UNINITIALIZED"; + case SMI: + 
return "SMI"; + case NUMBER: + return "NUMBER"; + case INTERNALIZED_STRING: + return "INTERNALIZED_STRING"; + case STRING: + return "STRING"; + case UNIQUE_NAME: + return "UNIQUE_NAME"; + case OBJECT: + return "OBJECT"; + case KNOWN_OBJECT: + return "KNOWN_OBJECT"; + case GENERIC: + return "GENERIC"; } UNREACHABLE(); return NULL; } -Type* CompareIC::StateToType( - Zone* zone, - CompareIC::State state, - Handle map) { +Type* CompareIC::StateToType(Zone* zone, CompareIC::State state, + Handle map) { switch (state) { - case CompareIC::UNINITIALIZED: return Type::None(zone); - case CompareIC::SMI: return Type::SignedSmall(zone); - case CompareIC::NUMBER: return Type::Number(zone); - case CompareIC::STRING: return Type::String(zone); - case CompareIC::INTERNALIZED_STRING: return Type::InternalizedString(zone); - case CompareIC::UNIQUE_NAME: return Type::UniqueName(zone); - case CompareIC::OBJECT: return Type::Receiver(zone); + case CompareIC::UNINITIALIZED: + return Type::None(zone); + case CompareIC::SMI: + return Type::SignedSmall(zone); + case CompareIC::NUMBER: + return Type::Number(zone); + case CompareIC::STRING: + return Type::String(zone); + case CompareIC::INTERNALIZED_STRING: + return Type::InternalizedString(zone); + case CompareIC::UNIQUE_NAME: + return Type::UniqueName(zone); + case CompareIC::OBJECT: + return Type::Receiver(zone); case CompareIC::KNOWN_OBJECT: return map.is_null() ? Type::Receiver(zone) : Type::Class(map, zone); - case CompareIC::GENERIC: return Type::Any(zone); + case CompareIC::GENERIC: + return Type::Any(zone); } UNREACHABLE(); return NULL; @@ -2829,12 +2796,10 @@ CompareIC::State CompareIC::NewInputState(State old_state, } -CompareIC::State CompareIC::TargetState(State old_state, - State old_left, +CompareIC::State CompareIC::TargetState(State old_state, State old_left, State old_right, bool has_inlined_smi_code, - Handle x, - Handle y) { + Handle x, Handle y) { switch (old_state) { case UNINITIALIZED: if (x->IsSmi() && y->IsSmi()) return SMI; @@ -2913,15 +2878,10 @@ Code* CompareIC::UpdateCaches(Handle x, Handle y) { if (FLAG_trace_ic) { PrintF("[CompareIC in "); JavaScriptFrame::PrintTop(isolate(), stdout, false, true); - PrintF(" ((%s+%s=%s)->(%s+%s=%s))#%s @ %p]\n", - GetStateName(previous_left), - GetStateName(previous_right), - GetStateName(previous_state), - GetStateName(new_left), - GetStateName(new_right), - GetStateName(state), - Token::Name(op_), - static_cast(*stub.GetCode())); + PrintF(" ((%s+%s=%s)->(%s+%s=%s))#%s @ %p]\n", GetStateName(previous_left), + GetStateName(previous_right), GetStateName(previous_state), + GetStateName(new_left), GetStateName(new_right), GetStateName(state), + Token::Name(op_), static_cast(*stub.GetCode())); } // Activate inlined smi code. 
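The state machine above is easier to see in isolation. Below is a minimal standalone sketch (its own simplified lattice and operand type, not V8's headers) of how TargetState climbs the CompareIC lattice on a miss, mirroring the UNINITIALIZED case shown above; the real lattice additionally has string, unique-name and known-map states, but misses move monotonically upward in the same way.

#include <cassert>

// Simplified lattice: UNINITIALIZED < SMI < NUMBER < GENERIC.
enum State { UNINITIALIZED, SMI, NUMBER, GENERIC };
struct Operand { bool is_smi; bool is_number; };  // is_smi implies is_number

State TargetState(State old_state, Operand x, Operand y) {
  switch (old_state) {
    case UNINITIALIZED:
      // First miss: pick the most specific state covering both inputs.
      if (x.is_smi && y.is_smi) return SMI;
      if (x.is_number && y.is_number) return NUMBER;
      return GENERIC;
    case SMI:
      // A smi-specialized stub saw a non-smi: widen along the lattice.
      return (x.is_number && y.is_number) ? NUMBER : GENERIC;
    default:
      // States only ever move up; GENERIC is the top element.
      return GENERIC;
  }
}

int main() {
  State s = UNINITIALIZED;
  s = TargetState(s, Operand{true, true}, Operand{true, true});   // smi, smi
  assert(s == SMI);
  s = TargetState(s, Operand{false, true}, Operand{true, true});  // double, smi
  assert(s == NUMBER);
  return 0;
}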
@@ -2943,14 +2903,12 @@ RUNTIME_FUNCTION(CompareIC_Miss) { } -void CompareNilIC::Clear(Address address, - Code* target, +void CompareNilIC::Clear(Address address, Code* target, ConstantPoolArray* constant_pool) { if (IsCleared(target)) return; ExtraICState state = target->extra_ic_state(); - CompareNilICStub stub(target->GetIsolate(), - state, + CompareNilICStub stub(target->GetIsolate(), state, HydrogenCodeStub::UNINITIALIZED); stub.ClearState(); @@ -2961,8 +2919,7 @@ void CompareNilIC::Clear(Address address, } -Handle CompareNilIC::DoCompareNilSlow(Isolate* isolate, - NilValue nil, +Handle CompareNilIC::DoCompareNilSlow(Isolate* isolate, NilValue nil, Handle object) { if (object->IsNull() || object->IsUndefined()) { return handle(Smi::FromInt(true), isolate); @@ -2988,8 +2945,8 @@ Handle CompareNilIC::CompareNil(Handle object) { Handle code; if (stub.IsMonomorphic()) { Handle monomorphic_map(already_monomorphic && FirstTargetMap() != NULL - ? FirstTargetMap() - : HeapObject::cast(*object)->map()); + ? FirstTargetMap() + : HeapObject::cast(*object)->map()); code = PropertyICCompiler::ComputeCompareNil(monomorphic_map, &stub); } else { code = stub.GetCode(); @@ -3075,17 +3032,168 @@ RUNTIME_FUNCTION(ToBooleanIC_Miss) { } +RUNTIME_FUNCTION(StoreCallbackProperty) { + Handle receiver = args.at(0); + Handle holder = args.at(1); + Handle callback = args.at(2); + Handle name = args.at(3); + Handle value = args.at(4); + HandleScope scope(isolate); + + DCHECK(callback->IsCompatibleReceiver(*receiver)); + + Address setter_address = v8::ToCData
(callback->setter()); + v8::AccessorNameSetterCallback fun = + FUNCTION_CAST(setter_address); + DCHECK(fun != NULL); + + LOG(isolate, ApiNamedPropertyAccess("store", *receiver, *name)); + PropertyCallbackArguments custom_args(isolate, callback->data(), *receiver, + *holder); + custom_args.Call(fun, v8::Utils::ToLocal(name), v8::Utils::ToLocal(value)); + RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate); + return *value; +} + + +/** + * Attempts to load a property with an interceptor (which must be present), + * but doesn't search the prototype chain. + * + * Returns |Heap::no_interceptor_result_sentinel()| if interceptor doesn't + * provide any value for the given name. + */ +RUNTIME_FUNCTION(LoadPropertyWithInterceptorOnly) { + DCHECK(args.length() == NamedLoadHandlerCompiler::kInterceptorArgsLength); + Handle name_handle = + args.at(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex); + Handle interceptor_info = args.at( + NamedLoadHandlerCompiler::kInterceptorArgsInfoIndex); + + // TODO(rossberg): Support symbols in the API. + if (name_handle->IsSymbol()) + return isolate->heap()->no_interceptor_result_sentinel(); + Handle name = Handle::cast(name_handle); + + Address getter_address = v8::ToCData
(interceptor_info->getter()); + v8::NamedPropertyGetterCallback getter = + FUNCTION_CAST(getter_address); + DCHECK(getter != NULL); + + Handle receiver = + args.at(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex); + Handle holder = + args.at(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex); + PropertyCallbackArguments callback_args(isolate, interceptor_info->data(), + *receiver, *holder); + { + // Use the interceptor getter. + HandleScope scope(isolate); + v8::Handle r = + callback_args.Call(getter, v8::Utils::ToLocal(name)); + RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate); + if (!r.IsEmpty()) { + Handle result = v8::Utils::OpenHandle(*r); + result->VerifyApiCallResultType(); + return *v8::Utils::OpenHandle(*r); + } + } + + return isolate->heap()->no_interceptor_result_sentinel(); +} + + +static Object* ThrowReferenceError(Isolate* isolate, Name* name) { + // If the load is non-contextual, just return the undefined result. + // Note that both keyed and non-keyed loads may end up here. + HandleScope scope(isolate); + LoadIC ic(IC::NO_EXTRA_FRAME, isolate); + if (ic.contextual_mode() != CONTEXTUAL) { + return isolate->heap()->undefined_value(); + } + + // Throw a reference error. + Handle name_handle(name); + Handle error = isolate->factory()->NewReferenceError( + "not_defined", HandleVector(&name_handle, 1)); + return isolate->Throw(*error); +} + + +/** + * Loads a property with an interceptor performing post interceptor + * lookup if interceptor failed. + */ +RUNTIME_FUNCTION(LoadPropertyWithInterceptor) { + HandleScope scope(isolate); + DCHECK(args.length() == NamedLoadHandlerCompiler::kInterceptorArgsLength); + Handle name = + args.at(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex); + Handle receiver = + args.at(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex); + Handle holder = + args.at(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex); + + Handle result; + LookupIterator it(receiver, name, holder); + ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, + JSObject::GetProperty(&it)); + + if (it.IsFound()) return *result; + + return ThrowReferenceError(isolate, Name::cast(args[0])); +} + + +RUNTIME_FUNCTION(StorePropertyWithInterceptor) { + HandleScope scope(isolate); + DCHECK(args.length() == 3); + StoreIC ic(IC::NO_EXTRA_FRAME, isolate); + Handle receiver = args.at(0); + Handle name = args.at(1); + Handle value = args.at(2); +#ifdef DEBUG + PrototypeIterator iter(isolate, receiver, + PrototypeIterator::START_AT_RECEIVER); + bool found = false; + while (!iter.IsAtEnd(PrototypeIterator::END_AT_NON_HIDDEN)) { + Handle current = PrototypeIterator::GetCurrent(iter); + if (current->IsJSObject() && + Handle::cast(current)->HasNamedInterceptor()) { + found = true; + break; + } + } + DCHECK(found); +#endif + Handle result; + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, result, + JSObject::SetProperty(receiver, name, value, ic.strict_mode())); + return *result; +} + + +RUNTIME_FUNCTION(LoadElementWithInterceptor) { + HandleScope scope(isolate); + Handle receiver = args.at(0); + DCHECK(args.smi_at(1) >= 0); + uint32_t index = args.smi_at(1); + Handle result; + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, result, + JSObject::GetElementWithInterceptor(receiver, receiver, index)); + return *result; +} + + static const Address IC_utilities[] = { #define ADDR(name) FUNCTION_ADDR(name), - IC_UTIL_LIST(ADDR) - NULL + IC_UTIL_LIST(ADDR) NULL #undef ADDR }; -Address IC::AddressFromUtilityId(IC::UtilityId id) { - return IC_utilities[id]; +Address 
IC::AddressFromUtilityId(IC::UtilityId id) { return IC_utilities[id]; } } - - -} } // namespace v8::internal +} // namespace v8::internal diff --git a/src/ic.h b/src/ic/ic.h similarity index 81% rename from src/ic.h rename to src/ic/ic.h index 8d41f803b..7d4d23e92 100644 --- a/src/ic.h +++ b/src/ic/ic.h @@ -44,9 +44,9 @@ class IC { public: // The ids for utility called from the generated code. enum UtilityId { - #define CONST_NAME(name) k##name, +#define CONST_NAME(name) k##name, IC_UTIL_LIST(CONST_NAME) - #undef CONST_NAME +#undef CONST_NAME kUtilityCount }; @@ -58,10 +58,7 @@ class IC { // The IC code is either invoked with no extra frames on the stack // or with a single extra frame for supporting calls. - enum FrameDepth { - NO_EXTRA_FRAME = 0, - EXTRA_CALL_FRAME = 1 - }; + enum FrameDepth { NO_EXTRA_FRAME = 0, EXTRA_CALL_FRAME = 1 }; // Construct the IC structure with the given number of extra // JavaScript frames on the stack. @@ -89,8 +86,7 @@ class IC { static void InvalidateMaps(Code* stub); // Clear the inline cache to initial state. - static void Clear(Isolate* isolate, - Address address, + static void Clear(Isolate* isolate, Address address, ConstantPoolArray* constant_pool); #ifdef DEBUG @@ -102,9 +98,7 @@ class IC { return target()->is_store_stub() || target()->is_keyed_store_stub(); } - bool IsCallStub() const { - return target()->is_call_stub(); - } + bool IsCallStub() const { return target()->is_call_stub(); } #endif template @@ -151,14 +145,7 @@ class IC { Code* GetOriginalCode() const; // Set the call-site target. - void set_target(Code* code) { -#ifdef VERIFY_HEAP - code->VerifyEmbeddedObjectsDependency(); -#endif - SetTargetAtAddress(address(), code, constant_pool()); - target_set_ = true; - } - + inline void set_target(Code* code); bool is_target_set() { return target_set_; } char TransitionMarkFromState(IC::State state); @@ -166,16 +153,14 @@ class IC { void TraceIC(const char* type, Handle name, State old_state, State new_state); - MaybeHandle TypeError(const char* type, - Handle object, + MaybeHandle TypeError(const char* type, Handle object, Handle key); MaybeHandle ReferenceError(const char* type, Handle name); // Access the target code for the given IC address. 
static inline Code* GetTargetAtAddress(Address address, ConstantPoolArray* constant_pool); - static inline void SetTargetAtAddress(Address address, - Code* target, + static inline void SetTargetAtAddress(Address address, Code* target, ConstantPoolArray* constant_pool); static void OnTypeFeedbackChanged(Isolate* isolate, Address address, State old_state, State new_state, @@ -215,9 +200,7 @@ class IC { Handle name); ExtraICState extra_ic_state() const { return extra_ic_state_; } - void set_extra_ic_state(ExtraICState state) { - extra_ic_state_ = state; - } + void set_extra_ic_state(ExtraICState state) { extra_ic_state_ = state; } Handle receiver_type() { return receiver_type_; } void update_receiver_type(Handle receiver) { @@ -244,14 +227,10 @@ class IC { } protected: - void UpdateTarget() { - target_ = handle(raw_target(), isolate_); - } + inline void UpdateTarget(); private: - Code* raw_target() const { - return GetTargetAtAddress(address(), constant_pool()); - } + inline Code* raw_target() const; inline ConstantPoolArray* constant_pool() const; inline ConstantPoolArray* raw_constant_pool() const; @@ -302,18 +281,19 @@ class IC { class IC_Utility { public: explicit IC_Utility(IC::UtilityId id) - : address_(IC::AddressFromUtilityId(id)), id_(id) {} + : address_(IC::AddressFromUtilityId(id)), id_(id) {} Address address() const { return address_; } IC::UtilityId id() const { return id_; } + private: Address address_; IC::UtilityId id_; }; -class CallIC: public IC { +class CallIC : public IC { public: enum CallType { METHOD, FUNCTION }; @@ -321,14 +301,12 @@ class CallIC: public IC { public: explicit State(ExtraICState extra_ic_state); - State(int argc, CallType call_type) - : argc_(argc), call_type_(call_type) { - } + State(int argc, CallType call_type) : argc_(argc), call_type_(call_type) {} ExtraICState GetExtraICState() const; - static void GenerateAheadOfTime( - Isolate*, void (*Generate)(Isolate*, const State&)); + static void GenerateAheadOfTime(Isolate*, + void (*Generate)(Isolate*, const State&)); int arg_count() const { return argc_; } CallType call_type() const { return call_type_; } @@ -336,35 +314,28 @@ class CallIC: public IC { bool CallAsMethod() const { return call_type_ == METHOD; } private: - class ArgcBits: public BitField {}; - class CallTypeBits: public BitField {}; + class ArgcBits : public BitField {}; + class CallTypeBits : public BitField {}; const int argc_; const CallType call_type_; }; - explicit CallIC(Isolate* isolate) - : IC(EXTRA_CALL_FRAME, isolate) { - } + explicit CallIC(Isolate* isolate) : IC(EXTRA_CALL_FRAME, isolate) {} void PatchMegamorphic(Handle function, Handle vector, Handle slot); - void HandleMiss(Handle receiver, - Handle function, - Handle vector, - Handle slot); + void HandleMiss(Handle receiver, Handle function, + Handle vector, Handle slot); // Returns true if a custom handler was installed. - bool DoCustomHandler(Handle receiver, - Handle function, - Handle vector, - Handle slot, + bool DoCustomHandler(Handle receiver, Handle function, + Handle vector, Handle slot, const State& state); // Code generator routines. 
- static Handle initialize_stub(Isolate* isolate, - int argc, + static Handle initialize_stub(Isolate* isolate, int argc, CallType call_type); static void Clear(Isolate* isolate, Address address, Code* target, @@ -379,13 +350,9 @@ class CallIC: public IC { OStream& operator<<(OStream& os, const CallIC::State& s); -class LoadIC: public IC { +class LoadIC : public IC { public: - enum ParameterIndices { - kReceiverIndex, - kNameIndex, - kParameterCount - }; + enum ParameterIndices { kReceiverIndex, kNameIndex, kParameterCount }; static const Register ReceiverRegister(); static const Register NameRegister(); @@ -396,8 +363,7 @@ class LoadIC: public IC { class State V8_FINAL BASE_EMBEDDED { public: - explicit State(ExtraICState extra_ic_state) - : state_(extra_ic_state) {} + explicit State(ExtraICState extra_ic_state) : state_(extra_ic_state) {} explicit State(ContextualMode mode) : state_(ContextualModeBits::encode(mode)) {} @@ -409,7 +375,7 @@ class LoadIC: public IC { } private: - class ContextualModeBits: public BitField {}; + class ContextualModeBits : public BitField {}; STATIC_ASSERT(static_cast(NOT_CONTEXTUAL) == 0); const ExtraICState state_; @@ -427,8 +393,7 @@ class LoadIC: public IC { return GetContextualMode(extra_ic_state()); } - explicit LoadIC(FrameDepth depth, Isolate* isolate) - : IC(depth, isolate) { + explicit LoadIC(FrameDepth depth, Isolate* isolate) : IC(depth, isolate) { DCHECK(IsLoadStub()); } @@ -460,13 +425,7 @@ class LoadIC: public IC { Handle name); protected: - void set_target(Code* code) { - // The contextual mode must be preserved across IC patching. - DCHECK(GetContextualMode(code->extra_ic_state()) == - GetContextualMode(target()->extra_ic_state())); - - IC::set_target(code); - } + inline void set_target(Code* code); Handle slow_stub() const { if (kind() == Code::LOAD_IC) { @@ -494,16 +453,14 @@ class LoadIC: public IC { Handle SimpleFieldLoad(FieldIndex index); - static void Clear(Isolate* isolate, - Address address, - Code* target, + static void Clear(Isolate* isolate, Address address, Code* target, ConstantPoolArray* constant_pool); friend class IC; }; -class KeyedLoadIC: public LoadIC { +class KeyedLoadIC : public LoadIC { public: explicit KeyedLoadIC(FrameDepth depth, Isolate* isolate) : LoadIC(depth, isolate) { @@ -553,18 +510,16 @@ class KeyedLoadIC: public LoadIC { return isolate()->builtins()->KeyedLoadIC_String(); } - static void Clear(Isolate* isolate, - Address address, - Code* target, + static void Clear(Isolate* isolate, Address address, Code* target, ConstantPoolArray* constant_pool); friend class IC; }; -class StoreIC: public IC { +class StoreIC : public IC { public: - class StrictModeState: public BitField {}; + class StrictModeState : public BitField {}; static ExtraICState ComputeExtraICState(StrictMode flag) { return StrictModeState::encode(flag); } @@ -574,8 +529,7 @@ class StoreIC: public IC { // For convenience, a statically declared encoding of strict mode extra // IC state. 
- static const ExtraICState kStrictModeState = - 1 << StrictModeState::kShift; + static const ExtraICState kStrictModeState = 1 << StrictModeState::kShift; enum ParameterIndices { kReceiverIndex, @@ -587,8 +541,7 @@ class StoreIC: public IC { static const Register NameRegister(); static const Register ValueRegister(); - StoreIC(FrameDepth depth, Isolate* isolate) - : IC(depth, isolate) { + StoreIC(FrameDepth depth, Isolate* isolate) : IC(depth, isolate) { DCHECK(IsStoreStub()); } @@ -608,13 +561,10 @@ class StoreIC: public IC { static void GenerateRuntimeSetProperty(MacroAssembler* masm, StrictMode strict_mode); - static Handle initialize_stub(Isolate* isolate, - StrictMode strict_mode); + static Handle initialize_stub(Isolate* isolate, StrictMode strict_mode); MUST_USE_RESULT MaybeHandle Store( - Handle object, - Handle name, - Handle value, + Handle object, Handle name, Handle value, JSReceiver::StoreFromKeyed store_mode = JSReceiver::CERTAINLY_NOT_STORE_FROM_KEYED); @@ -647,45 +597,32 @@ class StoreIC: public IC { CacheHolderFlag cache_holder); private: - void set_target(Code* code) { - // Strict mode must be preserved across IC patching. - DCHECK(GetStrictMode(code->extra_ic_state()) == - GetStrictMode(target()->extra_ic_state())); - IC::set_target(code); - } + inline void set_target(Code* code); - static void Clear(Isolate* isolate, - Address address, - Code* target, + static void Clear(Isolate* isolate, Address address, Code* target, ConstantPoolArray* constant_pool); friend class IC; }; -enum KeyedStoreCheckMap { - kDontCheckMap, - kCheckMap -}; +enum KeyedStoreCheckMap { kDontCheckMap, kCheckMap }; -enum KeyedStoreIncrementLength { - kDontIncrementLength, - kIncrementLength -}; +enum KeyedStoreIncrementLength { kDontIncrementLength, kIncrementLength }; -class KeyedStoreIC: public StoreIC { +class KeyedStoreIC : public StoreIC { public: // ExtraICState bits (building on IC) // ExtraICState bits - class ExtraICStateKeyedAccessStoreMode: - public BitField {}; // NOLINT + class ExtraICStateKeyedAccessStoreMode + : public BitField {}; // NOLINT static ExtraICState ComputeExtraICState(StrictMode flag, KeyedAccessStoreMode mode) { return StrictModeState::encode(flag) | - ExtraICStateKeyedAccessStoreMode::encode(mode); + ExtraICStateKeyedAccessStoreMode::encode(mode); } static KeyedAccessStoreMode GetKeyedAccessStoreMode( @@ -698,8 +635,7 @@ class KeyedStoreIC: public StoreIC { // stub implementations requires it to be initialized. static const Register MapRegister(); - KeyedStoreIC(FrameDepth depth, Isolate* isolate) - : StoreIC(depth, isolate) { + KeyedStoreIC(FrameDepth depth, Isolate* isolate) : StoreIC(depth, isolate) { DCHECK(target()->is_keyed_store_stub()); } @@ -746,11 +682,7 @@ class KeyedStoreIC: public StoreIC { KeyedAccessStoreMode store_mode); private: - void set_target(Code* code) { - // Strict mode must be preserved across IC patching. - DCHECK(GetStrictMode(code->extra_ic_state()) == strict_mode()); - IC::set_target(code); - } + inline void set_target(Code* code); // Stub accessors. 
virtual Handle generic_stub() const { @@ -765,14 +697,11 @@ class KeyedStoreIC: public StoreIC { return isolate()->builtins()->KeyedStoreIC_SloppyArguments(); } - static void Clear(Isolate* isolate, - Address address, - Code* target, + static void Clear(Isolate* isolate, Address address, Code* target, ConstantPoolArray* constant_pool); KeyedAccessStoreMode GetStoreMode(Handle receiver, - Handle key, - Handle value); + Handle key, Handle value); Handle ComputeTransitionedMap(Handle map, KeyedAccessStoreMode store_mode); @@ -785,15 +714,19 @@ class KeyedStoreIC: public StoreIC { enum OverwriteMode { NO_OVERWRITE, OVERWRITE_LEFT, OVERWRITE_RIGHT }; // Type Recording BinaryOpIC, that records the types of the inputs and outputs. -class BinaryOpIC: public IC { +class BinaryOpIC : public IC { public: class State V8_FINAL BASE_EMBEDDED { public: State(Isolate* isolate, ExtraICState extra_ic_state); State(Isolate* isolate, Token::Value op, OverwriteMode mode) - : op_(op), mode_(mode), left_kind_(NONE), right_kind_(NONE), - result_kind_(NONE), isolate_(isolate) { + : op_(op), + mode_(mode), + left_kind_(NONE), + right_kind_(NONE), + result_kind_(NONE), + isolate_(isolate) { DCHECK_LE(FIRST_TOKEN, op); DCHECK_LE(op, LAST_TOKEN); } @@ -813,15 +746,15 @@ class BinaryOpIC: public IC { ExtraICState GetExtraICState() const; - static void GenerateAheadOfTime( - Isolate*, void (*Generate)(Isolate*, const State&)); + static void GenerateAheadOfTime(Isolate*, + void (*Generate)(Isolate*, const State&)); bool CanReuseDoubleBox() const { return (result_kind_ > SMI && result_kind_ <= NUMBER) && - ((mode_ == OVERWRITE_LEFT && - left_kind_ > SMI && left_kind_ <= NUMBER) || - (mode_ == OVERWRITE_RIGHT && - right_kind_ > SMI && right_kind_ <= NUMBER)); + ((mode_ == OVERWRITE_LEFT && left_kind_ > SMI && + left_kind_ <= NUMBER) || + (mode_ == OVERWRITE_RIGHT && right_kind_ > SMI && + right_kind_ <= NUMBER)); } // Returns true if the IC _could_ create allocation mementos. @@ -836,7 +769,7 @@ class BinaryOpIC: public IC { // Returns true if the IC _should_ create allocation mementos. bool ShouldCreateAllocationMementos() const { return FLAG_allocation_site_pretenuring && - CouldCreateAllocationMementos(); + CouldCreateAllocationMementos(); } bool HasSideEffects() const { @@ -856,16 +789,13 @@ class BinaryOpIC: public IC { OverwriteMode mode() const { return mode_; } Maybe fixed_right_arg() const { return fixed_right_arg_; } - Type* GetLeftType(Zone* zone) const { - return KindToType(left_kind_, zone); - } + Type* GetLeftType(Zone* zone) const { return KindToType(left_kind_, zone); } Type* GetRightType(Zone* zone) const { return KindToType(right_kind_, zone); } Type* GetResultType(Zone* zone) const; - void Update(Handle left, - Handle right, + void Update(Handle left, Handle right, Handle result); Isolate* isolate() const { return isolate_; } @@ -885,15 +815,15 @@ class BinaryOpIC: public IC { // We truncate the last bit of the token. STATIC_ASSERT(LAST_TOKEN - FIRST_TOKEN < (1 << 4)); - class OpField: public BitField {}; - class OverwriteModeField: public BitField {}; - class ResultKindField: public BitField {}; - class LeftKindField: public BitField {}; + class OpField : public BitField {}; + class OverwriteModeField : public BitField {}; + class ResultKindField : public BitField {}; + class LeftKindField : public BitField {}; // When fixed right arg is set, we don't need to store the right kind. // Thus the two fields can overlap. 
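// Worked example of the overlap: for Token::MOD with a fixed right operand
// of 8, HasFixedRightArgField encodes true and FixedRightArgValueField
// stores WhichPowerOf2(8) == 3; decoding recovers the operand as 1 << 3 and
// derives right_kind_ (SMI or INT32) from it instead of reading stored bits.
// Only without a fixed right arg do the same bits carry RightKindField,
// which is why the two fields can safely share their bit range.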
- class HasFixedRightArgField: public BitField {}; - class FixedRightArgValueField: public BitField {}; - class RightKindField: public BitField {}; + class HasFixedRightArgField : public BitField {}; + class FixedRightArgValueField : public BitField {}; + class RightKindField : public BitField {}; Token::Value op_; OverwriteMode mode_; @@ -904,7 +834,7 @@ class BinaryOpIC: public IC { Isolate* isolate_; }; - explicit BinaryOpIC(Isolate* isolate) : IC(EXTRA_CALL_FRAME, isolate) { } + explicit BinaryOpIC(Isolate* isolate) : IC(EXTRA_CALL_FRAME, isolate) {} static Builtins::JavaScript TokenToJSBuiltin(Token::Value op); @@ -917,7 +847,7 @@ class BinaryOpIC: public IC { OStream& operator<<(OStream& os, const BinaryOpIC::State& s); -class CompareIC: public IC { +class CompareIC : public IC { public: // The type/state lattice is defined by the following inequations: // UNINITIALIZED < ... @@ -931,16 +861,15 @@ class CompareIC: public IC { NUMBER, STRING, INTERNALIZED_STRING, - UNIQUE_NAME, // Symbol or InternalizedString - OBJECT, // JSObject - KNOWN_OBJECT, // JSObject with specific map (faster check) + UNIQUE_NAME, // Symbol or InternalizedString + OBJECT, // JSObject + KNOWN_OBJECT, // JSObject with specific map (faster check) GENERIC }; static State NewInputState(State old_state, Handle value); - static Type* StateToType(Zone* zone, - State state, + static Type* StateToType(Zone* zone, State state, Handle map = Handle()); static void StubInfoToType(uint32_t stub_key, Type** left_type, @@ -948,7 +877,7 @@ class CompareIC: public IC { Handle map, Zone* zone); CompareIC(Isolate* isolate, Token::Value op) - : IC(EXTRA_CALL_FRAME, isolate), op_(op) { } + : IC(EXTRA_CALL_FRAME, isolate), op_(op) {} // Update the inline cache for the given operands. Code* UpdateCaches(Handle x, Handle y); @@ -965,11 +894,8 @@ class CompareIC: public IC { private: static bool HasInlinedSmiCode(Address address); - State TargetState(State old_state, - State old_left, - State old_right, - bool has_inlined_smi_code, - Handle x, + State TargetState(State old_state, State old_left, State old_right, + bool has_inlined_smi_code, Handle x, Handle y); bool strict() const { return op_ == Token::EQ_STRICT; } @@ -977,9 +903,7 @@ class CompareIC: public IC { static Code* GetRawUninitialized(Isolate* isolate, Token::Value op); - static void Clear(Isolate* isolate, - Address address, - Code* target, + static void Clear(Isolate* isolate, Address address, Code* target, ConstantPoolArray* constant_pool); Token::Value op_; @@ -988,7 +912,7 @@ class CompareIC: public IC { }; -class CompareNilIC: public IC { +class CompareNilIC : public IC { public: explicit CompareNilIC(Isolate* isolate) : IC(EXTRA_CALL_FRAME, isolate) {} @@ -996,8 +920,7 @@ class CompareNilIC: public IC { static Handle GetUninitialized(); - static void Clear(Address address, - Code* target, + static void Clear(Address address, Code* target, ConstantPoolArray* constant_pool); static Handle DoCompareNilSlow(Isolate* isolate, NilValue nil, @@ -1005,9 +928,9 @@ class CompareNilIC: public IC { }; -class ToBooleanIC: public IC { +class ToBooleanIC : public IC { public: - explicit ToBooleanIC(Isolate* isolate) : IC(EXTRA_CALL_FRAME, isolate) { } + explicit ToBooleanIC(Isolate* isolate) : IC(EXTRA_CALL_FRAME, isolate) {} Handle ToBoolean(Handle object); }; @@ -1027,7 +950,15 @@ DECLARE_RUNTIME_FUNCTION(BinaryOpIC_MissWithAllocationSite); DECLARE_RUNTIME_FUNCTION(CompareNilIC_Miss); DECLARE_RUNTIME_FUNCTION(ToBooleanIC_Miss); +// Support functions for callbacks handlers. 
+DECLARE_RUNTIME_FUNCTION(StoreCallbackProperty); -} } // namespace v8::internal +// Support functions for interceptor handlers. +DECLARE_RUNTIME_FUNCTION(LoadPropertyWithInterceptorOnly); +DECLARE_RUNTIME_FUNCTION(LoadPropertyWithInterceptor); +DECLARE_RUNTIME_FUNCTION(LoadElementWithInterceptor); +DECLARE_RUNTIME_FUNCTION(StorePropertyWithInterceptor); +} +} // namespace v8::internal #endif // V8_IC_H_ diff --git a/src/ic/stub-cache.cc b/src/ic/stub-cache.cc new file mode 100644 index 000000000..e63fbfe9e --- /dev/null +++ b/src/ic/stub-cache.cc @@ -0,0 +1,146 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/v8.h" + +#include "src/ic/stub-cache.h" +#include "src/type-info.h" + +namespace v8 { +namespace internal { + + +StubCache::StubCache(Isolate* isolate) : isolate_(isolate) {} + + +void StubCache::Initialize() { + DCHECK(IsPowerOf2(kPrimaryTableSize)); + DCHECK(IsPowerOf2(kSecondaryTableSize)); + Clear(); +} + + +static Code::Flags CommonStubCacheChecks(Name* name, Map* map, + Code::Flags flags) { + flags = Code::RemoveTypeAndHolderFromFlags(flags); + + // Validate that the name does not move on scavenge, and that we + // can use identity checks instead of structural equality checks. + DCHECK(!name->GetHeap()->InNewSpace(name)); + DCHECK(name->IsUniqueName()); + + // The state bits are not important to the hash function because the stub + // cache only contains handlers. Make sure that the bits are the least + // significant so they will be the ones masked out. + DCHECK_EQ(Code::HANDLER, Code::ExtractKindFromFlags(flags)); + STATIC_ASSERT((Code::ICStateField::kMask & 1) == 1); + + // Make sure that the code type and cache holder are not included in the hash. + DCHECK(Code::ExtractTypeFromFlags(flags) == 0); + DCHECK(Code::ExtractCacheHolderFromFlags(flags) == 0); + + return flags; +} + + +Code* StubCache::Set(Name* name, Map* map, Code* code) { + Code::Flags flags = CommonStubCacheChecks(name, map, code->flags()); + + // Compute the primary entry. + int primary_offset = PrimaryOffset(name, flags, map); + Entry* primary = entry(primary_, primary_offset); + Code* old_code = primary->value; + + // If the primary entry has useful data in it, we retire it to the + // secondary cache before overwriting it. + if (old_code != isolate_->builtins()->builtin(Builtins::kIllegal)) { + Map* old_map = primary->map; + Code::Flags old_flags = + Code::RemoveTypeAndHolderFromFlags(old_code->flags()); + int seed = PrimaryOffset(primary->key, old_flags, old_map); + int secondary_offset = SecondaryOffset(primary->key, old_flags, seed); + Entry* secondary = entry(secondary_, secondary_offset); + *secondary = *primary; + } + + // Update primary cache. 
+ primary->key = name; + primary->value = code; + primary->map = map; + isolate()->counters()->megamorphic_stub_cache_updates()->Increment(); + return code; +} + + +Code* StubCache::Get(Name* name, Map* map, Code::Flags flags) { + flags = CommonStubCacheChecks(name, map, flags); + int primary_offset = PrimaryOffset(name, flags, map); + Entry* primary = entry(primary_, primary_offset); + if (primary->key == name && primary->map == map) { + return primary->value; + } + int secondary_offset = SecondaryOffset(name, flags, primary_offset); + Entry* secondary = entry(secondary_, secondary_offset); + if (secondary->key == name && secondary->map == map) { + return secondary->value; + } + return NULL; +} + + +void StubCache::Clear() { + Code* empty = isolate_->builtins()->builtin(Builtins::kIllegal); + for (int i = 0; i < kPrimaryTableSize; i++) { + primary_[i].key = isolate()->heap()->empty_string(); + primary_[i].map = NULL; + primary_[i].value = empty; + } + for (int j = 0; j < kSecondaryTableSize; j++) { + secondary_[j].key = isolate()->heap()->empty_string(); + secondary_[j].map = NULL; + secondary_[j].value = empty; + } +} + + +void StubCache::CollectMatchingMaps(SmallMapList* types, Handle name, + Code::Flags flags, + Handle native_context, + Zone* zone) { + for (int i = 0; i < kPrimaryTableSize; i++) { + if (primary_[i].key == *name) { + Map* map = primary_[i].map; + // Map can be NULL, if the stub is constant function call + // with a primitive receiver. + if (map == NULL) continue; + + int offset = PrimaryOffset(*name, flags, map); + if (entry(primary_, offset) == &primary_[i] && + !TypeFeedbackOracle::CanRetainOtherContext(map, *native_context)) { + types->AddMapIfMissing(Handle(map), zone); + } + } + } + + for (int i = 0; i < kSecondaryTableSize; i++) { + if (secondary_[i].key == *name) { + Map* map = secondary_[i].map; + // Map can be NULL, if the stub is constant function call + // with a primitive receiver. + if (map == NULL) continue; + + // Lookup in primary table and skip duplicates. + int primary_offset = PrimaryOffset(*name, flags, map); + + // Lookup in secondary table and add matches. + int offset = SecondaryOffset(*name, flags, primary_offset); + if (entry(secondary_, offset) == &secondary_[i] && + !TypeFeedbackOracle::CanRetainOtherContext(map, *native_context)) { + types->AddMapIfMissing(Handle(map), zone); + } + } + } +} +} +} // namespace v8::internal diff --git a/src/ic/stub-cache.h b/src/ic/stub-cache.h new file mode 100644 index 000000000..eb5343f85 --- /dev/null +++ b/src/ic/stub-cache.h @@ -0,0 +1,168 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_STUB_CACHE_H_ +#define V8_STUB_CACHE_H_ + +#include "src/macro-assembler.h" + +namespace v8 { +namespace internal { + + +// The stub cache is used for megamorphic property accesses. +// It maps (map, name, type) to property access handlers. The cache does not +// need explicit invalidation when a prototype chain is modified, since the +// handlers verify the chain. + + +class SCTableReference { + public: + Address address() const { return address_; } + + private: + explicit SCTableReference(Address address) : address_(address) {} + + Address address_; + + friend class StubCache; +}; + + +class StubCache { + public: + struct Entry { + Name* key; + Code* value; + Map* map; + }; + + void Initialize(); + // Access cache for entry hash(name, map). 
+ Code* Set(Name* name, Map* map, Code* code); + Code* Get(Name* name, Map* map, Code::Flags flags); + // Clear the lookup table (@ mark compact collection). + void Clear(); + // Collect all maps that match the name and flags. + void CollectMatchingMaps(SmallMapList* types, Handle name, + Code::Flags flags, Handle native_context, + Zone* zone); + // Generate code for probing the stub cache table. + // Arguments extra, extra2 and extra3 may be used to pass additional scratch + // registers. Set to no_reg if not needed. + void GenerateProbe(MacroAssembler* masm, Code::Flags flags, Register receiver, + Register name, Register scratch, Register extra, + Register extra2 = no_reg, Register extra3 = no_reg); + + enum Table { kPrimary, kSecondary }; + + SCTableReference key_reference(StubCache::Table table) { + return SCTableReference( + reinterpret_cast
<Address>(&first_entry(table)->key)); + } + + SCTableReference map_reference(StubCache::Table table) { + return SCTableReference( + reinterpret_cast<Address>
(&first_entry(table)->map)); + } + + SCTableReference value_reference(StubCache::Table table) { + return SCTableReference( + reinterpret_cast<Address>
(&first_entry(table)->value)); + } + + StubCache::Entry* first_entry(StubCache::Table table) { + switch (table) { + case StubCache::kPrimary: + return StubCache::primary_; + case StubCache::kSecondary: + return StubCache::secondary_; + } + UNREACHABLE(); + return NULL; + } + + Isolate* isolate() { return isolate_; } + + // Setting the entry size such that the index is shifted by Name::kHashShift + // is convenient; shifting down the length field (to extract the hash code) + // automatically discards the hash bit field. + static const int kCacheIndexShift = Name::kHashShift; + + private: + explicit StubCache(Isolate* isolate); + + // The stub cache has a primary and secondary level. The two levels have + // different hashing algorithms in order to avoid simultaneous collisions + // in both caches. Unlike a probing strategy (quadratic or otherwise) the + // update strategy on updates is fairly clear and simple: Any existing entry + // in the primary cache is moved to the secondary cache, and secondary cache + // entries are overwritten. + + // Hash algorithm for the primary table. This algorithm is replicated in + // assembler for every architecture. Returns an index into the table that + // is scaled by 1 << kCacheIndexShift. + static int PrimaryOffset(Name* name, Code::Flags flags, Map* map) { + STATIC_ASSERT(kCacheIndexShift == Name::kHashShift); + // Compute the hash of the name (use entire hash field). + DCHECK(name->HasHashCode()); + uint32_t field = name->hash_field(); + // Using only the low bits in 64-bit mode is unlikely to increase the + // risk of collision even if the heap is spread over an area larger than + // 4Gb (and not at all if it isn't). + uint32_t map_low32bits = + static_cast(reinterpret_cast(map)); + // We always set the in_loop bit to zero when generating the lookup code + // so do it here too so the hash codes match. + uint32_t iflags = + (static_cast(flags) & ~Code::kFlagsNotUsedInLookup); + // Base the offset on a simple combination of name, flags, and map. + uint32_t key = (map_low32bits + field) ^ iflags; + return key & ((kPrimaryTableSize - 1) << kCacheIndexShift); + } + + // Hash algorithm for the secondary table. This algorithm is replicated in + // assembler for every architecture. Returns an index into the table that + // is scaled by 1 << kCacheIndexShift. + static int SecondaryOffset(Name* name, Code::Flags flags, int seed) { + // Use the seed from the primary cache in the secondary cache. + uint32_t name_low32bits = + static_cast(reinterpret_cast(name)); + // We always set the in_loop bit to zero when generating the lookup code + // so do it here too so the hash codes match. + uint32_t iflags = + (static_cast(flags) & ~Code::kFlagsNotUsedInLookup); + uint32_t key = (seed - name_low32bits) + iflags; + return key & ((kSecondaryTableSize - 1) << kCacheIndexShift); + } + + // Compute the entry for a given offset in exactly the same way as + // we do in generated code. We generate an hash code that already + // ends in Name::kHashShift 0s. Then we multiply it so it is a multiple + // of sizeof(Entry). This makes it easier to avoid making mistakes + // in the hashed offset computations. + static Entry* entry(Entry* table, int offset) { + const int multiplier = sizeof(*table) >> Name::kHashShift; + return reinterpret_cast(reinterpret_cast
(table) + + offset * multiplier); + } + + static const int kPrimaryTableBits = 11; + static const int kPrimaryTableSize = (1 << kPrimaryTableBits); + static const int kSecondaryTableBits = 9; + static const int kSecondaryTableSize = (1 << kSecondaryTableBits); + + Entry primary_[kPrimaryTableSize]; + Entry secondary_[kSecondaryTableSize]; + Isolate* isolate_; + + friend class Isolate; + friend class SCTableReference; + + DISALLOW_COPY_AND_ASSIGN(StubCache); +}; +} +} // namespace v8::internal + +#endif // V8_STUB_CACHE_H_ diff --git a/src/x64/stub-cache-x64.cc b/src/ic/x64/ic-compiler-x64.cc similarity index 79% rename from src/x64/stub-cache-x64.cc rename to src/ic/x64/ic-compiler-x64.cc index 3619a9b09..5e6f22546 100644 --- a/src/x64/stub-cache-x64.cc +++ b/src/ic/x64/ic-compiler-x64.cc @@ -1,4 +1,4 @@ -// Copyright 2012 the V8 project authors. All rights reserved. +// Copyright 2014 the V8 project authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. @@ -6,10 +6,7 @@ #if V8_TARGET_ARCH_X64 -#include "src/arguments.h" -#include "src/codegen.h" -#include "src/ic-inl.h" -#include "src/stub-cache.h" +#include "src/ic/ic-compiler.h" namespace v8 { namespace internal { @@ -17,75 +14,6 @@ namespace internal { #define __ ACCESS_MASM(masm) -static void ProbeTable(Isolate* isolate, - MacroAssembler* masm, - Code::Flags flags, - StubCache::Table table, - Register receiver, - Register name, - // The offset is scaled by 4, based on - // kCacheIndexShift, which is two bits - Register offset) { - // We need to scale up the pointer by 2 when the offset is scaled by less - // than the pointer size. - DCHECK(kPointerSize == kInt64Size - ? kPointerSizeLog2 == StubCache::kCacheIndexShift + 1 - : kPointerSizeLog2 == StubCache::kCacheIndexShift); - ScaleFactor scale_factor = kPointerSize == kInt64Size ? times_2 : times_1; - - DCHECK_EQ(3 * kPointerSize, sizeof(StubCache::Entry)); - // The offset register holds the entry offset times four (due to masking - // and shifting optimizations). - ExternalReference key_offset(isolate->stub_cache()->key_reference(table)); - ExternalReference value_offset(isolate->stub_cache()->value_reference(table)); - Label miss; - - // Multiply by 3 because there are 3 fields per entry (name, code, map). - __ leap(offset, Operand(offset, offset, times_2, 0)); - - __ LoadAddress(kScratchRegister, key_offset); - - // Check that the key in the entry matches the name. - // Multiply entry offset by 16 to get the entry address. Since the - // offset register already holds the entry offset times four, multiply - // by a further four. - __ cmpl(name, Operand(kScratchRegister, offset, scale_factor, 0)); - __ j(not_equal, &miss); - - // Get the map entry from the cache. - // Use key_offset + kPointerSize * 2, rather than loading map_offset. - __ movp(kScratchRegister, - Operand(kScratchRegister, offset, scale_factor, kPointerSize * 2)); - __ cmpp(kScratchRegister, FieldOperand(receiver, HeapObject::kMapOffset)); - __ j(not_equal, &miss); - - // Get the code entry from the cache. - __ LoadAddress(kScratchRegister, value_offset); - __ movp(kScratchRegister, - Operand(kScratchRegister, offset, scale_factor, 0)); - - // Check that the flags match what we're looking for. 
- __ movl(offset, FieldOperand(kScratchRegister, Code::kFlagsOffset)); - __ andp(offset, Immediate(~Code::kFlagsNotUsedInLookup)); - __ cmpl(offset, Immediate(flags)); - __ j(not_equal, &miss); - -#ifdef DEBUG - if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) { - __ jmp(&miss); - } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) { - __ jmp(&miss); - } -#endif - - // Jump to the first instruction in the code stub. - __ addp(kScratchRegister, Immediate(Code::kHeaderSize - kHeapObjectTag)); - __ jmp(kScratchRegister); - - __ bind(&miss); -} - - void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup( MacroAssembler* masm, Label* miss_label, Register receiver, Handle<Name> name, Register scratch0, Register scratch1) { @@ -119,83 +47,13 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup( __ j(not_equal, miss_label); Label done; - NameDictionaryLookupStub::GenerateNegativeLookup(masm, - miss_label, - &done, - properties, - name, - scratch1); + NameDictionaryLookupStub::GenerateNegativeLookup(masm, miss_label, &done, + properties, name, scratch1); __ bind(&done); __ DecrementCounter(counters->negative_lookups_miss(), 1); } -void StubCache::GenerateProbe(MacroAssembler* masm, - Code::Flags flags, - Register receiver, - Register name, - Register scratch, - Register extra, - Register extra2, - Register extra3) { - Isolate* isolate = masm->isolate(); - Label miss; - USE(extra); // The register extra is not used on the X64 platform. - USE(extra2); // The register extra2 is not used on the X64 platform. - USE(extra3); // The register extra2 is not used on the X64 platform. - // Make sure that code is valid. The multiplying code relies on the - // entry size being 3 * kPointerSize. - DCHECK(sizeof(Entry) == 3 * kPointerSize); - - // Make sure the flags do not name a specific type. - DCHECK(Code::ExtractTypeFromFlags(flags) == 0); - - // Make sure that there are no register conflicts. - DCHECK(!scratch.is(receiver)); - DCHECK(!scratch.is(name)); - - // Check scratch register is valid, extra and extra2 are unused. - DCHECK(!scratch.is(no_reg)); - DCHECK(extra2.is(no_reg)); - DCHECK(extra3.is(no_reg)); - - Counters* counters = masm->isolate()->counters(); - __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1); - - // Check that the receiver isn't a smi. - __ JumpIfSmi(receiver, &miss); - - // Get the map of the receiver and compute the hash. - __ movl(scratch, FieldOperand(name, Name::kHashFieldOffset)); - // Use only the low 32 bits of the map pointer. - __ addl(scratch, FieldOperand(receiver, HeapObject::kMapOffset)); - __ xorp(scratch, Immediate(flags)); - // We mask out the last two bits because they are not part of the hash and - // they are always 01 for maps. Also in the two 'and' instructions below. - __ andp(scratch, Immediate((kPrimaryTableSize - 1) << kCacheIndexShift)); - - // Probe the primary table. - ProbeTable(isolate, masm, flags, kPrimary, receiver, name, scratch); - - // Primary miss: Compute hash for secondary probe. - __ movl(scratch, FieldOperand(name, Name::kHashFieldOffset)); - __ addl(scratch, FieldOperand(receiver, HeapObject::kMapOffset)); - __ xorp(scratch, Immediate(flags)); - __ andp(scratch, Immediate((kPrimaryTableSize - 1) << kCacheIndexShift)); - __ subl(scratch, name); - __ addl(scratch, Immediate(flags)); - __ andp(scratch, Immediate((kSecondaryTableSize - 1) << kCacheIndexShift)); - - // Probe the secondary table. 
- ProbeTable(isolate, masm, flags, kSecondary, receiver, name, scratch); - - // Cache miss: Fall-through and let caller handle the miss by - // entering the runtime system. - __ bind(&miss); - __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1); -} - - void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype( MacroAssembler* masm, int index, Register prototype, Label* miss) { Isolate* isolate = masm->isolate(); @@ -227,10 +85,8 @@ void NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype( } -static void PushInterceptorArguments(MacroAssembler* masm, - Register receiver, - Register holder, - Register name, +static void PushInterceptorArguments(MacroAssembler* masm, Register receiver, + Register holder, Register name, Handle<JSObject> holder_obj) { STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0); STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsInfoIndex == 1); @@ -248,12 +104,8 @@ static void PushInterceptorArguments(MacroAssembler* masm, static void CompileCallLoadPropertyWithInterceptor( - MacroAssembler* masm, - Register receiver, - Register holder, - Register name, - Handle<JSObject> holder_obj, - IC::UtilityId id) { + MacroAssembler* masm, Register receiver, Register holder, Register name, + Handle<JSObject> holder_obj, IC::UtilityId id) { PushInterceptorArguments(masm, receiver, holder, name, holder_obj); __ CallExternalReference(ExternalReference(IC_Utility(id), masm->isolate()), NamedLoadHandlerCompiler::kInterceptorArgsLength); @@ -272,7 +124,7 @@ void PropertyHandlerCompiler::GenerateFastApiCall( __ Push(receiver); // Write the arguments to stack frame. for (int i = 0; i < argc; i++) { - Register arg = values[argc-1-i]; + Register arg = values[argc - 1 - i]; DCHECK(!receiver.is(arg)); DCHECK(!scratch_in.is(arg)); __ Push(arg); @@ -289,16 +141,15 @@ void PropertyHandlerCompiler::GenerateFastApiCall( // Put holder in place. CallOptimization::HolderLookup holder_lookup; - Handle<JSObject> api_holder = optimization.LookupHolderOfExpectedType( - receiver_map, - &holder_lookup); + Handle<JSObject> api_holder = + optimization.LookupHolderOfExpectedType(receiver_map, &holder_lookup); switch (holder_lookup) { case CallOptimization::kHolderIsReceiver: __ Move(holder, receiver); break; case CallOptimization::kHolderFound: __ Move(holder, api_holder); - break; + break; case CallOptimization::kHolderNotFound: UNREACHABLE(); break; @@ -326,8 +177,8 @@ void PropertyHandlerCompiler::GenerateFastApiCall( // Put api_function_address in place. Address function_address = v8::ToCData<Address>
(api_call_info->callback()); - __ Move( - api_function_address, function_address, RelocInfo::EXTERNAL_REFERENCE); + __ Move(api_function_address, function_address, + RelocInfo::EXTERNAL_REFERENCE); // Jump to stub. CallApiFunctionStub stub(isolate, is_store, call_data_undefined, argc); @@ -338,8 +189,7 @@ void PropertyHandlerCompiler::GenerateCheckPropertyCell( MacroAssembler* masm, Handle<JSGlobalObject> global, Handle<Name> name, Register scratch, Label* miss) { - Handle<PropertyCell> cell = - JSGlobalObject::EnsurePropertyCell(global, name); + Handle<PropertyCell> cell = JSGlobalObject::EnsurePropertyCell(global, name); DCHECK(cell->value()->IsTheHole()); __ Move(scratch, cell); __ Cmp(FieldOperand(scratch, Cell::kValueOffset), @@ -445,13 +295,8 @@ void NamedStoreHandlerCompiler::GenerateStoreTransition( __ movp(FieldOperand(receiver_reg, HeapObject::kMapOffset), scratch1); // Update the write barrier for the map field. - __ RecordWriteField(receiver_reg, - HeapObject::kMapOffset, - scratch1, - scratch2, - kDontSaveFPRegs, - OMIT_REMEMBERED_SET, - OMIT_SMI_CHECK); + __ RecordWriteField(receiver_reg, HeapObject::kMapOffset, scratch1, scratch2, + kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK); if (details.type() == CONSTANT) { DCHECK(value_reg.is(rax)); @@ -468,8 +313,8 @@ void NamedStoreHandlerCompiler::GenerateStoreTransition( index -= transition->inobject_properties(); // TODO(verwaest): Share this code as a code stub. - SmiCheck smi_check = representation.IsTagged() - ? INLINE_SMI_CHECK : OMIT_SMI_CHECK; + SmiCheck smi_check = + representation.IsTagged() ? INLINE_SMI_CHECK : OMIT_SMI_CHECK; if (index < 0) { // Set the property straight into the object. int offset = transition->instance_size() + (index * kPointerSize); @@ -484,9 +329,8 @@ void NamedStoreHandlerCompiler::GenerateStoreTransition( if (!representation.IsDouble()) { __ movp(storage_reg, value_reg); } - __ RecordWriteField( - receiver_reg, offset, storage_reg, scratch1, kDontSaveFPRegs, - EMIT_REMEMBERED_SET, smi_check); + __ RecordWriteField(receiver_reg, offset, storage_reg, scratch1, + kDontSaveFPRegs, EMIT_REMEMBERED_SET, smi_check); } } else { // Write to the properties array. @@ -504,9 +348,8 @@ void NamedStoreHandlerCompiler::GenerateStoreTransition( if (!representation.IsDouble()) { __ movp(storage_reg, value_reg); } - __ RecordWriteField( - scratch1, offset, storage_reg, receiver_reg, kDontSaveFPRegs, - EMIT_REMEMBERED_SET, smi_check); + __ RecordWriteField(scratch1, offset, storage_reg, receiver_reg, + kDontSaveFPRegs, EMIT_REMEMBERED_SET, smi_check); } } @@ -548,8 +391,8 @@ Register PropertyHandlerCompiler::CheckPrototypes( // Make sure there's no overlap between holder and object registers. DCHECK(!scratch1.is(object_reg) && !scratch1.is(holder_reg)); - DCHECK(!scratch2.is(object_reg) && !scratch2.is(holder_reg) - && !scratch2.is(scratch1)); + DCHECK(!scratch2.is(object_reg) && !scratch2.is(holder_reg) && + !scratch2.is(scratch1)); // Keep track of the current object in register reg. 
On the first // iteration, reg is an alias for object_reg, on later iterations, @@ -584,10 +427,10 @@ Register PropertyHandlerCompiler::CheckPrototypes( } DCHECK(current.is_null() || current->property_dictionary()->FindEntry(name) == - NameDictionary::kNotFound); + NameDictionary::kNotFound); - GenerateDictionaryNegativeLookup(masm(), miss, reg, name, - scratch1, scratch2); + GenerateDictionaryNegativeLookup(masm(), miss, reg, name, scratch1, + scratch2); __ movp(scratch1, FieldOperand(reg, HeapObject::kMapOffset)); reg = holder_reg; // From now on the object will be in holder_reg. @@ -616,9 +459,8 @@ Register PropertyHandlerCompiler::CheckPrototypes( if (current_map->IsJSGlobalProxyMap()) { __ CheckAccessGlobalProxy(reg, scratch2, miss); } else if (current_map->IsJSGlobalObjectMap()) { - GenerateCheckPropertyCell( - masm(), Handle<JSGlobalObject>::cast(current), name, - scratch2, miss); + GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current), + name, scratch2, miss); } reg = holder_reg; // From now on the object will be in holder_reg. @@ -703,7 +545,7 @@ void NamedLoadHandlerCompiler::GenerateLoadCallback( __ Push(kScratchRegister); // return value __ Push(kScratchRegister); // return value default __ PushAddress(ExternalReference::isolate_address(isolate())); - __ Push(reg); // holder + __ Push(reg); // holder __ Push(name()); // name // Save a pointer to where we pushed the arguments pointer. This will be // passed as the const PropertyAccessorInfo& to the C++ callback. @@ -854,8 +696,8 @@ void NamedStoreHandlerCompiler::GenerateStoreViaSetter( __ Push(value()); ParameterCount actual(1); ParameterCount expected(setter); - __ InvokeFunction(setter, expected, actual, - CALL_FUNCTION, NullCallWrapper()); + __ InvokeFunction(setter, expected, actual, CALL_FUNCTION, + NullCallWrapper()); } else { // If we generate a global code snippet for deoptimization only, remember // the place to continue after deoptimization. @@ -910,8 +752,7 @@ Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic( } else { Label next_map; __ j(not_equal, &next_map, Label::kNear); - __ Move(transition_map(), - transitioned_maps->at(i), + __ Move(transition_map(), transitioned_maps->at(i), RelocInfo::EMBEDDED_OBJECT); __ jmp(handler_stubs->at(i), RelocInfo::CODE_TARGET); __ bind(&next_map); @@ -931,7 +772,7 @@ Register* PropertyAccessCompiler::load_calling_convention() { // receiver, name, scratch1, scratch2, scratch3, scratch4. Register receiver = LoadIC::ReceiverRegister(); Register name = LoadIC::NameRegister(); - static Register registers[] = { receiver, name, rax, rbx, rdi, r8 }; + static Register registers[] = {receiver, name, rax, rbx, rdi, r8}; return registers; } @@ -941,7 +782,7 @@ Register* PropertyAccessCompiler::store_calling_convention() { Register receiver = KeyedStoreIC::ReceiverRegister(); Register name = KeyedStoreIC::NameRegister(); DCHECK(rbx.is(KeyedStoreIC::MapRegister())); - static Register registers[] = { receiver, name, rbx, rdi, r8 }; + static Register registers[] = {receiver, name, rbx, rdi, r8}; return registers; } @@ -974,8 +815,8 @@ void NamedLoadHandlerCompiler::GenerateLoadViaGetter( __ Push(receiver); ParameterCount actual(0); ParameterCount expected(getter); - __ InvokeFunction(getter, expected, actual, - CALL_FUNCTION, NullCallWrapper()); + __ InvokeFunction(getter, expected, actual, CALL_FUNCTION, + NullCallWrapper()); } else { // If we generate a global code snippet for deoptimization only, remember // the place to continue after deoptimization. 
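The CompileKeyedStorePolymorphic hunk above boils down to a linear map-dispatch chain: one map comparison per receiver map recorded for the site, an optional transition-map move, then a tail jump to the matching handler stub. A rough C++ model of that dispatch follows (stand-in types and invented names, not the generated assembly):

// Hypothetical model of the dispatch CompileKeyedStorePolymorphic emits.
#include <cstddef>

struct PolymorphicCase {
  const void* map;         // receiver map this case handles
  const void* transition;  // target map to install first, or nullptr
  void (*handler)(const void* transition_map);  // store handler stub
};

void DispatchKeyedStore(const void* receiver_map, const PolymorphicCase* cases,
                        size_t n, void (*miss)()) {
  for (size_t i = 0; i < n; ++i) {
    if (receiver_map == cases[i].map) {
      // Mirrors "__ Move(transition_map(), ...); __ jmp(handler, CODE_TARGET)".
      cases[i].handler(cases[i].transition);
      return;
    }
  }
  miss();  // no recorded map matched; fall through to the miss builtin
}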
@@ -1069,7 +910,7 @@ Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types, } DCHECK(number_of_handled_maps > 0); - __ bind(&miss); + __ bind(&miss); TailCallBuiltin(masm(), MissBuiltin(kind())); // Return the generated code. @@ -1128,7 +969,7 @@ void ElementHandlerCompiler::GenerateLoadDictionaryElement( #undef __ - -} } // namespace v8::internal +} +} // namespace v8::internal #endif // V8_TARGET_ARCH_X64 diff --git a/src/x64/ic-x64.cc b/src/ic/x64/ic-x64.cc similarity index 81% rename from src/x64/ic-x64.cc rename to src/ic/x64/ic-x64.cc index 69e14135b..24053df41 100644 --- a/src/x64/ic-x64.cc +++ b/src/ic/x64/ic-x64.cc @@ -7,9 +7,8 @@ #if V8_TARGET_ARCH_X64 #include "src/codegen.h" -#include "src/ic-inl.h" -#include "src/runtime.h" -#include "src/stub-cache.h" +#include "src/ic/ic.h" +#include "src/ic/stub-cache.h" namespace v8 { namespace internal { @@ -21,8 +20,7 @@ namespace internal { #define __ ACCESS_MASM(masm) -static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, - Register type, +static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type, Label* global_object) { // Register usage: // type: holds the receiver instance type on entry. @@ -42,13 +40,9 @@ static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, // and will jump to the miss_label in that case. // The generated code assumes that the receiver has slow properties, // is not a global object and does not have interceptors. -static void GenerateDictionaryLoad(MacroAssembler* masm, - Label* miss_label, - Register elements, - Register name, - Register r0, - Register r1, - Register result) { +static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss_label, + Register elements, Register name, + Register r0, Register r1, Register result) { // Register use: // // elements - holds the property dictionary on entry and is unchanged. @@ -64,13 +58,8 @@ static void GenerateDictionaryLoad(MacroAssembler* masm, Label done; // Probe the dictionary. - NameDictionaryLookupStub::GeneratePositiveLookup(masm, - miss_label, - &done, - elements, - name, - r0, - r1); + NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss_label, &done, + elements, name, r0, r1); // If probing finds an entry in the dictionary, r1 contains the // index into the dictionary. Check that the value is a normal @@ -87,9 +76,8 @@ static void GenerateDictionaryLoad(MacroAssembler* masm, // Get the value at the masked, scaled index. const int kValueOffset = kElementsStartOffset + kPointerSize; - __ movp(result, - Operand(elements, r1, times_pointer_size, - kValueOffset - kHeapObjectTag)); + __ movp(result, Operand(elements, r1, times_pointer_size, + kValueOffset - kHeapObjectTag)); } @@ -100,12 +88,9 @@ static void GenerateDictionaryLoad(MacroAssembler* masm, // call if name is not an internalized string, and will jump to the miss_label // in that case. The generated code assumes that the receiver has slow // properties, is not a global object and does not have interceptors. -static void GenerateDictionaryStore(MacroAssembler* masm, - Label* miss_label, - Register elements, - Register name, - Register value, - Register scratch0, +static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss_label, + Register elements, Register name, + Register value, Register scratch0, Register scratch1) { // Register use: // @@ -121,13 +106,8 @@ static void GenerateDictionaryStore(MacroAssembler* masm, Label done; // Probe the dictionary. 
- NameDictionaryLookupStub::GeneratePositiveLookup(masm, - miss_label, - &done, - elements, - name, - scratch0, - scratch1); + NameDictionaryLookupStub::GeneratePositiveLookup( + masm, miss_label, &done, elements, name, scratch0, scratch1); // If probing finds an entry in the dictionary, scratch0 contains the // index into the dictionary. Check that the value is a normal @@ -139,20 +119,17 @@ static void GenerateDictionaryStore(MacroAssembler* masm, const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize; const int kTypeAndReadOnlyMask = (PropertyDetails::TypeField::kMask | - PropertyDetails::AttributesField::encode(READ_ONLY)) << kSmiTagSize; - __ Test(Operand(elements, - scratch1, - times_pointer_size, + PropertyDetails::AttributesField::encode(READ_ONLY)) + << kSmiTagSize; + __ Test(Operand(elements, scratch1, times_pointer_size, kDetailsOffset - kHeapObjectTag), Smi::FromInt(kTypeAndReadOnlyMask)); __ j(not_zero, miss_label); // Store the value at the masked, scaled index. const int kValueOffset = kElementsStartOffset + kPointerSize; - __ leap(scratch1, Operand(elements, - scratch1, - times_pointer_size, - kValueOffset - kHeapObjectTag)); + __ leap(scratch1, Operand(elements, scratch1, times_pointer_size, + kValueOffset - kHeapObjectTag)); __ movp(Operand(scratch1, 0), value); // Update write barrier. Make sure not to clobber the value. @@ -164,10 +141,8 @@ static void GenerateDictionaryStore(MacroAssembler* masm, // Checks the receiver for special cases (value type, slow case bits). // Falls through for regular JS object. static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm, - Register receiver, - Register map, - int interceptor_bit, - Label* slow) { + Register receiver, Register map, + int interceptor_bit, Label* slow) { // Register use: // receiver - holds the receiver and is unchanged. // Scratch registers: @@ -185,23 +160,19 @@ static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm, __ j(below, slow); // Check bit field. - __ testb(FieldOperand(map, Map::kBitFieldOffset), - Immediate((1 << Map::kIsAccessCheckNeeded) | - (1 << interceptor_bit))); + __ testb( + FieldOperand(map, Map::kBitFieldOffset), + Immediate((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit))); __ j(not_zero, slow); } // Loads an indexed element from a fast case array. // If not_fast_array is NULL, doesn't perform the elements map check. -static void GenerateFastArrayLoad(MacroAssembler* masm, - Register receiver, - Register key, - Register elements, - Register scratch, - Register result, - Label* not_fast_array, - Label* out_of_range) { +static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver, + Register key, Register elements, + Register scratch, Register result, + Label* not_fast_array, Label* out_of_range) { // Register use: // // receiver - holds the receiver on entry. @@ -236,9 +207,7 @@ static void GenerateFastArrayLoad(MacroAssembler* masm, __ j(above_equal, out_of_range); // Fast case: Do the load. SmiIndex index = masm->SmiToIndex(scratch, key, kPointerSizeLog2); - __ movp(scratch, FieldOperand(elements, - index.reg, - index.scale, + __ movp(scratch, FieldOperand(elements, index.reg, index.scale, FixedArray::kHeaderSize)); __ CompareRoot(scratch, Heap::kTheHoleValueRootIndex); // In case the loaded value is the_hole we have to consult GetProperty @@ -252,12 +221,9 @@ static void GenerateFastArrayLoad(MacroAssembler* masm, // Checks whether a key is an array index string or a unique name. // Falls through if the key is a unique name. 
-static void GenerateKeyNameCheck(MacroAssembler* masm, - Register key, - Register map, - Register hash, - Label* index_string, - Label* not_unique) { +static void GenerateKeyNameCheck(MacroAssembler* masm, Register key, + Register map, Register hash, + Label* index_string, Label* not_unique) { // Register use: // key - holds the key and is unchanged. Assumed to be non-smi. // Scratch registers: @@ -285,7 +251,6 @@ static void GenerateKeyNameCheck(MacroAssembler* masm, } - void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { // The return address is on the stack. Label slow, check_name, index_smi, index_name, property_array_property; @@ -302,20 +267,13 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { // Now the key is known to be a smi. This place is also jumped to from below // where a numeric string is converted to a smi. - GenerateKeyedLoadReceiverCheck( - masm, receiver, rax, Map::kHasIndexedInterceptor, &slow); + GenerateKeyedLoadReceiverCheck(masm, receiver, rax, + Map::kHasIndexedInterceptor, &slow); // Check the receiver's map to see if it has fast elements. __ CheckFastElements(rax, &check_number_dictionary); - GenerateFastArrayLoad(masm, - receiver, - key, - rax, - rbx, - rax, - NULL, - &slow); + GenerateFastArrayLoad(masm, receiver, key, rax, rbx, rax, NULL, &slow); Counters* counters = masm->isolate()->counters(); __ IncrementCounter(counters->keyed_load_generic_smi(), 1); __ ret(0); @@ -341,8 +299,8 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { __ bind(&check_name); GenerateKeyNameCheck(masm, key, rax, rbx, &index_name, &slow); - GenerateKeyedLoadReceiverCheck( - masm, receiver, rax, Map::kHasNamedInterceptor, &slow); + GenerateKeyedLoadReceiverCheck(masm, receiver, rax, Map::kHasNamedInterceptor, + &slow); // If the receiver is a fast-case object, check the keyed lookup // cache. Otherwise probe the dictionary leaving result in key. @@ -367,8 +325,8 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { Label load_in_object_property; static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket; Label hit_on_nth_entry[kEntriesPerBucket]; - ExternalReference cache_keys - = ExternalReference::keyed_lookup_cache_keys(masm->isolate()); + ExternalReference cache_keys = + ExternalReference::keyed_lookup_cache_keys(masm->isolate()); for (int i = 0; i < kEntriesPerBucket - 1; i++) { Label try_next_entry; @@ -390,8 +348,8 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { __ j(not_equal, &slow); // Get field offset, which is a 32-bit integer. - ExternalReference cache_field_offsets - = ExternalReference::keyed_lookup_cache_field_offsets(masm->isolate()); + ExternalReference cache_field_offsets = + ExternalReference::keyed_lookup_cache_field_offsets(masm->isolate()); // Hit on nth entry. for (int i = kEntriesPerBucket - 1; i >= 0; i--) { @@ -420,8 +378,8 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { // Load property array property. 
__ bind(&property_array_property); __ movp(rax, FieldOperand(receiver, JSObject::kPropertiesOffset)); - __ movp(rax, FieldOperand(rax, rdi, times_pointer_size, - FixedArray::kHeaderSize)); + __ movp(rax, + FieldOperand(rax, rdi, times_pointer_size, FixedArray::kHeaderSize)); __ IncrementCounter(counters->keyed_load_generic_lookup_cache(), 1); __ ret(0); @@ -454,10 +412,7 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) { Register result = rax; DCHECK(!scratch.is(receiver) && !scratch.is(index)); - StringCharAtGenerator char_at_generator(receiver, - index, - scratch, - result, + StringCharAtGenerator char_at_generator(receiver, index, scratch, result, &miss, // When not a string. &miss, // When not a number. &miss, // When index out of range. @@ -517,12 +472,8 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) { static void KeyedStoreGenerateGenericHelper( - MacroAssembler* masm, - Label* fast_object, - Label* fast_double, - Label* slow, - KeyedStoreCheckMap check_map, - KeyedStoreIncrementLength increment_length) { + MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow, + KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length) { Label transition_smi_elements; Label finish_object_store, non_double_value, transition_double_elements; Label fast_double_without_map_check; @@ -547,10 +498,8 @@ static void KeyedStoreGenerateGenericHelper( // We have to go to the runtime if the current value is the hole because // there may be a callback on the element Label holecheck_passed1; - __ movp(kScratchRegister, FieldOperand(rbx, - key, - times_pointer_size, - FixedArray::kHeaderSize)); + __ movp(kScratchRegister, + FieldOperand(rbx, key, times_pointer_size, FixedArray::kHeaderSize)); __ CompareRoot(kScratchRegister, Heap::kTheHoleValueRootIndex); __ j(not_equal, &holecheck_passed1); __ JumpIfDictionaryInPrototypeChain(receiver, rdi, kScratchRegister, slow); @@ -584,8 +533,8 @@ static void KeyedStoreGenerateGenericHelper( __ movp(FieldOperand(rbx, key, times_pointer_size, FixedArray::kHeaderSize), value); __ movp(rdx, value); // Preserve the value which is returned. - __ RecordWriteArray( - rbx, rdx, key, kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK); + __ RecordWriteArray(rbx, rdx, key, kDontSaveFPRegs, EMIT_REMEMBERED_SET, + OMIT_SMI_CHECK); __ ret(0); __ bind(fast_double); @@ -626,24 +575,18 @@ static void KeyedStoreGenerateGenericHelper( // Value is a double. Transition FAST_SMI_ELEMENTS -> // FAST_DOUBLE_ELEMENTS and complete the store. 
__ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, - FAST_DOUBLE_ELEMENTS, - rbx, - rdi, - slow); - AllocationSiteMode mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, - FAST_DOUBLE_ELEMENTS); - ElementsTransitionGenerator::GenerateSmiToDouble( - masm, receiver, key, value, rbx, mode, slow); + FAST_DOUBLE_ELEMENTS, rbx, rdi, slow); + AllocationSiteMode mode = + AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS); + ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value, + rbx, mode, slow); __ movp(rbx, FieldOperand(receiver, JSObject::kElementsOffset)); __ jmp(&fast_double_without_map_check); __ bind(&non_double_value); // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS - __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, - FAST_ELEMENTS, - rbx, - rdi, - slow); + __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS, rbx, + rdi, slow); mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS); ElementsTransitionGenerator::GenerateMapChangeElementsTransition( masm, receiver, key, value, rbx, mode, slow); @@ -655,14 +598,11 @@ static void KeyedStoreGenerateGenericHelper( // HeapNumber. Make sure that the receiver is a Array with FAST_ELEMENTS and // transition array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS __ movp(rbx, FieldOperand(receiver, HeapObject::kMapOffset)); - __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, - FAST_ELEMENTS, - rbx, - rdi, - slow); + __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS, + rbx, rdi, slow); mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS); - ElementsTransitionGenerator::GenerateDoubleToObject( - masm, receiver, key, value, rbx, mode, slow); + ElementsTransitionGenerator::GenerateDoubleToObject(masm, receiver, key, + value, rbx, mode, slow); __ movp(rbx, FieldOperand(receiver, JSObject::kElementsOffset)); __ jmp(&finish_object_store); } @@ -746,21 +686,17 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm, __ SmiCompareInteger32(FieldOperand(receiver, JSArray::kLengthOffset), key); __ j(below_equal, &extra); - KeyedStoreGenerateGenericHelper(masm, &fast_object, &fast_double, - &slow, kCheckMap, kDontIncrementLength); + KeyedStoreGenerateGenericHelper(masm, &fast_object, &fast_double, &slow, + kCheckMap, kDontIncrementLength); KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow, &slow, kDontCheckMap, kIncrementLength); } -static Operand GenerateMappedArgumentsLookup(MacroAssembler* masm, - Register object, - Register key, - Register scratch1, - Register scratch2, - Register scratch3, - Label* unmapped_case, - Label* slow_case) { +static Operand GenerateMappedArgumentsLookup( + MacroAssembler* masm, Register object, Register key, Register scratch1, + Register scratch2, Register scratch3, Label* unmapped_case, + Label* slow_case) { Heap* heap = masm->isolate()->heap(); // Check that the receiver is a JSObject. Because of the elements @@ -790,10 +726,8 @@ static Operand GenerateMappedArgumentsLookup(MacroAssembler* masm, // Load element index and check whether it is the hole. 
const int kHeaderSize = FixedArray::kHeaderSize + 2 * kPointerSize; __ SmiToInteger64(scratch3, key); - __ movp(scratch2, FieldOperand(scratch1, - scratch3, - times_pointer_size, - kHeaderSize)); + __ movp(scratch2, + FieldOperand(scratch1, scratch3, times_pointer_size, kHeaderSize)); __ CompareRoot(scratch2, Heap::kTheHoleValueRootIndex); __ j(equal, unmapped_case); @@ -802,9 +736,7 @@ static Operand GenerateMappedArgumentsLookup(MacroAssembler* masm, // map in scratch1). __ movp(scratch1, FieldOperand(scratch1, FixedArray::kHeaderSize)); __ SmiToInteger64(scratch3, scratch2); - return FieldOperand(scratch1, - scratch3, - times_pointer_size, + return FieldOperand(scratch1, scratch3, times_pointer_size, Context::kHeaderSize); } @@ -827,9 +759,7 @@ static Operand GenerateUnmappedArgumentsLookup(MacroAssembler* masm, __ cmpp(key, scratch); __ j(greater_equal, slow_case); __ SmiToInteger64(scratch, key); - return FieldOperand(backing_store, - scratch, - times_pointer_size, + return FieldOperand(backing_store, scratch, times_pointer_size, FixedArray::kHeaderSize); } @@ -842,9 +772,8 @@ void KeyedLoadIC::GenerateSloppyArguments(MacroAssembler* masm) { DCHECK(key.is(rcx)); Label slow, notin; - Operand mapped_location = - GenerateMappedArgumentsLookup( - masm, receiver, key, rbx, rax, rdi, ¬in, &slow); + Operand mapped_location = GenerateMappedArgumentsLookup( + masm, receiver, key, rbx, rax, rdi, ¬in, &slow); __ movp(rax, mapped_location); __ Ret(); __ bind(¬in); @@ -875,11 +804,7 @@ void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) { __ movp(mapped_location, value); __ leap(r9, mapped_location); __ movp(r8, value); - __ RecordWrite(rbx, - r9, - r8, - kDontSaveFPRegs, - EMIT_REMEMBERED_SET, + __ RecordWrite(rbx, r9, r8, kDontSaveFPRegs, EMIT_REMEMBERED_SET, INLINE_SMI_CHECK); __ Ret(); __ bind(¬in); @@ -889,11 +814,7 @@ void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) { __ movp(unmapped_location, value); __ leap(r9, unmapped_location); __ movp(r8, value); - __ RecordWrite(rbx, - r9, - r8, - kDontSaveFPRegs, - EMIT_REMEMBERED_SET, + __ RecordWrite(rbx, r9, r8, kDontSaveFPRegs, EMIT_REMEMBERED_SET, INLINE_SMI_CHECK); __ Ret(); __ bind(&slow); @@ -911,8 +832,8 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm) { // Probe the stub cache. Code::Flags flags = Code::RemoveTypeAndHolderFromFlags( Code::ComputeHandlerFlags(Code::LOAD_IC)); - masm->isolate()->stub_cache()->GenerateProbe( - masm, flags, receiver, name, rbx, rax); + masm->isolate()->stub_cache()->GenerateProbe(masm, flags, receiver, name, rbx, + rax); GenerateMiss(masm); } @@ -941,9 +862,7 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) { static const Register LoadIC_TempRegister() { return rbx; } -static const Register KeyedLoadIC_TempRegister() { - return rbx; -} +static const Register KeyedLoadIC_TempRegister() { return rbx; } void LoadIC::GenerateMiss(MacroAssembler* masm) { @@ -954,7 +873,7 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) { __ PopReturnAddressTo(LoadIC_TempRegister()); __ Push(ReceiverRegister()); // receiver - __ Push(NameRegister()); // name + __ Push(NameRegister()); // name __ PushReturnAddressFrom(LoadIC_TempRegister()); // Perform tail call to the entry. 
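GenerateMappedArgumentsLookup and GenerateUnmappedArgumentsLookup above walk the aliased-arguments structure backing sloppy-mode arguments objects: a parameter map whose entry i is either a context slot index (argument i aliases a formal) or the hole, plus a backing store for everything else. A toy C++ rendering of the same lookup (invented names, std::vector in place of the heap arrays):

#include <cstdint>
#include <vector>

constexpr intptr_t kHoleMarker = -1;  // stands in for the_hole

struct ToySloppyArguments {
  std::vector<intptr_t> parameter_map;  // context slot index or kHoleMarker
  std::vector<intptr_t> backing_store;  // unaliased elements
};

intptr_t LoadSloppyArgument(const ToySloppyArguments& args,
                            const std::vector<intptr_t>& context, size_t key) {
  if (key < args.parameter_map.size() &&
      args.parameter_map[key] != kHoleMarker) {
    // Mapped case: the argument aliases a slot in the enclosing context.
    return context[args.parameter_map[key]];
  }
  // Unmapped case: read the backing-store array; the generated code
  // additionally bounds-checks and falls back to the runtime on failure.
  return args.backing_store.at(key);
}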
@@ -969,7 +888,7 @@ void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) { __ PopReturnAddressTo(LoadIC_TempRegister()); __ Push(ReceiverRegister()); // receiver - __ Push(NameRegister()); // name + __ Push(NameRegister()); // name __ PushReturnAddressFrom(LoadIC_TempRegister()); // Perform tail call to the entry. @@ -984,7 +903,7 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) { __ PopReturnAddressTo(KeyedLoadIC_TempRegister()); __ Push(ReceiverRegister()); // receiver - __ Push(NameRegister()); // name + __ Push(NameRegister()); // name __ PushReturnAddressFrom(KeyedLoadIC_TempRegister()); // Perform tail call to the entry. @@ -1016,9 +935,7 @@ const Register StoreIC::NameRegister() { return rcx; } const Register StoreIC::ValueRegister() { return rax; } -const Register KeyedStoreIC::MapRegister() { - return rbx; -} +const Register KeyedStoreIC::MapRegister() { return rbx; } void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) { __ PopReturnAddressTo(KeyedLoadIC_TempRegister()); __ Push(ReceiverRegister()); // receiver - __ Push(NameRegister()); // name + __ Push(NameRegister()); // name __ PushReturnAddressFrom(KeyedLoadIC_TempRegister()); // Perform tail call to the entry. @@ -1040,8 +957,8 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm) { // Get the receiver from the stack and probe the stub cache. Code::Flags flags = Code::RemoveTypeAndHolderFromFlags( Code::ComputeHandlerFlags(Code::STORE_IC)); - masm->isolate()->stub_cache()->GenerateProbe( - masm, flags, ReceiverRegister(), NameRegister(), rbx, no_reg); + masm->isolate()->stub_cache()->GenerateProbe(masm, flags, ReceiverRegister(), + NameRegister(), rbx, no_reg); // Cache miss: Jump to runtime. GenerateMiss(masm); @@ -1122,7 +1039,7 @@ void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm, __ Push(ReceiverRegister()); __ Push(NameRegister()); __ Push(ValueRegister()); - __ Push(Smi::FromInt(strict_mode)); // Strict mode. + __ Push(Smi::FromInt(strict_mode)); // Strict mode. __ PushReturnAddressFrom(rbx); // Do tail-call to runtime routine. @@ -1212,8 +1129,8 @@ void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) { // condition code uses at the patched jump. uint8_t delta = *reinterpret_cast<uint8_t*>(delta_address); if (FLAG_trace_ic) { - PrintF("[ patching ic at %p, test=%p, delta=%d\n", - address, test_instruction_address, delta); + PrintF("[ patching ic at %p, test=%p, delta=%d\n", address, + test_instruction_address, delta); } // Patch with a short conditional jump. Enabling means switching from a short // jump-if-carry/not-carry to jump-if-zero/not-zero, whereas disabling is the // reverse operation of that. Address jmp_address = test_instruction_address - delta; DCHECK((check == ENABLE_INLINED_SMI_CHECK) - ? (*jmp_address == Assembler::kJncShortOpcode || - *jmp_address == Assembler::kJcShortOpcode) - : (*jmp_address == Assembler::kJnzShortOpcode || - *jmp_address == Assembler::kJzShortOpcode)); - Condition cc = (check == ENABLE_INLINED_SMI_CHECK) - ? (*jmp_address == Assembler::kJncShortOpcode ? not_zero : zero) - : (*jmp_address == Assembler::kJnzShortOpcode ? not_carry : carry); + ? (*jmp_address == Assembler::kJncShortOpcode || + *jmp_address == Assembler::kJcShortOpcode) + : (*jmp_address == Assembler::kJnzShortOpcode || + *jmp_address == Assembler::kJzShortOpcode)); + Condition cc = + (check == ENABLE_INLINED_SMI_CHECK) + ? 
(*jmp_address == Assembler::kJncShortOpcode ? not_zero : zero) + : (*jmp_address == Assembler::kJnzShortOpcode ? not_carry : carry); *jmp_address = static_cast<byte>(Assembler::kJccShortPrefix | cc); } - - -} } // namespace v8::internal +} +} // namespace v8::internal #endif // V8_TARGET_ARCH_X64 diff --git a/src/ic/x64/stub-cache-x64.cc b/src/ic/x64/stub-cache-x64.cc new file mode 100644 index 000000000..2f270dcb6 --- /dev/null +++ b/src/ic/x64/stub-cache-x64.cc @@ -0,0 +1,149 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/v8.h" + +#if V8_TARGET_ARCH_X64 + +#include "src/codegen.h" +#include "src/ic/stub-cache.h" + +namespace v8 { +namespace internal { + +#define __ ACCESS_MASM(masm) + + +static void ProbeTable(Isolate* isolate, MacroAssembler* masm, + Code::Flags flags, StubCache::Table table, + Register receiver, Register name, + // The offset is scaled by 4, based on + // kCacheIndexShift, which is two bits + Register offset) { + // We need to scale up the pointer by 2 when the offset is scaled by less + // than the pointer size. + DCHECK(kPointerSize == kInt64Size + ? kPointerSizeLog2 == StubCache::kCacheIndexShift + 1 + : kPointerSizeLog2 == StubCache::kCacheIndexShift); + ScaleFactor scale_factor = kPointerSize == kInt64Size ? times_2 : times_1; + + DCHECK_EQ(3 * kPointerSize, sizeof(StubCache::Entry)); + // The offset register holds the entry offset times four (due to masking + // and shifting optimizations). + ExternalReference key_offset(isolate->stub_cache()->key_reference(table)); + ExternalReference value_offset(isolate->stub_cache()->value_reference(table)); + Label miss; + + // Multiply by 3 because there are 3 fields per entry (name, code, map). + __ leap(offset, Operand(offset, offset, times_2, 0)); + + __ LoadAddress(kScratchRegister, key_offset); + + // Check that the key in the entry matches the name. + // Multiply entry offset by 16 to get the entry address. Since the + // offset register already holds the entry offset times four, multiply + // by a further four. + __ cmpl(name, Operand(kScratchRegister, offset, scale_factor, 0)); + __ j(not_equal, &miss); + + // Get the map entry from the cache. + // Use key_offset + kPointerSize * 2, rather than loading map_offset. + __ movp(kScratchRegister, + Operand(kScratchRegister, offset, scale_factor, kPointerSize * 2)); + __ cmpp(kScratchRegister, FieldOperand(receiver, HeapObject::kMapOffset)); + __ j(not_equal, &miss); + + // Get the code entry from the cache. + __ LoadAddress(kScratchRegister, value_offset); + __ movp(kScratchRegister, Operand(kScratchRegister, offset, scale_factor, 0)); + + // Check that the flags match what we're looking for. + __ movl(offset, FieldOperand(kScratchRegister, Code::kFlagsOffset)); + __ andp(offset, Immediate(~Code::kFlagsNotUsedInLookup)); + __ cmpl(offset, Immediate(flags)); + __ j(not_equal, &miss); + +#ifdef DEBUG + if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) { + __ jmp(&miss); + } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) { + __ jmp(&miss); + } +#endif + + // Jump to the first instruction in the code stub. 
+ __ addp(kScratchRegister, Immediate(Code::kHeaderSize - kHeapObjectTag)); + __ jmp(kScratchRegister); + + __ bind(&miss); +} + + +void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags, + Register receiver, Register name, + Register scratch, Register extra, Register extra2, + Register extra3) { + Isolate* isolate = masm->isolate(); + Label miss; + USE(extra); // The register extra is not used on the X64 platform. + USE(extra2); // The register extra2 is not used on the X64 platform. + USE(extra3); // The register extra3 is not used on the X64 platform. + // Make sure that code is valid. The multiplying code relies on the + // entry size being 3 * kPointerSize. + DCHECK(sizeof(Entry) == 3 * kPointerSize); + + // Make sure the flags do not name a specific type. + DCHECK(Code::ExtractTypeFromFlags(flags) == 0); + + // Make sure that there are no register conflicts. + DCHECK(!scratch.is(receiver)); + DCHECK(!scratch.is(name)); + + // Check scratch register is valid, extra2 and extra3 are unused. + DCHECK(!scratch.is(no_reg)); + DCHECK(extra2.is(no_reg)); + DCHECK(extra3.is(no_reg)); + + Counters* counters = masm->isolate()->counters(); + __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1); + + // Check that the receiver isn't a smi. + __ JumpIfSmi(receiver, &miss); + + // Get the map of the receiver and compute the hash. + __ movl(scratch, FieldOperand(name, Name::kHashFieldOffset)); + // Use only the low 32 bits of the map pointer. + __ addl(scratch, FieldOperand(receiver, HeapObject::kMapOffset)); + __ xorp(scratch, Immediate(flags)); + // We mask out the last two bits because they are not part of the hash and + // they are always 01 for maps. Also in the two 'and' instructions below. + __ andp(scratch, Immediate((kPrimaryTableSize - 1) << kCacheIndexShift)); + + // Probe the primary table. + ProbeTable(isolate, masm, flags, kPrimary, receiver, name, scratch); + + // Primary miss: Compute hash for secondary probe. + __ movl(scratch, FieldOperand(name, Name::kHashFieldOffset)); + __ addl(scratch, FieldOperand(receiver, HeapObject::kMapOffset)); + __ xorp(scratch, Immediate(flags)); + __ andp(scratch, Immediate((kPrimaryTableSize - 1) << kCacheIndexShift)); + __ subl(scratch, name); + __ addl(scratch, Immediate(flags)); + __ andp(scratch, Immediate((kSecondaryTableSize - 1) << kCacheIndexShift)); + + // Probe the secondary table. + ProbeTable(isolate, masm, flags, kSecondary, receiver, name, scratch); + + // Cache miss: Fall-through and let caller handle the miss by + // entering the runtime system. 
+ __ bind(&miss); + __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1); +} + + +#undef __ +} +} // namespace v8::internal + +#endif // V8_TARGET_ARCH_X64 diff --git a/src/isolate.cc b/src/isolate.cc index 0c27074fd..fc8d34db9 100644 --- a/src/isolate.cc +++ b/src/isolate.cc @@ -19,6 +19,7 @@ #include "src/heap/sweeper-thread.h" #include "src/heap-profiler.h" #include "src/hydrogen.h" +#include "src/ic/stub-cache.h" #include "src/isolate-inl.h" #include "src/lithium-allocator.h" #include "src/log.h" @@ -30,7 +31,6 @@ #include "src/scopeinfo.h" #include "src/serialize.h" #include "src/simulator.h" -#include "src/stub-cache.h" #include "src/version.h" #include "src/vm-state-inl.h" diff --git a/src/runtime.cc b/src/runtime.cc index f7dd0e80f..2a58b071a 100644 --- a/src/runtime.cc +++ b/src/runtime.cc @@ -40,7 +40,6 @@ #include "src/scopeinfo.h" #include "src/smart-pointers.h" #include "src/string-search.h" -#include "src/stub-cache.h" #include "src/uri.h" #include "src/utils.h" #include "src/v8threads.h" diff --git a/src/serialize.cc b/src/serialize.cc index ea3fa9206..4cb05fad0 100644 --- a/src/serialize.cc +++ b/src/serialize.cc @@ -11,14 +11,14 @@ #include "src/deoptimizer.h" #include "src/execution.h" #include "src/global-handles.h" -#include "src/ic-inl.h" +#include "src/ic/ic.h" +#include "src/ic/stub-cache.h" #include "src/natives.h" #include "src/objects.h" #include "src/runtime.h" #include "src/serialize.h" #include "src/snapshot.h" #include "src/snapshot-source-sink.h" -#include "src/stub-cache.h" #include "src/v8threads.h" #include "src/version.h" diff --git a/src/type-info.cc b/src/type-info.cc index 1d2db7c54..cbf4ffdd5 100644 --- a/src/type-info.cc +++ b/src/type-info.cc @@ -7,12 +7,10 @@ #include "src/ast.h" #include "src/code-stubs.h" #include "src/compiler.h" -#include "src/ic.h" +#include "src/ic/stub-cache.h" #include "src/macro-assembler.h" -#include "src/stub-cache.h" #include "src/type-info.h" -#include "src/ic-inl.h" #include "src/objects-inl.h" namespace v8 { diff --git a/src/x64/builtins-x64.cc b/src/x64/builtins-x64.cc index a18747d50..6516e3f59 100644 --- a/src/x64/builtins-x64.cc +++ b/src/x64/builtins-x64.cc @@ -9,7 +9,6 @@ #include "src/codegen.h" #include "src/deoptimizer.h" #include "src/full-codegen.h" -#include "src/stub-cache.h" namespace v8 { namespace internal { diff --git a/src/x64/code-stubs-x64.cc b/src/x64/code-stubs-x64.cc index 5a30ab70a..694c77deb 100644 --- a/src/x64/code-stubs-x64.cc +++ b/src/x64/code-stubs-x64.cc @@ -8,9 +8,12 @@ #include "src/bootstrapper.h" #include "src/code-stubs.h" +#include "src/codegen.h" +#include "src/ic/ic-compiler.h" +#include "src/isolate.h" +#include "src/jsregexp.h" #include "src/regexp-macro-assembler.h" #include "src/runtime.h" -#include "src/stub-cache.h" namespace v8 { namespace internal { diff --git a/src/x64/code-stubs-x64.h b/src/x64/code-stubs-x64.h index 71fc5aba5..a020ce5a4 100644 --- a/src/x64/code-stubs-x64.h +++ b/src/x64/code-stubs-x64.h @@ -5,7 +5,7 @@ #ifndef V8_X64_CODE_STUBS_X64_H_ #define V8_X64_CODE_STUBS_X64_H_ -#include "src/ic-inl.h" +#include "src/code-stubs.h" namespace v8 { namespace internal { diff --git a/src/x64/codegen-x64.h b/src/x64/codegen-x64.h index 8bfd7f4c5..0a551eef5 100644 --- a/src/x64/codegen-x64.h +++ b/src/x64/codegen-x64.h @@ -6,7 +6,7 @@ #define V8_X64_CODEGEN_X64_H_ #include "src/ast.h" -#include "src/ic-inl.h" +#include "src/macro-assembler.h" namespace v8 { namespace internal { diff --git a/src/x64/full-codegen-x64.cc b/src/x64/full-codegen-x64.cc index 
69807096c..7e8b70888 100644 --- a/src/x64/full-codegen-x64.cc +++ b/src/x64/full-codegen-x64.cc @@ -14,7 +14,6 @@ #include "src/isolate-inl.h" #include "src/parser.h" #include "src/scopes.h" -#include "src/stub-cache.h" namespace v8 { namespace internal { diff --git a/src/x64/lithium-codegen-x64.cc b/src/x64/lithium-codegen-x64.cc index 34674a640..0a6ac17f9 100644 --- a/src/x64/lithium-codegen-x64.cc +++ b/src/x64/lithium-codegen-x64.cc @@ -8,7 +8,6 @@ #include "src/code-stubs.h" #include "src/hydrogen-osr.h" -#include "src/stub-cache.h" #include "src/x64/lithium-codegen-x64.h" namespace v8 { diff --git a/test/cctest/test-debug.cc b/test/cctest/test-debug.cc index 2680f4ea0..433190750 100644 --- a/test/cctest/test-debug.cc +++ b/test/cctest/test-debug.cc @@ -36,7 +36,6 @@ #include "src/debug.h" #include "src/deoptimizer.h" #include "src/frames.h" -#include "src/stub-cache.h" #include "src/utils.h" #include "test/cctest/cctest.h" diff --git a/test/cctest/test-deoptimization.cc b/test/cctest/test-deoptimization.cc index 3127acc6a..262ffc1c3 100644 --- a/test/cctest/test-deoptimization.cc +++ b/test/cctest/test-deoptimization.cc @@ -35,7 +35,6 @@ #include "src/debug.h" #include "src/deoptimizer.h" #include "src/isolate.h" -#include "src/stub-cache.h" #include "test/cctest/cctest.h" using ::v8::base::OS; diff --git a/test/cctest/test-disasm-ia32.cc b/test/cctest/test-disasm-ia32.cc index 8436df7c5..76452ef91 100644 --- a/test/cctest/test-disasm-ia32.cc +++ b/test/cctest/test-disasm-ia32.cc @@ -32,9 +32,9 @@ #include "src/debug.h" #include "src/disasm.h" #include "src/disassembler.h" +#include "src/ic/ic.h" #include "src/macro-assembler.h" #include "src/serialize.h" -#include "src/stub-cache.h" #include "test/cctest/cctest.h" using namespace v8::internal; diff --git a/test/cctest/test-disasm-x64.cc b/test/cctest/test-disasm-x64.cc index 4778b04bb..a842956ae 100644 --- a/test/cctest/test-disasm-x64.cc +++ b/test/cctest/test-disasm-x64.cc @@ -32,9 +32,9 @@ #include "src/debug.h" #include "src/disasm.h" #include "src/disassembler.h" +#include "src/ic/ic.h" #include "src/macro-assembler.h" #include "src/serialize.h" -#include "src/stub-cache.h" #include "test/cctest/cctest.h" using namespace v8::internal; diff --git a/test/cctest/test-heap.cc b/test/cctest/test-heap.cc index ab000dc6a..462979da5 100644 --- a/test/cctest/test-heap.cc +++ b/test/cctest/test-heap.cc @@ -34,8 +34,8 @@ #include "src/execution.h" #include "src/factory.h" #include "src/global-handles.h" +#include "src/ic/ic.h" #include "src/macro-assembler.h" -#include "src/stub-cache.h" #include "test/cctest/cctest.h" using namespace v8::internal; diff --git a/test/cctest/test-serialize.cc b/test/cctest/test-serialize.cc index 9ae90c477..7e46bbd53 100644 --- a/test/cctest/test-serialize.cc +++ b/test/cctest/test-serialize.cc @@ -35,7 +35,6 @@ #include "src/compilation-cache.h" #include "src/debug.h" #include "src/heap/spaces.h" -#include "src/ic-inl.h" #include "src/natives.h" #include "src/objects.h" #include "src/runtime.h" diff --git a/tools/gyp/v8.gyp b/tools/gyp/v8.gyp index a52fd9337..c46345ea9 100644 --- a/tools/gyp/v8.gyp +++ b/tools/gyp/v8.gyp @@ -599,9 +599,11 @@ '../../src/i18n.h', '../../src/icu_util.cc', '../../src/icu_util.h', - '../../src/ic-inl.h', - '../../src/ic.cc', - '../../src/ic.h', + '../../src/ic/ic-inl.h', + '../../src/ic/ic.cc', + '../../src/ic/ic.h', + '../../src/ic/ic-compiler.cc', + '../../src/ic/ic-compiler.h', '../../src/interface.cc', '../../src/interface.h', '../../src/interpreter-irregexp.cc', @@ -705,8 
+707,8 @@ '../../src/string-stream.h', '../../src/strtod.cc', '../../src/strtod.h', - '../../src/stub-cache.cc', - '../../src/stub-cache.h', + '../../src/ic/stub-cache.cc', + '../../src/ic/stub-cache.h', '../../src/token.cc', '../../src/token.h', '../../src/transitions-inl.h', @@ -772,7 +774,6 @@ '../../src/arm/frames-arm.cc', '../../src/arm/frames-arm.h', '../../src/arm/full-codegen-arm.cc', - '../../src/arm/ic-arm.cc', '../../src/arm/lithium-arm.cc', '../../src/arm/lithium-arm.h', '../../src/arm/lithium-codegen-arm.cc', @@ -784,11 +785,13 @@ '../../src/arm/regexp-macro-assembler-arm.cc', '../../src/arm/regexp-macro-assembler-arm.h', '../../src/arm/simulator-arm.cc', - '../../src/arm/stub-cache-arm.cc', '../../src/compiler/arm/code-generator-arm.cc', '../../src/compiler/arm/instruction-codes-arm.h', '../../src/compiler/arm/instruction-selector-arm.cc', '../../src/compiler/arm/linkage-arm.cc', + '../../src/ic/arm/ic-arm.cc', + '../../src/ic/arm/ic-compiler-arm.cc', + '../../src/ic/arm/stub-cache-arm.cc', ], }], ['v8_target_arch=="arm64"', { @@ -816,7 +819,6 @@ '../../src/arm64/frames-arm64.cc', '../../src/arm64/frames-arm64.h', '../../src/arm64/full-codegen-arm64.cc', - '../../src/arm64/ic-arm64.cc', '../../src/arm64/instructions-arm64.cc', '../../src/arm64/instructions-arm64.h', '../../src/arm64/instrument-arm64.cc', @@ -834,13 +836,15 @@ '../../src/arm64/regexp-macro-assembler-arm64.h', '../../src/arm64/simulator-arm64.cc', '../../src/arm64/simulator-arm64.h', - '../../src/arm64/stub-cache-arm64.cc', '../../src/arm64/utils-arm64.cc', '../../src/arm64/utils-arm64.h', '../../src/compiler/arm64/code-generator-arm64.cc', '../../src/compiler/arm64/instruction-codes-arm64.h', '../../src/compiler/arm64/instruction-selector-arm64.cc', '../../src/compiler/arm64/linkage-arm64.cc', + '../../src/ic/arm64/ic-arm64.cc', + '../../src/ic/arm64/ic-compiler-arm64.cc', + '../../src/ic/arm64/stub-cache-arm64.cc', ], }], ['v8_target_arch=="ia32"', { @@ -860,7 +864,6 @@ '../../src/ia32/frames-ia32.cc', '../../src/ia32/frames-ia32.h', '../../src/ia32/full-codegen-ia32.cc', - '../../src/ia32/ic-ia32.cc', '../../src/ia32/lithium-codegen-ia32.cc', '../../src/ia32/lithium-codegen-ia32.h', '../../src/ia32/lithium-gap-resolver-ia32.cc', @@ -871,11 +874,13 @@ '../../src/ia32/macro-assembler-ia32.h', '../../src/ia32/regexp-macro-assembler-ia32.cc', '../../src/ia32/regexp-macro-assembler-ia32.h', - '../../src/ia32/stub-cache-ia32.cc', '../../src/compiler/ia32/code-generator-ia32.cc', '../../src/compiler/ia32/instruction-codes-ia32.h', '../../src/compiler/ia32/instruction-selector-ia32.cc', '../../src/compiler/ia32/linkage-ia32.cc', + '../../src/ic/ia32/ic-ia32.cc', + '../../src/ic/ia32/ic-compiler-ia32.cc', + '../../src/ic/ia32/stub-cache-ia32.cc', ], }], ['v8_target_arch=="x87"', { @@ -994,7 +999,6 @@ '../../src/x64/frames-x64.cc', '../../src/x64/frames-x64.h', '../../src/x64/full-codegen-x64.cc', - '../../src/x64/ic-x64.cc', '../../src/x64/lithium-codegen-x64.cc', '../../src/x64/lithium-codegen-x64.h', '../../src/x64/lithium-gap-resolver-x64.cc', @@ -1005,11 +1009,13 @@ '../../src/x64/macro-assembler-x64.h', '../../src/x64/regexp-macro-assembler-x64.cc', '../../src/x64/regexp-macro-assembler-x64.h', - '../../src/x64/stub-cache-x64.cc', '../../src/compiler/x64/code-generator-x64.cc', '../../src/compiler/x64/instruction-codes-x64.h', '../../src/compiler/x64/instruction-selector-x64.cc', '../../src/compiler/x64/linkage-x64.cc', + '../../src/ic/x64/ic-x64.cc', + '../../src/ic/x64/ic-compiler-x64.cc', + 
'../../src/ic/x64/stub-cache-x64.cc', ], }], ['OS=="linux"', { -- 2.34.1
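For reference, the probe sequence that src/ic/x64/stub-cache-x64.cc spells out with movl/addl/xorp/andp mirrors the PrimaryOffset/SecondaryOffset declarations in the new src/ic/stub-cache.h. A standalone sketch of the two hash computations (uint32_t stand-ins for Name, Map and Code::Flags; flags assumed pre-masked with ~Code::kFlagsNotUsedInLookup; shift and table sizes copied from the header):

#include <cstdint>

constexpr int kCacheIndexShift = 2;               // stands in for Name::kHashShift
constexpr uint32_t kPrimaryTableSize = 1 << 11;   // kPrimaryTableBits = 11
constexpr uint32_t kSecondaryTableSize = 1 << 9;  // kSecondaryTableBits = 9

// (map + name hash) ^ flags, masked down to a scaled primary-table index.
uint32_t PrimaryOffset(uint32_t name_hash_field, uint32_t flags,
                       uint32_t map_low32bits) {
  uint32_t key = (map_low32bits + name_hash_field) ^ flags;
  return key & ((kPrimaryTableSize - 1) << kCacheIndexShift);
}

// The primary offset seeds the secondary hash, so two entries that collide
// in the primary table rarely collide again in the secondary one.
uint32_t SecondaryOffset(uint32_t primary_offset, uint32_t flags,
                         uint32_t name_low32bits) {
  uint32_t key = (primary_offset - name_low32bits) + flags;
  return key & ((kSecondaryTableSize - 1) << kCacheIndexShift);
}

With two different hash functions a simultaneous collision in both tables is unlikely, which keeps the update rule trivial: an entry displaced from the primary table drops into the secondary table, and secondary entries are simply overwritten.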