"src/i18n.h",
"src/icu_util.cc",
"src/icu_util.h",
- "src/ic-inl.h",
- "src/ic.cc",
- "src/ic.h",
+ "src/ic/ic-inl.h",
+ "src/ic/ic.cc",
+ "src/ic/ic.h",
+ "src/ic/stub-cache.cc",
+ "src/ic/stub-cache.h",
"src/interface.cc",
"src/interface.h",
"src/interpreter-irregexp.cc",
"src/string-stream.h",
"src/strtod.cc",
"src/strtod.h",
- "src/stub-cache.cc",
- "src/stub-cache.h",
"src/token.cc",
"src/token.h",
"src/transitions-inl.h",
"src/ia32/frames-ia32.cc",
"src/ia32/frames-ia32.h",
"src/ia32/full-codegen-ia32.cc",
- "src/ia32/ic-ia32.cc",
"src/ia32/lithium-codegen-ia32.cc",
"src/ia32/lithium-codegen-ia32.h",
"src/ia32/lithium-gap-resolver-ia32.cc",
"src/ia32/macro-assembler-ia32.h",
"src/ia32/regexp-macro-assembler-ia32.cc",
"src/ia32/regexp-macro-assembler-ia32.h",
- "src/ia32/stub-cache-ia32.cc",
"src/compiler/ia32/code-generator-ia32.cc",
"src/compiler/ia32/instruction-codes-ia32.h",
"src/compiler/ia32/instruction-selector-ia32.cc",
"src/compiler/ia32/linkage-ia32.cc",
+ "src/ic/ia32/ic-ia32.cc",
+ "src/ic/ia32/handler-ia32.cc",
+ "src/ic/ia32/stub-cache-ia32.cc",
]
} else if (v8_target_arch == "x64") {
sources += [
"src/x64/frames-x64.cc",
"src/x64/frames-x64.h",
"src/x64/full-codegen-x64.cc",
- "src/x64/ic-x64.cc",
"src/x64/lithium-codegen-x64.cc",
"src/x64/lithium-codegen-x64.h",
"src/x64/lithium-gap-resolver-x64.cc",
"src/x64/macro-assembler-x64.h",
"src/x64/regexp-macro-assembler-x64.cc",
"src/x64/regexp-macro-assembler-x64.h",
- "src/x64/stub-cache-x64.cc",
"src/compiler/x64/code-generator-x64.cc",
"src/compiler/x64/instruction-codes-x64.h",
"src/compiler/x64/instruction-selector-x64.cc",
"src/compiler/x64/linkage-x64.cc",
+ "src/ic/x64/ic-x64.cc",
+ "src/ic/x64/ic-compiler-x64.cc",
+ "src/ic/x64/stub-cache-x64.cc",
]
} else if (v8_target_arch == "arm") {
sources += [
"src/arm/frames-arm.cc",
"src/arm/frames-arm.h",
"src/arm/full-codegen-arm.cc",
- "src/arm/ic-arm.cc",
"src/arm/lithium-arm.cc",
"src/arm/lithium-arm.h",
"src/arm/lithium-codegen-arm.cc",
"src/arm/regexp-macro-assembler-arm.cc",
"src/arm/regexp-macro-assembler-arm.h",
"src/arm/simulator-arm.cc",
- "src/arm/stub-cache-arm.cc",
"src/compiler/arm/code-generator-arm.cc",
"src/compiler/arm/instruction-codes-arm.h",
"src/compiler/arm/instruction-selector-arm.cc",
"src/compiler/arm/linkage-arm.cc",
+ "src/ic/arm/ic-arm.cc",
+ "src/ic/arm/ic-compiler-arm.cc",
+ "src/ic/arm/stub-cache-arm.cc",
]
} else if (v8_target_arch == "arm64") {
sources += [
"src/arm64/frames-arm64.cc",
"src/arm64/frames-arm64.h",
"src/arm64/full-codegen-arm64.cc",
- "src/arm64/ic-arm64.cc",
"src/arm64/instructions-arm64.cc",
"src/arm64/instructions-arm64.h",
"src/arm64/instrument-arm64.cc",
"src/arm64/regexp-macro-assembler-arm64.h",
"src/arm64/simulator-arm64.cc",
"src/arm64/simulator-arm64.h",
- "src/arm64/stub-cache-arm64.cc",
"src/arm64/utils-arm64.cc",
"src/arm64/utils-arm64.h",
"src/compiler/arm64/code-generator-arm64.cc",
"src/compiler/arm64/instruction-codes-arm64.h",
"src/compiler/arm64/instruction-selector-arm64.cc",
"src/compiler/arm64/linkage-arm64.cc",
+ "src/ic/arm64/ic-arm64.cc",
+ "src/ic/arm64/ic-compiler-arm64.cc",
+ "src/ic/arm64/stub-cache-arm64.cc",
]
} else if (v8_target_arch == "mipsel") {
sources += [
#include "src/deoptimizer.h"
#include "src/full-codegen.h"
#include "src/runtime.h"
-#include "src/stub-cache.h"
namespace v8 {
namespace internal {
#include "src/bootstrapper.h"
#include "src/code-stubs.h"
+#include "src/codegen.h"
+#include "src/ic/ic-compiler.h"
+#include "src/isolate.h"
+#include "src/jsregexp.h"
#include "src/regexp-macro-assembler.h"
-#include "src/stub-cache.h"
+#include "src/runtime.h"
namespace v8 {
namespace internal {
#ifndef V8_ARM_CODE_STUBS_ARM_H_
#define V8_ARM_CODE_STUBS_ARM_H_
-#include "src/ic-inl.h"
+#include "src/code-stubs.h"
namespace v8 {
namespace internal {
#define V8_ARM_CODEGEN_ARM_H_
#include "src/ast.h"
-#include "src/ic-inl.h"
+#include "src/macro-assembler.h"
namespace v8 {
namespace internal {
#include "src/isolate-inl.h"
#include "src/parser.h"
#include "src/scopes.h"
-#include "src/stub-cache.h"
#include "src/arm/code-stubs-arm.h"
#include "src/arm/macro-assembler-arm.h"
--- a/src/arm/ic-arm.cc
+++ /dev/null
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/v8.h"
-
-#if V8_TARGET_ARCH_ARM
-
-#include "src/arm/assembler-arm.h"
-#include "src/code-stubs.h"
-#include "src/codegen.h"
-#include "src/disasm.h"
-#include "src/ic-inl.h"
-#include "src/runtime.h"
-#include "src/stub-cache.h"
-
-namespace v8 {
-namespace internal {
-
-
-// ----------------------------------------------------------------------------
-// Static IC stub generators.
-//
-
-#define __ ACCESS_MASM(masm)
-
-
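-// Jumps to 'global_object' if 'type' holds one of the three global object
-// instance types; falls through for any other receiver type.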
-static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm,
- Register type,
- Label* global_object) {
- // Register usage:
- // type: holds the receiver instance type on entry.
- __ cmp(type, Operand(JS_GLOBAL_OBJECT_TYPE));
- __ b(eq, global_object);
- __ cmp(type, Operand(JS_BUILTINS_OBJECT_TYPE));
- __ b(eq, global_object);
- __ cmp(type, Operand(JS_GLOBAL_PROXY_TYPE));
- __ b(eq, global_object);
-}
-
-
-// Helper function used from LoadIC GenerateNormal.
-//
-// elements: Property dictionary. It is not clobbered if a jump to the miss
-// label is done.
-// name: Property name. It is not clobbered if a jump to the miss label is
- // done.
-// result: Register for the result. It is only updated if a jump to the miss
-// label is not done. Can be the same as elements or name clobbering
-// one of these in the case of not jumping to the miss label.
-// The two scratch registers need to be different from elements, name and
-// result.
-// The generated code assumes that the receiver has slow properties,
-// is not a global object and does not have interceptors.
-static void GenerateDictionaryLoad(MacroAssembler* masm,
- Label* miss,
- Register elements,
- Register name,
- Register result,
- Register scratch1,
- Register scratch2) {
- // Main use of the scratch registers.
- // scratch1: Used as temporary and to hold the capacity of the property
- // dictionary.
- // scratch2: Used as temporary.
- Label done;
-
- // Probe the dictionary.
- NameDictionaryLookupStub::GeneratePositiveLookup(masm,
- miss,
- &done,
- elements,
- name,
- scratch1,
- scratch2);
-
- // If probing finds an entry check that the value is a normal
- // property.
- __ bind(&done); // scratch2 == elements + 4 * index
- const int kElementsStartOffset = NameDictionary::kHeaderSize +
- NameDictionary::kElementsStartIndex * kPointerSize;
- const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
- __ ldr(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
- __ tst(scratch1, Operand(PropertyDetails::TypeField::kMask << kSmiTagSize));
- __ b(ne, miss);
-
- // Get the value at the masked, scaled index and return.
- __ ldr(result,
- FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize));
-}
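-// (The offset arithmetic above relies on each NameDictionary entry being a
-// (key, value, details) triple: the value sits one pointer past the entry
-// start found by the probe, and the details word two pointers past it.)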
-
-
-// Helper function used from StoreIC::GenerateNormal.
-//
-// elements: Property dictionary. It is not clobbered if a jump to the miss
-// label is done.
-// name: Property name. It is not clobbered if a jump to the miss label is
- // done.
-// value: The value to store.
-// The two scratch registers need to be different from elements, name and
- // value.
-// The generated code assumes that the receiver has slow properties,
-// is not a global object and does not have interceptors.
-static void GenerateDictionaryStore(MacroAssembler* masm,
- Label* miss,
- Register elements,
- Register name,
- Register value,
- Register scratch1,
- Register scratch2) {
- // Main use of the scratch registers.
- // scratch1: Used as temporary and to hold the capacity of the property
- // dictionary.
- // scratch2: Used as temporary.
- Label done;
-
- // Probe the dictionary.
- NameDictionaryLookupStub::GeneratePositiveLookup(masm,
- miss,
- &done,
- elements,
- name,
- scratch1,
- scratch2);
-
- // If probing finds an entry in the dictionary check that the value
- // is a normal property that is not read only.
- __ bind(&done); // scratch2 == elements + 4 * index
- const int kElementsStartOffset = NameDictionary::kHeaderSize +
- NameDictionary::kElementsStartIndex * kPointerSize;
- const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
- const int kTypeAndReadOnlyMask =
- (PropertyDetails::TypeField::kMask |
- PropertyDetails::AttributesField::encode(READ_ONLY)) << kSmiTagSize;
- __ ldr(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
- __ tst(scratch1, Operand(kTypeAndReadOnlyMask));
- __ b(ne, miss);
-
- // Store the value at the masked, scaled index and return.
- const int kValueOffset = kElementsStartOffset + kPointerSize;
- __ add(scratch2, scratch2, Operand(kValueOffset - kHeapObjectTag));
- __ str(value, MemOperand(scratch2));
-
- // Update the write barrier. Make sure not to clobber the value.
- __ mov(scratch1, value);
- __ RecordWrite(
- elements, scratch2, scratch1, kLRHasNotBeenSaved, kDontSaveFPRegs);
-}
-
-
-// Checks the receiver for special cases (value type, slow case bits).
-// Falls through for regular JS object.
-static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
- Register receiver,
- Register map,
- Register scratch,
- int interceptor_bit,
- Label* slow) {
- // Check that the object isn't a smi.
- __ JumpIfSmi(receiver, slow);
- // Get the map of the receiver.
- __ ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
- // Check bit field.
- __ ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
- __ tst(scratch,
- Operand((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)));
- __ b(ne, slow);
- // Check that the object is some kind of JS object EXCEPT JS Value type.
- // In the case that the object is a value-wrapper object,
- // we enter the runtime system to make sure that indexing into string
- // objects work as intended.
- DCHECK(JS_OBJECT_TYPE > JS_VALUE_TYPE);
- __ ldrb(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
- __ cmp(scratch, Operand(JS_OBJECT_TYPE));
- __ b(lt, slow);
-}
-
-
-// Loads an indexed element from a fast case array.
-// If not_fast_array is NULL, doesn't perform the elements map check.
-static void GenerateFastArrayLoad(MacroAssembler* masm,
- Register receiver,
- Register key,
- Register elements,
- Register scratch1,
- Register scratch2,
- Register result,
- Label* not_fast_array,
- Label* out_of_range) {
- // Register use:
- //
- // receiver - holds the receiver on entry.
- // Unchanged unless 'result' is the same register.
- //
- // key - holds the smi key on entry.
- // Unchanged unless 'result' is the same register.
- //
- // elements - holds the elements of the receiver on exit.
- //
- // result - holds the result on exit if the load succeeded.
- // Allowed to be the same as 'receiver' or 'key'.
- // Unchanged on bailout so 'receiver' and 'key' can be safely
- // used by further computation.
- //
- // Scratch registers:
- //
- // scratch1 - used to hold elements map and elements length.
- // Holds the elements map if not_fast_array branch is taken.
- //
- // scratch2 - used to hold the loaded value.
-
- __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- if (not_fast_array != NULL) {
- // Check that the object is in fast mode and writable.
- __ ldr(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
- __ cmp(scratch1, ip);
- __ b(ne, not_fast_array);
- } else {
- __ AssertFastElements(elements);
- }
- // Check that the key (index) is within bounds.
- __ ldr(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
- __ cmp(key, Operand(scratch1));
- __ b(hs, out_of_range);
- // Fast case: Do the load.
- __ add(scratch1, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ ldr(scratch2, MemOperand::PointerAddressFromSmiKey(scratch1, key));
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ cmp(scratch2, ip);
- // In case the loaded value is the_hole we have to consult GetProperty
- // to ensure the prototype chain is searched.
- __ b(eq, out_of_range);
- __ mov(result, scratch2);
-}
-
-
-// Checks whether a key is an array index string or a unique name.
-// Falls through if a key is a unique name.
-static void GenerateKeyNameCheck(MacroAssembler* masm,
- Register key,
- Register map,
- Register hash,
- Label* index_string,
- Label* not_unique) {
- // The key is not a smi.
- Label unique;
- // Is it a name?
- __ CompareObjectType(key, map, hash, LAST_UNIQUE_NAME_TYPE);
- __ b(hi, not_unique);
- STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
- __ b(eq, &unique);
-
- // Is the string an array index, with cached numeric value?
- __ ldr(hash, FieldMemOperand(key, Name::kHashFieldOffset));
- __ tst(hash, Operand(Name::kContainsCachedArrayIndexMask));
- __ b(eq, index_string);
-
- // Is the string internalized? We know it's a string, so a single
- // bit test is enough.
- // map: key map
- __ ldrb(hash, FieldMemOperand(map, Map::kInstanceTypeOffset));
- STATIC_ASSERT(kInternalizedTag == 0);
- __ tst(hash, Operand(kIsNotInternalizedMask));
- __ b(ne, not_unique);
-
- __ bind(&unique);
-}
-
-
-void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
- // The return address is in lr.
- Register receiver = ReceiverRegister();
- Register name = NameRegister();
- DCHECK(receiver.is(r1));
- DCHECK(name.is(r2));
-
- // Probe the stub cache.
- Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
- Code::ComputeHandlerFlags(Code::LOAD_IC));
- masm->isolate()->stub_cache()->GenerateProbe(
- masm, flags, receiver, name, r3, r4, r5, r6);
-
- // Cache miss: Jump to runtime.
- GenerateMiss(masm);
-}
-
-
-void LoadIC::GenerateNormal(MacroAssembler* masm) {
- Register dictionary = r0;
- DCHECK(!dictionary.is(ReceiverRegister()));
- DCHECK(!dictionary.is(NameRegister()));
-
- Label slow;
-
- __ ldr(dictionary,
- FieldMemOperand(ReceiverRegister(), JSObject::kPropertiesOffset));
- GenerateDictionaryLoad(masm, &slow, dictionary, NameRegister(), r0, r3, r4);
- __ Ret();
-
- // Dictionary load failed, go slow (but don't miss).
- __ bind(&slow);
- GenerateRuntimeGetProperty(masm);
-}
-
-
-// A register that isn't one of the parameters to the load ic.
-static const Register LoadIC_TempRegister() { return r3; }
-
-
-void LoadIC::GenerateMiss(MacroAssembler* masm) {
- // The return address is in lr.
- Isolate* isolate = masm->isolate();
-
- __ IncrementCounter(isolate->counters()->load_miss(), 1, r3, r4);
-
- __ mov(LoadIC_TempRegister(), ReceiverRegister());
- __ Push(LoadIC_TempRegister(), NameRegister());
-
- // Perform tail call to the entry.
- ExternalReference ref =
- ExternalReference(IC_Utility(kLoadIC_Miss), isolate);
- __ TailCallExternalReference(ref, 2, 1);
-}
-
-
-void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
- // The return address is in lr.
-
- __ mov(LoadIC_TempRegister(), ReceiverRegister());
- __ Push(LoadIC_TempRegister(), NameRegister());
-
- __ TailCallRuntime(Runtime::kGetProperty, 2, 1);
-}
-
-
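-// Returns the MemOperand of the context slot that maps 'key' in a
-// sloppy-arguments object. Jumps to unmapped_case, with the parameter map
-// left in scratch1, when the key is outside the mapped range, and to
-// slow_case when any receiver or key check fails. All three scratch
-// registers are clobbered.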
-static MemOperand GenerateMappedArgumentsLookup(MacroAssembler* masm,
- Register object,
- Register key,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* unmapped_case,
- Label* slow_case) {
- Heap* heap = masm->isolate()->heap();
-
- // Check that the receiver is a JSObject. Because of the map check
- // later, we do not need to check for interceptors or whether it
- // requires access checks.
- __ JumpIfSmi(object, slow_case);
- // Check that the object is some kind of JSObject.
- __ CompareObjectType(object, scratch1, scratch2, FIRST_JS_RECEIVER_TYPE);
- __ b(lt, slow_case);
-
- // Check that the key is a positive smi.
- __ tst(key, Operand(0x80000001));
- __ b(ne, slow_case);
-
- // Load the elements into scratch1 and check its map.
- Handle<Map> arguments_map(heap->sloppy_arguments_elements_map());
- __ ldr(scratch1, FieldMemOperand(object, JSObject::kElementsOffset));
- __ CheckMap(scratch1, scratch2, arguments_map, slow_case, DONT_DO_SMI_CHECK);
-
- // Check if element is in the range of mapped arguments. If not, jump
- // to the unmapped lookup with the parameter map in scratch1.
- __ ldr(scratch2, FieldMemOperand(scratch1, FixedArray::kLengthOffset));
- __ sub(scratch2, scratch2, Operand(Smi::FromInt(2)));
- __ cmp(key, Operand(scratch2));
- __ b(cs, unmapped_case);
-
- // Load element index and check whether it is the hole.
- const int kOffset =
- FixedArray::kHeaderSize + 2 * kPointerSize - kHeapObjectTag;
-
- __ mov(scratch3, Operand(kPointerSize >> 1));
- __ mul(scratch3, key, scratch3);
- __ add(scratch3, scratch3, Operand(kOffset));
-
- __ ldr(scratch2, MemOperand(scratch1, scratch3));
- __ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex);
- __ cmp(scratch2, scratch3);
- __ b(eq, unmapped_case);
-
- // Load value from context and return it. We can reuse scratch1 because
- // we do not jump to the unmapped lookup (which requires the parameter
- // map in scratch1).
- __ ldr(scratch1, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
- __ mov(scratch3, Operand(kPointerSize >> 1));
- __ mul(scratch3, scratch2, scratch3);
- __ add(scratch3, scratch3, Operand(Context::kHeaderSize - kHeapObjectTag));
- return MemOperand(scratch1, scratch3);
-}
-
-
-static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
- Register key,
- Register parameter_map,
- Register scratch,
- Label* slow_case) {
- // Element is in arguments backing store, which is referenced by the
- // second element of the parameter_map. The parameter_map register
- // must be loaded with the parameter map of the arguments object and is
- // overwritten.
- const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
- Register backing_store = parameter_map;
- __ ldr(backing_store, FieldMemOperand(parameter_map, kBackingStoreOffset));
- Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
- __ CheckMap(backing_store, scratch, fixed_array_map, slow_case,
- DONT_DO_SMI_CHECK);
- __ ldr(scratch, FieldMemOperand(backing_store, FixedArray::kLengthOffset));
- __ cmp(key, Operand(scratch));
- __ b(cs, slow_case);
- __ mov(scratch, Operand(kPointerSize >> 1));
- __ mul(scratch, key, scratch);
- __ add(scratch,
- scratch,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- return MemOperand(backing_store, scratch);
-}
-
-
-void KeyedLoadIC::GenerateSloppyArguments(MacroAssembler* masm) {
- // The return address is in lr.
- Register receiver = ReceiverRegister();
- Register key = NameRegister();
- DCHECK(receiver.is(r1));
- DCHECK(key.is(r2));
-
- Label slow, notin;
- MemOperand mapped_location =
- GenerateMappedArgumentsLookup(
- masm, receiver, key, r0, r3, r4, &notin, &slow);
- __ ldr(r0, mapped_location);
- __ Ret();
- __ bind(&notin);
- // The unmapped lookup expects that the parameter map is in r0.
- MemOperand unmapped_location =
- GenerateUnmappedArgumentsLookup(masm, key, r0, r3, &slow);
- __ ldr(r0, unmapped_location);
- __ LoadRoot(r3, Heap::kTheHoleValueRootIndex);
- __ cmp(r0, r3);
- __ b(eq, &slow);
- __ Ret();
- __ bind(&slow);
- GenerateMiss(masm);
-}
-
-
-void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
- Register receiver = ReceiverRegister();
- Register key = NameRegister();
- Register value = ValueRegister();
- DCHECK(receiver.is(r1));
- DCHECK(key.is(r2));
- DCHECK(value.is(r0));
-
- Label slow, notin;
- MemOperand mapped_location = GenerateMappedArgumentsLookup(
- masm, receiver, key, r3, r4, r5, &notin, &slow);
- __ str(value, mapped_location);
- __ add(r6, r3, r5);
- __ mov(r9, value);
- __ RecordWrite(r3, r6, r9, kLRHasNotBeenSaved, kDontSaveFPRegs);
- __ Ret();
- __ bind(&notin);
- // The unmapped lookup expects that the parameter map is in r3.
- MemOperand unmapped_location =
- GenerateUnmappedArgumentsLookup(masm, key, r3, r4, &slow);
- __ str(value, unmapped_location);
- __ add(r6, r3, r4);
- __ mov(r9, value);
- __ RecordWrite(r3, r6, r9, kLRHasNotBeenSaved, kDontSaveFPRegs);
- __ Ret();
- __ bind(&slow);
- GenerateMiss(masm);
-}
-
-
-void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
- // The return address is in lr.
- Isolate* isolate = masm->isolate();
-
- __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, r3, r4);
-
- __ Push(ReceiverRegister(), NameRegister());
-
- // Perform tail call to the entry.
- ExternalReference ref =
- ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate);
-
- __ TailCallExternalReference(ref, 2, 1);
-}
-
-
-// IC register specifications
-const Register LoadIC::ReceiverRegister() { return r1; }
-const Register LoadIC::NameRegister() { return r2; }
-
-
-const Register LoadIC::SlotRegister() {
- DCHECK(FLAG_vector_ics);
- return r0;
-}
-
-
-const Register LoadIC::VectorRegister() {
- DCHECK(FLAG_vector_ics);
- return r3;
-}
-
-
-const Register StoreIC::ReceiverRegister() { return r1; }
-const Register StoreIC::NameRegister() { return r2; }
-const Register StoreIC::ValueRegister() { return r0; }
-
-
-const Register KeyedStoreIC::MapRegister() {
- return r3;
-}
-
-
-void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
- // The return address is in lr.
-
- __ Push(ReceiverRegister(), NameRegister());
-
- __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
-}
-
-
-void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
- // The return address is in lr.
- Label slow, check_name, index_smi, index_name, property_array_property;
- Label probe_dictionary, check_number_dictionary;
-
- Register key = NameRegister();
- Register receiver = ReceiverRegister();
- DCHECK(key.is(r2));
- DCHECK(receiver.is(r1));
-
- Isolate* isolate = masm->isolate();
-
- // Check that the key is a smi.
- __ JumpIfNotSmi(key, &check_name);
- __ bind(&index_smi);
- // Now the key is known to be a smi. This place is also jumped to from below
- // where a numeric string is converted to a smi.
-
- GenerateKeyedLoadReceiverCheck(
- masm, receiver, r0, r3, Map::kHasIndexedInterceptor, &slow);
-
- // Check the receiver's map to see if it has fast elements.
- __ CheckFastElements(r0, r3, &check_number_dictionary);
-
- GenerateFastArrayLoad(
- masm, receiver, key, r0, r3, r4, r0, NULL, &slow);
- __ IncrementCounter(isolate->counters()->keyed_load_generic_smi(), 1, r4, r3);
- __ Ret();
-
- __ bind(&check_number_dictionary);
- __ ldr(r4, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ ldr(r3, FieldMemOperand(r4, JSObject::kMapOffset));
-
- // Check whether the elements is a number dictionary.
- // r3: elements map
- // r4: elements
- __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
- __ cmp(r3, ip);
- __ b(ne, &slow);
- __ SmiUntag(r0, key);
- __ LoadFromNumberDictionary(&slow, r4, key, r0, r0, r3, r5);
- __ Ret();
-
- // Slow case, key and receiver still in r2 and r1.
- __ bind(&slow);
- __ IncrementCounter(isolate->counters()->keyed_load_generic_slow(),
- 1, r4, r3);
- GenerateRuntimeGetProperty(masm);
-
- __ bind(&check_name);
- GenerateKeyNameCheck(masm, key, r0, r3, &index_name, &slow);
-
- GenerateKeyedLoadReceiverCheck(
- masm, receiver, r0, r3, Map::kHasNamedInterceptor, &slow);
-
- // If the receiver is a fast-case object, check the keyed lookup
- // cache. Otherwise probe the dictionary.
- __ ldr(r3, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
- __ ldr(r4, FieldMemOperand(r3, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
- __ cmp(r4, ip);
- __ b(eq, &probe_dictionary);
-
- // Load the map of the receiver, compute the keyed lookup cache hash
- // based on 32 bits of the map pointer and the name hash.
- __ ldr(r0, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ mov(r3, Operand(r0, ASR, KeyedLookupCache::kMapHashShift));
- __ ldr(r4, FieldMemOperand(key, Name::kHashFieldOffset));
- __ eor(r3, r3, Operand(r4, ASR, Name::kHashShift));
- int mask = KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask;
- __ And(r3, r3, Operand(mask));
-
- // Load the key (consisting of map and unique name) from the cache and
- // check for match.
- Label load_in_object_property;
- static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
- Label hit_on_nth_entry[kEntriesPerBucket];
- ExternalReference cache_keys =
- ExternalReference::keyed_lookup_cache_keys(isolate);
-
- __ mov(r4, Operand(cache_keys));
- __ add(r4, r4, Operand(r3, LSL, kPointerSizeLog2 + 1));
-
- for (int i = 0; i < kEntriesPerBucket - 1; i++) {
- Label try_next_entry;
- // Load map and move r4 to next entry.
- __ ldr(r5, MemOperand(r4, kPointerSize * 2, PostIndex));
- __ cmp(r0, r5);
- __ b(ne, &try_next_entry);
- __ ldr(r5, MemOperand(r4, -kPointerSize)); // Load name
- __ cmp(key, r5);
- __ b(eq, &hit_on_nth_entry[i]);
- __ bind(&try_next_entry);
- }
-
- // Last entry: Load map and move r4 to name.
- __ ldr(r5, MemOperand(r4, kPointerSize, PostIndex));
- __ cmp(r0, r5);
- __ b(ne, &slow);
- __ ldr(r5, MemOperand(r4));
- __ cmp(key, r5);
- __ b(ne, &slow);
-
- // Get field offset.
- // r0 : receiver's map
- // r3 : lookup cache index
- ExternalReference cache_field_offsets =
- ExternalReference::keyed_lookup_cache_field_offsets(isolate);
-
- // Hit on nth entry.
- for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
- __ bind(&hit_on_nth_entry[i]);
- __ mov(r4, Operand(cache_field_offsets));
- if (i != 0) {
- __ add(r3, r3, Operand(i));
- }
- __ ldr(r5, MemOperand(r4, r3, LSL, kPointerSizeLog2));
- __ ldrb(r6, FieldMemOperand(r0, Map::kInObjectPropertiesOffset));
- __ sub(r5, r5, r6, SetCC);
- __ b(ge, &property_array_property);
- if (i != 0) {
- __ jmp(&load_in_object_property);
- }
- }
-
- // Load in-object property.
- __ bind(&load_in_object_property);
- __ ldrb(r6, FieldMemOperand(r0, Map::kInstanceSizeOffset));
- __ add(r6, r6, r5); // Index from start of object.
- __ sub(receiver, receiver, Operand(kHeapObjectTag)); // Remove the heap tag.
- __ ldr(r0, MemOperand(receiver, r6, LSL, kPointerSizeLog2));
- __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(),
- 1, r4, r3);
- __ Ret();
-
- // Load property array property.
- __ bind(&property_array_property);
- __ ldr(receiver, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
- __ add(receiver, receiver, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ ldr(r0, MemOperand(receiver, r5, LSL, kPointerSizeLog2));
- __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(),
- 1, r4, r3);
- __ Ret();
-
- // Do a quick inline probe of the receiver's dictionary, if it
- // exists.
- __ bind(&probe_dictionary);
- // r3: elements
- __ ldr(r0, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
- GenerateGlobalInstanceTypeCheck(masm, r0, &slow);
- // Load the property to r0.
- GenerateDictionaryLoad(masm, &slow, r3, key, r0, r5, r4);
- __ IncrementCounter(
- isolate->counters()->keyed_load_generic_symbol(), 1, r4, r3);
- __ Ret();
-
- __ bind(&index_name);
- __ IndexFromHash(r3, key);
- // Now jump to the place where smi keys are handled.
- __ jmp(&index_smi);
-}
-
-
-void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
- // Return address is in lr.
- Label miss;
-
- Register receiver = ReceiverRegister();
- Register index = NameRegister();
- Register scratch = r3;
- Register result = r0;
- DCHECK(!scratch.is(receiver) && !scratch.is(index));
-
- StringCharAtGenerator char_at_generator(receiver,
- index,
- scratch,
- result,
- &miss, // When not a string.
- &miss, // When not a number.
- &miss, // When index out of range.
- STRING_INDEX_IS_ARRAY_INDEX);
- char_at_generator.GenerateFast(masm);
- __ Ret();
-
- StubRuntimeCallHelper call_helper;
- char_at_generator.GenerateSlow(masm, call_helper);
-
- __ bind(&miss);
- GenerateMiss(masm);
-}
-
-
-void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
- // Return address is in lr.
- Label slow;
-
- Register receiver = ReceiverRegister();
- Register key = NameRegister();
- Register scratch1 = r3;
- Register scratch2 = r4;
- DCHECK(!scratch1.is(receiver) && !scratch1.is(key));
- DCHECK(!scratch2.is(receiver) && !scratch2.is(key));
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, &slow);
-
- // Check that the key is an array index, that is Uint32.
- __ NonNegativeSmiTst(key);
- __ b(ne, &slow);
-
- // Get the map of the receiver.
- __ ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
-
- // Check that it has indexed interceptor and access checks
- // are not enabled for this object.
- __ ldrb(scratch2, FieldMemOperand(scratch1, Map::kBitFieldOffset));
- __ and_(scratch2, scratch2, Operand(kSlowCaseBitFieldMask));
- __ cmp(scratch2, Operand(1 << Map::kHasIndexedInterceptor));
- __ b(ne, &slow);
-
- // Everything is fine, call runtime.
- __ Push(receiver, key); // Receiver, key.
-
- // Perform tail call to the entry.
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(kLoadElementWithInterceptor),
- masm->isolate()),
- 2, 1);
-
- __ bind(&slow);
- GenerateMiss(masm);
-}
-
-
-void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
- // Push receiver, key and value for runtime call.
- __ Push(ReceiverRegister(), NameRegister(), ValueRegister());
-
- ExternalReference ref =
- ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
- __ TailCallExternalReference(ref, 3, 1);
-}
-
-
-void StoreIC::GenerateSlow(MacroAssembler* masm) {
- // Push receiver, key and value for runtime call.
- __ Push(ReceiverRegister(), NameRegister(), ValueRegister());
-
- // The slow case calls into the runtime to complete the store without causing
- // an IC miss that would otherwise cause a transition to the generic stub.
- ExternalReference ref =
- ExternalReference(IC_Utility(kStoreIC_Slow), masm->isolate());
- __ TailCallExternalReference(ref, 3, 1);
-}
-
-
-void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
- // Push receiver, key and value for runtime call.
- __ Push(ReceiverRegister(), NameRegister(), ValueRegister());
-
- // The slow case calls into the runtime to complete the store without causing
- // an IC miss that would otherwise cause a transition to the generic stub.
- ExternalReference ref =
- ExternalReference(IC_Utility(kKeyedStoreIC_Slow), masm->isolate());
- __ TailCallExternalReference(ref, 3, 1);
-}
-
-
-void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
- StrictMode strict_mode) {
- // Push receiver, key and value for runtime call.
- __ Push(ReceiverRegister(), NameRegister(), ValueRegister());
-
- __ mov(r0, Operand(Smi::FromInt(strict_mode))); // Strict mode.
- __ Push(r0);
-
- __ TailCallRuntime(Runtime::kSetProperty, 4, 1);
-}
-
-
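-// Shared tail of KeyedStoreIC::GenerateGeneric, entered through the
-// fast_object and fast_double labels. Stores smi, object and double values,
-// bumps the array length when increment_length == kIncrementLength, and
-// transitions the elements kind when the stored value requires it.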
-static void KeyedStoreGenerateGenericHelper(
- MacroAssembler* masm,
- Label* fast_object,
- Label* fast_double,
- Label* slow,
- KeyedStoreCheckMap check_map,
- KeyedStoreIncrementLength increment_length,
- Register value,
- Register key,
- Register receiver,
- Register receiver_map,
- Register elements_map,
- Register elements) {
- Label transition_smi_elements;
- Label finish_object_store, non_double_value, transition_double_elements;
- Label fast_double_without_map_check;
-
- // Fast case: Do the store, could be either Object or double.
- __ bind(fast_object);
- Register scratch_value = r4;
- Register address = r5;
- if (check_map == kCheckMap) {
- __ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
- __ cmp(elements_map,
- Operand(masm->isolate()->factory()->fixed_array_map()));
- __ b(ne, fast_double);
- }
-
- // HOLECHECK: guards "A[i] = V"
- // We have to go to the runtime if the current value is the hole because
- // there may be a callback on the element
- Label holecheck_passed1;
- __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ ldr(scratch_value,
- MemOperand::PointerAddressFromSmiKey(address, key, PreIndex));
- __ cmp(scratch_value, Operand(masm->isolate()->factory()->the_hole_value()));
- __ b(ne, &holecheck_passed1);
- __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value,
- slow);
-
- __ bind(&holecheck_passed1);
-
- // Smi stores don't require further checks.
- Label non_smi_value;
- __ JumpIfNotSmi(value, &non_smi_value);
-
- if (increment_length == kIncrementLength) {
- // Add 1 to receiver->length.
- __ add(scratch_value, key, Operand(Smi::FromInt(1)));
- __ str(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
- }
- // It's irrelevant whether array is smi-only or not when writing a smi.
- __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ str(value, MemOperand::PointerAddressFromSmiKey(address, key));
- __ Ret();
-
- __ bind(&non_smi_value);
- // Escape to elements kind transition case.
- __ CheckFastObjectElements(receiver_map, scratch_value,
- &transition_smi_elements);
-
- // Fast elements array, store the value to the elements backing store.
- __ bind(&finish_object_store);
- if (increment_length == kIncrementLength) {
- // Add 1 to receiver->length.
- __ add(scratch_value, key, Operand(Smi::FromInt(1)));
- __ str(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
- }
- __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ add(address, address, Operand::PointerOffsetFromSmiKey(key));
- __ str(value, MemOperand(address));
- // Update write barrier for the elements array address.
- __ mov(scratch_value, value); // Preserve the value which is returned.
- __ RecordWrite(elements,
- address,
- scratch_value,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- __ Ret();
-
- __ bind(fast_double);
- if (check_map == kCheckMap) {
- // Check for fast double array case. If this fails, call through to the
- // runtime.
- __ CompareRoot(elements_map, Heap::kFixedDoubleArrayMapRootIndex);
- __ b(ne, slow);
- }
-
- // HOLECHECK: guards "A[i] double hole?"
- // We have to see if the double version of the hole is present. If so
- // go to the runtime.
- __ add(address, elements,
- Operand((FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32))
- - kHeapObjectTag));
- __ ldr(scratch_value,
- MemOperand(address, key, LSL, kPointerSizeLog2, PreIndex));
- __ cmp(scratch_value, Operand(kHoleNanUpper32));
- __ b(ne, &fast_double_without_map_check);
- __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value,
- slow);
-
- __ bind(&fast_double_without_map_check);
- __ StoreNumberToDoubleElements(value, key, elements, r3, d0,
- &transition_double_elements);
- if (increment_length == kIncrementLength) {
- // Add 1 to receiver->length.
- __ add(scratch_value, key, Operand(Smi::FromInt(1)));
- __ str(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
- }
- __ Ret();
-
- __ bind(&transition_smi_elements);
- // Transition the array appropriately depending on the value type.
- __ ldr(r4, FieldMemOperand(value, HeapObject::kMapOffset));
- __ CompareRoot(r4, Heap::kHeapNumberMapRootIndex);
- __ b(ne, &non_double_value);
-
- // Value is a double. Transition FAST_SMI_ELEMENTS ->
- // FAST_DOUBLE_ELEMENTS and complete the store.
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- FAST_DOUBLE_ELEMENTS,
- receiver_map,
- r4,
- slow);
- AllocationSiteMode mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS,
- FAST_DOUBLE_ELEMENTS);
- ElementsTransitionGenerator::GenerateSmiToDouble(
- masm, receiver, key, value, receiver_map, mode, slow);
- __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ jmp(&fast_double_without_map_check);
-
- __ bind(&non_double_value);
- // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- FAST_ELEMENTS,
- receiver_map,
- r4,
- slow);
- mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
- ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
- masm, receiver, key, value, receiver_map, mode, slow);
- __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ jmp(&finish_object_store);
-
- __ bind(&transition_double_elements);
- // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
- // HeapNumber. Make sure that the receiver is a Array with FAST_ELEMENTS and
- // transition array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS
- __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS,
- FAST_ELEMENTS,
- receiver_map,
- r4,
- slow);
- mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
- ElementsTransitionGenerator::GenerateDoubleToObject(
- masm, receiver, key, value, receiver_map, mode, slow);
- __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ jmp(&finish_object_store);
-}
-
-
-void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
- StrictMode strict_mode) {
- // ---------- S t a t e --------------
- // -- r0 : value
- // -- r1 : key
- // -- r2 : receiver
- // -- lr : return address
- // -----------------------------------
- Label slow, fast_object, fast_object_grow;
- Label fast_double, fast_double_grow;
- Label array, extra, check_if_double_array;
-
- // Register usage.
- Register value = ValueRegister();
- Register key = NameRegister();
- Register receiver = ReceiverRegister();
- DCHECK(receiver.is(r1));
- DCHECK(key.is(r2));
- DCHECK(value.is(r0));
- Register receiver_map = r3;
- Register elements_map = r6;
- Register elements = r9; // Elements array of the receiver.
- // r4 and r5 are used as general scratch registers.
-
- // Check that the key is a smi.
- __ JumpIfNotSmi(key, &slow);
- // Check that the object isn't a smi.
- __ JumpIfSmi(receiver, &slow);
- // Get the map of the object.
- __ ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
- // Check that the receiver does not require access checks and is not observed.
- // The generic stub does not perform map checks or handle observed objects.
- __ ldrb(ip, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
- __ tst(ip, Operand(1 << Map::kIsAccessCheckNeeded | 1 << Map::kIsObserved));
- __ b(ne, &slow);
- // Check if the object is a JS array or not.
- __ ldrb(r4, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset));
- __ cmp(r4, Operand(JS_ARRAY_TYPE));
- __ b(eq, &array);
- // Check that the object is some kind of JSObject.
- __ cmp(r4, Operand(FIRST_JS_OBJECT_TYPE));
- __ b(lt, &slow);
-
- // Object case: Check key against length in the elements array.
- __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- // Check array bounds. Both the key and the length of FixedArray are smis.
- __ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
- __ cmp(key, Operand(ip));
- __ b(lo, &fast_object);
-
- // Slow case, handle jump to runtime.
- __ bind(&slow);
- // Entry registers are intact.
- // r0: value.
- // r1: key.
- // r2: receiver.
- GenerateRuntimeSetProperty(masm, strict_mode);
-
- // Extra capacity case: Check if there is extra capacity to
- // perform the store and update the length. Used for adding one
- // element to the array by writing to array[array.length].
- __ bind(&extra);
- // Condition code from comparing key and array length is still available.
- __ b(ne, &slow); // Only support writing to array[array.length].
- // Check for room in the elements backing store.
- // Both the key and the length of FixedArray are smis.
- __ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
- __ cmp(key, Operand(ip));
- __ b(hs, &slow);
- __ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
- __ cmp(elements_map,
- Operand(masm->isolate()->factory()->fixed_array_map()));
- __ b(ne, &check_if_double_array);
- __ jmp(&fast_object_grow);
-
- __ bind(&check_if_double_array);
- __ cmp(elements_map,
- Operand(masm->isolate()->factory()->fixed_double_array_map()));
- __ b(ne, &slow);
- __ jmp(&fast_double_grow);
-
- // Array case: Get the length and the elements array from the JS
- // array. Check that the array is in fast mode (and writable); if it
- // is the length is always a smi.
- __ bind(&array);
- __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-
- // Check the key against the length in the array.
- __ ldr(ip, FieldMemOperand(receiver, JSArray::kLengthOffset));
- __ cmp(key, Operand(ip));
- __ b(hs, &extra);
-
- KeyedStoreGenerateGenericHelper(masm, &fast_object, &fast_double,
- &slow, kCheckMap, kDontIncrementLength,
- value, key, receiver, receiver_map,
- elements_map, elements);
- KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow,
- &slow, kDontCheckMap, kIncrementLength,
- value, key, receiver, receiver_map,
- elements_map, elements);
-}
-
-
-void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
- Register receiver = ReceiverRegister();
- Register name = NameRegister();
- DCHECK(receiver.is(r1));
- DCHECK(name.is(r2));
- DCHECK(ValueRegister().is(r0));
-
- // Get the receiver from the stack and probe the stub cache.
- Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
- Code::ComputeHandlerFlags(Code::STORE_IC));
-
- masm->isolate()->stub_cache()->GenerateProbe(
- masm, flags, receiver, name, r3, r4, r5, r6);
-
- // Cache miss: Jump to runtime.
- GenerateMiss(masm);
-}
-
-
-void StoreIC::GenerateMiss(MacroAssembler* masm) {
- __ Push(ReceiverRegister(), NameRegister(), ValueRegister());
-
- // Perform tail call to the entry.
- ExternalReference ref =
- ExternalReference(IC_Utility(kStoreIC_Miss), masm->isolate());
- __ TailCallExternalReference(ref, 3, 1);
-}
-
-
-void StoreIC::GenerateNormal(MacroAssembler* masm) {
- Label miss;
- Register receiver = ReceiverRegister();
- Register name = NameRegister();
- Register value = ValueRegister();
- Register dictionary = r3;
- DCHECK(receiver.is(r1));
- DCHECK(name.is(r2));
- DCHECK(value.is(r0));
-
- __ ldr(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
-
- GenerateDictionaryStore(masm, &miss, dictionary, name, value, r4, r5);
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->store_normal_hit(),
- 1, r4, r5);
- __ Ret();
-
- __ bind(&miss);
- __ IncrementCounter(counters->store_normal_miss(), 1, r4, r5);
- GenerateMiss(masm);
-}
-
-
-void StoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
- StrictMode strict_mode) {
- __ Push(ReceiverRegister(), NameRegister(), ValueRegister());
-
- __ mov(r0, Operand(Smi::FromInt(strict_mode)));
- __ Push(r0);
-
- // Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kSetProperty, 4, 1);
-}
-
-
-#undef __
-
-
-Condition CompareIC::ComputeCondition(Token::Value op) {
- switch (op) {
- case Token::EQ_STRICT:
- case Token::EQ:
- return eq;
- case Token::LT:
- return lt;
- case Token::GT:
- return gt;
- case Token::LTE:
- return le;
- case Token::GTE:
- return ge;
- default:
- UNREACHABLE();
- return kNoCondition;
- }
-}
-
-
-bool CompareIC::HasInlinedSmiCode(Address address) {
- // The address of the instruction following the call.
- Address cmp_instruction_address =
- Assembler::return_address_from_call_start(address);
-
- // If the instruction following the call is not a cmp rx, #yyy, nothing
- // was inlined.
- Instr instr = Assembler::instr_at(cmp_instruction_address);
- return Assembler::IsCmpImmediate(instr);
-}
-
-
-void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
- Address cmp_instruction_address =
- Assembler::return_address_from_call_start(address);
-
- // If the instruction following the call is not a cmp rx, #yyy, nothing
- // was inlined.
- Instr instr = Assembler::instr_at(cmp_instruction_address);
- if (!Assembler::IsCmpImmediate(instr)) {
- return;
- }
-
- // The delta to the start of the map check instruction and the
- // condition code uses at the patched jump.
- int delta = Assembler::GetCmpImmediateRawImmediate(instr);
- delta +=
- Assembler::GetCmpImmediateRegister(instr).code() * kOff12Mask;
- // If the delta is 0 the instruction is cmp r0, #0 which also signals that
- // nothing was inlined.
- if (delta == 0) {
- return;
- }
-
- if (FLAG_trace_ic) {
- PrintF("[ patching ic at %p, cmp=%p, delta=%d\n",
- address, cmp_instruction_address, delta);
- }
-
- Address patch_address =
- cmp_instruction_address - delta * Instruction::kInstrSize;
- Instr instr_at_patch = Assembler::instr_at(patch_address);
- Instr branch_instr =
- Assembler::instr_at(patch_address + Instruction::kInstrSize);
- // This is patching a conditional "jump if not smi/jump if smi" site.
- // Enabling by changing from
- // cmp rx, rx
- // b eq/ne, <target>
- // to
- // tst rx, #kSmiTagMask
- // b ne/eq, <target>
- // and vice-versa to be disabled again.
- CodePatcher patcher(patch_address, 2);
- Register reg = Assembler::GetRn(instr_at_patch);
- if (check == ENABLE_INLINED_SMI_CHECK) {
- DCHECK(Assembler::IsCmpRegister(instr_at_patch));
- DCHECK_EQ(Assembler::GetRn(instr_at_patch).code(),
- Assembler::GetRm(instr_at_patch).code());
- patcher.masm()->tst(reg, Operand(kSmiTagMask));
- } else {
- DCHECK(check == DISABLE_INLINED_SMI_CHECK);
- DCHECK(Assembler::IsTstImmediate(instr_at_patch));
- patcher.masm()->cmp(reg, reg);
- }
- DCHECK(Assembler::IsBranch(branch_instr));
- if (Assembler::GetCondition(branch_instr) == eq) {
- patcher.EmitCondition(ne);
- } else {
- DCHECK(Assembler::GetCondition(branch_instr) == ne);
- patcher.EmitCondition(eq);
- }
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_ARM
#include "src/arm/lithium-gap-resolver-arm.h"
#include "src/code-stubs.h"
#include "src/hydrogen-osr.h"
-#include "src/stub-cache.h"
namespace v8 {
namespace internal {
--- a/src/arm/stub-cache-arm.cc
+++ /dev/null
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/v8.h"
-
-#if V8_TARGET_ARCH_ARM
-
-#include "src/codegen.h"
-#include "src/ic-inl.h"
-#include "src/stub-cache.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-
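-// Probes one stub cache table (primary or secondary). On a hit (name,
-// receiver map and code flags all match) control jumps directly to the
-// cached handler; on a miss the code falls through.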
-static void ProbeTable(Isolate* isolate,
- MacroAssembler* masm,
- Code::Flags flags,
- StubCache::Table table,
- Register receiver,
- Register name,
- // Number of the cache entry, not scaled.
- Register offset,
- Register scratch,
- Register scratch2,
- Register offset_scratch) {
- ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
- ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
- ExternalReference map_offset(isolate->stub_cache()->map_reference(table));
-
- uint32_t key_off_addr = reinterpret_cast<uint32_t>(key_offset.address());
- uint32_t value_off_addr = reinterpret_cast<uint32_t>(value_offset.address());
- uint32_t map_off_addr = reinterpret_cast<uint32_t>(map_offset.address());
-
- // Check the relative positions of the address fields.
- DCHECK(value_off_addr > key_off_addr);
- DCHECK((value_off_addr - key_off_addr) % 4 == 0);
- DCHECK((value_off_addr - key_off_addr) < (256 * 4));
- DCHECK(map_off_addr > key_off_addr);
- DCHECK((map_off_addr - key_off_addr) % 4 == 0);
- DCHECK((map_off_addr - key_off_addr) < (256 * 4));
-
- Label miss;
- Register base_addr = scratch;
- scratch = no_reg;
-
- // Multiply by 3 because there are 3 fields per entry (name, code, map).
- __ add(offset_scratch, offset, Operand(offset, LSL, 1));
-
- // Calculate the base address of the entry.
- __ mov(base_addr, Operand(key_offset));
- __ add(base_addr, base_addr, Operand(offset_scratch, LSL, kPointerSizeLog2));
-
- // Check that the key in the entry matches the name.
- __ ldr(ip, MemOperand(base_addr, 0));
- __ cmp(name, ip);
- __ b(ne, &miss);
-
- // Check the map matches.
- __ ldr(ip, MemOperand(base_addr, map_off_addr - key_off_addr));
- __ ldr(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ cmp(ip, scratch2);
- __ b(ne, &miss);
-
- // Get the code entry from the cache.
- Register code = scratch2;
- scratch2 = no_reg;
- __ ldr(code, MemOperand(base_addr, value_off_addr - key_off_addr));
-
- // Check that the flags match what we're looking for.
- Register flags_reg = base_addr;
- base_addr = no_reg;
- __ ldr(flags_reg, FieldMemOperand(code, Code::kFlagsOffset));
- // It's a nice optimization if this constant is encodable in the bic insn.
- uint32_t mask = Code::kFlagsNotUsedInLookup;
- DCHECK(__ ImmediateFitsAddrMode1Instruction(mask));
- __ bic(flags_reg, flags_reg, Operand(mask));
- __ cmp(flags_reg, Operand(flags));
- __ b(ne, &miss);
-
-#ifdef DEBUG
- if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
- __ jmp(&miss);
- } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
- __ jmp(&miss);
- }
-#endif
-
- // Jump to the first instruction in the code stub.
- __ add(pc, code, Operand(Code::kHeaderSize - kHeapObjectTag));
-
- // Miss: fall through.
- __ bind(&miss);
-}
-
-
-void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
- MacroAssembler* masm, Label* miss_label, Register receiver,
- Handle<Name> name, Register scratch0, Register scratch1) {
- DCHECK(name->IsUniqueName());
- DCHECK(!receiver.is(scratch0));
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->negative_lookups(), 1, scratch0, scratch1);
- __ IncrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
-
- Label done;
-
- const int kInterceptorOrAccessCheckNeededMask =
- (1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded);
-
- // Bail out if the receiver has a named interceptor or requires access checks.
- Register map = scratch1;
- __ ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ ldrb(scratch0, FieldMemOperand(map, Map::kBitFieldOffset));
- __ tst(scratch0, Operand(kInterceptorOrAccessCheckNeededMask));
- __ b(ne, miss_label);
-
- // Check that receiver is a JSObject.
- __ ldrb(scratch0, FieldMemOperand(map, Map::kInstanceTypeOffset));
- __ cmp(scratch0, Operand(FIRST_SPEC_OBJECT_TYPE));
- __ b(lt, miss_label);
-
- // Load properties array.
- Register properties = scratch0;
- __ ldr(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
- // Check that the properties array is a dictionary.
- __ ldr(map, FieldMemOperand(properties, HeapObject::kMapOffset));
- Register tmp = properties;
- __ LoadRoot(tmp, Heap::kHashTableMapRootIndex);
- __ cmp(map, tmp);
- __ b(ne, miss_label);
-
- // Restore the temporarily used register.
- __ ldr(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
-
- NameDictionaryLookupStub::GenerateNegativeLookup(masm,
- miss_label,
- &done,
- receiver,
- properties,
- name,
- scratch1);
- __ bind(&done);
- __ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
-}
-
-
-void StubCache::GenerateProbe(MacroAssembler* masm,
- Code::Flags flags,
- Register receiver,
- Register name,
- Register scratch,
- Register extra,
- Register extra2,
- Register extra3) {
- Isolate* isolate = masm->isolate();
- Label miss;
-
- // Make sure that code is valid. The multiplying code relies on the
- // entry size being 12.
- DCHECK(sizeof(Entry) == 12);
-
- // Make sure the flags do not name a specific type.
- DCHECK(Code::ExtractTypeFromFlags(flags) == 0);
-
- // Make sure that there are no register conflicts.
- DCHECK(!scratch.is(receiver));
- DCHECK(!scratch.is(name));
- DCHECK(!extra.is(receiver));
- DCHECK(!extra.is(name));
- DCHECK(!extra.is(scratch));
- DCHECK(!extra2.is(receiver));
- DCHECK(!extra2.is(name));
- DCHECK(!extra2.is(scratch));
- DCHECK(!extra2.is(extra));
-
- // Check scratch, extra and extra2 registers are valid.
- DCHECK(!scratch.is(no_reg));
- DCHECK(!extra.is(no_reg));
- DCHECK(!extra2.is(no_reg));
- DCHECK(!extra3.is(no_reg));
-
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1,
- extra2, extra3);
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, &miss);
-
- // Get the map of the receiver and compute the hash.
- __ ldr(scratch, FieldMemOperand(name, Name::kHashFieldOffset));
- __ ldr(ip, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ add(scratch, scratch, Operand(ip));
- uint32_t mask = kPrimaryTableSize - 1;
- // We shift out the last two bits because they are not part of the hash and
- // they are always 01 for maps.
- __ mov(scratch, Operand(scratch, LSR, kCacheIndexShift));
- // Mask down the eor argument to the minimum to keep the immediate
- // ARM-encodable.
- __ eor(scratch, scratch, Operand((flags >> kCacheIndexShift) & mask));
- // Prefer and_ to ubfx here because ubfx takes 2 cycles.
- __ and_(scratch, scratch, Operand(mask));
-
- // Probe the primary table.
- ProbeTable(isolate,
- masm,
- flags,
- kPrimary,
- receiver,
- name,
- scratch,
- extra,
- extra2,
- extra3);
-
- // Primary miss: Compute hash for secondary probe.
- __ sub(scratch, scratch, Operand(name, LSR, kCacheIndexShift));
- uint32_t mask2 = kSecondaryTableSize - 1;
- __ add(scratch, scratch, Operand((flags >> kCacheIndexShift) & mask2));
- __ and_(scratch, scratch, Operand(mask2));
-
- // Probe the secondary table.
- ProbeTable(isolate,
- masm,
- flags,
- kSecondary,
- receiver,
- name,
- scratch,
- extra,
- extra2,
- extra3);
-
- // Cache miss: Fall-through and let caller handle the miss by
- // entering the runtime system.
- __ bind(&miss);
- __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1,
- extra2, extra3);
-}
-
-
-void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype(
- MacroAssembler* masm, int index, Register prototype, Label* miss) {
- Isolate* isolate = masm->isolate();
- // Get the global function with the given index.
- Handle<JSFunction> function(
- JSFunction::cast(isolate->native_context()->get(index)));
-
- // Check we're still in the same context.
- Register scratch = prototype;
- const int offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX);
- __ ldr(scratch, MemOperand(cp, offset));
- __ ldr(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
- __ ldr(scratch, MemOperand(scratch, Context::SlotOffset(index)));
- __ Move(ip, function);
- __ cmp(ip, scratch);
- __ b(ne, miss);
-
- // Load its initial map. The global functions all have initial maps.
- __ Move(prototype, Handle<Map>(function->initial_map()));
- // Load the prototype from the initial map.
- __ ldr(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset));
-}
-
-
-void NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(
- MacroAssembler* masm, Register receiver, Register scratch1,
- Register scratch2, Label* miss_label) {
- __ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label);
- __ mov(r0, scratch1);
- __ Ret();
-}
-
-
-// Generate code to check that a global property cell is empty. Create
-// the property cell at compilation time if no cell exists for the
-// property.
-void PropertyHandlerCompiler::GenerateCheckPropertyCell(
- MacroAssembler* masm, Handle<JSGlobalObject> global, Handle<Name> name,
- Register scratch, Label* miss) {
- Handle<Cell> cell = JSGlobalObject::EnsurePropertyCell(global, name);
- DCHECK(cell->value()->IsTheHole());
- __ mov(scratch, Operand(cell));
- __ ldr(scratch, FieldMemOperand(scratch, Cell::kValueOffset));
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ cmp(scratch, ip);
- __ b(ne, miss);
-}
-
-
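-// Pushes the four interceptor call arguments (name, interceptor info,
-// receiver, holder) in the order asserted by the kInterceptorArgs* indices.
-// Clobbers the 'name' register.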
-static void PushInterceptorArguments(MacroAssembler* masm, Register receiver,
- Register holder, Register name,
- Handle<JSObject> holder_obj) {
- STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0);
- STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsInfoIndex == 1);
- STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 2);
- STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 3);
- STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsLength == 4);
- __ push(name);
- Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
- DCHECK(!masm->isolate()->heap()->InNewSpace(*interceptor));
- Register scratch = name;
- __ mov(scratch, Operand(interceptor));
- __ push(scratch);
- __ push(receiver);
- __ push(holder);
-}
-
-
-static void CompileCallLoadPropertyWithInterceptor(
- MacroAssembler* masm, Register receiver, Register holder, Register name,
- Handle<JSObject> holder_obj, IC::UtilityId id) {
- PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
- __ CallExternalReference(ExternalReference(IC_Utility(id), masm->isolate()),
- NamedLoadHandlerCompiler::kInterceptorArgsLength);
-}
-
-
-// Generate call to api function.
-void PropertyHandlerCompiler::GenerateFastApiCall(
- MacroAssembler* masm, const CallOptimization& optimization,
- Handle<Map> receiver_map, Register receiver, Register scratch_in,
- bool is_store, int argc, Register* values) {
- DCHECK(!receiver.is(scratch_in));
- __ push(receiver);
- // Write the arguments to stack frame.
- for (int i = 0; i < argc; i++) {
- Register arg = values[argc - 1 - i];
- DCHECK(!receiver.is(arg));
- DCHECK(!scratch_in.is(arg));
- __ push(arg);
- }
- DCHECK(optimization.is_simple_api_call());
-
-  // ABI for CallApiFunctionStub.
- Register callee = r0;
- Register call_data = r4;
- Register holder = r2;
- Register api_function_address = r1;
-
- // Put holder in place.
- CallOptimization::HolderLookup holder_lookup;
- Handle<JSObject> api_holder =
- optimization.LookupHolderOfExpectedType(receiver_map, &holder_lookup);
- switch (holder_lookup) {
- case CallOptimization::kHolderIsReceiver:
- __ Move(holder, receiver);
- break;
- case CallOptimization::kHolderFound:
- __ Move(holder, api_holder);
- break;
- case CallOptimization::kHolderNotFound:
- UNREACHABLE();
- break;
- }
-
- Isolate* isolate = masm->isolate();
- Handle<JSFunction> function = optimization.constant_function();
- Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
- Handle<Object> call_data_obj(api_call_info->data(), isolate);
-
- // Put callee in place.
- __ Move(callee, function);
-
- bool call_data_undefined = false;
- // Put call_data in place.
- if (isolate->heap()->InNewSpace(*call_data_obj)) {
- __ Move(call_data, api_call_info);
- __ ldr(call_data, FieldMemOperand(call_data, CallHandlerInfo::kDataOffset));
- } else if (call_data_obj->IsUndefined()) {
- call_data_undefined = true;
- __ LoadRoot(call_data, Heap::kUndefinedValueRootIndex);
- } else {
- __ Move(call_data, call_data_obj);
- }
-
- // Put api_function_address in place.
- Address function_address = v8::ToCData<Address>(api_call_info->callback());
- ApiFunction fun(function_address);
- ExternalReference::Type type = ExternalReference::DIRECT_API_CALL;
- ExternalReference ref = ExternalReference(&fun, type, masm->isolate());
- __ mov(api_function_address, Operand(ref));
-
- // Jump to stub.
- CallApiFunctionStub stub(isolate, is_store, call_data_undefined, argc);
- __ TailCallStub(&stub);
-}
-
-
-void PropertyAccessCompiler::GenerateTailCall(MacroAssembler* masm,
- Handle<Code> code) {
- __ Jump(code, RelocInfo::CODE_TARGET);
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm())
-
-
-void NamedStoreHandlerCompiler::GenerateRestoreName(Label* label,
- Handle<Name> name) {
- if (!label->is_unused()) {
- __ bind(label);
- __ mov(this->name(), Operand(name));
- }
-}
-
-
-// Generate StoreTransition code; the value is passed in the r0 register.
-// When leaving generated code after success, the receiver_reg and name_reg
-// may be clobbered. Upon branch to miss_label, the receiver and name
-// registers have their original values.
-void NamedStoreHandlerCompiler::GenerateStoreTransition(
- Handle<Map> transition, Handle<Name> name, Register receiver_reg,
- Register storage_reg, Register value_reg, Register scratch1,
- Register scratch2, Register scratch3, Label* miss_label, Label* slow) {
- // r0 : value
- Label exit;
-
- int descriptor = transition->LastAdded();
- DescriptorArray* descriptors = transition->instance_descriptors();
- PropertyDetails details = descriptors->GetDetails(descriptor);
- Representation representation = details.representation();
- DCHECK(!representation.IsNone());
-
- if (details.type() == CONSTANT) {
- Handle<Object> constant(descriptors->GetValue(descriptor), isolate());
- __ Move(scratch1, constant);
- __ cmp(value_reg, scratch1);
- __ b(ne, miss_label);
- } else if (representation.IsSmi()) {
- __ JumpIfNotSmi(value_reg, miss_label);
- } else if (representation.IsHeapObject()) {
- __ JumpIfSmi(value_reg, miss_label);
- HeapType* field_type = descriptors->GetFieldType(descriptor);
- HeapType::Iterator<Map> it = field_type->Classes();
- if (!it.Done()) {
- __ ldr(scratch1, FieldMemOperand(value_reg, HeapObject::kMapOffset));
- Label do_store;
- while (true) {
- __ CompareMap(scratch1, it.Current(), &do_store);
- it.Advance();
- if (it.Done()) {
- __ b(ne, miss_label);
- break;
- }
- __ b(eq, &do_store);
- }
- __ bind(&do_store);
- }
- } else if (representation.IsDouble()) {
- Label do_store, heap_number;
- __ LoadRoot(scratch3, Heap::kMutableHeapNumberMapRootIndex);
- __ AllocateHeapNumber(storage_reg, scratch1, scratch2, scratch3, slow,
- TAG_RESULT, MUTABLE);
-
- __ JumpIfNotSmi(value_reg, &heap_number);
- __ SmiUntag(scratch1, value_reg);
- __ vmov(s0, scratch1);
- __ vcvt_f64_s32(d0, s0);
- __ jmp(&do_store);
-
- __ bind(&heap_number);
- __ CheckMap(value_reg, scratch1, Heap::kHeapNumberMapRootIndex,
- miss_label, DONT_DO_SMI_CHECK);
- __ vldr(d0, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
-
- __ bind(&do_store);
- __ vstr(d0, FieldMemOperand(storage_reg, HeapNumber::kValueOffset));
- }
-
- // Stub never generated for objects that require access checks.
- DCHECK(!transition->is_access_check_needed());
-
- // Perform map transition for the receiver if necessary.
- if (details.type() == FIELD &&
- Map::cast(transition->GetBackPointer())->unused_property_fields() == 0) {
- // The properties must be extended before we can store the value.
- // We jump to a runtime call that extends the properties array.
- __ push(receiver_reg);
- __ mov(r2, Operand(transition));
- __ Push(r2, r0);
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage),
- isolate()),
- 3, 1);
- return;
- }
-
- // Update the map of the object.
- __ mov(scratch1, Operand(transition));
- __ str(scratch1, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
-
- // Update the write barrier for the map field.
- __ RecordWriteField(receiver_reg,
- HeapObject::kMapOffset,
- scratch1,
- scratch2,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs,
- OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
-
- if (details.type() == CONSTANT) {
- DCHECK(value_reg.is(r0));
- __ Ret();
- return;
- }
-
- int index = transition->instance_descriptors()->GetFieldIndex(
- transition->LastAdded());
-
- // Adjust for the number of properties stored in the object. Even in the
- // face of a transition we can use the old map here because the size of the
-  // object and the number of in-object properties are not going to change.
- index -= transition->inobject_properties();
-
- // TODO(verwaest): Share this code as a code stub.
- SmiCheck smi_check = representation.IsTagged()
- ? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
- if (index < 0) {
- // Set the property straight into the object.
- int offset = transition->instance_size() + (index * kPointerSize);
- if (representation.IsDouble()) {
- __ str(storage_reg, FieldMemOperand(receiver_reg, offset));
- } else {
- __ str(value_reg, FieldMemOperand(receiver_reg, offset));
- }
-
- if (!representation.IsSmi()) {
- // Update the write barrier for the array address.
- if (!representation.IsDouble()) {
- __ mov(storage_reg, value_reg);
- }
- __ RecordWriteField(receiver_reg,
- offset,
- storage_reg,
- scratch1,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- smi_check);
- }
- } else {
- // Write to the properties array.
- int offset = index * kPointerSize + FixedArray::kHeaderSize;
- // Get the properties array
- __ ldr(scratch1,
- FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
- if (representation.IsDouble()) {
- __ str(storage_reg, FieldMemOperand(scratch1, offset));
- } else {
- __ str(value_reg, FieldMemOperand(scratch1, offset));
- }
-
- if (!representation.IsSmi()) {
- // Update the write barrier for the array address.
- if (!representation.IsDouble()) {
- __ mov(storage_reg, value_reg);
- }
- __ RecordWriteField(scratch1,
- offset,
- storage_reg,
- receiver_reg,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- smi_check);
- }
- }
-
- // Return the value (register r0).
- DCHECK(value_reg.is(r0));
- __ bind(&exit);
- __ Ret();
-}
-
-
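// --- Editor's sketch (illustrative, not part of the patch) ------------------
// This models the field-offset arithmetic used by GenerateStoreTransition
// above: after subtracting the in-object property count, a negative index
// means the field lives inside the object (at a negative offset from the
// instance end), otherwise it lives in the properties backing store. The
// sizes below are assumptions chosen for the example, not values read from
// a real heap.
#include <cstdio>

int main() {
  const int kPointerSize = 4;           // assumed: 32-bit ARM
  const int kFixedArrayHeaderSize = 8;  // assumed: map + length words

  // Assumed example map: 3 in-object property slots, instance size 28 bytes.
  const int instance_size = 28;
  const int inobject_properties = 3;

  for (int field_index = 0; field_index < 5; ++field_index) {
    // Same adjustment as the stub: negative => in-object property.
    int index = field_index - inobject_properties;
    if (index < 0) {
      int offset = instance_size + index * kPointerSize;
      printf("field %d: in-object, offset %d from object start\n",
             field_index, offset);
    } else {
      int offset = index * kPointerSize + kFixedArrayHeaderSize;
      printf("field %d: out-of-object, offset %d into properties array\n",
             field_index, offset);
    }
  }
  return 0;
}
// --- End editor's sketch -----------------------------------------------------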
-void NamedStoreHandlerCompiler::GenerateStoreField(LookupIterator* lookup,
- Register value_reg,
- Label* miss_label) {
- DCHECK(lookup->representation().IsHeapObject());
- __ JumpIfSmi(value_reg, miss_label);
- HeapType::Iterator<Map> it = lookup->GetFieldType()->Classes();
- __ ldr(scratch1(), FieldMemOperand(value_reg, HeapObject::kMapOffset));
- Label do_store;
- while (true) {
- __ CompareMap(scratch1(), it.Current(), &do_store);
- it.Advance();
- if (it.Done()) {
- __ b(ne, miss_label);
- break;
- }
- __ b(eq, &do_store);
- }
- __ bind(&do_store);
-
- StoreFieldStub stub(isolate(), lookup->GetFieldIndex(),
- lookup->representation());
- GenerateTailCall(masm(), stub.GetCode());
-}
-
-
-Register PropertyHandlerCompiler::CheckPrototypes(
- Register object_reg, Register holder_reg, Register scratch1,
- Register scratch2, Handle<Name> name, Label* miss,
- PrototypeCheckType check) {
- Handle<Map> receiver_map(IC::TypeToMap(*type(), isolate()));
-
-  // Make sure there's no overlap between the scratch registers and the
-  // object and holder registers.
- DCHECK(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
- DCHECK(!scratch2.is(object_reg) && !scratch2.is(holder_reg)
- && !scratch2.is(scratch1));
-
- // Keep track of the current object in register reg.
- Register reg = object_reg;
- int depth = 0;
-
- Handle<JSObject> current = Handle<JSObject>::null();
- if (type()->IsConstant()) {
- current = Handle<JSObject>::cast(type()->AsConstant()->Value());
- }
- Handle<JSObject> prototype = Handle<JSObject>::null();
- Handle<Map> current_map = receiver_map;
- Handle<Map> holder_map(holder()->map());
- // Traverse the prototype chain and check the maps in the prototype chain for
- // fast and global objects or do negative lookup for normal objects.
- while (!current_map.is_identical_to(holder_map)) {
- ++depth;
-
- // Only global objects and objects that do not require access
- // checks are allowed in stubs.
- DCHECK(current_map->IsJSGlobalProxyMap() ||
- !current_map->is_access_check_needed());
-
- prototype = handle(JSObject::cast(current_map->prototype()));
- if (current_map->is_dictionary_map() &&
- !current_map->IsJSGlobalObjectMap()) {
- DCHECK(!current_map->IsJSGlobalProxyMap()); // Proxy maps are fast.
- if (!name->IsUniqueName()) {
- DCHECK(name->IsString());
- name = factory()->InternalizeString(Handle<String>::cast(name));
- }
- DCHECK(current.is_null() ||
- current->property_dictionary()->FindEntry(name) ==
- NameDictionary::kNotFound);
-
- GenerateDictionaryNegativeLookup(masm(), miss, reg, name,
- scratch1, scratch2);
-
- __ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
- reg = holder_reg; // From now on the object will be in holder_reg.
- __ ldr(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
- } else {
- Register map_reg = scratch1;
- if (depth != 1 || check == CHECK_ALL_MAPS) {
- // CheckMap implicitly loads the map of |reg| into |map_reg|.
- __ CheckMap(reg, map_reg, current_map, miss, DONT_DO_SMI_CHECK);
- } else {
- __ ldr(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
- }
-
- // Check access rights to the global object. This has to happen after
- // the map check so that we know that the object is actually a global
- // object.
- // This allows us to install generated handlers for accesses to the
- // global proxy (as opposed to using slow ICs). See corresponding code
- // in LookupForRead().
- if (current_map->IsJSGlobalProxyMap()) {
- __ CheckAccessGlobalProxy(reg, scratch2, miss);
- } else if (current_map->IsJSGlobalObjectMap()) {
- GenerateCheckPropertyCell(
- masm(), Handle<JSGlobalObject>::cast(current), name,
- scratch2, miss);
- }
-
- reg = holder_reg; // From now on the object will be in holder_reg.
-
- // Two possible reasons for loading the prototype from the map:
- // (1) Can't store references to new space in code.
- // (2) Handler is shared for all receivers with the same prototype
- // map (but not necessarily the same prototype instance).
- bool load_prototype_from_map =
- heap()->InNewSpace(*prototype) || depth == 1;
- if (load_prototype_from_map) {
- __ ldr(reg, FieldMemOperand(map_reg, Map::kPrototypeOffset));
- } else {
- __ mov(reg, Operand(prototype));
- }
- }
-
- // Go to the next object in the prototype chain.
- current = prototype;
- current_map = handle(current->map());
- }
-
- // Log the check depth.
- LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
-
- if (depth != 0 || check == CHECK_ALL_MAPS) {
- // Check the holder map.
- __ CheckMap(reg, scratch1, current_map, miss, DONT_DO_SMI_CHECK);
- }
-
- // Perform security check for access to the global object.
- DCHECK(current_map->IsJSGlobalProxyMap() ||
- !current_map->is_access_check_needed());
- if (current_map->IsJSGlobalProxyMap()) {
- __ CheckAccessGlobalProxy(reg, scratch1, miss);
- }
-
- // Return the register containing the holder.
- return reg;
-}
-
-
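// --- Editor's sketch (illustrative, not part of the patch) ------------------
// A plain C++ restatement of the walk that CheckPrototypes above compiles:
// one check per prototype hop, either a negative dictionary lookup (slow-mode,
// non-global objects) or a map check, with extra checks for global objects
// and proxies. It ignores the depth-1 fast path that can skip the first map
// check. FakeMap and its fields are stand-ins invented for the example.
#include <cstdio>

struct FakeMap {
  const FakeMap* prototype_map;  // map of this object's prototype
  bool is_dictionary;            // slow-mode (dictionary) properties
  bool is_global_object;
  bool is_global_proxy;
};

void DescribeChain(const FakeMap* current, const FakeMap* holder) {
  int depth = 0;
  while (current != holder) {
    ++depth;
    if (current->is_dictionary && !current->is_global_object) {
      printf("hop %d: negative dictionary lookup for the name\n", depth);
    } else {
      printf("hop %d: map check\n", depth);
      if (current->is_global_proxy)
        printf("       + access check on the global proxy\n");
      else if (current->is_global_object)
        printf("       + check that the property cell is empty\n");
    }
    current = current->prototype_map;
  }
  printf("finally: holder map check (depth %d)\n", depth);
}

int main() {
  FakeMap holder = {nullptr, false, false, false};
  FakeMap proto  = {&holder, true,  false, false};
  FakeMap recv   = {&proto,  false, false, false};
  DescribeChain(&recv, &holder);
  return 0;
}
// --- End editor's sketch -----------------------------------------------------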
-void NamedLoadHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
- if (!miss->is_unused()) {
- Label success;
- __ b(&success);
- __ bind(miss);
- TailCallBuiltin(masm(), MissBuiltin(kind()));
- __ bind(&success);
- }
-}
-
-
-void NamedStoreHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
- if (!miss->is_unused()) {
- Label success;
- __ b(&success);
- GenerateRestoreName(miss, name);
- TailCallBuiltin(masm(), MissBuiltin(kind()));
- __ bind(&success);
- }
-}
-
-
-void NamedLoadHandlerCompiler::GenerateLoadConstant(Handle<Object> value) {
- // Return the constant value.
- __ Move(r0, value);
- __ Ret();
-}
-
-
-void NamedLoadHandlerCompiler::GenerateLoadCallback(
- Register reg, Handle<ExecutableAccessorInfo> callback) {
-  // Build the AccessorInfo::args_ list on the stack and push the property
-  // name below the exit frame, so the GC is aware of them and pointers to
-  // them can be stored.
- STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 0);
- STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 1);
- STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 2);
- STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 3);
- STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 4);
- STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 5);
- STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 6);
- DCHECK(!scratch2().is(reg));
- DCHECK(!scratch3().is(reg));
- DCHECK(!scratch4().is(reg));
- __ push(receiver());
- if (heap()->InNewSpace(callback->data())) {
- __ Move(scratch3(), callback);
- __ ldr(scratch3(), FieldMemOperand(scratch3(),
- ExecutableAccessorInfo::kDataOffset));
- } else {
- __ Move(scratch3(), Handle<Object>(callback->data(), isolate()));
- }
- __ push(scratch3());
- __ LoadRoot(scratch3(), Heap::kUndefinedValueRootIndex);
- __ mov(scratch4(), scratch3());
- __ Push(scratch3(), scratch4());
- __ mov(scratch4(),
- Operand(ExternalReference::isolate_address(isolate())));
- __ Push(scratch4(), reg);
- __ mov(scratch2(), sp); // scratch2 = PropertyAccessorInfo::args_
- __ push(name());
-
-  // ABI for CallApiGetter.
- Register getter_address_reg = r2;
-
- Address getter_address = v8::ToCData<Address>(callback->getter());
- ApiFunction fun(getter_address);
- ExternalReference::Type type = ExternalReference::DIRECT_GETTER_CALL;
- ExternalReference ref = ExternalReference(&fun, type, isolate());
- __ mov(getter_address_reg, Operand(ref));
-
- CallApiGetterStub stub(isolate());
- __ TailCallStub(&stub);
-}
-
-
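// --- Editor's sketch (illustrative, not part of the patch) ------------------
// A plain C++ record of the args_ layout fixed by the STATIC_ASSERTs in
// GenerateLoadCallback above. The pushes build the block downwards in memory,
// so the holder (pushed last) ends up at index 0 and the receiver (pushed
// first) at index 5, with the property name just below the block. The struct
// is a stand-in, not a real V8 type.
struct PropertyCallbackArgs {
  void* holder;                // kHolderIndex                  == 0
  void* isolate;               // kIsolateIndex                 == 1
  void* return_value_default;  // kReturnValueDefaultValueIndex == 2
  void* return_value;          // kReturnValueOffset            == 3
  void* data;                  // kDataIndex                    == 4
  void* receiver;              // kThisIndex                    == 5
};                             // kArgsLength                   == 6
static_assert(sizeof(PropertyCallbackArgs) == 6 * sizeof(void*),
              "six words, matching kArgsLength");
// --- End editor's sketch -----------------------------------------------------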
-void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
- LookupIterator* it, Register holder_reg) {
- DCHECK(holder()->HasNamedInterceptor());
- DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined());
-
- // Compile the interceptor call, followed by inline code to load the
- // property from further up the prototype chain if the call fails.
- // Check that the maps haven't changed.
- DCHECK(holder_reg.is(receiver()) || holder_reg.is(scratch1()));
-
-  // Preserve the receiver register explicitly whenever it is different from
-  // the holder and is needed in case the interceptor returns without a result.
-  // The ACCESSOR case needs the receiver passed into C++ code; the FIELD case
-  // might cause a miss during the prototype check.
- bool must_perform_prototype_check =
- !holder().is_identical_to(it->GetHolder<JSObject>());
- bool must_preserve_receiver_reg =
- !receiver().is(holder_reg) &&
- (it->property_kind() == LookupIterator::ACCESSOR ||
- must_perform_prototype_check);
-
- // Save necessary data before invoking an interceptor.
- // Requires a frame to make GC aware of pushed pointers.
- {
- FrameAndConstantPoolScope frame_scope(masm(), StackFrame::INTERNAL);
- if (must_preserve_receiver_reg) {
- __ Push(receiver(), holder_reg, this->name());
- } else {
- __ Push(holder_reg, this->name());
- }
-    // Invoke an interceptor. Note: map checks from the receiver to the
-    // interceptor's holder have been compiled before (see a caller of
-    // this method).
- CompileCallLoadPropertyWithInterceptor(
- masm(), receiver(), holder_reg, this->name(), holder(),
- IC::kLoadPropertyWithInterceptorOnly);
-
-    // Check if the interceptor provided a value for the property. If so,
-    // return immediately.
- Label interceptor_failed;
- __ LoadRoot(scratch1(), Heap::kNoInterceptorResultSentinelRootIndex);
- __ cmp(r0, scratch1());
- __ b(eq, &interceptor_failed);
- frame_scope.GenerateLeaveFrame();
- __ Ret();
-
- __ bind(&interceptor_failed);
- __ pop(this->name());
- __ pop(holder_reg);
- if (must_preserve_receiver_reg) {
- __ pop(receiver());
- }
- // Leave the internal frame.
- }
-
- GenerateLoadPostInterceptor(it, holder_reg);
-}
-
-
-void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
- // Call the runtime system to load the interceptor.
- DCHECK(holder()->HasNamedInterceptor());
- DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined());
- PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(),
- holder());
-
- ExternalReference ref = ExternalReference(
- IC_Utility(IC::kLoadPropertyWithInterceptor), isolate());
- __ TailCallExternalReference(
- ref, NamedLoadHandlerCompiler::kInterceptorArgsLength, 1);
-}
-
-
-Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
- Handle<JSObject> object, Handle<Name> name,
- Handle<ExecutableAccessorInfo> callback) {
- Register holder_reg = Frontend(receiver(), name);
-
- __ push(receiver()); // receiver
- __ push(holder_reg);
- __ mov(ip, Operand(callback)); // callback info
- __ push(ip);
- __ mov(ip, Operand(name));
- __ Push(ip, value());
-
- // Do tail-call to the runtime system.
- ExternalReference store_callback_property =
- ExternalReference(IC_Utility(IC::kStoreCallbackProperty), isolate());
- __ TailCallExternalReference(store_callback_property, 5, 1);
-
- // Return the generated code.
- return GetCode(kind(), Code::FAST, name);
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm)
-
-
-void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
- MacroAssembler* masm, Handle<HeapType> type, Register receiver,
- Handle<JSFunction> setter) {
- // ----------- S t a t e -------------
- // -- lr : return address
- // -----------------------------------
- {
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
-
- // Save value register, so we can restore it later.
- __ push(value());
-
- if (!setter.is_null()) {
- // Call the JavaScript setter with receiver and value on the stack.
- if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
- // Swap in the global receiver.
- __ ldr(receiver,
- FieldMemOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
- }
- __ Push(receiver, value());
- ParameterCount actual(1);
- ParameterCount expected(setter);
- __ InvokeFunction(setter, expected, actual,
- CALL_FUNCTION, NullCallWrapper());
- } else {
- // If we generate a global code snippet for deoptimization only, remember
- // the place to continue after deoptimization.
- masm->isolate()->heap()->SetSetterStubDeoptPCOffset(masm->pc_offset());
- }
-
- // We have to return the passed value, not the return value of the setter.
- __ pop(r0);
-
- // Restore context register.
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- }
- __ Ret();
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm())
-
-
-Handle<Code> NamedStoreHandlerCompiler::CompileStoreInterceptor(
- Handle<Name> name) {
- __ Push(receiver(), this->name(), value());
-
- // Do tail-call to the runtime system.
- ExternalReference store_ic_property = ExternalReference(
- IC_Utility(IC::kStorePropertyWithInterceptor), isolate());
- __ TailCallExternalReference(store_ic_property, 3, 1);
-
- // Return the generated code.
- return GetCode(kind(), Code::FAST, name);
-}
-
-
-Register* PropertyAccessCompiler::load_calling_convention() {
- // receiver, name, scratch1, scratch2, scratch3, scratch4.
- Register receiver = LoadIC::ReceiverRegister();
- Register name = LoadIC::NameRegister();
- static Register registers[] = { receiver, name, r3, r0, r4, r5 };
- return registers;
-}
-
-
-Register* PropertyAccessCompiler::store_calling_convention() {
- // receiver, name, scratch1, scratch2, scratch3.
- Register receiver = StoreIC::ReceiverRegister();
- Register name = StoreIC::NameRegister();
- DCHECK(r3.is(KeyedStoreIC::MapRegister()));
- static Register registers[] = { receiver, name, r3, r4, r5 };
- return registers;
-}
-
-
-Register NamedStoreHandlerCompiler::value() { return StoreIC::ValueRegister(); }
-
-
-#undef __
-#define __ ACCESS_MASM(masm)
-
-
-void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
- MacroAssembler* masm, Handle<HeapType> type, Register receiver,
- Handle<JSFunction> getter) {
- // ----------- S t a t e -------------
- // -- r0 : receiver
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
- {
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
-
- if (!getter.is_null()) {
- // Call the JavaScript getter with the receiver on the stack.
- if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
- // Swap in the global receiver.
- __ ldr(receiver,
- FieldMemOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
- }
- __ push(receiver);
- ParameterCount actual(0);
- ParameterCount expected(getter);
- __ InvokeFunction(getter, expected, actual,
- CALL_FUNCTION, NullCallWrapper());
- } else {
- // If we generate a global code snippet for deoptimization only, remember
- // the place to continue after deoptimization.
- masm->isolate()->heap()->SetGetterStubDeoptPCOffset(masm->pc_offset());
- }
-
- // Restore context register.
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- }
- __ Ret();
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm())
-
-
-Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
- Handle<PropertyCell> cell, Handle<Name> name, bool is_configurable) {
- Label miss;
- FrontendHeader(receiver(), name, &miss);
-
- // Get the value from the cell.
- Register result = StoreIC::ValueRegister();
- __ mov(result, Operand(cell));
- __ ldr(result, FieldMemOperand(result, Cell::kValueOffset));
-
- // Check for deleted property if property can actually be deleted.
- if (is_configurable) {
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ cmp(result, ip);
- __ b(eq, &miss);
- }
-
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->named_load_global_stub(), 1, r1, r3);
- __ Ret();
-
- FrontendFooter(name, &miss);
-
- // Return the generated code.
- return GetCode(kind(), Code::NORMAL, name);
-}
-
-
-Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types,
- CodeHandleList* handlers,
- Handle<Name> name,
- Code::StubType type,
- IcCheckType check) {
- Label miss;
-
- if (check == PROPERTY &&
- (kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) {
- // In case we are compiling an IC for dictionary loads and stores, just
- // check whether the name is unique.
- if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) {
- __ JumpIfNotUniqueName(this->name(), &miss);
- } else {
- __ cmp(this->name(), Operand(name));
- __ b(ne, &miss);
- }
- }
-
- Label number_case;
- Label* smi_target = IncludesNumberType(types) ? &number_case : &miss;
- __ JumpIfSmi(receiver(), smi_target);
-
-  // Polymorphic keyed stores may use the map register.
- Register map_reg = scratch1();
- DCHECK(kind() != Code::KEYED_STORE_IC ||
- map_reg.is(KeyedStoreIC::MapRegister()));
-
- int receiver_count = types->length();
- int number_of_handled_maps = 0;
- __ ldr(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset));
- for (int current = 0; current < receiver_count; ++current) {
- Handle<HeapType> type = types->at(current);
- Handle<Map> map = IC::TypeToMap(*type, isolate());
- if (!map->is_deprecated()) {
- number_of_handled_maps++;
- __ mov(ip, Operand(map));
- __ cmp(map_reg, ip);
- if (type->Is(HeapType::Number())) {
- DCHECK(!number_case.is_unused());
- __ bind(&number_case);
- }
- __ Jump(handlers->at(current), RelocInfo::CODE_TARGET, eq);
- }
- }
- DCHECK(number_of_handled_maps != 0);
-
- __ bind(&miss);
- TailCallBuiltin(masm(), MissBuiltin(kind()));
-
- // Return the generated code.
- InlineCacheState state =
- number_of_handled_maps > 1 ? POLYMORPHIC : MONOMORPHIC;
- return GetCode(kind(), type, name, state);
-}
-
-
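// --- Editor's sketch (illustrative, not part of the patch) ------------------
// A plain C++ restatement of the dispatch that CompilePolymorphic above
// emits: one map comparison per (map, handler) pair, jumping to the handler
// on a match and falling through to miss. The types and values here are
// stand-ins invented for the example.
#include <cstdio>

typedef void (*Handler)();
void HandlerA() { puts("handler A"); }
void HandlerB() { puts("handler B"); }
void Miss()     { puts("miss");      }

void Dispatch(int receiver_map, const int* maps, const Handler* handlers,
              int n) {
  for (int i = 0; i < n; ++i) {
    if (receiver_map == maps[i]) { handlers[i](); return; }  // __ Jump(.., eq)
  }
  Miss();  // TailCallBuiltin(MissBuiltin(kind()))
}

int main() {
  int maps[] = {11, 22};
  Handler handlers[] = {HandlerA, HandlerB};
  Dispatch(22, maps, handlers, 2);  // prints "handler B"
  Dispatch(33, maps, handlers, 2);  // prints "miss"
  return 0;
}
// --- End editor's sketch -----------------------------------------------------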
-Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
- MapHandleList* receiver_maps, CodeHandleList* handler_stubs,
- MapHandleList* transitioned_maps) {
- Label miss;
- __ JumpIfSmi(receiver(), &miss);
-
- int receiver_count = receiver_maps->length();
- __ ldr(scratch1(), FieldMemOperand(receiver(), HeapObject::kMapOffset));
- for (int i = 0; i < receiver_count; ++i) {
- __ mov(ip, Operand(receiver_maps->at(i)));
- __ cmp(scratch1(), ip);
- if (transitioned_maps->at(i).is_null()) {
- __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, eq);
- } else {
- Label next_map;
- __ b(ne, &next_map);
- __ mov(transition_map(), Operand(transitioned_maps->at(i)));
- __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, al);
- __ bind(&next_map);
- }
- }
-
- __ bind(&miss);
- TailCallBuiltin(masm(), MissBuiltin(kind()));
-
- // Return the generated code.
- return GetCode(kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC);
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm)
-
-
-void ElementHandlerCompiler::GenerateLoadDictionaryElement(
- MacroAssembler* masm) {
- // The return address is in lr.
- Label slow, miss;
-
- Register key = LoadIC::NameRegister();
- Register receiver = LoadIC::ReceiverRegister();
- DCHECK(receiver.is(r1));
- DCHECK(key.is(r2));
-
- __ UntagAndJumpIfNotSmi(r6, key, &miss);
- __ ldr(r4, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ LoadFromNumberDictionary(&slow, r4, key, r0, r6, r3, r5);
- __ Ret();
-
- __ bind(&slow);
- __ IncrementCounter(
- masm->isolate()->counters()->keyed_load_external_array_slow(),
- 1, r2, r3);
-
- TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Slow);
-
- // Miss case, call the runtime.
- __ bind(&miss);
-
- TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Miss);
-}
-
-
-#undef __
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_ARM
#include "src/deoptimizer.h"
#include "src/full-codegen.h"
#include "src/runtime.h"
-#include "src/stub-cache.h"
namespace v8 {
namespace internal {
#include "src/bootstrapper.h"
#include "src/code-stubs.h"
+#include "src/codegen.h"
+#include "src/ic/ic-compiler.h"
+#include "src/isolate.h"
+#include "src/jsregexp.h"
#include "src/regexp-macro-assembler.h"
-#include "src/stub-cache.h"
+#include "src/runtime.h"
namespace v8 {
namespace internal {
#ifndef V8_ARM64_CODE_STUBS_ARM64_H_
#define V8_ARM64_CODE_STUBS_ARM64_H_
-#include "src/ic-inl.h"
+#include "src/code-stubs.h"
namespace v8 {
namespace internal {
#define V8_ARM64_CODEGEN_ARM64_H_
#include "src/ast.h"
-#include "src/ic-inl.h"
+#include "src/macro-assembler.h"
namespace v8 {
namespace internal {
#include "src/isolate-inl.h"
#include "src/parser.h"
#include "src/scopes.h"
-#include "src/stub-cache.h"
#include "src/arm64/code-stubs-arm64.h"
#include "src/arm64/macro-assembler-arm64.h"
+++ /dev/null
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/v8.h"
-
-#if V8_TARGET_ARCH_ARM64
-
-#include "src/arm64/assembler-arm64.h"
-#include "src/code-stubs.h"
-#include "src/codegen.h"
-#include "src/disasm.h"
-#include "src/ic-inl.h"
-#include "src/runtime.h"
-#include "src/stub-cache.h"
-
-namespace v8 {
-namespace internal {
-
-
-#define __ ACCESS_MASM(masm)
-
-
-// "type" holds an instance type on entry and is not clobbered.
-// Generated code branch on "global_object" if type is any kind of global
-// JS object.
-static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm,
- Register type,
- Label* global_object) {
- __ Cmp(type, JS_GLOBAL_OBJECT_TYPE);
- __ Ccmp(type, JS_BUILTINS_OBJECT_TYPE, ZFlag, ne);
- __ Ccmp(type, JS_GLOBAL_PROXY_TYPE, ZFlag, ne);
- __ B(eq, global_object);
-}
-
-
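// --- Editor's sketch (illustrative, not part of the patch) ------------------
// The boolean logic implemented by the Cmp/Ccmp chain above:
// Ccmp(x, y, ZFlag, ne) re-compares only while the previous test was "not
// equal" and otherwise forces the Z flag, so the chain computes a logical OR
// of equality tests. The instance-type constants below are placeholders for
// the example.
#include <cstdio>

enum InstanceType {
  JS_OBJECT = 0, JS_GLOBAL_OBJECT, JS_BUILTINS_OBJECT, JS_GLOBAL_PROXY
};

bool IsGlobalObjectType(InstanceType type) {
  // Equivalent C++ expression for the three-instruction flag chain.
  return type == JS_GLOBAL_OBJECT ||
         type == JS_BUILTINS_OBJECT ||
         type == JS_GLOBAL_PROXY;
}

int main() {
  for (int t = JS_OBJECT; t <= JS_GLOBAL_PROXY; ++t)
    printf("type %d -> %s\n", t,
           IsGlobalObjectType((InstanceType)t) ? "global" : "not global");
  return 0;
}
// --- End editor's sketch -----------------------------------------------------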
-// Helper function used from LoadIC::GenerateNormal.
-//
-// elements: Property dictionary. It is not clobbered if a jump to the miss
-//           label is taken.
-// name:     Property name. It is not clobbered if a jump to the miss label is
-//           taken.
-// result:   Register for the result. It is only updated if a jump to the miss
-//           label is not taken.
-// The scratch registers need to be different from elements, name and result.
-// The generated code assumes that the receiver has slow properties,
-// is not a global object and does not have interceptors.
-static void GenerateDictionaryLoad(MacroAssembler* masm,
- Label* miss,
- Register elements,
- Register name,
- Register result,
- Register scratch1,
- Register scratch2) {
- DCHECK(!AreAliased(elements, name, scratch1, scratch2));
- DCHECK(!AreAliased(result, scratch1, scratch2));
-
- Label done;
-
- // Probe the dictionary.
- NameDictionaryLookupStub::GeneratePositiveLookup(masm,
- miss,
- &done,
- elements,
- name,
- scratch1,
- scratch2);
-
- // If probing finds an entry check that the value is a normal property.
- __ Bind(&done);
-
- static const int kElementsStartOffset = NameDictionary::kHeaderSize +
- NameDictionary::kElementsStartIndex * kPointerSize;
- static const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
- __ Ldr(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
- __ Tst(scratch1, Smi::FromInt(PropertyDetails::TypeField::kMask));
- __ B(ne, miss);
-
- // Get the value at the masked, scaled index and return.
- __ Ldr(result,
- FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize));
-}
-
-
-// Helper function used from StoreIC::GenerateNormal.
-//
-// elements: Property dictionary. It is not clobbered if a jump to the miss
-//           label is taken.
-// name:     Property name. It is not clobbered if a jump to the miss label is
-//           taken.
-// value: The value to store (never clobbered).
-//
-// The generated code assumes that the receiver has slow properties,
-// is not a global object and does not have interceptors.
-static void GenerateDictionaryStore(MacroAssembler* masm,
- Label* miss,
- Register elements,
- Register name,
- Register value,
- Register scratch1,
- Register scratch2) {
- DCHECK(!AreAliased(elements, name, value, scratch1, scratch2));
-
- Label done;
-
- // Probe the dictionary.
- NameDictionaryLookupStub::GeneratePositiveLookup(masm,
- miss,
- &done,
- elements,
- name,
- scratch1,
- scratch2);
-
- // If probing finds an entry in the dictionary check that the value
- // is a normal property that is not read only.
- __ Bind(&done);
-
- static const int kElementsStartOffset = NameDictionary::kHeaderSize +
- NameDictionary::kElementsStartIndex * kPointerSize;
- static const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
- static const int kTypeAndReadOnlyMask =
- PropertyDetails::TypeField::kMask |
- PropertyDetails::AttributesField::encode(READ_ONLY);
- __ Ldrsw(scratch1, UntagSmiFieldMemOperand(scratch2, kDetailsOffset));
- __ Tst(scratch1, kTypeAndReadOnlyMask);
- __ B(ne, miss);
-
- // Store the value at the masked, scaled index and return.
- static const int kValueOffset = kElementsStartOffset + kPointerSize;
- __ Add(scratch2, scratch2, kValueOffset - kHeapObjectTag);
- __ Str(value, MemOperand(scratch2));
-
- // Update the write barrier. Make sure not to clobber the value.
- __ Mov(scratch1, value);
- __ RecordWrite(
- elements, scratch2, scratch1, kLRHasNotBeenSaved, kDontSaveFPRegs);
-}
-
-
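// --- Editor's sketch (illustrative, not part of the patch) ------------------
// This models the (key, value, details) triple layout behind the
// kElementsStartOffset / kDetailsOffset / kValueOffset constants used by the
// two dictionary helpers above: the value sits one word past an entry's key
// slot and the details word two words past it. The sizes are assumptions for
// the example, not real V8 constants.
#include <cstdio>

int main() {
  const int kPointerSize = 8;                // assumed: arm64
  const int kHeaderSize = 3 * kPointerSize;  // assumed dictionary header
  const int kElementsStartIndex = 3;         // assumed first element slot
  const int kEntrySize = 3;                  // key, value, details

  const int kElementsStartOffset =
      kHeaderSize + kElementsStartIndex * kPointerSize;

  for (int entry = 0; entry < 2; ++entry) {
    int base = kElementsStartOffset + entry * kEntrySize * kPointerSize;
    printf("entry %d: key @%d, value @%d, details @%d\n",
           entry, base, base + 1 * kPointerSize, base + 2 * kPointerSize);
  }
  return 0;
}
// --- End editor's sketch -----------------------------------------------------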
-// Checks the receiver for special cases (value type, slow case bits).
-// Falls through for regular JS objects and returns the map of the
-// receiver in 'map_scratch' if the receiver is not a smi.
-static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
- Register receiver,
- Register map_scratch,
- Register scratch,
- int interceptor_bit,
- Label* slow) {
- DCHECK(!AreAliased(map_scratch, scratch));
-
- // Check that the object isn't a smi.
- __ JumpIfSmi(receiver, slow);
- // Get the map of the receiver.
- __ Ldr(map_scratch, FieldMemOperand(receiver, HeapObject::kMapOffset));
- // Check bit field.
- __ Ldrb(scratch, FieldMemOperand(map_scratch, Map::kBitFieldOffset));
- __ Tbnz(scratch, Map::kIsAccessCheckNeeded, slow);
- __ Tbnz(scratch, interceptor_bit, slow);
-
- // Check that the object is some kind of JS object EXCEPT JS Value type.
- // In the case that the object is a value-wrapper object, we enter the
-  // runtime system to make sure that indexing into string objects works
- // as intended.
- STATIC_ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
- __ Ldrb(scratch, FieldMemOperand(map_scratch, Map::kInstanceTypeOffset));
- __ Cmp(scratch, JS_OBJECT_TYPE);
- __ B(lt, slow);
-}
-
-
-// Loads an indexed element from a fast case array.
-// If not_fast_array is NULL, doesn't perform the elements map check.
-//
-// receiver - holds the receiver on entry.
-// Unchanged unless 'result' is the same register.
-//
-// key - holds the smi key on entry.
-// Unchanged unless 'result' is the same register.
-//
-// elements - holds the elements of the receiver on exit.
-//
-// elements_map - holds the elements map on exit if the not_fast_array branch is
-// taken. Otherwise, this is used as a scratch register.
-//
-// result - holds the result on exit if the load succeeded.
-//                Allowed to be the same as 'receiver' or 'key'.
-// Unchanged on bailout so 'receiver' and 'key' can be safely
-// used by further computation.
-static void GenerateFastArrayLoad(MacroAssembler* masm,
- Register receiver,
- Register key,
- Register elements,
- Register elements_map,
- Register scratch2,
- Register result,
- Label* not_fast_array,
- Label* slow) {
- DCHECK(!AreAliased(receiver, key, elements, elements_map, scratch2));
-
- // Check for fast array.
- __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- if (not_fast_array != NULL) {
- // Check that the object is in fast mode and writable.
- __ Ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
- __ JumpIfNotRoot(elements_map, Heap::kFixedArrayMapRootIndex,
- not_fast_array);
- } else {
- __ AssertFastElements(elements);
- }
-
- // The elements_map register is only used for the not_fast_array path, which
- // was handled above. From this point onward it is a scratch register.
- Register scratch1 = elements_map;
-
- // Check that the key (index) is within bounds.
- __ Ldr(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
- __ Cmp(key, scratch1);
- __ B(hs, slow);
-
- // Fast case: Do the load.
- __ Add(scratch1, elements, FixedArray::kHeaderSize - kHeapObjectTag);
- __ SmiUntag(scratch2, key);
- __ Ldr(scratch2, MemOperand(scratch1, scratch2, LSL, kPointerSizeLog2));
-
- // In case the loaded value is the_hole we have to consult GetProperty
- // to ensure the prototype chain is searched.
- __ JumpIfRoot(scratch2, Heap::kTheHoleValueRootIndex, slow);
-
- // Move the value to the result register.
- // 'result' can alias with 'receiver' or 'key' but these two must be
- // preserved if we jump to 'slow'.
- __ Mov(result, scratch2);
-}
-
-
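// --- Editor's sketch (illustrative, not part of the patch) ------------------
// A plain C++ restatement of the fast-path checks in GenerateFastArrayLoad
// above: an unsigned bounds check (mirroring Cmp/B(hs)), a load, and a hole
// check that sends the lookup to the slow path so the prototype chain can be
// consulted. The sentinel value and array type are stand-ins for the example.
#include <cstdio>

const int kTheHole = -999999;  // stands in for the hole sentinel

bool FastArrayLoad(const int* elements, int length, unsigned key,
                   int* result) {
  if (key >= (unsigned)length) return false;  // bounds check
  int value = elements[key];
  if (value == kTheHole) return false;        // hole check: go slow
  *result = value;
  return true;
}

int main() {
  int elements[] = {10, kTheHole, 30};
  int out;
  for (unsigned k = 0; k < 4; ++k)
    printf("key %u: %s\n", k,
           FastArrayLoad(elements, 3, k, &out) ? "fast hit" : "slow path");
  return 0;
}
// --- End editor's sketch -----------------------------------------------------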
-// Checks whether a key is an array index string or a unique name.
-// Falls through if a key is a unique name.
-// The map of the key is returned in 'map_scratch'.
-// If the jump to 'index_string' is taken, the hash of the key is left
-// in 'hash_scratch'.
-static void GenerateKeyNameCheck(MacroAssembler* masm,
- Register key,
- Register map_scratch,
- Register hash_scratch,
- Label* index_string,
- Label* not_unique) {
- DCHECK(!AreAliased(key, map_scratch, hash_scratch));
-
- // Is the key a name?
- Label unique;
- __ JumpIfObjectType(key, map_scratch, hash_scratch, LAST_UNIQUE_NAME_TYPE,
- not_unique, hi);
- STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
- __ B(eq, &unique);
-
- // Is the string an array index with cached numeric value?
- __ Ldr(hash_scratch.W(), FieldMemOperand(key, Name::kHashFieldOffset));
- __ TestAndBranchIfAllClear(hash_scratch,
- Name::kContainsCachedArrayIndexMask,
- index_string);
-
- // Is the string internalized? We know it's a string, so a single bit test is
- // enough.
- __ Ldrb(hash_scratch, FieldMemOperand(map_scratch, Map::kInstanceTypeOffset));
- STATIC_ASSERT(kInternalizedTag == 0);
- __ TestAndBranchIfAnySet(hash_scratch, kIsNotInternalizedMask, not_unique);
-
- __ Bind(&unique);
- // Fall through if the key is a unique name.
-}
-
-
-// Neither 'object' nor 'key' is modified by this function.
-//
-// If the 'unmapped_case' or 'slow_case' exit is taken, the 'map' register is
-// left with the object's elements map. Otherwise, it is used as a scratch
-// register.
-static MemOperand GenerateMappedArgumentsLookup(MacroAssembler* masm,
- Register object,
- Register key,
- Register map,
- Register scratch1,
- Register scratch2,
- Label* unmapped_case,
- Label* slow_case) {
- DCHECK(!AreAliased(object, key, map, scratch1, scratch2));
-
- Heap* heap = masm->isolate()->heap();
-
- // Check that the receiver is a JSObject. Because of the elements
- // map check later, we do not need to check for interceptors or
- // whether it requires access checks.
- __ JumpIfSmi(object, slow_case);
- // Check that the object is some kind of JSObject.
- __ JumpIfObjectType(object, map, scratch1, FIRST_JS_RECEIVER_TYPE,
- slow_case, lt);
-
- // Check that the key is a positive smi.
- __ JumpIfNotSmi(key, slow_case);
- __ Tbnz(key, kXSignBit, slow_case);
-
- // Load the elements object and check its map.
- Handle<Map> arguments_map(heap->sloppy_arguments_elements_map());
- __ Ldr(map, FieldMemOperand(object, JSObject::kElementsOffset));
- __ CheckMap(map, scratch1, arguments_map, slow_case, DONT_DO_SMI_CHECK);
-
- // Check if element is in the range of mapped arguments. If not, jump
- // to the unmapped lookup.
- __ Ldr(scratch1, FieldMemOperand(map, FixedArray::kLengthOffset));
- __ Sub(scratch1, scratch1, Smi::FromInt(2));
- __ Cmp(key, scratch1);
- __ B(hs, unmapped_case);
-
- // Load element index and check whether it is the hole.
- static const int offset =
- FixedArray::kHeaderSize + 2 * kPointerSize - kHeapObjectTag;
-
- __ Add(scratch1, map, offset);
- __ SmiUntag(scratch2, key);
- __ Ldr(scratch1, MemOperand(scratch1, scratch2, LSL, kPointerSizeLog2));
- __ JumpIfRoot(scratch1, Heap::kTheHoleValueRootIndex, unmapped_case);
-
- // Load value from context and return it.
- __ Ldr(scratch2, FieldMemOperand(map, FixedArray::kHeaderSize));
- __ SmiUntag(scratch1);
- __ Lsl(scratch1, scratch1, kPointerSizeLog2);
- __ Add(scratch1, scratch1, Context::kHeaderSize - kHeapObjectTag);
- // The base of the result (scratch2) is passed to RecordWrite in
- // KeyedStoreIC::GenerateSloppyArguments and it must be a HeapObject.
- return MemOperand(scratch2, scratch1);
-}
-
-
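// --- Editor's sketch (illustrative, not part of the patch) ------------------
// This models the sloppy-arguments lookup that the two helpers above compile:
// a parameter map whose slots hold either a context index (the argument
// aliases a context slot) or the hole (the argument lives in the plain
// backing store). In the real layout everything sits in one FixedArray with
// the context and backing store in the first two slots; the split struct and
// all values here are simplifications invented for the example.
#include <cstdio>

const int kHole = -1;

struct SloppyArguments {
  int context[8];        // stand-in for the function context
  int backing_store[8];  // stand-in for the arguments backing store
  int backing_length;
  int map[8];            // map[i] = context slot for parameter i, or kHole
  int map_length;        // number of mapped parameter slots
};

bool Load(const SloppyArguments& a, unsigned key, int* result) {
  if (key < (unsigned)a.map_length && a.map[key] != kHole) {
    *result = a.context[a.map[key]];  // mapped: aliases a context slot
    return true;
  }
  if (key < (unsigned)a.backing_length) {
    *result = a.backing_store[key];   // unmapped: plain elements load
    return *result != kHole;          // hole here still means "go slow"
  }
  return false;                       // out of range: slow path
}

int main() {
  SloppyArguments a = {};
  a.context[4] = 42;                // parameter 0 aliases context slot 4
  a.map[0] = 4;  a.map[1] = kHole;  // parameter 1 is unmapped
  a.map_length = 2;
  a.backing_store[1] = 7;
  a.backing_length = 2;
  int v;
  if (Load(a, 0, &v)) printf("arg 0 = %d (mapped)\n", v);    // 42
  if (Load(a, 1, &v)) printf("arg 1 = %d (unmapped)\n", v);  // 7
  return 0;
}
// --- End editor's sketch -----------------------------------------------------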
-// The 'parameter_map' register must be loaded with the parameter map of the
-// arguments object and is overwritten.
-static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
- Register key,
- Register parameter_map,
- Register scratch,
- Label* slow_case) {
- DCHECK(!AreAliased(key, parameter_map, scratch));
-
- // Element is in arguments backing store, which is referenced by the
- // second element of the parameter_map.
- const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
- Register backing_store = parameter_map;
- __ Ldr(backing_store, FieldMemOperand(parameter_map, kBackingStoreOffset));
- Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
- __ CheckMap(
- backing_store, scratch, fixed_array_map, slow_case, DONT_DO_SMI_CHECK);
- __ Ldr(scratch, FieldMemOperand(backing_store, FixedArray::kLengthOffset));
- __ Cmp(key, scratch);
- __ B(hs, slow_case);
-
- __ Add(backing_store,
- backing_store,
- FixedArray::kHeaderSize - kHeapObjectTag);
- __ SmiUntag(scratch, key);
- return MemOperand(backing_store, scratch, LSL, kPointerSizeLog2);
-}
-
-
-void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
- // The return address is in lr.
- Register receiver = ReceiverRegister();
- Register name = NameRegister();
- DCHECK(receiver.is(x1));
- DCHECK(name.is(x2));
-
- // Probe the stub cache.
- Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
- Code::ComputeHandlerFlags(Code::LOAD_IC));
- masm->isolate()->stub_cache()->GenerateProbe(
- masm, flags, receiver, name, x3, x4, x5, x6);
-
- // Cache miss: Jump to runtime.
- GenerateMiss(masm);
-}
-
-
-void LoadIC::GenerateNormal(MacroAssembler* masm) {
- Register dictionary = x0;
- DCHECK(!dictionary.is(ReceiverRegister()));
- DCHECK(!dictionary.is(NameRegister()));
- Label slow;
-
- __ Ldr(dictionary,
- FieldMemOperand(ReceiverRegister(), JSObject::kPropertiesOffset));
- GenerateDictionaryLoad(masm, &slow, dictionary, NameRegister(), x0, x3, x4);
- __ Ret();
-
- // Dictionary load failed, go slow (but don't miss).
- __ Bind(&slow);
- GenerateRuntimeGetProperty(masm);
-}
-
-
-void LoadIC::GenerateMiss(MacroAssembler* masm) {
- // The return address is in lr.
- Isolate* isolate = masm->isolate();
- ASM_LOCATION("LoadIC::GenerateMiss");
-
- __ IncrementCounter(isolate->counters()->load_miss(), 1, x3, x4);
-
- // Perform tail call to the entry.
- __ Push(ReceiverRegister(), NameRegister());
- ExternalReference ref =
- ExternalReference(IC_Utility(kLoadIC_Miss), isolate);
- __ TailCallExternalReference(ref, 2, 1);
-}
-
-
-void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
- // The return address is in lr.
- __ Push(ReceiverRegister(), NameRegister());
- __ TailCallRuntime(Runtime::kGetProperty, 2, 1);
-}
-
-
-void KeyedLoadIC::GenerateSloppyArguments(MacroAssembler* masm) {
- // The return address is in lr.
- Register result = x0;
- Register receiver = ReceiverRegister();
- Register key = NameRegister();
- DCHECK(receiver.is(x1));
- DCHECK(key.is(x2));
-
- Label miss, unmapped;
-
- Register map_scratch = x0;
- MemOperand mapped_location = GenerateMappedArgumentsLookup(
- masm, receiver, key, map_scratch, x3, x4, &unmapped, &miss);
- __ Ldr(result, mapped_location);
- __ Ret();
-
- __ Bind(&unmapped);
-  // The parameter map is left in map_scratch when the jump to 'unmapped'
-  // is taken.
- MemOperand unmapped_location =
- GenerateUnmappedArgumentsLookup(masm, key, map_scratch, x3, &miss);
- __ Ldr(result, unmapped_location);
- __ JumpIfRoot(result, Heap::kTheHoleValueRootIndex, &miss);
- __ Ret();
-
- __ Bind(&miss);
- GenerateMiss(masm);
-}
-
-
-void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
- ASM_LOCATION("KeyedStoreIC::GenerateSloppyArguments");
- Label slow, notin;
- Register value = ValueRegister();
- Register key = NameRegister();
- Register receiver = ReceiverRegister();
- DCHECK(receiver.is(x1));
- DCHECK(key.is(x2));
- DCHECK(value.is(x0));
-
- Register map = x3;
-
- // These registers are used by GenerateMappedArgumentsLookup to build a
- // MemOperand. They are live for as long as the MemOperand is live.
- Register mapped1 = x4;
- Register mapped2 = x5;
-
- MemOperand mapped =
- GenerateMappedArgumentsLookup(masm, receiver, key, map,
- mapped1, mapped2,
- ¬in, &slow);
- Operand mapped_offset = mapped.OffsetAsOperand();
- __ Str(value, mapped);
- __ Add(x10, mapped.base(), mapped_offset);
- __ Mov(x11, value);
- __ RecordWrite(mapped.base(), x10, x11, kLRHasNotBeenSaved, kDontSaveFPRegs);
- __ Ret();
-
- __ Bind(¬in);
-
- // These registers are used by GenerateMappedArgumentsLookup to build a
- // MemOperand. They are live for as long as the MemOperand is live.
- Register unmapped1 = map; // This is assumed to alias 'map'.
- Register unmapped2 = x4;
- MemOperand unmapped =
- GenerateUnmappedArgumentsLookup(masm, key, unmapped1, unmapped2, &slow);
- Operand unmapped_offset = unmapped.OffsetAsOperand();
- __ Str(value, unmapped);
- __ Add(x10, unmapped.base(), unmapped_offset);
- __ Mov(x11, value);
- __ RecordWrite(unmapped.base(), x10, x11,
- kLRHasNotBeenSaved, kDontSaveFPRegs);
- __ Ret();
- __ Bind(&slow);
- GenerateMiss(masm);
-}
-
-
-void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
- // The return address is in lr.
- Isolate* isolate = masm->isolate();
-
- __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, x10, x11);
-
- __ Push(ReceiverRegister(), NameRegister());
-
- // Perform tail call to the entry.
- ExternalReference ref =
- ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate);
-
- __ TailCallExternalReference(ref, 2, 1);
-}
-
-
-// IC register specifications
-const Register LoadIC::ReceiverRegister() { return x1; }
-const Register LoadIC::NameRegister() { return x2; }
-
-const Register LoadIC::SlotRegister() {
- DCHECK(FLAG_vector_ics);
- return x0;
-}
-
-
-const Register LoadIC::VectorRegister() {
- DCHECK(FLAG_vector_ics);
- return x3;
-}
-
-
-const Register StoreIC::ReceiverRegister() { return x1; }
-const Register StoreIC::NameRegister() { return x2; }
-const Register StoreIC::ValueRegister() { return x0; }
-
-
-const Register KeyedStoreIC::MapRegister() {
- return x3;
-}
-
-
-void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
- // The return address is in lr.
- __ Push(ReceiverRegister(), NameRegister());
- __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
-}
-
-
-static void GenerateKeyedLoadWithSmiKey(MacroAssembler* masm,
- Register key,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Register scratch5,
- Label *slow) {
- DCHECK(!AreAliased(
- key, receiver, scratch1, scratch2, scratch3, scratch4, scratch5));
-
- Isolate* isolate = masm->isolate();
- Label check_number_dictionary;
- // If we can load the value, it should be returned in x0.
- Register result = x0;
-
- GenerateKeyedLoadReceiverCheck(
- masm, receiver, scratch1, scratch2, Map::kHasIndexedInterceptor, slow);
-
- // Check the receiver's map to see if it has fast elements.
- __ CheckFastElements(scratch1, scratch2, &check_number_dictionary);
-
- GenerateFastArrayLoad(
- masm, receiver, key, scratch3, scratch2, scratch1, result, NULL, slow);
- __ IncrementCounter(
- isolate->counters()->keyed_load_generic_smi(), 1, scratch1, scratch2);
- __ Ret();
-
- __ Bind(&check_number_dictionary);
- __ Ldr(scratch3, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ Ldr(scratch2, FieldMemOperand(scratch3, JSObject::kMapOffset));
-
- // Check whether we have a number dictionary.
- __ JumpIfNotRoot(scratch2, Heap::kHashTableMapRootIndex, slow);
-
- __ LoadFromNumberDictionary(
- slow, scratch3, key, result, scratch1, scratch2, scratch4, scratch5);
- __ Ret();
-}
-
-static void GenerateKeyedLoadWithNameKey(MacroAssembler* masm,
- Register key,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Register scratch5,
- Label *slow) {
- DCHECK(!AreAliased(
- key, receiver, scratch1, scratch2, scratch3, scratch4, scratch5));
-
- Isolate* isolate = masm->isolate();
- Label probe_dictionary, property_array_property;
- // If we can load the value, it should be returned in x0.
- Register result = x0;
-
- GenerateKeyedLoadReceiverCheck(
- masm, receiver, scratch1, scratch2, Map::kHasNamedInterceptor, slow);
-
- // If the receiver is a fast-case object, check the keyed lookup cache.
- // Otherwise probe the dictionary.
- __ Ldr(scratch2, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
- __ Ldr(scratch3, FieldMemOperand(scratch2, HeapObject::kMapOffset));
- __ JumpIfRoot(scratch3, Heap::kHashTableMapRootIndex, &probe_dictionary);
-
- // We keep the map of the receiver in scratch1.
- Register receiver_map = scratch1;
-
- // Load the map of the receiver, compute the keyed lookup cache hash
- // based on 32 bits of the map pointer and the name hash.
- __ Ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ Mov(scratch2, Operand(receiver_map, ASR, KeyedLookupCache::kMapHashShift));
- __ Ldr(scratch3.W(), FieldMemOperand(key, Name::kHashFieldOffset));
- __ Eor(scratch2, scratch2, Operand(scratch3, ASR, Name::kHashShift));
- int mask = KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask;
- __ And(scratch2, scratch2, mask);
-
- // Load the key (consisting of map and unique name) from the cache and
- // check for match.
- Label load_in_object_property;
- static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
- Label hit_on_nth_entry[kEntriesPerBucket];
- ExternalReference cache_keys =
- ExternalReference::keyed_lookup_cache_keys(isolate);
-
- __ Mov(scratch3, cache_keys);
- __ Add(scratch3, scratch3, Operand(scratch2, LSL, kPointerSizeLog2 + 1));
-
- for (int i = 0; i < kEntriesPerBucket - 1; i++) {
- Label try_next_entry;
-    // Load the map and make scratch3 point to the next entry.
- __ Ldr(scratch4, MemOperand(scratch3, kPointerSize * 2, PostIndex));
- __ Cmp(receiver_map, scratch4);
- __ B(ne, &try_next_entry);
- __ Ldr(scratch4, MemOperand(scratch3, -kPointerSize)); // Load name
- __ Cmp(key, scratch4);
- __ B(eq, &hit_on_nth_entry[i]);
- __ Bind(&try_next_entry);
- }
-
- // Last entry.
- __ Ldr(scratch4, MemOperand(scratch3, kPointerSize, PostIndex));
- __ Cmp(receiver_map, scratch4);
- __ B(ne, slow);
- __ Ldr(scratch4, MemOperand(scratch3));
- __ Cmp(key, scratch4);
- __ B(ne, slow);
-
- // Get field offset.
- ExternalReference cache_field_offsets =
- ExternalReference::keyed_lookup_cache_field_offsets(isolate);
-
- // Hit on nth entry.
- for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
- __ Bind(&hit_on_nth_entry[i]);
- __ Mov(scratch3, cache_field_offsets);
- if (i != 0) {
- __ Add(scratch2, scratch2, i);
- }
- __ Ldr(scratch4.W(), MemOperand(scratch3, scratch2, LSL, 2));
- __ Ldrb(scratch5,
- FieldMemOperand(receiver_map, Map::kInObjectPropertiesOffset));
- __ Subs(scratch4, scratch4, scratch5);
- __ B(ge, &property_array_property);
- if (i != 0) {
- __ B(&load_in_object_property);
- }
- }
-
- // Load in-object property.
- __ Bind(&load_in_object_property);
- __ Ldrb(scratch5, FieldMemOperand(receiver_map, Map::kInstanceSizeOffset));
- __ Add(scratch5, scratch5, scratch4); // Index from start of object.
- __ Sub(receiver, receiver, kHeapObjectTag); // Remove the heap tag.
- __ Ldr(result, MemOperand(receiver, scratch5, LSL, kPointerSizeLog2));
- __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(),
- 1, scratch1, scratch2);
- __ Ret();
-
- // Load property array property.
- __ Bind(&property_array_property);
- __ Ldr(scratch1, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
- __ Add(scratch1, scratch1, FixedArray::kHeaderSize - kHeapObjectTag);
- __ Ldr(result, MemOperand(scratch1, scratch4, LSL, kPointerSizeLog2));
- __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(),
- 1, scratch1, scratch2);
- __ Ret();
-
- // Do a quick inline probe of the receiver's dictionary, if it exists.
- __ Bind(&probe_dictionary);
- __ Ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ Ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
- GenerateGlobalInstanceTypeCheck(masm, scratch1, slow);
- // Load the property.
- GenerateDictionaryLoad(masm, slow, scratch2, key, result, scratch1, scratch3);
- __ IncrementCounter(isolate->counters()->keyed_load_generic_symbol(),
- 1, scratch1, scratch2);
- __ Ret();
-}
-
-
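// --- Editor's sketch (illustrative, not part of the patch) ------------------
// This reproduces the bucket-index computation used above to probe the keyed
// lookup cache: XOR shifted bits of the receiver map pointer with the name's
// shifted hash field, then mask down to the cache capacity. The shift
// amounts, capacity, and input values are all assumptions for the example,
// and signedness details of the ASR shifts are glossed over.
#include <cstdint>
#include <cstdio>

int main() {
  const int kMapHashShift = 5;      // assumed
  const int kHashShift = 2;         // assumed
  const int kCapacity = 128;        // assumed power of two
  const int kEntriesPerBucket = 2;  // two (map, name) pairs per probe

  uintptr_t map_bits = 0x2a5f40;     // invented map pointer bits
  uint32_t hash_field = 0x9e3779b9;  // invented name hash field

  uint32_t index = ((uint32_t)(map_bits >> kMapHashShift) ^
                    (hash_field >> kHashShift)) & (kCapacity - 1);
  printf("bucket %u holds entries %u..%u\n", index,
         index * kEntriesPerBucket,
         index * kEntriesPerBucket + kEntriesPerBucket - 1);
  return 0;
}
// --- End editor's sketch -----------------------------------------------------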
-void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
- // The return address is in lr.
- Label slow, check_name, index_smi, index_name;
-
- Register key = NameRegister();
- Register receiver = ReceiverRegister();
- DCHECK(key.is(x2));
- DCHECK(receiver.is(x1));
-
- __ JumpIfNotSmi(key, &check_name);
- __ Bind(&index_smi);
-  // Now the key is known to be a smi. This point is also reached from below,
-  // where a numeric string is converted to a smi.
- GenerateKeyedLoadWithSmiKey(masm, key, receiver, x7, x3, x4, x5, x6, &slow);
-
- // Slow case.
- __ Bind(&slow);
- __ IncrementCounter(
- masm->isolate()->counters()->keyed_load_generic_slow(), 1, x4, x3);
- GenerateRuntimeGetProperty(masm);
-
- __ Bind(&check_name);
- GenerateKeyNameCheck(masm, key, x0, x3, &index_name, &slow);
-
- GenerateKeyedLoadWithNameKey(masm, key, receiver, x7, x3, x4, x5, x6, &slow);
-
- __ Bind(&index_name);
- __ IndexFromHash(x3, key);
- // Now jump to the place where smi keys are handled.
- __ B(&index_smi);
-}
-
-
-void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
- // Return address is in lr.
- Label miss;
-
- Register receiver = ReceiverRegister();
- Register index = NameRegister();
- Register result = x0;
- Register scratch = x3;
- DCHECK(!scratch.is(receiver) && !scratch.is(index));
-
- StringCharAtGenerator char_at_generator(receiver,
- index,
- scratch,
- result,
- &miss, // When not a string.
- &miss, // When not a number.
- &miss, // When index out of range.
- STRING_INDEX_IS_ARRAY_INDEX);
- char_at_generator.GenerateFast(masm);
- __ Ret();
-
- StubRuntimeCallHelper call_helper;
- char_at_generator.GenerateSlow(masm, call_helper);
-
- __ Bind(&miss);
- GenerateMiss(masm);
-}
-
-
-void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
- // Return address is in lr.
- Label slow;
-
- Register receiver = ReceiverRegister();
- Register key = NameRegister();
- Register scratch1 = x3;
- Register scratch2 = x4;
- DCHECK(!AreAliased(scratch1, scratch2, receiver, key));
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, &slow);
-
-  // Check that the key is an array index, that is, a Uint32.
- __ TestAndBranchIfAnySet(key, kSmiTagMask | kSmiSignMask, &slow);
-
- // Get the map of the receiver.
- Register map = scratch1;
- __ Ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
-
-  // Check that it has an indexed interceptor and that access checks
-  // are not enabled for this object.
- __ Ldrb(scratch2, FieldMemOperand(map, Map::kBitFieldOffset));
- DCHECK(kSlowCaseBitFieldMask ==
- ((1 << Map::kIsAccessCheckNeeded) | (1 << Map::kHasIndexedInterceptor)));
- __ Tbnz(scratch2, Map::kIsAccessCheckNeeded, &slow);
- __ Tbz(scratch2, Map::kHasIndexedInterceptor, &slow);
-
- // Everything is fine, call runtime.
- __ Push(receiver, key);
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(kLoadElementWithInterceptor),
- masm->isolate()),
- 2, 1);
-
- __ Bind(&slow);
- GenerateMiss(masm);
-}
-
-
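// --- Editor's sketch (illustrative, not part of the patch) ------------------
// Why a single mask test checks "is a non-negative smi" in
// GenerateIndexedInterceptor above: with a 64-bit smi encoding (tag in bit 0,
// 32-bit payload in the upper bits), the tag bit and the sign bit must both
// be clear. The encoding details below are assumptions for the example.
#include <cstdint>
#include <cstdio>

const uint64_t kSmiTagMask  = 1;           // bit 0: 0 for smis
const uint64_t kSmiSignMask = 1ULL << 63;  // sign of the smi payload

uint64_t MakeSmi(int32_t value) { return (uint64_t)(int64_t)value << 32; }

bool IsNonNegativeSmi(uint64_t bits) {
  return (bits & (kSmiTagMask | kSmiSignMask)) == 0;
}

int main() {
  printf("smi 7:   %d\n", IsNonNegativeSmi(MakeSmi(7)));   // 1
  printf("smi -7:  %d\n", IsNonNegativeSmi(MakeSmi(-7)));  // 0: sign bit set
  printf("pointer: %d\n", IsNonNegativeSmi(0x1001));       // 0: tag bit set
  return 0;
}
// --- End editor's sketch -----------------------------------------------------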
-void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
- ASM_LOCATION("KeyedStoreIC::GenerateMiss");
-
- // Push receiver, key and value for runtime call.
- __ Push(ReceiverRegister(), NameRegister(), ValueRegister());
-
- ExternalReference ref =
- ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
- __ TailCallExternalReference(ref, 3, 1);
-}
-
-
-void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
- ASM_LOCATION("KeyedStoreIC::GenerateSlow");
-
- // Push receiver, key and value for runtime call.
- __ Push(ReceiverRegister(), NameRegister(), ValueRegister());
-
- // The slow case calls into the runtime to complete the store without causing
- // an IC miss that would otherwise cause a transition to the generic stub.
- ExternalReference ref =
- ExternalReference(IC_Utility(kKeyedStoreIC_Slow), masm->isolate());
- __ TailCallExternalReference(ref, 3, 1);
-}
-
-
-void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
- StrictMode strict_mode) {
- ASM_LOCATION("KeyedStoreIC::GenerateRuntimeSetProperty");
-
- // Push receiver, key and value for runtime call.
- __ Push(ReceiverRegister(), NameRegister(), ValueRegister());
-
- // Push strict_mode for runtime call.
- __ Mov(x10, Smi::FromInt(strict_mode));
- __ Push(x10);
-
- __ TailCallRuntime(Runtime::kSetProperty, 4, 1);
-}
-
-
-static void KeyedStoreGenerateGenericHelper(
- MacroAssembler* masm,
- Label* fast_object,
- Label* fast_double,
- Label* slow,
- KeyedStoreCheckMap check_map,
- KeyedStoreIncrementLength increment_length,
- Register value,
- Register key,
- Register receiver,
- Register receiver_map,
- Register elements_map,
- Register elements) {
- DCHECK(!AreAliased(
- value, key, receiver, receiver_map, elements_map, elements, x10, x11));
-
- Label transition_smi_elements;
- Label transition_double_elements;
- Label fast_double_without_map_check;
- Label non_double_value;
- Label finish_store;
-
- __ Bind(fast_object);
- if (check_map == kCheckMap) {
- __ Ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
- __ Cmp(elements_map,
- Operand(masm->isolate()->factory()->fixed_array_map()));
- __ B(ne, fast_double);
- }
-
- // HOLECHECK: guards "A[i] = V"
- // We have to go to the runtime if the current value is the hole because there
- // may be a callback on the element.
- Label holecheck_passed;
- __ Add(x10, elements, FixedArray::kHeaderSize - kHeapObjectTag);
- __ Add(x10, x10, Operand::UntagSmiAndScale(key, kPointerSizeLog2));
- __ Ldr(x11, MemOperand(x10));
- __ JumpIfNotRoot(x11, Heap::kTheHoleValueRootIndex, &holecheck_passed);
- __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, x10, slow);
- __ bind(&holecheck_passed);
-
- // Smi stores don't require further checks.
- __ JumpIfSmi(value, &finish_store);
-
- // Escape to elements kind transition case.
- __ CheckFastObjectElements(receiver_map, x10, &transition_smi_elements);
-
- __ Bind(&finish_store);
- if (increment_length == kIncrementLength) {
- // Add 1 to receiver->length.
- __ Add(x10, key, Smi::FromInt(1));
- __ Str(x10, FieldMemOperand(receiver, JSArray::kLengthOffset));
- }
-
- Register address = x11;
- __ Add(address, elements, FixedArray::kHeaderSize - kHeapObjectTag);
- __ Add(address, address, Operand::UntagSmiAndScale(key, kPointerSizeLog2));
- __ Str(value, MemOperand(address));
-
- Label dont_record_write;
- __ JumpIfSmi(value, &dont_record_write);
-
- // Update write barrier for the elements array address.
- __ Mov(x10, value); // Preserve the value which is returned.
- __ RecordWrite(elements,
- address,
- x10,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
-
- __ Bind(&dont_record_write);
- __ Ret();
-
-
- __ Bind(fast_double);
- if (check_map == kCheckMap) {
- // Check for fast double array case. If this fails, call through to the
- // runtime.
- __ JumpIfNotRoot(elements_map, Heap::kFixedDoubleArrayMapRootIndex, slow);
- }
-
- // HOLECHECK: guards "A[i] double hole?"
- // We have to see if the double version of the hole is present. If so, go to
- // the runtime.
- __ Add(x10, elements, FixedDoubleArray::kHeaderSize - kHeapObjectTag);
- __ Add(x10, x10, Operand::UntagSmiAndScale(key, kPointerSizeLog2));
- __ Ldr(x11, MemOperand(x10));
- __ CompareAndBranch(x11, kHoleNanInt64, ne, &fast_double_without_map_check);
- __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, x10, slow);
-
- __ Bind(&fast_double_without_map_check);
- __ StoreNumberToDoubleElements(value,
- key,
- elements,
- x10,
- d0,
- &transition_double_elements);
- if (increment_length == kIncrementLength) {
- // Add 1 to receiver->length.
- __ Add(x10, key, Smi::FromInt(1));
- __ Str(x10, FieldMemOperand(receiver, JSArray::kLengthOffset));
- }
- __ Ret();
-
-
- __ Bind(&transition_smi_elements);
- // Transition the array appropriately depending on the value type.
- __ Ldr(x10, FieldMemOperand(value, HeapObject::kMapOffset));
- __ JumpIfNotRoot(x10, Heap::kHeapNumberMapRootIndex, &non_double_value);
-
- // Value is a double. Transition FAST_SMI_ELEMENTS ->
- // FAST_DOUBLE_ELEMENTS and complete the store.
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- FAST_DOUBLE_ELEMENTS,
- receiver_map,
- x10,
- x11,
- slow);
- AllocationSiteMode mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS,
- FAST_DOUBLE_ELEMENTS);
- ElementsTransitionGenerator::GenerateSmiToDouble(
- masm, receiver, key, value, receiver_map, mode, slow);
- __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ B(&fast_double_without_map_check);
-
- __ Bind(&non_double_value);
- // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS.
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- FAST_ELEMENTS,
- receiver_map,
- x10,
- x11,
- slow);
-
- mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
- ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
- masm, receiver, key, value, receiver_map, mode, slow);
-
- __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ B(&finish_store);
-
- __ Bind(&transition_double_elements);
- // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
- // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS and
- // transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
- __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS,
- FAST_ELEMENTS,
- receiver_map,
- x10,
- x11,
- slow);
- mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
- ElementsTransitionGenerator::GenerateDoubleToObject(
- masm, receiver, key, value, receiver_map, mode, slow);
- __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ B(&finish_store);
-}
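A note on the HOLECHECK blocks above: a hole cannot simply be overwritten, because an element accessor may exist further up the prototype chain. A sketch, assuming the V8 invariant that element accessors only live on dictionary-mode (slow) elements:

// Illustrative only (the array contents and setter are hypothetical):
//
//   var a = [0, , 2];       // a[1] is the hole
//   Object.defineProperty(Object.prototype, 1, {set: function(v) {}});
//   a[1] = v;               // must run the setter, not just fill the slot
//
// Defining an element accessor normalizes the holder's elements, so
// JumpIfDictionaryInPrototypeChain is a conservative test for this case.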
-
-
-void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
- StrictMode strict_mode) {
- ASM_LOCATION("KeyedStoreIC::GenerateGeneric");
- Label slow;
- Label array;
- Label fast_object;
- Label extra;
- Label fast_object_grow;
- Label fast_double_grow;
- Label fast_double;
-
- Register value = ValueRegister();
- Register key = NameRegister();
- Register receiver = ReceiverRegister();
- DCHECK(receiver.is(x1));
- DCHECK(key.is(x2));
- DCHECK(value.is(x0));
-
- Register receiver_map = x3;
- Register elements = x4;
- Register elements_map = x5;
-
- __ JumpIfNotSmi(key, &slow);
- __ JumpIfSmi(receiver, &slow);
- __ Ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
-
- // Check that the receiver does not require access checks and is not observed.
- // The generic stub does not perform map checks or handle observed objects.
- __ Ldrb(x10, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
- __ TestAndBranchIfAnySet(
- x10, (1 << Map::kIsAccessCheckNeeded) | (1 << Map::kIsObserved), &slow);
-
- // Check if the object is a JS array or not.
- Register instance_type = x10;
- __ CompareInstanceType(receiver_map, instance_type, JS_ARRAY_TYPE);
- __ B(eq, &array);
- // Check that the object is some kind of JSObject.
- __ Cmp(instance_type, FIRST_JS_OBJECT_TYPE);
- __ B(lt, &slow);
-
- // Object case: Check key against length in the elements array.
- __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- // Check array bounds. Both the key and the length of FixedArray are smis.
- __ Ldrsw(x10, UntagSmiFieldMemOperand(elements, FixedArray::kLengthOffset));
- __ Cmp(x10, Operand::UntagSmi(key));
- __ B(hi, &fast_object);
-
-
- __ Bind(&slow);
- // Slow case: jump to the runtime.
- // Live values:
- //  x0: value
- //  x1: receiver
- //  x2: key
- GenerateRuntimeSetProperty(masm, strict_mode);
-
-
- __ Bind(&extra);
- // Extra capacity case: Check if there is extra capacity to
- // perform the store and update the length. Used for adding one
- // element to the array by writing to array[array.length].
-
- // Check for room in the elements backing store.
- // Both the key and the length of FixedArray are smis.
- __ Ldrsw(x10, UntagSmiFieldMemOperand(elements, FixedArray::kLengthOffset));
- __ Cmp(x10, Operand::UntagSmi(key));
- __ B(ls, &slow);
-
- __ Ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
- __ Cmp(elements_map, Operand(masm->isolate()->factory()->fixed_array_map()));
- __ B(eq, &fast_object_grow);
- __ Cmp(elements_map,
- Operand(masm->isolate()->factory()->fixed_double_array_map()));
- __ B(eq, &fast_double_grow);
- __ B(&slow);
-
-
- __ Bind(&array);
- // Array case: Get the length and the elements array from the JS
- // array. Check that the array is in fast mode (and writable); if it is,
- // the length is always a smi.
-
- __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-
- // Check the key against the length in the array.
- __ Ldrsw(x10, UntagSmiFieldMemOperand(receiver, JSArray::kLengthOffset));
- __ Cmp(x10, Operand::UntagSmi(key));
- __ B(eq, &extra); // We can handle the case where we are appending 1 element.
- __ B(lo, &slow);
-
- KeyedStoreGenerateGenericHelper(masm, &fast_object, &fast_double,
- &slow, kCheckMap, kDontIncrementLength,
- value, key, receiver, receiver_map,
- elements_map, elements);
- KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow,
- &slow, kDontCheckMap, kIncrementLength,
- value, key, receiver, receiver_map,
- elements_map, elements);
-}
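As a reading aid, here is a rough pseudocode outline of the dispatch GenerateGeneric emits (a sketch of the branches above, not a separate implementation):

//   if (!key.IsSmi() || receiver.IsSmi()) goto slow;
//   if (map needs access checks || map is observed) goto slow;
//   if (receiver is a JSArray) {
//     if (key == array.length) goto extra;   // append one element
//     if (key >  array.length) goto slow;
//     goto fast;                             // in-bounds array store
//   }
//   if (receiver is not a JSObject) goto slow;
//   if (key >= elements.length) goto slow;
//   goto fast;                               // fast_object / fast_double
//
//   extra: if (key >= backing store capacity) goto slow;
//          grow-by-one store via fast_object_grow / fast_double_grow;
//   slow:  GenerateRuntimeSetProperty (Runtime::kSetProperty)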
-
-
-void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
- Register receiver = ReceiverRegister();
- Register name = NameRegister();
- DCHECK(!AreAliased(receiver, name, ValueRegister(), x3, x4, x5, x6));
-
- // Probe the stub cache.
- Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
- Code::ComputeHandlerFlags(Code::STORE_IC));
- masm->isolate()->stub_cache()->GenerateProbe(
- masm, flags, receiver, name, x3, x4, x5, x6);
-
- // Cache miss: Jump to runtime.
- GenerateMiss(masm);
-}
-
-
-void StoreIC::GenerateMiss(MacroAssembler* masm) {
- __ Push(ReceiverRegister(), NameRegister(), ValueRegister());
-
- // Tail call to the entry.
- ExternalReference ref =
- ExternalReference(IC_Utility(kStoreIC_Miss), masm->isolate());
- __ TailCallExternalReference(ref, 3, 1);
-}
-
-
-void StoreIC::GenerateNormal(MacroAssembler* masm) {
- Label miss;
- Register value = ValueRegister();
- Register receiver = ReceiverRegister();
- Register name = NameRegister();
- Register dictionary = x3;
- DCHECK(!AreAliased(value, receiver, name, x3, x4, x5));
-
- __ Ldr(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
-
- GenerateDictionaryStore(masm, &miss, dictionary, name, value, x4, x5);
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->store_normal_hit(), 1, x4, x5);
- __ Ret();
-
- // Cache miss: Jump to runtime.
- __ Bind(&miss);
- __ IncrementCounter(counters->store_normal_miss(), 1, x4, x5);
- GenerateMiss(masm);
-}
-
-
-void StoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
- StrictMode strict_mode) {
- ASM_LOCATION("StoreIC::GenerateRuntimeSetProperty");
-
- __ Push(ReceiverRegister(), NameRegister(), ValueRegister());
-
- __ Mov(x10, Smi::FromInt(strict_mode));
- __ Push(x10);
-
- // Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kSetProperty, 4, 1);
-}
-
-
-void StoreIC::GenerateSlow(MacroAssembler* masm) {
- // ---------- S t a t e --------------
- // -- x0 : value
- // -- x1 : receiver
- // -- x2 : name
- // -- lr : return address
- // -----------------------------------
-
- // Push receiver, name and value for runtime call.
- __ Push(ReceiverRegister(), NameRegister(), ValueRegister());
-
- // The slow case calls into the runtime to complete the store without causing
- // an IC miss that would otherwise cause a transition to the generic stub.
- ExternalReference ref =
- ExternalReference(IC_Utility(kStoreIC_Slow), masm->isolate());
- __ TailCallExternalReference(ref, 3, 1);
-}
-
-
-Condition CompareIC::ComputeCondition(Token::Value op) {
- switch (op) {
- case Token::EQ_STRICT:
- case Token::EQ:
- return eq;
- case Token::LT:
- return lt;
- case Token::GT:
- return gt;
- case Token::LTE:
- return le;
- case Token::GTE:
- return ge;
- default:
- UNREACHABLE();
- return al;
- }
-}
-
-
-bool CompareIC::HasInlinedSmiCode(Address address) {
- // The address of the instruction following the call.
- Address info_address =
- Assembler::return_address_from_call_start(address);
-
- InstructionSequence* patch_info = InstructionSequence::At(info_address);
- return patch_info->IsInlineData();
-}
-
-
-// Activate a SMI fast-path by patching the instructions generated by
-// JumpPatchSite::EmitJumpIf(Not)Smi(), using the information encoded by
-// JumpPatchSite::EmitPatchInfo().
-void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
- // The patch information is encoded in the instruction stream using
- // instructions which have no side effects, so we can safely execute them.
- // The patch information is encoded directly after the call to the helper
- // function which is requesting this patch operation.
- Address info_address =
- Assembler::return_address_from_call_start(address);
- InlineSmiCheckInfo info(info_address);
-
- // Check and decode the patch information instruction.
- if (!info.HasSmiCheck()) {
- return;
- }
-
- if (FLAG_trace_ic) {
- PrintF("[ Patching ic at %p, marker=%p, SMI check=%p\n",
- address, info_address, reinterpret_cast<void*>(info.SmiCheck()));
- }
-
- // Patch and activate code generated by JumpPatchSite::EmitJumpIfNotSmi()
- // and JumpPatchSite::EmitJumpIfSmi().
- // Changing
- // tb(n)z xzr, #0, <target>
- // to
- // tb(!n)z test_reg, #0, <target>
- Instruction* to_patch = info.SmiCheck();
- PatchingAssembler patcher(to_patch, 1);
- DCHECK(to_patch->IsTestBranch());
- DCHECK(to_patch->ImmTestBranchBit5() == 0);
- DCHECK(to_patch->ImmTestBranchBit40() == 0);
-
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagMask == 1);
-
- int branch_imm = to_patch->ImmTestBranch();
- Register smi_reg;
- if (check == ENABLE_INLINED_SMI_CHECK) {
- DCHECK(to_patch->Rt() == xzr.code());
- smi_reg = info.SmiRegister();
- } else {
- DCHECK(check == DISABLE_INLINED_SMI_CHECK);
- DCHECK(to_patch->Rt() != xzr.code());
- smi_reg = xzr;
- }
-
- if (to_patch->Mask(TestBranchMask) == TBZ) {
- // This is JumpIfNotSmi(smi_reg, branch_imm).
- patcher.tbnz(smi_reg, 0, branch_imm);
- } else {
- DCHECK(to_patch->Mask(TestBranchMask) == TBNZ);
- // This is JumpIfSmi(smi_reg, branch_imm).
- patcher.tbz(smi_reg, 0, branch_imm);
- }
-}
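Concretely, for a JumpIfNotSmi site whose test register happens to be x0 (the register and label names are illustrative, and the encodings are not exact):

//   disabled:  tbz  xzr, #0, not_smi   // bit 0 of xzr is always clear, so
//                                      // this branch is always taken: every
//                                      // value is treated as "not a smi"
//   enabled:   tbnz x0,  #0, not_smi   // taken only when x0's tag bit is
//                                      // set, i.e. x0 really is not a smi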
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_ARM64
#include "src/arm64/lithium-gap-resolver-arm64.h"
#include "src/code-stubs.h"
#include "src/hydrogen-osr.h"
-#include "src/stub-cache.h"
namespace v8 {
namespace internal {
+++ /dev/null
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/v8.h"
-
-#if V8_TARGET_ARCH_ARM64
-
-#include "src/codegen.h"
-#include "src/ic-inl.h"
-#include "src/stub-cache.h"
-
-namespace v8 {
-namespace internal {
-
-
-#define __ ACCESS_MASM(masm)
-
-
-void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
- MacroAssembler* masm, Label* miss_label, Register receiver,
- Handle<Name> name, Register scratch0, Register scratch1) {
- DCHECK(!AreAliased(receiver, scratch0, scratch1));
- DCHECK(name->IsUniqueName());
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->negative_lookups(), 1, scratch0, scratch1);
- __ IncrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
-
- Label done;
-
- const int kInterceptorOrAccessCheckNeededMask =
- (1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded);
-
- // Bail out if the receiver has a named interceptor or requires access checks.
- Register map = scratch1;
- __ Ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ Ldrb(scratch0, FieldMemOperand(map, Map::kBitFieldOffset));
- __ Tst(scratch0, kInterceptorOrAccessCheckNeededMask);
- __ B(ne, miss_label);
-
- // Check that receiver is a JSObject.
- __ Ldrb(scratch0, FieldMemOperand(map, Map::kInstanceTypeOffset));
- __ Cmp(scratch0, FIRST_SPEC_OBJECT_TYPE);
- __ B(lt, miss_label);
-
- // Load properties array.
- Register properties = scratch0;
- __ Ldr(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
- // Check that the properties array is a dictionary.
- __ Ldr(map, FieldMemOperand(properties, HeapObject::kMapOffset));
- __ JumpIfNotRoot(map, Heap::kHashTableMapRootIndex, miss_label);
-
- NameDictionaryLookupStub::GenerateNegativeLookup(masm,
- miss_label,
- &done,
- receiver,
- properties,
- name,
- scratch1);
- __ Bind(&done);
- __ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
-}
-
-
-// Probe the primary or secondary table.
-// If the entry is found in the cache, the generated code jumps to the first
-// instruction of the stub in the cache.
-// If there is a miss, the code falls through.
-//
-// 'receiver', 'name' and 'offset' registers are preserved on miss.
-static void ProbeTable(Isolate* isolate,
- MacroAssembler* masm,
- Code::Flags flags,
- StubCache::Table table,
- Register receiver,
- Register name,
- Register offset,
- Register scratch,
- Register scratch2,
- Register scratch3) {
- // Some code below relies on the fact that the Entry struct contains
- // 3 pointers (name, code, map).
- STATIC_ASSERT(sizeof(StubCache::Entry) == (3 * kPointerSize));
-
- ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
- ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
- ExternalReference map_offset(isolate->stub_cache()->map_reference(table));
-
- uintptr_t key_off_addr = reinterpret_cast<uintptr_t>(key_offset.address());
- uintptr_t value_off_addr =
- reinterpret_cast<uintptr_t>(value_offset.address());
- uintptr_t map_off_addr = reinterpret_cast<uintptr_t>(map_offset.address());
-
- Label miss;
-
- DCHECK(!AreAliased(name, offset, scratch, scratch2, scratch3));
-
- // Multiply by 3 because there are 3 fields per entry.
- __ Add(scratch3, offset, Operand(offset, LSL, 1));
-
- // Calculate the base address of the entry.
- __ Mov(scratch, key_offset);
- __ Add(scratch, scratch, Operand(scratch3, LSL, kPointerSizeLog2));
-
- // Check that the key in the entry matches the name.
- __ Ldr(scratch2, MemOperand(scratch));
- __ Cmp(name, scratch2);
- __ B(ne, &miss);
-
- // Check the map matches.
- __ Ldr(scratch2, MemOperand(scratch, map_off_addr - key_off_addr));
- __ Ldr(scratch3, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ Cmp(scratch2, scratch3);
- __ B(ne, &miss);
-
- // Get the code entry from the cache.
- __ Ldr(scratch, MemOperand(scratch, value_off_addr - key_off_addr));
-
- // Check that the flags match what we're looking for.
- __ Ldr(scratch2.W(), FieldMemOperand(scratch, Code::kFlagsOffset));
- __ Bic(scratch2.W(), scratch2.W(), Code::kFlagsNotUsedInLookup);
- __ Cmp(scratch2.W(), flags);
- __ B(ne, &miss);
-
-#ifdef DEBUG
- if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
- __ B(&miss);
- } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
- __ B(&miss);
- }
-#endif
-
- // Jump to the first instruction in the code stub.
- __ Add(scratch, scratch, Code::kHeaderSize - kHeapObjectTag);
- __ Br(scratch);
-
- // Miss: fall through.
- __ Bind(&miss);
-}
-
-
-void StubCache::GenerateProbe(MacroAssembler* masm,
- Code::Flags flags,
- Register receiver,
- Register name,
- Register scratch,
- Register extra,
- Register extra2,
- Register extra3) {
- Isolate* isolate = masm->isolate();
- Label miss;
-
- // Make sure the flags do not name a specific type.
- DCHECK(Code::ExtractTypeFromFlags(flags) == 0);
-
- // Make sure that there are no register conflicts.
- DCHECK(!AreAliased(receiver, name, scratch, extra, extra2, extra3));
-
- // Make sure the extra, extra2 and extra3 registers are valid.
- DCHECK(!extra.is(no_reg));
- DCHECK(!extra2.is(no_reg));
- DCHECK(!extra3.is(no_reg));
-
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1,
- extra2, extra3);
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, &miss);
-
- // Compute the hash for primary table.
- __ Ldr(scratch, FieldMemOperand(name, Name::kHashFieldOffset));
- __ Ldr(extra, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ Add(scratch, scratch, extra);
- __ Eor(scratch, scratch, flags);
- // We shift out the last two bits because they are not part of the hash.
- __ Ubfx(scratch, scratch, kCacheIndexShift,
- CountTrailingZeros(kPrimaryTableSize, 64));
-
- // Probe the primary table.
- ProbeTable(isolate, masm, flags, kPrimary, receiver, name,
- scratch, extra, extra2, extra3);
-
- // Primary miss: Compute hash for secondary table.
- __ Sub(scratch, scratch, Operand(name, LSR, kCacheIndexShift));
- __ Add(scratch, scratch, flags >> kCacheIndexShift);
- __ And(scratch, scratch, kSecondaryTableSize - 1);
-
- // Probe the secondary table.
- ProbeTable(isolate, masm, flags, kSecondary, receiver, name,
- scratch, extra, extra2, extra3);
-
- // Cache miss: fall through and let the caller handle the miss by
- // entering the runtime system.
- __ Bind(&miss);
- __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1,
- extra2, extra3);
-}
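The probe arithmetic is easier to follow in scalar form. Below is a minimal, self-contained C++ sketch of what the sequence above computes; the constant values and the PrimaryIndex/SecondaryIndex names are illustrative, not V8's API (the real constants live in stub-cache.h):

#include <stdint.h>

// Illustrative constants; hypothetical values.
static const int kCacheIndexShift = 3;
static const int kPrimaryTableSize = 2048;
static const int kSecondaryTableSize = 512;

struct Entry { void* key; void* value; void* map; };  // 3 words, as asserted

// scratch = name->hash + map; scratch ^= flags; Ubfx then extracts
// log2(kPrimaryTableSize) bits starting at kCacheIndexShift.
static uint32_t PrimaryIndex(uint32_t name_hash, uint32_t map_bits,
                             uint32_t flags) {
  uint32_t key = (name_hash + map_bits) ^ flags;
  return (key >> kCacheIndexShift) & (kPrimaryTableSize - 1);
}

// scratch -= name >> kCacheIndexShift; scratch += flags >> kCacheIndexShift;
// scratch &= kSecondaryTableSize - 1.
static uint32_t SecondaryIndex(uint32_t primary_index, uint32_t name_bits,
                               uint32_t flags) {
  uint32_t key = primary_index - (name_bits >> kCacheIndexShift) +
                 (flags >> kCacheIndexShift);
  return key & (kSecondaryTableSize - 1);
}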
-
-
-void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype(
- MacroAssembler* masm, int index, Register prototype, Label* miss) {
- Isolate* isolate = masm->isolate();
- // Get the global function with the given index.
- Handle<JSFunction> function(
- JSFunction::cast(isolate->native_context()->get(index)));
-
- // Check we're still in the same context.
- Register scratch = prototype;
- __ Ldr(scratch, GlobalObjectMemOperand());
- __ Ldr(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
- __ Ldr(scratch, ContextMemOperand(scratch, index));
- __ Cmp(scratch, Operand(function));
- __ B(ne, miss);
-
- // Load its initial map. The global functions all have initial maps.
- __ Mov(prototype, Operand(Handle<Map>(function->initial_map())));
- // Load the prototype from the initial map.
- __ Ldr(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset));
-}
-
-
-void NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(
- MacroAssembler* masm, Register receiver, Register scratch1,
- Register scratch2, Label* miss_label) {
- __ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label);
- // TryGetFunctionPrototype can't put the result directly in x0 because the
- // three input registers can't alias, and we call this function from
- // LoadIC::GenerateFunctionPrototype, where the receiver is x0. So we
- // explicitly move the result into x0.
- __ Mov(x0, scratch1);
- __ Ret();
-}
-
-
-// Generate code to check that a global property cell is empty. Create
-// the property cell at compilation time if no cell exists for the
-// property.
-void PropertyHandlerCompiler::GenerateCheckPropertyCell(
- MacroAssembler* masm, Handle<JSGlobalObject> global, Handle<Name> name,
- Register scratch, Label* miss) {
- Handle<Cell> cell = JSGlobalObject::EnsurePropertyCell(global, name);
- DCHECK(cell->value()->IsTheHole());
- __ Mov(scratch, Operand(cell));
- __ Ldr(scratch, FieldMemOperand(scratch, Cell::kValueOffset));
- __ JumpIfNotRoot(scratch, Heap::kTheHoleValueRootIndex, miss);
-}
-
-
-static void PushInterceptorArguments(MacroAssembler* masm, Register receiver,
- Register holder, Register name,
- Handle<JSObject> holder_obj) {
- STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0);
- STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsInfoIndex == 1);
- STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 2);
- STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 3);
- STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsLength == 4);
-
- __ Push(name);
- Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
- DCHECK(!masm->isolate()->heap()->InNewSpace(*interceptor));
- Register scratch = name;
- __ Mov(scratch, Operand(interceptor));
- __ Push(scratch, receiver, holder);
-}
-
-
-static void CompileCallLoadPropertyWithInterceptor(
- MacroAssembler* masm, Register receiver, Register holder, Register name,
- Handle<JSObject> holder_obj, IC::UtilityId id) {
- PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
-
- __ CallExternalReference(ExternalReference(IC_Utility(id), masm->isolate()),
- NamedLoadHandlerCompiler::kInterceptorArgsLength);
-}
-
-
-// Generate call to api function.
-void PropertyHandlerCompiler::GenerateFastApiCall(
- MacroAssembler* masm, const CallOptimization& optimization,
- Handle<Map> receiver_map, Register receiver, Register scratch,
- bool is_store, int argc, Register* values) {
- DCHECK(!AreAliased(receiver, scratch));
-
- MacroAssembler::PushPopQueue queue(masm);
- queue.Queue(receiver);
- // Write the arguments to the stack frame.
- for (int i = 0; i < argc; i++) {
- Register arg = values[argc - 1 - i];
- DCHECK(!AreAliased(receiver, scratch, arg));
- queue.Queue(arg);
- }
- queue.PushQueued();
-
- DCHECK(optimization.is_simple_api_call());
-
- // Abi for CallApiFunctionStub.
- Register callee = x0;
- Register call_data = x4;
- Register holder = x2;
- Register api_function_address = x1;
-
- // Put holder in place.
- CallOptimization::HolderLookup holder_lookup;
- Handle<JSObject> api_holder =
- optimization.LookupHolderOfExpectedType(receiver_map, &holder_lookup);
- switch (holder_lookup) {
- case CallOptimization::kHolderIsReceiver:
- __ Mov(holder, receiver);
- break;
- case CallOptimization::kHolderFound:
- __ LoadObject(holder, api_holder);
- break;
- case CallOptimization::kHolderNotFound:
- UNREACHABLE();
- break;
- }
-
- Isolate* isolate = masm->isolate();
- Handle<JSFunction> function = optimization.constant_function();
- Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
- Handle<Object> call_data_obj(api_call_info->data(), isolate);
-
- // Put callee in place.
- __ LoadObject(callee, function);
-
- bool call_data_undefined = false;
- // Put call_data in place.
- if (isolate->heap()->InNewSpace(*call_data_obj)) {
- __ LoadObject(call_data, api_call_info);
- __ Ldr(call_data, FieldMemOperand(call_data, CallHandlerInfo::kDataOffset));
- } else if (call_data_obj->IsUndefined()) {
- call_data_undefined = true;
- __ LoadRoot(call_data, Heap::kUndefinedValueRootIndex);
- } else {
- __ LoadObject(call_data, call_data_obj);
- }
-
- // Put api_function_address in place.
- Address function_address = v8::ToCData<Address>(api_call_info->callback());
- ApiFunction fun(function_address);
- ExternalReference ref = ExternalReference(
- &fun, ExternalReference::DIRECT_API_CALL, masm->isolate());
- __ Mov(api_function_address, ref);
-
- // Jump to stub.
- CallApiFunctionStub stub(isolate, is_store, call_data_undefined, argc);
- __ TailCallStub(&stub);
-}
-
-
-void PropertyAccessCompiler::GenerateTailCall(MacroAssembler* masm,
- Handle<Code> code) {
- __ Jump(code, RelocInfo::CODE_TARGET);
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm())
-
-
-void NamedStoreHandlerCompiler::GenerateRestoreName(Label* label,
- Handle<Name> name) {
- if (!label->is_unused()) {
- __ Bind(label);
- __ Mov(this->name(), Operand(name));
- }
-}
-
-
-// Generate StoreTransition code; the value is passed in the x0 register.
-// When leaving the generated code after success, the receiver_reg and
-// storage_reg may be clobbered. Upon a branch to miss_label, the receiver and
-// name registers have their original values.
-void NamedStoreHandlerCompiler::GenerateStoreTransition(
- Handle<Map> transition, Handle<Name> name, Register receiver_reg,
- Register storage_reg, Register value_reg, Register scratch1,
- Register scratch2, Register scratch3, Label* miss_label, Label* slow) {
- Label exit;
-
- DCHECK(!AreAliased(receiver_reg, storage_reg, value_reg,
- scratch1, scratch2, scratch3));
-
- // We don't need scratch3.
- scratch3 = NoReg;
-
- int descriptor = transition->LastAdded();
- DescriptorArray* descriptors = transition->instance_descriptors();
- PropertyDetails details = descriptors->GetDetails(descriptor);
- Representation representation = details.representation();
- DCHECK(!representation.IsNone());
-
- if (details.type() == CONSTANT) {
- Handle<Object> constant(descriptors->GetValue(descriptor), isolate());
- __ LoadObject(scratch1, constant);
- __ Cmp(value_reg, scratch1);
- __ B(ne, miss_label);
- } else if (representation.IsSmi()) {
- __ JumpIfNotSmi(value_reg, miss_label);
- } else if (representation.IsHeapObject()) {
- __ JumpIfSmi(value_reg, miss_label);
- HeapType* field_type = descriptors->GetFieldType(descriptor);
- HeapType::Iterator<Map> it = field_type->Classes();
- if (!it.Done()) {
- __ Ldr(scratch1, FieldMemOperand(value_reg, HeapObject::kMapOffset));
- Label do_store;
- while (true) {
- __ CompareMap(scratch1, it.Current());
- it.Advance();
- if (it.Done()) {
- __ B(ne, miss_label);
- break;
- }
- __ B(eq, &do_store);
- }
- __ Bind(&do_store);
- }
- } else if (representation.IsDouble()) {
- UseScratchRegisterScope temps(masm());
- DoubleRegister temp_double = temps.AcquireD();
- __ SmiUntagToDouble(temp_double, value_reg, kSpeculativeUntag);
-
- Label do_store;
- __ JumpIfSmi(value_reg, &do_store);
-
- __ CheckMap(value_reg, scratch1, Heap::kHeapNumberMapRootIndex,
- miss_label, DONT_DO_SMI_CHECK);
- __ Ldr(temp_double, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
-
- __ Bind(&do_store);
- __ AllocateHeapNumber(storage_reg, slow, scratch1, scratch2, temp_double,
- NoReg, MUTABLE);
- }
-
- // Stub never generated for objects that require access checks.
- DCHECK(!transition->is_access_check_needed());
-
- // Perform map transition for the receiver if necessary.
- if (details.type() == FIELD &&
- Map::cast(transition->GetBackPointer())->unused_property_fields() == 0) {
- // The properties must be extended before we can store the value.
- // We jump to a runtime call that extends the properties array.
- __ Mov(scratch1, Operand(transition));
- __ Push(receiver_reg, scratch1, value_reg);
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage),
- isolate()),
- 3, 1);
- return;
- }
-
- // Update the map of the object.
- __ Mov(scratch1, Operand(transition));
- __ Str(scratch1, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
-
- // Update the write barrier for the map field.
- __ RecordWriteField(receiver_reg,
- HeapObject::kMapOffset,
- scratch1,
- scratch2,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs,
- OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
-
- if (details.type() == CONSTANT) {
- DCHECK(value_reg.is(x0));
- __ Ret();
- return;
- }
-
- int index = transition->instance_descriptors()->GetFieldIndex(
- transition->LastAdded());
-
- // Adjust for the number of properties stored in the object. Even in the
- // face of a transition we can use the old map here because the size of the
- // object and the number of in-object properties are not going to change.
- index -= transition->inobject_properties();
-
- // TODO(verwaest): Share this code as a code stub.
- SmiCheck smi_check = representation.IsTagged()
- ? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
- Register prop_reg = representation.IsDouble() ? storage_reg : value_reg;
- if (index < 0) {
- // Set the property straight into the object.
- int offset = transition->instance_size() + (index * kPointerSize);
- __ Str(prop_reg, FieldMemOperand(receiver_reg, offset));
-
- if (!representation.IsSmi()) {
- // Update the write barrier for the array address.
- if (!representation.IsDouble()) {
- __ Mov(storage_reg, value_reg);
- }
- __ RecordWriteField(receiver_reg,
- offset,
- storage_reg,
- scratch1,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- smi_check);
- }
- } else {
- // Write to the properties array.
- int offset = index * kPointerSize + FixedArray::kHeaderSize;
- // Get the properties array
- __ Ldr(scratch1,
- FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
- __ Str(prop_reg, FieldMemOperand(scratch1, offset));
-
- if (!representation.IsSmi()) {
- // Update the write barrier for the array address.
- if (!representation.IsDouble()) {
- __ Mov(storage_reg, value_reg);
- }
- __ RecordWriteField(scratch1,
- offset,
- storage_reg,
- receiver_reg,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- smi_check);
- }
- }
-
- __ Bind(&exit);
- // Return the value (register x0).
- DCHECK(value_reg.is(x0));
- __ Ret();
-}
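GenerateStoreTransition is long; a rough outline of its control flow (a sketch of the code above, not authoritative):

//   1. Validate the value against the target descriptor: CONSTANT must be
//      equal to the stored constant; Smi/HeapObject representations check
//      the tag and the field type; Double boxes the value into a fresh
//      HeapNumber in storage_reg.
//   2. If the transition target has no unused property fields left,
//      tail-call kSharedStoreIC_ExtendStorage to grow the properties array.
//   3. Otherwise write the new map (with a write barrier), then store the
//      value either in-object (negative index) or into the out-of-object
//      properties array, again with a write barrier for non-smi values.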
-
-
-void NamedStoreHandlerCompiler::GenerateStoreField(LookupIterator* lookup,
- Register value_reg,
- Label* miss_label) {
- DCHECK(lookup->representation().IsHeapObject());
- __ JumpIfSmi(value_reg, miss_label);
- HeapType::Iterator<Map> it = lookup->GetFieldType()->Classes();
- __ Ldr(scratch1(), FieldMemOperand(value_reg, HeapObject::kMapOffset));
- Label do_store;
- while (true) {
- __ CompareMap(scratch1(), it.Current());
- it.Advance();
- if (it.Done()) {
- __ B(ne, miss_label);
- break;
- }
- __ B(eq, &do_store);
- }
- __ Bind(&do_store);
-
- StoreFieldStub stub(isolate(), lookup->GetFieldIndex(),
- lookup->representation());
- GenerateTailCall(masm(), stub.GetCode());
-}
-
-
-Register PropertyHandlerCompiler::CheckPrototypes(
- Register object_reg, Register holder_reg, Register scratch1,
- Register scratch2, Handle<Name> name, Label* miss,
- PrototypeCheckType check) {
- Handle<Map> receiver_map(IC::TypeToMap(*type(), isolate()));
-
- // object_reg and holder_reg registers can alias.
- DCHECK(!AreAliased(object_reg, scratch1, scratch2));
- DCHECK(!AreAliased(holder_reg, scratch1, scratch2));
-
- // Keep track of the current object in register reg.
- Register reg = object_reg;
- int depth = 0;
-
- Handle<JSObject> current = Handle<JSObject>::null();
- if (type()->IsConstant()) {
- current = Handle<JSObject>::cast(type()->AsConstant()->Value());
- }
- Handle<JSObject> prototype = Handle<JSObject>::null();
- Handle<Map> current_map = receiver_map;
- Handle<Map> holder_map(holder()->map());
- // Traverse the prototype chain and check the maps in the prototype chain for
- // fast and global objects or do negative lookup for normal objects.
- while (!current_map.is_identical_to(holder_map)) {
- ++depth;
-
- // Only global objects and objects that do not require access
- // checks are allowed in stubs.
- DCHECK(current_map->IsJSGlobalProxyMap() ||
- !current_map->is_access_check_needed());
-
- prototype = handle(JSObject::cast(current_map->prototype()));
- if (current_map->is_dictionary_map() &&
- !current_map->IsJSGlobalObjectMap()) {
- DCHECK(!current_map->IsJSGlobalProxyMap()); // Proxy maps are fast.
- if (!name->IsUniqueName()) {
- DCHECK(name->IsString());
- name = factory()->InternalizeString(Handle<String>::cast(name));
- }
- DCHECK(current.is_null() ||
- (current->property_dictionary()->FindEntry(name) ==
- NameDictionary::kNotFound));
-
- GenerateDictionaryNegativeLookup(masm(), miss, reg, name,
- scratch1, scratch2);
-
- __ Ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
- reg = holder_reg; // From now on the object will be in holder_reg.
- __ Ldr(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
- } else {
- // Two possible reasons for loading the prototype from the map:
- // (1) Can't store references to new space in code.
- // (2) Handler is shared for all receivers with the same prototype
- // map (but not necessarily the same prototype instance).
- bool load_prototype_from_map =
- heap()->InNewSpace(*prototype) || depth == 1;
- Register map_reg = scratch1;
- __ Ldr(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
-
- if (depth != 1 || check == CHECK_ALL_MAPS) {
- __ CheckMap(map_reg, current_map, miss, DONT_DO_SMI_CHECK);
- }
-
- // Check access rights to the global object. This has to happen after
- // the map check so that we know that the object is actually a global
- // object.
- // This allows us to install generated handlers for accesses to the
- // global proxy (as opposed to using slow ICs). See corresponding code
- // in LookupForRead().
- if (current_map->IsJSGlobalProxyMap()) {
- UseScratchRegisterScope temps(masm());
- __ CheckAccessGlobalProxy(reg, scratch2, temps.AcquireX(), miss);
- } else if (current_map->IsJSGlobalObjectMap()) {
- GenerateCheckPropertyCell(
- masm(), Handle<JSGlobalObject>::cast(current), name,
- scratch2, miss);
- }
-
- reg = holder_reg; // From now on the object will be in holder_reg.
-
- if (load_prototype_from_map) {
- __ Ldr(reg, FieldMemOperand(map_reg, Map::kPrototypeOffset));
- } else {
- __ Mov(reg, Operand(prototype));
- }
- }
-
- // Go to the next object in the prototype chain.
- current = prototype;
- current_map = handle(current->map());
- }
-
- // Log the check depth.
- LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
-
- // Check the holder map.
- if (depth != 0 || check == CHECK_ALL_MAPS) {
- // Check the holder map.
- __ CheckMap(reg, scratch1, current_map, miss, DONT_DO_SMI_CHECK);
- }
-
- // Perform security check for access to the global object.
- DCHECK(current_map->IsJSGlobalProxyMap() ||
- !current_map->is_access_check_needed());
- if (current_map->IsJSGlobalProxyMap()) {
- __ CheckAccessGlobalProxy(reg, scratch1, scratch2, miss);
- }
-
- // Return the register containing the holder.
- return reg;
-}
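An outline of the prototype-chain walk above, in pseudocode (a sketch; the new-space and depth special cases are as in the code):

//   reg = object_reg; current_map = receiver_map; depth = 0;
//   while (current_map != holder_map) {
//     ++depth;
//     if (current_map is a non-global dictionary map)
//       negative lookup: `name` must be absent from reg's properties;
//     else {
//       CheckMap(current_map) unless depth == 1 && check != CHECK_ALL_MAPS;
//       global proxy  -> CheckAccessGlobalProxy;
//       global object -> GenerateCheckPropertyCell(name);
//     }
//     reg = prototype;  // loaded from the map when it is in new space or
//                       // depth == 1, otherwise embedded as a constant
//     current_map = prototype->map;
//   }
//   if (depth != 0 || check == CHECK_ALL_MAPS) CheckMap(holder_map);
//   if (holder_map is a global proxy map) CheckAccessGlobalProxy;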
-
-
-void NamedLoadHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
- if (!miss->is_unused()) {
- Label success;
- __ B(&success);
-
- __ Bind(miss);
- TailCallBuiltin(masm(), MissBuiltin(kind()));
-
- __ Bind(&success);
- }
-}
-
-
-void NamedStoreHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
- if (!miss->is_unused()) {
- Label success;
- __ B(&success);
-
- GenerateRestoreName(miss, name);
- TailCallBuiltin(masm(), MissBuiltin(kind()));
-
- __ Bind(&success);
- }
-}
-
-
-void NamedLoadHandlerCompiler::GenerateLoadConstant(Handle<Object> value) {
- // Return the constant value.
- __ LoadObject(x0, value);
- __ Ret();
-}
-
-
-void NamedLoadHandlerCompiler::GenerateLoadCallback(
- Register reg, Handle<ExecutableAccessorInfo> callback) {
- DCHECK(!AreAliased(scratch2(), scratch3(), scratch4(), reg));
-
- // Build the ExecutableAccessorInfo::args_ list on the stack and push the
- // property name below the exit frame to make the GC aware of them, and
- // store pointers to them.
- STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 0);
- STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 1);
- STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 2);
- STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 3);
- STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 4);
- STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 5);
- STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 6);
-
- __ Push(receiver());
-
- if (heap()->InNewSpace(callback->data())) {
- __ Mov(scratch3(), Operand(callback));
- __ Ldr(scratch3(), FieldMemOperand(scratch3(),
- ExecutableAccessorInfo::kDataOffset));
- } else {
- __ Mov(scratch3(), Operand(Handle<Object>(callback->data(), isolate())));
- }
- __ LoadRoot(scratch4(), Heap::kUndefinedValueRootIndex);
- __ Mov(scratch2(), Operand(ExternalReference::isolate_address(isolate())));
- __ Push(scratch3(), scratch4(), scratch4(), scratch2(), reg, name());
-
- Register args_addr = scratch2();
- __ Add(args_addr, __ StackPointer(), kPointerSize);
-
- // Stack at this point:
- // sp[40] callback data
- // sp[32] undefined
- // sp[24] undefined
- // sp[16] isolate
- // args_addr -> sp[8] reg
- // sp[0] name
-
- // Abi for CallApiGetter.
- Register getter_address_reg = x2;
-
- // Set up the call.
- Address getter_address = v8::ToCData<Address>(callback->getter());
- ApiFunction fun(getter_address);
- ExternalReference::Type type = ExternalReference::DIRECT_GETTER_CALL;
- ExternalReference ref = ExternalReference(&fun, type, isolate());
- __ Mov(getter_address_reg, ref);
-
- CallApiGetterStub stub(isolate());
- __ TailCallStub(&stub);
-}
-
-
-void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
- LookupIterator* it, Register holder_reg) {
- DCHECK(!AreAliased(receiver(), this->name(),
- scratch1(), scratch2(), scratch3()));
- DCHECK(holder()->HasNamedInterceptor());
- DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined());
-
- // Compile the interceptor call, followed by inline code to load the
- // property from further up the prototype chain if the call fails.
- // Check that the maps haven't changed.
- DCHECK(holder_reg.is(receiver()) || holder_reg.is(scratch1()));
-
- // Preserve the receiver register explicitly whenever it is different from
- // the holder and it is needed should the interceptor return without any
- // result. The ACCESSOR case needs the receiver to be passed into C++ code;
- // the FIELD case might cause a miss during the prototype check.
- bool must_perform_prototype_check =
- !holder().is_identical_to(it->GetHolder<JSObject>());
- bool must_preserve_receiver_reg =
- !receiver().is(holder_reg) &&
- (it->property_kind() == LookupIterator::ACCESSOR ||
- must_perform_prototype_check);
-
- // Save necessary data before invoking an interceptor.
- // Requires a frame to make GC aware of pushed pointers.
- {
- FrameScope frame_scope(masm(), StackFrame::INTERNAL);
- if (must_preserve_receiver_reg) {
- __ Push(receiver(), holder_reg, this->name());
- } else {
- __ Push(holder_reg, this->name());
- }
- // Invoke an interceptor. Note: map checks from the receiver to the
- // interceptor's holder have been compiled before (see the caller of this
- // method).
- CompileCallLoadPropertyWithInterceptor(
- masm(), receiver(), holder_reg, this->name(), holder(),
- IC::kLoadPropertyWithInterceptorOnly);
-
- // Check if interceptor provided a value for property. If it's
- // the case, return immediately.
- Label interceptor_failed;
- __ JumpIfRoot(x0, Heap::kNoInterceptorResultSentinelRootIndex,
- &interceptor_failed);
- frame_scope.GenerateLeaveFrame();
- __ Ret();
-
- __ Bind(&interceptor_failed);
- if (must_preserve_receiver_reg) {
- __ Pop(this->name(), holder_reg, receiver());
- } else {
- __ Pop(this->name(), holder_reg);
- }
- // Leave the internal frame.
- }
-
- GenerateLoadPostInterceptor(it, holder_reg);
-}
-
-
-void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
- // Call the runtime system to load the interceptor.
- DCHECK(holder()->HasNamedInterceptor());
- DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined());
- PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(),
- holder());
-
- ExternalReference ref = ExternalReference(
- IC_Utility(IC::kLoadPropertyWithInterceptor), isolate());
- __ TailCallExternalReference(
- ref, NamedLoadHandlerCompiler::kInterceptorArgsLength, 1);
-}
-
-
-Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
- Handle<JSObject> object, Handle<Name> name,
- Handle<ExecutableAccessorInfo> callback) {
- ASM_LOCATION("NamedStoreHandlerCompiler::CompileStoreCallback");
- Register holder_reg = Frontend(receiver(), name);
-
- // Stub never generated for non-global objects that require access checks.
- DCHECK(holder()->IsJSGlobalProxy() || !holder()->IsAccessCheckNeeded());
-
- // receiver() and holder_reg can alias.
- DCHECK(!AreAliased(receiver(), scratch1(), scratch2(), value()));
- DCHECK(!AreAliased(holder_reg, scratch1(), scratch2(), value()));
- __ Mov(scratch1(), Operand(callback));
- __ Mov(scratch2(), Operand(name));
- __ Push(receiver(), holder_reg, scratch1(), scratch2(), value());
-
- // Do tail-call to the runtime system.
- ExternalReference store_callback_property =
- ExternalReference(IC_Utility(IC::kStoreCallbackProperty), isolate());
- __ TailCallExternalReference(store_callback_property, 5, 1);
-
- // Return the generated code.
- return GetCode(kind(), Code::FAST, name);
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm)
-
-
-void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
- MacroAssembler* masm, Handle<HeapType> type, Register receiver,
- Handle<JSFunction> setter) {
- // ----------- S t a t e -------------
- // -- lr : return address
- // -----------------------------------
- Label miss;
-
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Save value register, so we can restore it later.
- __ Push(value());
-
- if (!setter.is_null()) {
- // Call the JavaScript setter with receiver and value on the stack.
- if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
- // Swap in the global receiver.
- __ Ldr(receiver,
- FieldMemOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
- }
- __ Push(receiver, value());
- ParameterCount actual(1);
- ParameterCount expected(setter);
- __ InvokeFunction(setter, expected, actual,
- CALL_FUNCTION, NullCallWrapper());
- } else {
- // If we generate a global code snippet for deoptimization only, remember
- // the place to continue after deoptimization.
- masm->isolate()->heap()->SetSetterStubDeoptPCOffset(masm->pc_offset());
- }
-
- // We have to return the passed value, not the return value of the setter.
- __ Pop(x0);
-
- // Restore context register.
- __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- }
- __ Ret();
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm())
-
-
-Handle<Code> NamedStoreHandlerCompiler::CompileStoreInterceptor(
- Handle<Name> name) {
- Label miss;
-
- ASM_LOCATION("NamedStoreHandlerCompiler::CompileStoreInterceptor");
-
- __ Push(receiver(), this->name(), value());
-
- // Do tail-call to the runtime system.
- ExternalReference store_ic_property = ExternalReference(
- IC_Utility(IC::kStorePropertyWithInterceptor), isolate());
- __ TailCallExternalReference(store_ic_property, 3, 1);
-
- // Return the generated code.
- return GetCode(kind(), Code::FAST, name);
-}
-
-
-// TODO(all): The so-called scratch registers are significant in some cases.
-// For example, PropertyAccessCompiler::keyed_store_calling_convention()[3]
-// (x3) is actually used for KeyedStoreCompiler::transition_map(). We should
-// verify which registers are actually scratch registers, and which are
-// important. For now, we use the same assignments as ARM to remain on the
-// safe side.
-
-Register* PropertyAccessCompiler::load_calling_convention() {
- // receiver, name, scratch1, scratch2, scratch3, scratch4.
- Register receiver = LoadIC::ReceiverRegister();
- Register name = LoadIC::NameRegister();
- static Register registers[] = { receiver, name, x3, x0, x4, x5 };
- return registers;
-}
-
-
-Register* PropertyAccessCompiler::store_calling_convention() {
- // receiver, value, scratch1, scratch2, scratch3.
- Register receiver = StoreIC::ReceiverRegister();
- Register name = StoreIC::NameRegister();
- DCHECK(x3.is(KeyedStoreIC::MapRegister()));
- static Register registers[] = { receiver, name, x3, x4, x5 };
- return registers;
-}
-
-
-Register NamedStoreHandlerCompiler::value() { return StoreIC::ValueRegister(); }
-
-
-#undef __
-#define __ ACCESS_MASM(masm)
-
-void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
- MacroAssembler* masm, Handle<HeapType> type, Register receiver,
- Handle<JSFunction> getter) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- if (!getter.is_null()) {
- // Call the JavaScript getter with the receiver on the stack.
- if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
- // Swap in the global receiver.
- __ Ldr(receiver,
- FieldMemOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
- }
- __ Push(receiver);
- ParameterCount actual(0);
- ParameterCount expected(getter);
- __ InvokeFunction(getter, expected, actual,
- CALL_FUNCTION, NullCallWrapper());
- } else {
- // If we generate a global code snippet for deoptimization only, remember
- // the place to continue after deoptimization.
- masm->isolate()->heap()->SetGetterStubDeoptPCOffset(masm->pc_offset());
- }
-
- // Restore context register.
- __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- }
- __ Ret();
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm())
-
-
-Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
- Handle<PropertyCell> cell, Handle<Name> name, bool is_configurable) {
- Label miss;
- FrontendHeader(receiver(), name, &miss);
-
- // Get the value from the cell.
- Register result = StoreIC::ValueRegister();
- __ Mov(result, Operand(cell));
- __ Ldr(result, FieldMemOperand(result, Cell::kValueOffset));
-
- // Check for deleted property if property can actually be deleted.
- if (is_configurable) {
- __ JumpIfRoot(result, Heap::kTheHoleValueRootIndex, &miss);
- }
-
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->named_load_global_stub(), 1, x1, x3);
- __ Ret();
-
- FrontendFooter(name, &miss);
-
- // Return the generated code.
- return GetCode(kind(), Code::NORMAL, name);
-}
-
-
-Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types,
- CodeHandleList* handlers,
- Handle<Name> name,
- Code::StubType type,
- IcCheckType check) {
- Label miss;
-
- if (check == PROPERTY &&
- (kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) {
- // In case we are compiling an IC for dictionary loads and stores, just
- // check whether the name is unique.
- if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) {
- __ JumpIfNotUniqueName(this->name(), &miss);
- } else {
- __ CompareAndBranch(this->name(), Operand(name), ne, &miss);
- }
- }
-
- Label number_case;
- Label* smi_target = IncludesNumberType(types) ? &number_case : &miss;
- __ JumpIfSmi(receiver(), smi_target);
-
- // Polymorphic keyed stores may use the map register.
- Register map_reg = scratch1();
- DCHECK(kind() != Code::KEYED_STORE_IC ||
- map_reg.is(KeyedStoreIC::MapRegister()));
- __ Ldr(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset));
- int receiver_count = types->length();
- int number_of_handled_maps = 0;
- for (int current = 0; current < receiver_count; ++current) {
- Handle<HeapType> type = types->at(current);
- Handle<Map> map = IC::TypeToMap(*type, isolate());
- if (!map->is_deprecated()) {
- number_of_handled_maps++;
- Label try_next;
- __ Cmp(map_reg, Operand(map));
- __ B(ne, &try_next);
- if (type->Is(HeapType::Number())) {
- DCHECK(!number_case.is_unused());
- __ Bind(&number_case);
- }
- __ Jump(handlers->at(current), RelocInfo::CODE_TARGET);
- __ Bind(&try_next);
- }
- }
- DCHECK(number_of_handled_maps != 0);
-
- __ Bind(&miss);
- TailCallBuiltin(masm(), MissBuiltin(kind()));
-
- // Return the generated code.
- InlineCacheState state =
- (number_of_handled_maps > 1) ? POLYMORPHIC : MONOMORPHIC;
- return GetCode(kind(), type, name, state);
-}
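The code this compiles has a simple shape; for non-deprecated maps M1..Mn with handlers H1..Hn (pseudocode sketch):

//   if (receiver.IsSmi()) goto number_case;  // only when some type is
//                                            // HeapType::Number, else miss
//   map = receiver->map;
//   if (map == M1) jump H1;
//   ...
//   if (map == Mn) jump Hn;
//   miss: TailCallBuiltin(MissBuiltin(kind()));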
-
-
-Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
- MapHandleList* receiver_maps, CodeHandleList* handler_stubs,
- MapHandleList* transitioned_maps) {
- Label miss;
-
- ASM_LOCATION("PropertyICCompiler::CompileStorePolymorphic");
-
- __ JumpIfSmi(receiver(), &miss);
-
- int receiver_count = receiver_maps->length();
- __ Ldr(scratch1(), FieldMemOperand(receiver(), HeapObject::kMapOffset));
- for (int i = 0; i < receiver_count; i++) {
- __ Cmp(scratch1(), Operand(receiver_maps->at(i)));
-
- Label skip;
- __ B(&skip, ne);
- if (!transitioned_maps->at(i).is_null()) {
- // This argument is used by the handler stub. For example, see
- // ElementsTransitionGenerator::GenerateMapChangeElementsTransition.
- __ Mov(transition_map(), Operand(transitioned_maps->at(i)));
- }
- __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET);
- __ Bind(&skip);
- }
-
- __ Bind(&miss);
- TailCallBuiltin(masm(), MissBuiltin(kind()));
-
- return GetCode(kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC);
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm)
-
-void ElementHandlerCompiler::GenerateLoadDictionaryElement(
- MacroAssembler* masm) {
- // The return address is in lr.
- Label slow, miss;
-
- Register result = x0;
- Register key = LoadIC::NameRegister();
- Register receiver = LoadIC::ReceiverRegister();
- DCHECK(receiver.is(x1));
- DCHECK(key.is(x2));
-
- __ JumpIfNotSmi(key, &miss);
- __ Ldr(x4, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ LoadFromNumberDictionary(&slow, x4, key, result, x7, x3, x5, x6);
- __ Ret();
-
- __ Bind(&slow);
- __ IncrementCounter(
- masm->isolate()->counters()->keyed_load_external_array_slow(), 1, x4, x3);
- TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Slow);
-
- // Miss case, call the runtime.
- __ Bind(&miss);
- TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Miss);
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_ARM64
#include "src/base/lazy-instance.h"
#include "src/base/platform/platform.h"
#include "src/builtins.h"
+#include "src/codegen.h"
#include "src/counters.h"
#include "src/cpu-profiler.h"
#include "src/debug.h"
#include "src/deoptimizer.h"
#include "src/execution.h"
-#include "src/ic.h"
+#include "src/ic/ic.h"
+#include "src/ic/stub-cache.h"
#include "src/isolate-inl.h"
#include "src/jsregexp.h"
#include "src/regexp-macro-assembler.h"
#include "src/regexp-stack.h"
#include "src/runtime.h"
#include "src/serialize.h"
-#include "src/stub-cache.h"
#include "src/token.h"
#if V8_TARGET_ARCH_IA32
#include "src/gdb-jit.h"
#include "src/heap/mark-compact.h"
#include "src/heap-profiler.h"
-#include "src/ic-inl.h"
+#include "src/ic/ic.h"
+#include "src/ic/ic-compiler.h"
#include "src/prototype.h"
-#include "src/stub-cache.h"
#include "src/vm-state-inl.h"
namespace v8 {
#include "src/cpu-profiler.h"
#include "src/factory.h"
#include "src/gdb-jit.h"
+#include "src/ic/ic-compiler.h"
#include "src/macro-assembler.h"
-#include "src/stub-cache.h"
namespace v8 {
namespace internal {
#include "src/assembler.h"
#include "src/codegen.h"
#include "src/globals.h"
+#include "src/ic/ic.h"
#include "src/macro-assembler.h"
#include "src/ostreams.h"
#include "src/prettyprinter.h"
#include "src/rewriter.h"
#include "src/runtime.h"
-#include "src/stub-cache.h"
namespace v8 {
namespace internal {
#include "src/execution.h"
#include "src/full-codegen.h"
#include "src/global-handles.h"
-#include "src/ic.h"
-#include "src/ic-inl.h"
#include "src/isolate-inl.h"
#include "src/list.h"
#include "src/log.h"
#include "src/messages.h"
#include "src/natives.h"
-#include "src/stub-cache.h"
#include "include/v8-debug.h"
#include "src/scopeinfo.h"
#include "src/scopes.h"
#include "src/snapshot.h"
-#include "src/stub-cache.h"
namespace v8 {
namespace internal {
#include "src/heap/spaces-inl.h"
#include "src/heap/sweeper-thread.h"
#include "src/heap-profiler.h"
-#include "src/ic-inl.h"
-#include "src/stub-cache.h"
+#include "src/ic/stub-cache.h"
namespace v8 {
namespace internal {
#include "src/v8.h"
#include "src/heap/objects-visiting.h"
-#include "src/ic-inl.h"
namespace v8 {
namespace internal {
#include "src/runtime.h"
#include "src/scopeinfo.h"
#include "src/scopes.h"
-#include "src/stub-cache.h"
#include "src/typing.h"
+// CallOptimization
+#include "src/ic/ic-compiler.h"
+// GetRootConstructor
+#include "src/ic/ic-inl.h"
#if V8_TARGET_ARCH_IA32
#include "src/ia32/lithium-codegen-ia32.h" // NOLINT
#include "src/codegen.h"
#include "src/deoptimizer.h"
#include "src/full-codegen.h"
-#include "src/stub-cache.h"
namespace v8 {
namespace internal {
#include "src/bootstrapper.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
+#include "src/ic/ic-compiler.h"
#include "src/isolate.h"
#include "src/jsregexp.h"
#include "src/regexp-macro-assembler.h"
#include "src/runtime.h"
-#include "src/stub-cache.h"
namespace v8 {
namespace internal {
#ifndef V8_IA32_CODE_STUBS_IA32_H_
#define V8_IA32_CODE_STUBS_IA32_H_
-#include "src/ic-inl.h"
-#include "src/macro-assembler.h"
+#include "src/code-stubs.h"
namespace v8 {
namespace internal {
#define V8_IA32_CODEGEN_IA32_H_
#include "src/ast.h"
-#include "src/ic-inl.h"
+#include "src/macro-assembler.h"
namespace v8 {
namespace internal {
#include "src/isolate-inl.h"
#include "src/parser.h"
#include "src/scopes.h"
-#include "src/stub-cache.h"
namespace v8 {
namespace internal {
+++ /dev/null
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/v8.h"
-
-#if V8_TARGET_ARCH_IA32
-
-#include "src/codegen.h"
-#include "src/ic-inl.h"
-#include "src/runtime.h"
-#include "src/stub-cache.h"
-
-namespace v8 {
-namespace internal {
-
-// ----------------------------------------------------------------------------
-// Static IC stub generators.
-//
-
-#define __ ACCESS_MASM(masm)
-
-
-static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm,
- Register type,
- Label* global_object) {
- // Register usage:
- // type: holds the receiver instance type on entry.
- __ cmp(type, JS_GLOBAL_OBJECT_TYPE);
- __ j(equal, global_object);
- __ cmp(type, JS_BUILTINS_OBJECT_TYPE);
- __ j(equal, global_object);
- __ cmp(type, JS_GLOBAL_PROXY_TYPE);
- __ j(equal, global_object);
-}
-
-
-// Helper function used to load a property from a dictionary backing
-// storage. This function may fail to load a property even though it is
-// in the dictionary, so code at miss_label must always call a backup
-// property load that is complete. This function is safe to call if
-// name is not internalized, and will jump to the miss_label in that
-// case. The generated code assumes that the receiver has slow
-// properties, is not a global object and does not have interceptors.
-static void GenerateDictionaryLoad(MacroAssembler* masm,
- Label* miss_label,
- Register elements,
- Register name,
- Register r0,
- Register r1,
- Register result) {
- // Register use:
- //
- // elements - holds the property dictionary on entry and is unchanged.
- //
- // name - holds the name of the property on entry and is unchanged.
- //
- // Scratch registers:
- //
- // r0 - used for the index into the property dictionary
- //
- // r1 - used to hold the capacity of the property dictionary.
- //
- // result - holds the result on exit.
-
- Label done;
-
- // Probe the dictionary.
- NameDictionaryLookupStub::GeneratePositiveLookup(masm,
- miss_label,
- &done,
- elements,
- name,
- r0,
- r1);
-
- // If probing finds an entry in the dictionary, r0 contains the
- // index into the dictionary. Check that the value is a normal
- // property.
- __ bind(&done);
- const int kElementsStartOffset =
- NameDictionary::kHeaderSize +
- NameDictionary::kElementsStartIndex * kPointerSize;
- const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
- __ test(Operand(elements, r0, times_4, kDetailsOffset - kHeapObjectTag),
- Immediate(PropertyDetails::TypeField::kMask << kSmiTagSize));
- __ j(not_zero, miss_label);
-
- // Get the value at the masked, scaled index.
- const int kValueOffset = kElementsStartOffset + kPointerSize;
- __ mov(result, Operand(elements, r0, times_4, kValueOffset - kHeapObjectTag));
-}
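
The offset arithmetic in GenerateDictionaryLoad (and GenerateDictionaryStore below) encodes the NameDictionary entry layout: after the header, each entry is a (key, value, details) triple, and the probe stub leaves r0 holding the entry index already multiplied by the entry size, so times_4 addressing turns it into a byte offset. A minimal C++ sketch of the same computation; the constants here are illustrative stand-ins for the real NameDictionary values:

// Sketch only; kDictHeaderSize and kElementsStartIndex are placeholders
// for the real NameDictionary constants.
const int kPointerSize = 4;  // ia32
const int kDictHeaderSize = 3 * kPointerSize;
const int kElementsStartIndex = 3;
const int kEntrySize = 3;  // each entry is (key, value, details)

int EntrySlotOffset(int entry, int slot) {  // slot: 0=key, 1=value, 2=details
  const int elements_start =
      kDictHeaderSize + kElementsStartIndex * kPointerSize;
  return elements_start + (entry * kEntrySize + slot) * kPointerSize;
}
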
-
-
-// Helper function used to store a property to a dictionary backing
-// storage. This function may fail to store a property even though it
-// is in the dictionary, so code at miss_label must always call a
-// backup property store that is complete. This function is safe to
-// call if name is not internalized, and will jump to the miss_label in
-// that case. The generated code assumes that the receiver has slow
-// properties, is not a global object and does not have interceptors.
-static void GenerateDictionaryStore(MacroAssembler* masm,
- Label* miss_label,
- Register elements,
- Register name,
- Register value,
- Register r0,
- Register r1) {
- // Register use:
- //
- // elements - holds the property dictionary on entry and is clobbered.
- //
- // name - holds the name of the property on entry and is unchanged.
- //
- // value - holds the value to store and is unchanged.
- //
- // r0 - used for index into the property dictionary and is clobbered.
- //
- // r1 - used to hold the capacity of the property dictionary and is clobbered.
- Label done;
-
-
- // Probe the dictionary.
- NameDictionaryLookupStub::GeneratePositiveLookup(masm,
- miss_label,
- &done,
- elements,
- name,
- r0,
- r1);
-
- // If probing finds an entry in the dictionary, r0 contains the
- // index into the dictionary. Check that the value is a normal
- // property that is not read only.
- __ bind(&done);
- const int kElementsStartOffset =
- NameDictionary::kHeaderSize +
- NameDictionary::kElementsStartIndex * kPointerSize;
- const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
- const int kTypeAndReadOnlyMask =
- (PropertyDetails::TypeField::kMask |
- PropertyDetails::AttributesField::encode(READ_ONLY)) << kSmiTagSize;
- __ test(Operand(elements, r0, times_4, kDetailsOffset - kHeapObjectTag),
- Immediate(kTypeAndReadOnlyMask));
- __ j(not_zero, miss_label);
-
- // Store the value at the masked, scaled index.
- const int kValueOffset = kElementsStartOffset + kPointerSize;
- __ lea(r0, Operand(elements, r0, times_4, kValueOffset - kHeapObjectTag));
- __ mov(Operand(r0, 0), value);
-
- // Update write barrier. Make sure not to clobber the value.
- __ mov(r1, value);
- __ RecordWrite(elements, r0, r1, kDontSaveFPRegs);
-}
-
-
-// Checks the receiver for special cases (value type, slow case bits).
-// Falls through for regular JS object.
-static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
- Register receiver,
- Register map,
- int interceptor_bit,
- Label* slow) {
- // Register use:
- // receiver - holds the receiver and is unchanged.
- // Scratch registers:
- // map - used to hold the map of the receiver.
-
- // Check that the object isn't a smi.
- __ JumpIfSmi(receiver, slow);
-
- // Get the map of the receiver.
- __ mov(map, FieldOperand(receiver, HeapObject::kMapOffset));
-
- // Check bit field.
- __ test_b(FieldOperand(map, Map::kBitFieldOffset),
- (1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit));
- __ j(not_zero, slow);
- // Check that the object is some kind of JS object EXCEPT JS Value type.
- // In the case that the object is a value-wrapper object,
- // we enter the runtime system to make sure that indexing
- // into string objects works as intended.
- DCHECK(JS_OBJECT_TYPE > JS_VALUE_TYPE);
-
- __ CmpInstanceType(map, JS_OBJECT_TYPE);
- __ j(below, slow);
-}
-
-
-// Loads an indexed element from a fast case array.
-// If not_fast_array is NULL, doesn't perform the elements map check.
-static void GenerateFastArrayLoad(MacroAssembler* masm,
- Register receiver,
- Register key,
- Register scratch,
- Register result,
- Label* not_fast_array,
- Label* out_of_range) {
- // Register use:
- // receiver - holds the receiver and is unchanged.
- // key - holds the key and is unchanged (must be a smi).
- // Scratch registers:
- // scratch - used to hold elements of the receiver and the loaded value.
- // result - holds the result on exit if the load succeeds and
- // we fall through.
-
- __ mov(scratch, FieldOperand(receiver, JSObject::kElementsOffset));
- if (not_fast_array != NULL) {
- // Check that the object is in fast mode and writable.
- __ CheckMap(scratch,
- masm->isolate()->factory()->fixed_array_map(),
- not_fast_array,
- DONT_DO_SMI_CHECK);
- } else {
- __ AssertFastElements(scratch);
- }
- // Check that the key (index) is within bounds.
- __ cmp(key, FieldOperand(scratch, FixedArray::kLengthOffset));
- __ j(above_equal, out_of_range);
- // Fast case: Do the load.
- STATIC_ASSERT((kPointerSize == 4) && (kSmiTagSize == 1) && (kSmiTag == 0));
- __ mov(scratch, FieldOperand(scratch, key, times_2, FixedArray::kHeaderSize));
- __ cmp(scratch, Immediate(masm->isolate()->factory()->the_hole_value()));
- // In case the loaded value is the_hole we have to consult GetProperty
- // to ensure the prototype chain is searched.
- __ j(equal, out_of_range);
- if (!result.is(scratch)) {
- __ mov(result, scratch);
- }
-}
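
The times_2 scaling in the load above is the standard ia32 smi trick: a smi stores its value as n << 1, so scaling the still-tagged key by two yields the byte offset n * kPointerSize without an explicit untag step. A sketch of that address computation, assuming 32-bit tagged smis and an illustrative header size:

// Sketch; kFixedArrayHeaderSize stands in for FixedArray::kHeaderSize.
const int kFixedArrayHeaderSize = 8;  // map + length on ia32

int FastElementByteOffset(int tagged_smi_key) {
  // tagged_smi_key == n << 1, so shifting once more gives n * 4.
  return kFixedArrayHeaderSize + (tagged_smi_key << 1);
}
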
-
-
-// Checks whether a key is an array index string or a unique name.
-// Falls through if the key is a unique name.
-static void GenerateKeyNameCheck(MacroAssembler* masm,
- Register key,
- Register map,
- Register hash,
- Label* index_string,
- Label* not_unique) {
- // Register use:
- // key - holds the key and is unchanged. Assumed to be non-smi.
- // Scratch registers:
- // map - used to hold the map of the key.
- // hash - used to hold the hash of the key.
- Label unique;
- __ CmpObjectType(key, LAST_UNIQUE_NAME_TYPE, map);
- __ j(above, not_unique);
- STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
- __ j(equal, &unique);
-
- // Is the string an array index, with cached numeric value?
- __ mov(hash, FieldOperand(key, Name::kHashFieldOffset));
- __ test(hash, Immediate(Name::kContainsCachedArrayIndexMask));
- __ j(zero, index_string);
-
- // Is the string internalized? We already know it's a string so a single
- // bit test is enough.
- STATIC_ASSERT(kNotInternalizedTag != 0);
- __ test_b(FieldOperand(map, Map::kInstanceTypeOffset),
- kIsNotInternalizedMask);
- __ j(not_zero, not_unique);
-
- __ bind(&unique);
-}
-
-
-static Operand GenerateMappedArgumentsLookup(MacroAssembler* masm,
- Register object,
- Register key,
- Register scratch1,
- Register scratch2,
- Label* unmapped_case,
- Label* slow_case) {
- Heap* heap = masm->isolate()->heap();
- Factory* factory = masm->isolate()->factory();
-
- // Check that the receiver is a JSObject. Because of the elements
- // map check later, we do not need to check for interceptors or
- // whether it requires access checks.
- __ JumpIfSmi(object, slow_case);
- // Check that the object is some kind of JSObject.
- __ CmpObjectType(object, FIRST_JS_RECEIVER_TYPE, scratch1);
- __ j(below, slow_case);
-
- // Check that the key is a positive smi.
- __ test(key, Immediate(0x80000001));
- __ j(not_zero, slow_case);
-
- // Load the elements into scratch1 and check its map.
- Handle<Map> arguments_map(heap->sloppy_arguments_elements_map());
- __ mov(scratch1, FieldOperand(object, JSObject::kElementsOffset));
- __ CheckMap(scratch1, arguments_map, slow_case, DONT_DO_SMI_CHECK);
-
- // Check if element is in the range of mapped arguments. If not, jump
- // to the unmapped lookup with the parameter map in scratch1.
- __ mov(scratch2, FieldOperand(scratch1, FixedArray::kLengthOffset));
- __ sub(scratch2, Immediate(Smi::FromInt(2)));
- __ cmp(key, scratch2);
- __ j(above_equal, unmapped_case);
-
- // Load element index and check whether it is the hole.
- const int kHeaderSize = FixedArray::kHeaderSize + 2 * kPointerSize;
- __ mov(scratch2, FieldOperand(scratch1,
- key,
- times_half_pointer_size,
- kHeaderSize));
- __ cmp(scratch2, factory->the_hole_value());
- __ j(equal, unmapped_case);
-
- // Load value from context and return it. We can reuse scratch1 because
- // we do not jump to the unmapped lookup (which requires the parameter
- // map in scratch1).
- const int kContextOffset = FixedArray::kHeaderSize;
- __ mov(scratch1, FieldOperand(scratch1, kContextOffset));
- return FieldOperand(scratch1,
- scratch2,
- times_half_pointer_size,
- Context::kHeaderSize);
-}
-
-
-static Operand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
- Register key,
- Register parameter_map,
- Register scratch,
- Label* slow_case) {
- // Element is in arguments backing store, which is referenced by the
- // second element of the parameter_map.
- const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
- Register backing_store = parameter_map;
- __ mov(backing_store, FieldOperand(parameter_map, kBackingStoreOffset));
- Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
- __ CheckMap(backing_store, fixed_array_map, slow_case, DONT_DO_SMI_CHECK);
- __ mov(scratch, FieldOperand(backing_store, FixedArray::kLengthOffset));
- __ cmp(key, scratch);
- __ j(greater_equal, slow_case);
- return FieldOperand(backing_store,
- key,
- times_half_pointer_size,
- FixedArray::kHeaderSize);
-}
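
Taken together, the mapped and unmapped lookups implement sloppy-arguments aliasing: the parameter map is a FixedArray shaped [context, backing store, slot for arg 0, slot for arg 1, ...], where each slot holds either a context index (the argument aliases a local variable) or the hole (fall back to the backing store). A simplified C++ sketch of the combined lookup; the types here are illustrative, not the real V8 classes:

#include <cstddef>
#include <vector>

struct SloppyArgumentsSketch {
  std::vector<int> context;        // stands in for the Context object
  std::vector<int> backing_store;  // the arguments backing store
  std::vector<int> slots;          // per-argument entries; -1 plays the hole
};

int ArgumentsLookup(const SloppyArgumentsSketch& a, size_t key) {
  if (key < a.slots.size() && a.slots[key] != -1) {
    return a.context[a.slots[key]];  // mapped: read through the context slot
  }
  return a.backing_store[key];  // unmapped: ordinary elements access
}
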
-
-
-void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
- // The return address is on the stack.
- Label slow, check_name, index_smi, index_name, property_array_property;
- Label probe_dictionary, check_number_dictionary;
-
- Register receiver = ReceiverRegister();
- Register key = NameRegister();
- DCHECK(receiver.is(edx));
- DCHECK(key.is(ecx));
-
- // Check that the key is a smi.
- __ JumpIfNotSmi(key, &check_name);
- __ bind(&index_smi);
-  // Now the key is known to be a smi. This place is also jumped to from
-  // below, where a numeric string key is converted to a smi.
-
- GenerateKeyedLoadReceiverCheck(
- masm, receiver, eax, Map::kHasIndexedInterceptor, &slow);
-
- // Check the receiver's map to see if it has fast elements.
- __ CheckFastElements(eax, &check_number_dictionary);
-
- GenerateFastArrayLoad(masm, receiver, key, eax, eax, NULL, &slow);
- Isolate* isolate = masm->isolate();
- Counters* counters = isolate->counters();
- __ IncrementCounter(counters->keyed_load_generic_smi(), 1);
- __ ret(0);
-
- __ bind(&check_number_dictionary);
- __ mov(ebx, key);
- __ SmiUntag(ebx);
- __ mov(eax, FieldOperand(receiver, JSObject::kElementsOffset));
-
- // Check whether the elements is a number dictionary.
- // ebx: untagged index
- // eax: elements
- __ CheckMap(eax,
- isolate->factory()->hash_table_map(),
- &slow,
- DONT_DO_SMI_CHECK);
- Label slow_pop_receiver;
- // Push receiver on the stack to free up a register for the dictionary
- // probing.
- __ push(receiver);
- __ LoadFromNumberDictionary(&slow_pop_receiver, eax, key, ebx, edx, edi, eax);
- // Pop receiver before returning.
- __ pop(receiver);
- __ ret(0);
-
- __ bind(&slow_pop_receiver);
- // Pop the receiver from the stack and jump to runtime.
- __ pop(receiver);
-
- __ bind(&slow);
- // Slow case: jump to runtime.
- __ IncrementCounter(counters->keyed_load_generic_slow(), 1);
- GenerateRuntimeGetProperty(masm);
-
- __ bind(&check_name);
- GenerateKeyNameCheck(masm, key, eax, ebx, &index_name, &slow);
-
- GenerateKeyedLoadReceiverCheck(
- masm, receiver, eax, Map::kHasNamedInterceptor, &slow);
-
- // If the receiver is a fast-case object, check the keyed lookup
- // cache. Otherwise probe the dictionary.
- __ mov(ebx, FieldOperand(receiver, JSObject::kPropertiesOffset));
- __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
- Immediate(isolate->factory()->hash_table_map()));
- __ j(equal, &probe_dictionary);
-
- // The receiver's map is still in eax, compute the keyed lookup cache hash
- // based on 32 bits of the map pointer and the string hash.
- if (FLAG_debug_code) {
- __ cmp(eax, FieldOperand(receiver, HeapObject::kMapOffset));
- __ Check(equal, kMapIsNoLongerInEax);
- }
- __ mov(ebx, eax); // Keep the map around for later.
- __ shr(eax, KeyedLookupCache::kMapHashShift);
- __ mov(edi, FieldOperand(key, String::kHashFieldOffset));
- __ shr(edi, String::kHashShift);
- __ xor_(eax, edi);
- __ and_(eax, KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask);
-
- // Load the key (consisting of map and internalized string) from the cache and
-  // check for a match.
- Label load_in_object_property;
- static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
- Label hit_on_nth_entry[kEntriesPerBucket];
- ExternalReference cache_keys =
- ExternalReference::keyed_lookup_cache_keys(masm->isolate());
-
- for (int i = 0; i < kEntriesPerBucket - 1; i++) {
- Label try_next_entry;
- __ mov(edi, eax);
- __ shl(edi, kPointerSizeLog2 + 1);
- if (i != 0) {
- __ add(edi, Immediate(kPointerSize * i * 2));
- }
- __ cmp(ebx, Operand::StaticArray(edi, times_1, cache_keys));
- __ j(not_equal, &try_next_entry);
- __ add(edi, Immediate(kPointerSize));
- __ cmp(key, Operand::StaticArray(edi, times_1, cache_keys));
- __ j(equal, &hit_on_nth_entry[i]);
- __ bind(&try_next_entry);
- }
-
- __ lea(edi, Operand(eax, 1));
- __ shl(edi, kPointerSizeLog2 + 1);
- __ add(edi, Immediate(kPointerSize * (kEntriesPerBucket - 1) * 2));
- __ cmp(ebx, Operand::StaticArray(edi, times_1, cache_keys));
- __ j(not_equal, &slow);
- __ add(edi, Immediate(kPointerSize));
- __ cmp(key, Operand::StaticArray(edi, times_1, cache_keys));
- __ j(not_equal, &slow);
-
- // Get field offset.
- // ebx : receiver's map
- // eax : lookup cache index
- ExternalReference cache_field_offsets =
- ExternalReference::keyed_lookup_cache_field_offsets(masm->isolate());
-
- // Hit on nth entry.
- for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
- __ bind(&hit_on_nth_entry[i]);
- if (i != 0) {
- __ add(eax, Immediate(i));
- }
- __ mov(edi,
- Operand::StaticArray(eax, times_pointer_size, cache_field_offsets));
- __ movzx_b(eax, FieldOperand(ebx, Map::kInObjectPropertiesOffset));
- __ sub(edi, eax);
- __ j(above_equal, &property_array_property);
- if (i != 0) {
- __ jmp(&load_in_object_property);
- }
- }
-
- // Load in-object property.
- __ bind(&load_in_object_property);
- __ movzx_b(eax, FieldOperand(ebx, Map::kInstanceSizeOffset));
- __ add(eax, edi);
- __ mov(eax, FieldOperand(receiver, eax, times_pointer_size, 0));
- __ IncrementCounter(counters->keyed_load_generic_lookup_cache(), 1);
- __ ret(0);
-
- // Load property array property.
- __ bind(&property_array_property);
- __ mov(eax, FieldOperand(receiver, JSObject::kPropertiesOffset));
- __ mov(eax, FieldOperand(eax, edi, times_pointer_size,
- FixedArray::kHeaderSize));
- __ IncrementCounter(counters->keyed_load_generic_lookup_cache(), 1);
- __ ret(0);
-
- // Do a quick inline probe of the receiver's dictionary, if it
- // exists.
- __ bind(&probe_dictionary);
-
- __ mov(eax, FieldOperand(receiver, JSObject::kMapOffset));
- __ movzx_b(eax, FieldOperand(eax, Map::kInstanceTypeOffset));
- GenerateGlobalInstanceTypeCheck(masm, eax, &slow);
-
- GenerateDictionaryLoad(masm, &slow, ebx, key, eax, edi, eax);
- __ IncrementCounter(counters->keyed_load_generic_symbol(), 1);
- __ ret(0);
-
- __ bind(&index_name);
- __ IndexFromHash(ebx, key);
- // Now jump to the place where smi keys are handled.
- __ jmp(&index_smi);
-}
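
The probe loop above indexes the keyed lookup cache by hashing the receiver's map word with the name's hash field; each bucket holds kEntriesPerBucket (map, name) pairs, with the matching field offsets kept in a parallel table. A sketch of the bucket index computation, with the shift and mask constants as stand-ins for the real KeyedLookupCache values:

#include <stdint.h>

// Illustrative stand-ins for KeyedLookupCache::kMapHashShift,
// String::kHashShift and the combined capacity/hash mask.
const uint32_t kMapHashShift = 5;
const uint32_t kStringHashShift = 2;
const uint32_t kBucketMask = 0xff;

uint32_t LookupCacheBucket(uint32_t map_word, uint32_t name_hash_field) {
  return ((map_word >> kMapHashShift) ^
          (name_hash_field >> kStringHashShift)) & kBucketMask;
}
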
-
-
-void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
- // Return address is on the stack.
- Label miss;
-
- Register receiver = ReceiverRegister();
- Register index = NameRegister();
- Register scratch = ebx;
- DCHECK(!scratch.is(receiver) && !scratch.is(index));
- Register result = eax;
- DCHECK(!result.is(scratch));
-
- StringCharAtGenerator char_at_generator(receiver,
- index,
- scratch,
- result,
- &miss, // When not a string.
- &miss, // When not a number.
- &miss, // When index out of range.
- STRING_INDEX_IS_ARRAY_INDEX);
- char_at_generator.GenerateFast(masm);
- __ ret(0);
-
- StubRuntimeCallHelper call_helper;
- char_at_generator.GenerateSlow(masm, call_helper);
-
- __ bind(&miss);
- GenerateMiss(masm);
-}
-
-
-void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
- // Return address is on the stack.
- Label slow;
-
- Register receiver = ReceiverRegister();
- Register key = NameRegister();
- Register scratch = eax;
- DCHECK(!scratch.is(receiver) && !scratch.is(key));
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, &slow);
-
- // Check that the key is an array index, that is Uint32.
- __ test(key, Immediate(kSmiTagMask | kSmiSignMask));
- __ j(not_zero, &slow);
-
- // Get the map of the receiver.
- __ mov(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
-
- // Check that it has indexed interceptor and access checks
- // are not enabled for this object.
- __ movzx_b(scratch, FieldOperand(scratch, Map::kBitFieldOffset));
- __ and_(scratch, Immediate(kSlowCaseBitFieldMask));
- __ cmp(scratch, Immediate(1 << Map::kHasIndexedInterceptor));
- __ j(not_zero, &slow);
-
- // Everything is fine, call runtime.
- __ pop(scratch);
- __ push(receiver); // receiver
- __ push(key); // key
- __ push(scratch); // return address
-
- // Perform tail call to the entry.
- ExternalReference ref = ExternalReference(
- IC_Utility(kLoadElementWithInterceptor), masm->isolate());
- __ TailCallExternalReference(ref, 2, 1);
-
- __ bind(&slow);
- GenerateMiss(masm);
-}
-
-
-void KeyedLoadIC::GenerateSloppyArguments(MacroAssembler* masm) {
- // The return address is on the stack.
- Register receiver = ReceiverRegister();
- Register key = NameRegister();
- DCHECK(receiver.is(edx));
- DCHECK(key.is(ecx));
-
- Label slow, notin;
- Factory* factory = masm->isolate()->factory();
- Operand mapped_location =
- GenerateMappedArgumentsLookup(
- masm, receiver, key, ebx, eax, ¬in, &slow);
- __ mov(eax, mapped_location);
- __ Ret();
- __ bind(¬in);
- // The unmapped lookup expects that the parameter map is in ebx.
- Operand unmapped_location =
- GenerateUnmappedArgumentsLookup(masm, key, ebx, eax, &slow);
- __ cmp(unmapped_location, factory->the_hole_value());
- __ j(equal, &slow);
- __ mov(eax, unmapped_location);
- __ Ret();
- __ bind(&slow);
- GenerateMiss(masm);
-}
-
-
-void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
- // Return address is on the stack.
- Label slow, notin;
- Register receiver = ReceiverRegister();
- Register name = NameRegister();
- Register value = ValueRegister();
- DCHECK(receiver.is(edx));
- DCHECK(name.is(ecx));
- DCHECK(value.is(eax));
-
- Operand mapped_location =
- GenerateMappedArgumentsLookup(masm, receiver, name, ebx, edi, ¬in,
- &slow);
- __ mov(mapped_location, value);
- __ lea(ecx, mapped_location);
- __ mov(edx, value);
- __ RecordWrite(ebx, ecx, edx, kDontSaveFPRegs);
- __ Ret();
- __ bind(¬in);
- // The unmapped lookup expects that the parameter map is in ebx.
- Operand unmapped_location =
- GenerateUnmappedArgumentsLookup(masm, name, ebx, edi, &slow);
- __ mov(unmapped_location, value);
- __ lea(edi, unmapped_location);
- __ mov(edx, value);
- __ RecordWrite(ebx, edi, edx, kDontSaveFPRegs);
- __ Ret();
- __ bind(&slow);
- GenerateMiss(masm);
-}
-
-
-static void KeyedStoreGenerateGenericHelper(
- MacroAssembler* masm,
- Label* fast_object,
- Label* fast_double,
- Label* slow,
- KeyedStoreCheckMap check_map,
- KeyedStoreIncrementLength increment_length) {
- Label transition_smi_elements;
- Label finish_object_store, non_double_value, transition_double_elements;
- Label fast_double_without_map_check;
- Register receiver = KeyedStoreIC::ReceiverRegister();
- Register key = KeyedStoreIC::NameRegister();
- Register value = KeyedStoreIC::ValueRegister();
- DCHECK(receiver.is(edx));
- DCHECK(key.is(ecx));
- DCHECK(value.is(eax));
- // key is a smi.
- // ebx: FixedArray receiver->elements
- // edi: receiver map
-  // Fast case: Do the store, which could be either Object or double.
- __ bind(fast_object);
- if (check_map == kCheckMap) {
- __ mov(edi, FieldOperand(ebx, HeapObject::kMapOffset));
- __ cmp(edi, masm->isolate()->factory()->fixed_array_map());
- __ j(not_equal, fast_double);
- }
-
- // HOLECHECK: guards "A[i] = V"
- // We have to go to the runtime if the current value is the hole because
- // there may be a callback on the element
- Label holecheck_passed1;
- __ cmp(FixedArrayElementOperand(ebx, key),
- masm->isolate()->factory()->the_hole_value());
- __ j(not_equal, &holecheck_passed1);
- __ JumpIfDictionaryInPrototypeChain(receiver, ebx, edi, slow);
- __ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset));
-
- __ bind(&holecheck_passed1);
-
- // Smi stores don't require further checks.
- Label non_smi_value;
- __ JumpIfNotSmi(value, &non_smi_value);
- if (increment_length == kIncrementLength) {
- // Add 1 to receiver->length.
- __ add(FieldOperand(receiver, JSArray::kLengthOffset),
- Immediate(Smi::FromInt(1)));
- }
- // It's irrelevant whether array is smi-only or not when writing a smi.
- __ mov(FixedArrayElementOperand(ebx, key), value);
- __ ret(0);
-
- __ bind(&non_smi_value);
- // Escape to elements kind transition case.
- __ mov(edi, FieldOperand(receiver, HeapObject::kMapOffset));
- __ CheckFastObjectElements(edi, &transition_smi_elements);
-
- // Fast elements array, store the value to the elements backing store.
- __ bind(&finish_object_store);
- if (increment_length == kIncrementLength) {
- // Add 1 to receiver->length.
- __ add(FieldOperand(receiver, JSArray::kLengthOffset),
- Immediate(Smi::FromInt(1)));
- }
- __ mov(FixedArrayElementOperand(ebx, key), value);
- // Update write barrier for the elements array address.
- __ mov(edx, value); // Preserve the value which is returned.
- __ RecordWriteArray(
- ebx, edx, key, kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
- __ ret(0);
-
- __ bind(fast_double);
- if (check_map == kCheckMap) {
- // Check for fast double array case. If this fails, call through to the
- // runtime.
- __ cmp(edi, masm->isolate()->factory()->fixed_double_array_map());
- __ j(not_equal, slow);
- // If the value is a number, store it as a double in the FastDoubleElements
- // array.
- }
-
- // HOLECHECK: guards "A[i] double hole?"
- // We have to see if the double version of the hole is present. If so
- // go to the runtime.
- uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
- __ cmp(FieldOperand(ebx, key, times_4, offset), Immediate(kHoleNanUpper32));
- __ j(not_equal, &fast_double_without_map_check);
- __ JumpIfDictionaryInPrototypeChain(receiver, ebx, edi, slow);
- __ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset));
-
- __ bind(&fast_double_without_map_check);
- __ StoreNumberToDoubleElements(value, ebx, key, edi, xmm0,
- &transition_double_elements);
- if (increment_length == kIncrementLength) {
- // Add 1 to receiver->length.
- __ add(FieldOperand(receiver, JSArray::kLengthOffset),
- Immediate(Smi::FromInt(1)));
- }
- __ ret(0);
-
- __ bind(&transition_smi_elements);
- __ mov(ebx, FieldOperand(receiver, HeapObject::kMapOffset));
-
- // Transition the array appropriately depending on the value type.
- __ CheckMap(value,
- masm->isolate()->factory()->heap_number_map(),
- &non_double_value,
- DONT_DO_SMI_CHECK);
-
- // Value is a double. Transition FAST_SMI_ELEMENTS -> FAST_DOUBLE_ELEMENTS
- // and complete the store.
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- FAST_DOUBLE_ELEMENTS,
- ebx,
- edi,
- slow);
- AllocationSiteMode mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS,
- FAST_DOUBLE_ELEMENTS);
- ElementsTransitionGenerator::GenerateSmiToDouble(
- masm, receiver, key, value, ebx, mode, slow);
- __ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset));
- __ jmp(&fast_double_without_map_check);
-
- __ bind(&non_double_value);
- // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- FAST_ELEMENTS,
- ebx,
- edi,
- slow);
- mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
- ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
- masm, receiver, key, value, ebx, mode, slow);
- __ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset));
- __ jmp(&finish_object_store);
-
- __ bind(&transition_double_elements);
- // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
-  // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS and
-  // transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
- __ mov(ebx, FieldOperand(receiver, HeapObject::kMapOffset));
- __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS,
- FAST_ELEMENTS,
- ebx,
- edi,
- slow);
- mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
- ElementsTransitionGenerator::GenerateDoubleToObject(
- masm, receiver, key, value, ebx, mode, slow);
- __ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset));
- __ jmp(&finish_object_store);
-}
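
The three transition blocks walk the elements-kind lattice in one direction: smi-only arrays widen to double arrays when a heap number is stored and to generic object arrays for any other heap object, and double arrays widen to object arrays when a non-number arrives. A sketch of that decision with a simplified kind enum:

enum ElementsKindSketch {
  FAST_SMI_ELEMENTS,
  FAST_DOUBLE_ELEMENTS,
  FAST_ELEMENTS
};

// Kind the backing store must transition to before storing a value.
ElementsKindSketch TransitionedKind(ElementsKindSketch kind, bool is_smi,
                                    bool is_heap_number) {
  if (is_smi) return kind;  // a smi fits in every fast kind
  if (kind == FAST_SMI_ELEMENTS) {
    return is_heap_number ? FAST_DOUBLE_ELEMENTS : FAST_ELEMENTS;
  }
  if (kind == FAST_DOUBLE_ELEMENTS && !is_heap_number) return FAST_ELEMENTS;
  return kind;
}
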
-
-
-void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
- StrictMode strict_mode) {
- // Return address is on the stack.
- Label slow, fast_object, fast_object_grow;
- Label fast_double, fast_double_grow;
- Label array, extra, check_if_double_array;
- Register receiver = ReceiverRegister();
- Register key = NameRegister();
- DCHECK(receiver.is(edx));
- DCHECK(key.is(ecx));
-
- // Check that the object isn't a smi.
- __ JumpIfSmi(receiver, &slow);
- // Get the map from the receiver.
- __ mov(edi, FieldOperand(receiver, HeapObject::kMapOffset));
- // Check that the receiver does not require access checks and is not observed.
- // The generic stub does not perform map checks or handle observed objects.
- __ test_b(FieldOperand(edi, Map::kBitFieldOffset),
- 1 << Map::kIsAccessCheckNeeded | 1 << Map::kIsObserved);
- __ j(not_zero, &slow);
- // Check that the key is a smi.
- __ JumpIfNotSmi(key, &slow);
- __ CmpInstanceType(edi, JS_ARRAY_TYPE);
- __ j(equal, &array);
- // Check that the object is some kind of JSObject.
- __ CmpInstanceType(edi, FIRST_JS_OBJECT_TYPE);
- __ j(below, &slow);
-
- // Object case: Check key against length in the elements array.
- // Key is a smi.
- // edi: receiver map
- __ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset));
- // Check array bounds. Both the key and the length of FixedArray are smis.
- __ cmp(key, FieldOperand(ebx, FixedArray::kLengthOffset));
- __ j(below, &fast_object);
-
- // Slow case: call runtime.
- __ bind(&slow);
- GenerateRuntimeSetProperty(masm, strict_mode);
-
- // Extra capacity case: Check if there is extra capacity to
- // perform the store and update the length. Used for adding one
- // element to the array by writing to array[array.length].
- __ bind(&extra);
- // receiver is a JSArray.
- // key is a smi.
- // ebx: receiver->elements, a FixedArray
- // edi: receiver map
- // flags: compare (key, receiver.length())
- // do not leave holes in the array:
- __ j(not_equal, &slow);
- __ cmp(key, FieldOperand(ebx, FixedArray::kLengthOffset));
- __ j(above_equal, &slow);
- __ mov(edi, FieldOperand(ebx, HeapObject::kMapOffset));
- __ cmp(edi, masm->isolate()->factory()->fixed_array_map());
- __ j(not_equal, &check_if_double_array);
- __ jmp(&fast_object_grow);
-
- __ bind(&check_if_double_array);
- __ cmp(edi, masm->isolate()->factory()->fixed_double_array_map());
- __ j(not_equal, &slow);
- __ jmp(&fast_double_grow);
-
- // Array case: Get the length and the elements array from the JS
- // array. Check that the array is in fast mode (and writable); if it
- // is the length is always a smi.
- __ bind(&array);
- // receiver is a JSArray.
- // key is a smi.
- // edi: receiver map
- __ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset));
-
- // Check the key against the length in the array and fall through to the
- // common store code.
- __ cmp(key, FieldOperand(receiver, JSArray::kLengthOffset)); // Compare smis.
- __ j(above_equal, &extra);
-
- KeyedStoreGenerateGenericHelper(masm, &fast_object, &fast_double,
- &slow, kCheckMap, kDontIncrementLength);
- KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow,
- &slow, kDontCheckMap, kIncrementLength);
-}
-
-
-void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
- // The return address is on the stack.
- Register receiver = ReceiverRegister();
- Register name = NameRegister();
- DCHECK(receiver.is(edx));
- DCHECK(name.is(ecx));
-
- // Probe the stub cache.
- Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
- Code::ComputeHandlerFlags(Code::LOAD_IC));
- masm->isolate()->stub_cache()->GenerateProbe(
- masm, flags, receiver, name, ebx, eax);
-
- // Cache miss: Jump to runtime.
- GenerateMiss(masm);
-}
-
-
-void LoadIC::GenerateNormal(MacroAssembler* masm) {
- Register dictionary = eax;
- DCHECK(!dictionary.is(ReceiverRegister()));
- DCHECK(!dictionary.is(NameRegister()));
-
- Label slow;
-
- __ mov(dictionary,
- FieldOperand(ReceiverRegister(), JSObject::kPropertiesOffset));
- GenerateDictionaryLoad(masm, &slow, dictionary, NameRegister(), edi, ebx,
- eax);
- __ ret(0);
-
- // Dictionary load failed, go slow (but don't miss).
- __ bind(&slow);
- GenerateRuntimeGetProperty(masm);
-}
-
-
-static void LoadIC_PushArgs(MacroAssembler* masm) {
- Register receiver = LoadIC::ReceiverRegister();
- Register name = LoadIC::NameRegister();
- DCHECK(!ebx.is(receiver) && !ebx.is(name));
-
- __ pop(ebx);
- __ push(receiver);
- __ push(name);
- __ push(ebx);
-}
-
-
-void LoadIC::GenerateMiss(MacroAssembler* masm) {
- // Return address is on the stack.
- __ IncrementCounter(masm->isolate()->counters()->load_miss(), 1);
-
- LoadIC_PushArgs(masm);
-
- // Perform tail call to the entry.
- ExternalReference ref =
- ExternalReference(IC_Utility(kLoadIC_Miss), masm->isolate());
- __ TailCallExternalReference(ref, 2, 1);
-}
-
-
-void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
- // Return address is on the stack.
- LoadIC_PushArgs(masm);
-
- // Perform tail call to the entry.
- __ TailCallRuntime(Runtime::kGetProperty, 2, 1);
-}
-
-
-void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
- // Return address is on the stack.
- __ IncrementCounter(masm->isolate()->counters()->keyed_load_miss(), 1);
-
- LoadIC_PushArgs(masm);
-
- // Perform tail call to the entry.
- ExternalReference ref =
- ExternalReference(IC_Utility(kKeyedLoadIC_Miss), masm->isolate());
- __ TailCallExternalReference(ref, 2, 1);
-}
-
-
-// IC register specifications
-const Register LoadIC::ReceiverRegister() { return edx; }
-const Register LoadIC::NameRegister() { return ecx; }
-
-
-const Register LoadIC::SlotRegister() {
- DCHECK(FLAG_vector_ics);
- return eax;
-}
-
-
-const Register LoadIC::VectorRegister() {
- DCHECK(FLAG_vector_ics);
- return ebx;
-}
-
-
-const Register StoreIC::ReceiverRegister() { return edx; }
-const Register StoreIC::NameRegister() { return ecx; }
-const Register StoreIC::ValueRegister() { return eax; }
-
-
-const Register KeyedStoreIC::MapRegister() {
- return ebx;
-}
-
-
-void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
- // Return address is on the stack.
- LoadIC_PushArgs(masm);
-
- // Perform tail call to the entry.
- __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
-}
-
-
-void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
- // Return address is on the stack.
- Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
- Code::ComputeHandlerFlags(Code::STORE_IC));
- masm->isolate()->stub_cache()->GenerateProbe(
- masm, flags, ReceiverRegister(), NameRegister(),
- ebx, no_reg);
-
- // Cache miss: Jump to runtime.
- GenerateMiss(masm);
-}
-
-
-static void StoreIC_PushArgs(MacroAssembler* masm) {
- Register receiver = StoreIC::ReceiverRegister();
- Register name = StoreIC::NameRegister();
- Register value = StoreIC::ValueRegister();
-
- DCHECK(!ebx.is(receiver) && !ebx.is(name) && !ebx.is(value));
-
- __ pop(ebx);
- __ push(receiver);
- __ push(name);
- __ push(value);
- __ push(ebx);
-}
-
-
-void StoreIC::GenerateMiss(MacroAssembler* masm) {
- // Return address is on the stack.
- StoreIC_PushArgs(masm);
-
- // Perform tail call to the entry.
- ExternalReference ref =
- ExternalReference(IC_Utility(kStoreIC_Miss), masm->isolate());
- __ TailCallExternalReference(ref, 3, 1);
-}
-
-
-void StoreIC::GenerateNormal(MacroAssembler* masm) {
- Label restore_miss;
- Register receiver = ReceiverRegister();
- Register name = NameRegister();
- Register value = ValueRegister();
- Register dictionary = ebx;
-
- __ mov(dictionary, FieldOperand(receiver, JSObject::kPropertiesOffset));
-
- // A lot of registers are needed for storing to slow case
- // objects. Push and restore receiver but rely on
- // GenerateDictionaryStore preserving the value and name.
- __ push(receiver);
- GenerateDictionaryStore(masm, &restore_miss, dictionary, name, value,
- receiver, edi);
- __ Drop(1);
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->store_normal_hit(), 1);
- __ ret(0);
-
- __ bind(&restore_miss);
- __ pop(receiver);
- __ IncrementCounter(counters->store_normal_miss(), 1);
- GenerateMiss(masm);
-}
-
-
-void StoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
- StrictMode strict_mode) {
- // Return address is on the stack.
- DCHECK(!ebx.is(ReceiverRegister()) && !ebx.is(NameRegister()) &&
- !ebx.is(ValueRegister()));
- __ pop(ebx);
- __ push(ReceiverRegister());
- __ push(NameRegister());
- __ push(ValueRegister());
- __ push(Immediate(Smi::FromInt(strict_mode)));
- __ push(ebx); // return address
-
- // Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kSetProperty, 4, 1);
-}
-
-
-void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
- StrictMode strict_mode) {
- // Return address is on the stack.
- DCHECK(!ebx.is(ReceiverRegister()) && !ebx.is(NameRegister()) &&
- !ebx.is(ValueRegister()));
- __ pop(ebx);
- __ push(ReceiverRegister());
- __ push(NameRegister());
- __ push(ValueRegister());
- __ push(Immediate(Smi::FromInt(strict_mode)));
- __ push(ebx); // return address
-
- // Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kSetProperty, 4, 1);
-}
-
-
-void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
- // Return address is on the stack.
- StoreIC_PushArgs(masm);
-
- // Do tail-call to runtime routine.
- ExternalReference ref =
- ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
- __ TailCallExternalReference(ref, 3, 1);
-}
-
-
-void StoreIC::GenerateSlow(MacroAssembler* masm) {
- // Return address is on the stack.
- StoreIC_PushArgs(masm);
-
- // Do tail-call to runtime routine.
- ExternalReference ref(IC_Utility(kStoreIC_Slow), masm->isolate());
- __ TailCallExternalReference(ref, 3, 1);
-}
-
-
-void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
- // Return address is on the stack.
- StoreIC_PushArgs(masm);
-
- // Do tail-call to runtime routine.
- ExternalReference ref(IC_Utility(kKeyedStoreIC_Slow), masm->isolate());
- __ TailCallExternalReference(ref, 3, 1);
-}
-
-
-#undef __
-
-
-Condition CompareIC::ComputeCondition(Token::Value op) {
- switch (op) {
- case Token::EQ_STRICT:
- case Token::EQ:
- return equal;
- case Token::LT:
- return less;
- case Token::GT:
- return greater;
- case Token::LTE:
- return less_equal;
- case Token::GTE:
- return greater_equal;
- default:
- UNREACHABLE();
- return no_condition;
- }
-}
-
-
-bool CompareIC::HasInlinedSmiCode(Address address) {
- // The address of the instruction following the call.
- Address test_instruction_address =
- address + Assembler::kCallTargetAddressOffset;
-
- // If the instruction following the call is not a test al, nothing
- // was inlined.
- return *test_instruction_address == Assembler::kTestAlByte;
-}
-
-
-void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
- // The address of the instruction following the call.
- Address test_instruction_address =
- address + Assembler::kCallTargetAddressOffset;
-
- // If the instruction following the call is not a test al, nothing
- // was inlined.
- if (*test_instruction_address != Assembler::kTestAlByte) {
- DCHECK(*test_instruction_address == Assembler::kNopByte);
- return;
- }
-
- Address delta_address = test_instruction_address + 1;
- // The delta to the start of the map check instruction and the
-  // condition code used at the patched jump.
- uint8_t delta = *reinterpret_cast<uint8_t*>(delta_address);
- if (FLAG_trace_ic) {
- PrintF("[ patching ic at %p, test=%p, delta=%d\n",
- address, test_instruction_address, delta);
- }
-
- // Patch with a short conditional jump. Enabling means switching from a short
- // jump-if-carry/not-carry to jump-if-zero/not-zero, whereas disabling is the
- // reverse operation of that.
- Address jmp_address = test_instruction_address - delta;
- DCHECK((check == ENABLE_INLINED_SMI_CHECK)
- ? (*jmp_address == Assembler::kJncShortOpcode ||
- *jmp_address == Assembler::kJcShortOpcode)
- : (*jmp_address == Assembler::kJnzShortOpcode ||
- *jmp_address == Assembler::kJzShortOpcode));
- Condition cc = (check == ENABLE_INLINED_SMI_CHECK)
- ? (*jmp_address == Assembler::kJncShortOpcode ? not_zero : zero)
- : (*jmp_address == Assembler::kJnzShortOpcode ? not_carry : carry);
- *jmp_address = static_cast<byte>(Assembler::kJccShortPrefix | cc);
-}
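
The patch-site protocol used here: the instruction after the IC call site is either a nop (nothing inlined) or test al, <delta>, where delta is the distance back to the short conditional jump guarding the inlined smi check; enabling the check turns jc/jnc into jz/jnz and disabling reverses it. A C++ sketch under the standard IA-32 encodings (0xA8 = test al, imm8; 0x70 | cc = short Jcc with cc 2/3/4/5 = carry/not carry/zero/not zero):

#include <stdint.h>

void PatchInlinedSmiCheckSketch(uint8_t* test_insn, bool enable) {
  if (test_insn[0] != 0xA8) return;  // no "test al, imm8": nothing inlined
  uint8_t delta = test_insn[1];      // distance back to the short jump
  uint8_t* jcc = test_insn - delta;
  uint8_t cc;
  if (enable) {
    cc = (jcc[0] == 0x73) ? 5 : 4;   // jnc -> jnz, jc -> jz
  } else {
    cc = (jcc[0] == 0x75) ? 3 : 2;   // jnz -> jnc, jz -> jc
  }
  jcc[0] = static_cast<uint8_t>(0x70 | cc);
}
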
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_IA32
#include "src/deoptimizer.h"
#include "src/hydrogen-osr.h"
#include "src/ia32/lithium-codegen-ia32.h"
-#include "src/ic.h"
-#include "src/stub-cache.h"
namespace v8 {
namespace internal {
+++ /dev/null
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/v8.h"
-
-#if V8_TARGET_ARCH_IA32
-
-#include "src/codegen.h"
-#include "src/ic-inl.h"
-#include "src/stub-cache.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-
-static void ProbeTable(Isolate* isolate,
- MacroAssembler* masm,
- Code::Flags flags,
- StubCache::Table table,
- Register name,
- Register receiver,
-                       // Number of the cache entry, pointer-size scaled.
- Register offset,
- Register extra) {
- ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
- ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
- ExternalReference map_offset(isolate->stub_cache()->map_reference(table));
-
- Label miss;
-
- // Multiply by 3 because there are 3 fields per entry (name, code, map).
- __ lea(offset, Operand(offset, offset, times_2, 0));
-
- if (extra.is_valid()) {
- // Get the code entry from the cache.
- __ mov(extra, Operand::StaticArray(offset, times_1, value_offset));
-
- // Check that the key in the entry matches the name.
- __ cmp(name, Operand::StaticArray(offset, times_1, key_offset));
- __ j(not_equal, &miss);
-
- // Check the map matches.
- __ mov(offset, Operand::StaticArray(offset, times_1, map_offset));
- __ cmp(offset, FieldOperand(receiver, HeapObject::kMapOffset));
- __ j(not_equal, &miss);
-
- // Check that the flags match what we're looking for.
- __ mov(offset, FieldOperand(extra, Code::kFlagsOffset));
- __ and_(offset, ~Code::kFlagsNotUsedInLookup);
- __ cmp(offset, flags);
- __ j(not_equal, &miss);
-
-#ifdef DEBUG
- if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
- __ jmp(&miss);
- } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
- __ jmp(&miss);
- }
-#endif
-
- // Jump to the first instruction in the code stub.
- __ add(extra, Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ jmp(extra);
-
- __ bind(&miss);
- } else {
- // Save the offset on the stack.
- __ push(offset);
-
- // Check that the key in the entry matches the name.
- __ cmp(name, Operand::StaticArray(offset, times_1, key_offset));
- __ j(not_equal, &miss);
-
- // Check the map matches.
- __ mov(offset, Operand::StaticArray(offset, times_1, map_offset));
- __ cmp(offset, FieldOperand(receiver, HeapObject::kMapOffset));
- __ j(not_equal, &miss);
-
- // Restore offset register.
- __ mov(offset, Operand(esp, 0));
-
- // Get the code entry from the cache.
- __ mov(offset, Operand::StaticArray(offset, times_1, value_offset));
-
- // Check that the flags match what we're looking for.
- __ mov(offset, FieldOperand(offset, Code::kFlagsOffset));
- __ and_(offset, ~Code::kFlagsNotUsedInLookup);
- __ cmp(offset, flags);
- __ j(not_equal, &miss);
-
-#ifdef DEBUG
- if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
- __ jmp(&miss);
- } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
- __ jmp(&miss);
- }
-#endif
-
- // Restore offset and re-load code entry from cache.
- __ pop(offset);
- __ mov(offset, Operand::StaticArray(offset, times_1, value_offset));
-
- // Jump to the first instruction in the code stub.
- __ add(offset, Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ jmp(offset);
-
- // Pop at miss.
- __ bind(&miss);
- __ pop(offset);
- }
-}
-
-
-void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
- MacroAssembler* masm, Label* miss_label, Register receiver,
- Handle<Name> name, Register scratch0, Register scratch1) {
- DCHECK(name->IsUniqueName());
- DCHECK(!receiver.is(scratch0));
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->negative_lookups(), 1);
- __ IncrementCounter(counters->negative_lookups_miss(), 1);
-
- __ mov(scratch0, FieldOperand(receiver, HeapObject::kMapOffset));
-
- const int kInterceptorOrAccessCheckNeededMask =
- (1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded);
-
- // Bail out if the receiver has a named interceptor or requires access checks.
- __ test_b(FieldOperand(scratch0, Map::kBitFieldOffset),
- kInterceptorOrAccessCheckNeededMask);
- __ j(not_zero, miss_label);
-
- // Check that receiver is a JSObject.
- __ CmpInstanceType(scratch0, FIRST_SPEC_OBJECT_TYPE);
- __ j(below, miss_label);
-
- // Load properties array.
- Register properties = scratch0;
- __ mov(properties, FieldOperand(receiver, JSObject::kPropertiesOffset));
-
- // Check that the properties array is a dictionary.
- __ cmp(FieldOperand(properties, HeapObject::kMapOffset),
- Immediate(masm->isolate()->factory()->hash_table_map()));
- __ j(not_equal, miss_label);
-
- Label done;
- NameDictionaryLookupStub::GenerateNegativeLookup(masm,
- miss_label,
- &done,
- properties,
- name,
- scratch1);
- __ bind(&done);
- __ DecrementCounter(counters->negative_lookups_miss(), 1);
-}
-
-
-void StubCache::GenerateProbe(MacroAssembler* masm,
- Code::Flags flags,
- Register receiver,
- Register name,
- Register scratch,
- Register extra,
- Register extra2,
- Register extra3) {
- Label miss;
-
- // Assert that code is valid. The multiplying code relies on the entry size
- // being 12.
- DCHECK(sizeof(Entry) == 12);
-
- // Assert the flags do not name a specific type.
- DCHECK(Code::ExtractTypeFromFlags(flags) == 0);
-
- // Assert that there are no register conflicts.
- DCHECK(!scratch.is(receiver));
- DCHECK(!scratch.is(name));
- DCHECK(!extra.is(receiver));
- DCHECK(!extra.is(name));
- DCHECK(!extra.is(scratch));
-
- // Assert scratch and extra registers are valid, and extra2/3 are unused.
- DCHECK(!scratch.is(no_reg));
- DCHECK(extra2.is(no_reg));
- DCHECK(extra3.is(no_reg));
-
- Register offset = scratch;
- scratch = no_reg;
-
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1);
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, &miss);
-
- // Get the map of the receiver and compute the hash.
- __ mov(offset, FieldOperand(name, Name::kHashFieldOffset));
- __ add(offset, FieldOperand(receiver, HeapObject::kMapOffset));
- __ xor_(offset, flags);
-  // We mask out the last two bits because they are not part of the hash and
-  // they are always 01 for maps. The same masking is applied in the two
-  // 'and' instructions below.
- __ and_(offset, (kPrimaryTableSize - 1) << kCacheIndexShift);
- // ProbeTable expects the offset to be pointer scaled, which it is, because
- // the heap object tag size is 2 and the pointer size log 2 is also 2.
- DCHECK(kCacheIndexShift == kPointerSizeLog2);
-
- // Probe the primary table.
- ProbeTable(isolate(), masm, flags, kPrimary, name, receiver, offset, extra);
-
- // Primary miss: Compute hash for secondary probe.
- __ mov(offset, FieldOperand(name, Name::kHashFieldOffset));
- __ add(offset, FieldOperand(receiver, HeapObject::kMapOffset));
- __ xor_(offset, flags);
- __ and_(offset, (kPrimaryTableSize - 1) << kCacheIndexShift);
- __ sub(offset, name);
- __ add(offset, Immediate(flags));
- __ and_(offset, (kSecondaryTableSize - 1) << kCacheIndexShift);
-
- // Probe the secondary table.
- ProbeTable(
- isolate(), masm, flags, kSecondary, name, receiver, offset, extra);
-
- // Cache miss: Fall-through and let caller handle the miss by
- // entering the runtime system.
- __ bind(&miss);
- __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1);
-}
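
Both probes mix the name's hash field, the receiver's map word and the code flags into a pointer-scaled table offset; a primary miss derives the secondary offset by subtracting the name and re-adding the flags. A sketch of the two hash computations, with the table sizes as illustrative values:

#include <stdint.h>

const uint32_t kCacheIndexShift = 2;       // == kPointerSizeLog2 on ia32
const uint32_t kPrimaryTableSize = 2048;   // illustrative
const uint32_t kSecondaryTableSize = 512;  // illustrative

uint32_t PrimaryOffset(uint32_t name_hash, uint32_t map_word, uint32_t flags) {
  return ((name_hash + map_word) ^ flags) &
         ((kPrimaryTableSize - 1) << kCacheIndexShift);
}

uint32_t SecondaryOffset(uint32_t primary, uint32_t name_word, uint32_t flags) {
  return ((primary - name_word) + flags) &
         ((kSecondaryTableSize - 1) << kCacheIndexShift);
}
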
-
-
-void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype(
- MacroAssembler* masm, int index, Register prototype, Label* miss) {
- // Get the global function with the given index.
- Handle<JSFunction> function(
- JSFunction::cast(masm->isolate()->native_context()->get(index)));
- // Check we're still in the same context.
- Register scratch = prototype;
- const int offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX);
- __ mov(scratch, Operand(esi, offset));
- __ mov(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));
- __ cmp(Operand(scratch, Context::SlotOffset(index)), function);
- __ j(not_equal, miss);
-
- // Load its initial map. The global functions all have initial maps.
- __ Move(prototype, Immediate(Handle<Map>(function->initial_map())));
- // Load the prototype from the initial map.
- __ mov(prototype, FieldOperand(prototype, Map::kPrototypeOffset));
-}
-
-
-void NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(
- MacroAssembler* masm, Register receiver, Register scratch1,
- Register scratch2, Label* miss_label) {
- __ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label);
- __ mov(eax, scratch1);
- __ ret(0);
-}
-
-
-static void PushInterceptorArguments(MacroAssembler* masm,
- Register receiver,
- Register holder,
- Register name,
- Handle<JSObject> holder_obj) {
- STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0);
- STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsInfoIndex == 1);
- STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 2);
- STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 3);
- STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsLength == 4);
- __ push(name);
- Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
- DCHECK(!masm->isolate()->heap()->InNewSpace(*interceptor));
- Register scratch = name;
- __ mov(scratch, Immediate(interceptor));
- __ push(scratch);
- __ push(receiver);
- __ push(holder);
-}
-
-
-static void CompileCallLoadPropertyWithInterceptor(
- MacroAssembler* masm,
- Register receiver,
- Register holder,
- Register name,
- Handle<JSObject> holder_obj,
- IC::UtilityId id) {
- PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
- __ CallExternalReference(ExternalReference(IC_Utility(id), masm->isolate()),
- NamedLoadHandlerCompiler::kInterceptorArgsLength);
-}
-
-
-// Generate call to api function.
-// This function uses push() to generate smaller, faster code than
-// the version above. It is an optimization that will be removed
-// when api call ICs are generated in hydrogen.
-void PropertyHandlerCompiler::GenerateFastApiCall(
- MacroAssembler* masm, const CallOptimization& optimization,
- Handle<Map> receiver_map, Register receiver, Register scratch_in,
- bool is_store, int argc, Register* values) {
- // Copy return value.
- __ pop(scratch_in);
- // receiver
- __ push(receiver);
- // Write the arguments to stack frame.
- for (int i = 0; i < argc; i++) {
- Register arg = values[argc-1-i];
- DCHECK(!receiver.is(arg));
- DCHECK(!scratch_in.is(arg));
- __ push(arg);
- }
- __ push(scratch_in);
- // Stack now matches JSFunction abi.
- DCHECK(optimization.is_simple_api_call());
-
- // Abi for CallApiFunctionStub.
- Register callee = eax;
- Register call_data = ebx;
- Register holder = ecx;
- Register api_function_address = edx;
- Register scratch = edi; // scratch_in is no longer valid.
-
- // Put holder in place.
- CallOptimization::HolderLookup holder_lookup;
- Handle<JSObject> api_holder = optimization.LookupHolderOfExpectedType(
- receiver_map,
- &holder_lookup);
- switch (holder_lookup) {
- case CallOptimization::kHolderIsReceiver:
- __ Move(holder, receiver);
- break;
- case CallOptimization::kHolderFound:
- __ LoadHeapObject(holder, api_holder);
- break;
- case CallOptimization::kHolderNotFound:
- UNREACHABLE();
- break;
- }
-
- Isolate* isolate = masm->isolate();
- Handle<JSFunction> function = optimization.constant_function();
- Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
- Handle<Object> call_data_obj(api_call_info->data(), isolate);
-
- // Put callee in place.
- __ LoadHeapObject(callee, function);
-
- bool call_data_undefined = false;
- // Put call_data in place.
- if (isolate->heap()->InNewSpace(*call_data_obj)) {
- __ mov(scratch, api_call_info);
- __ mov(call_data, FieldOperand(scratch, CallHandlerInfo::kDataOffset));
- } else if (call_data_obj->IsUndefined()) {
- call_data_undefined = true;
- __ mov(call_data, Immediate(isolate->factory()->undefined_value()));
- } else {
- __ mov(call_data, call_data_obj);
- }
-
- // Put api_function_address in place.
- Address function_address = v8::ToCData<Address>(api_call_info->callback());
- __ mov(api_function_address, Immediate(function_address));
-
- // Jump to stub.
- CallApiFunctionStub stub(isolate, is_store, call_data_undefined, argc);
- __ TailCallStub(&stub);
-}
-
-
-// Generate code to check that a global property cell is empty. Create
-// the property cell at compilation time if no cell exists for the
-// property.
-void PropertyHandlerCompiler::GenerateCheckPropertyCell(
- MacroAssembler* masm, Handle<JSGlobalObject> global, Handle<Name> name,
- Register scratch, Label* miss) {
- Handle<PropertyCell> cell =
- JSGlobalObject::EnsurePropertyCell(global, name);
- DCHECK(cell->value()->IsTheHole());
- Handle<Oddball> the_hole = masm->isolate()->factory()->the_hole_value();
- if (masm->serializer_enabled()) {
- __ mov(scratch, Immediate(cell));
- __ cmp(FieldOperand(scratch, PropertyCell::kValueOffset),
- Immediate(the_hole));
- } else {
- __ cmp(Operand::ForCell(cell), Immediate(the_hole));
- }
- __ j(not_equal, miss);
-}
-
-
-void PropertyAccessCompiler::GenerateTailCall(MacroAssembler* masm,
- Handle<Code> code) {
- __ jmp(code, RelocInfo::CODE_TARGET);
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm())
-
-
-void NamedStoreHandlerCompiler::GenerateRestoreName(Label* label,
- Handle<Name> name) {
- if (!label->is_unused()) {
- __ bind(label);
- __ mov(this->name(), Immediate(name));
- }
-}
-
-
-// Receiver_reg is preserved on jumps to miss_label, but may be destroyed if
-// a store is successful.
-void NamedStoreHandlerCompiler::GenerateStoreTransition(
- Handle<Map> transition, Handle<Name> name, Register receiver_reg,
- Register storage_reg, Register value_reg, Register scratch1,
- Register scratch2, Register unused, Label* miss_label, Label* slow) {
- int descriptor = transition->LastAdded();
- DescriptorArray* descriptors = transition->instance_descriptors();
- PropertyDetails details = descriptors->GetDetails(descriptor);
- Representation representation = details.representation();
- DCHECK(!representation.IsNone());
-
- if (details.type() == CONSTANT) {
- Handle<Object> constant(descriptors->GetValue(descriptor), isolate());
- __ CmpObject(value_reg, constant);
- __ j(not_equal, miss_label);
- } else if (representation.IsSmi()) {
- __ JumpIfNotSmi(value_reg, miss_label);
- } else if (representation.IsHeapObject()) {
- __ JumpIfSmi(value_reg, miss_label);
- HeapType* field_type = descriptors->GetFieldType(descriptor);
- HeapType::Iterator<Map> it = field_type->Classes();
- if (!it.Done()) {
- Label do_store;
- while (true) {
- __ CompareMap(value_reg, it.Current());
- it.Advance();
- if (it.Done()) {
- __ j(not_equal, miss_label);
- break;
- }
- __ j(equal, &do_store, Label::kNear);
- }
- __ bind(&do_store);
- }
- } else if (representation.IsDouble()) {
- Label do_store, heap_number;
- __ AllocateHeapNumber(storage_reg, scratch1, scratch2, slow, MUTABLE);
-
- __ JumpIfNotSmi(value_reg, &heap_number);
- __ SmiUntag(value_reg);
- __ Cvtsi2sd(xmm0, value_reg);
- __ SmiTag(value_reg);
- __ jmp(&do_store);
-
- __ bind(&heap_number);
- __ CheckMap(value_reg, isolate()->factory()->heap_number_map(), miss_label,
- DONT_DO_SMI_CHECK);
- __ movsd(xmm0, FieldOperand(value_reg, HeapNumber::kValueOffset));
-
- __ bind(&do_store);
- __ movsd(FieldOperand(storage_reg, HeapNumber::kValueOffset), xmm0);
- }
-
- // Stub never generated for objects that require access checks.
- DCHECK(!transition->is_access_check_needed());
-
- // Perform map transition for the receiver if necessary.
- if (details.type() == FIELD &&
- Map::cast(transition->GetBackPointer())->unused_property_fields() == 0) {
- // The properties must be extended before we can store the value.
- // We jump to a runtime call that extends the properties array.
- __ pop(scratch1); // Return address.
- __ push(receiver_reg);
- __ push(Immediate(transition));
- __ push(value_reg);
- __ push(scratch1);
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage),
- isolate()),
- 3, 1);
- return;
- }
-
- // Update the map of the object.
- __ mov(scratch1, Immediate(transition));
- __ mov(FieldOperand(receiver_reg, HeapObject::kMapOffset), scratch1);
-
- // Update the write barrier for the map field.
- __ RecordWriteField(receiver_reg,
- HeapObject::kMapOffset,
- scratch1,
- scratch2,
- kDontSaveFPRegs,
- OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
-
- if (details.type() == CONSTANT) {
- DCHECK(value_reg.is(eax));
- __ ret(0);
- return;
- }
-
- int index = transition->instance_descriptors()->GetFieldIndex(
- transition->LastAdded());
-
- // Adjust for the number of properties stored in the object. Even in the
- // face of a transition we can use the old map here because the size of the
- // object and the number of in-object properties is not going to change.
- index -= transition->inobject_properties();
-
- SmiCheck smi_check = representation.IsTagged()
- ? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
- // TODO(verwaest): Share this code as a code stub.
- if (index < 0) {
- // Set the property straight into the object.
- int offset = transition->instance_size() + (index * kPointerSize);
- if (representation.IsDouble()) {
- __ mov(FieldOperand(receiver_reg, offset), storage_reg);
- } else {
- __ mov(FieldOperand(receiver_reg, offset), value_reg);
- }
-
- if (!representation.IsSmi()) {
- // Update the write barrier for the array address.
- if (!representation.IsDouble()) {
- __ mov(storage_reg, value_reg);
- }
- __ RecordWriteField(receiver_reg,
- offset,
- storage_reg,
- scratch1,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- smi_check);
- }
- } else {
- // Write to the properties array.
- int offset = index * kPointerSize + FixedArray::kHeaderSize;
- // Get the properties array (optimistically).
- __ mov(scratch1, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
- if (representation.IsDouble()) {
- __ mov(FieldOperand(scratch1, offset), storage_reg);
- } else {
- __ mov(FieldOperand(scratch1, offset), value_reg);
- }
-
- if (!representation.IsSmi()) {
- // Update the write barrier for the array address.
- if (!representation.IsDouble()) {
- __ mov(storage_reg, value_reg);
- }
- __ RecordWriteField(scratch1,
- offset,
- storage_reg,
- receiver_reg,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- smi_check);
- }
- }
-
- // Return the value (register eax).
- DCHECK(value_reg.is(eax));
- __ ret(0);
-}
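
The index arithmetic at the end of GenerateStoreTransition reflects V8's split between in-object fields and the overflow properties array: after subtracting the map's in-object property count, a negative index addresses a slot inside the object itself (relative to its instance size), while a non-negative one addresses the properties FixedArray. A sketch of that offset computation with illustrative constants:

const int kPointerSize = 4;           // ia32
const int kFixedArrayHeaderSize = 8;  // illustrative

// Byte offset of |field_index|; *in_object reports which base it is
// relative to (the object itself vs. its properties backing store).
int FieldByteOffset(int field_index, int inobject_properties,
                    int instance_size, bool* in_object) {
  const int index = field_index - inobject_properties;
  *in_object = index < 0;
  return *in_object ? instance_size + index * kPointerSize
                    : kFixedArrayHeaderSize + index * kPointerSize;
}
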
-
-
-void NamedStoreHandlerCompiler::GenerateStoreField(LookupIterator* lookup,
- Register value_reg,
- Label* miss_label) {
- DCHECK(lookup->representation().IsHeapObject());
- __ JumpIfSmi(value_reg, miss_label);
- HeapType::Iterator<Map> it = lookup->GetFieldType()->Classes();
- Label do_store;
- while (true) {
- __ CompareMap(value_reg, it.Current());
- it.Advance();
- if (it.Done()) {
- __ j(not_equal, miss_label);
- break;
- }
- __ j(equal, &do_store, Label::kNear);
- }
- __ bind(&do_store);
-
- StoreFieldStub stub(isolate(), lookup->GetFieldIndex(),
- lookup->representation());
- GenerateTailCall(masm(), stub.GetCode());
-}
-
-
-Register PropertyHandlerCompiler::CheckPrototypes(
- Register object_reg, Register holder_reg, Register scratch1,
- Register scratch2, Handle<Name> name, Label* miss,
- PrototypeCheckType check) {
- Handle<Map> receiver_map(IC::TypeToMap(*type(), isolate()));
-
- // Make sure there's no overlap between holder and object registers.
- DCHECK(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
- DCHECK(!scratch2.is(object_reg) && !scratch2.is(holder_reg)
- && !scratch2.is(scratch1));
-
- // Keep track of the current object in register reg.
- Register reg = object_reg;
- int depth = 0;
-
- Handle<JSObject> current = Handle<JSObject>::null();
- if (type()->IsConstant())
- current = Handle<JSObject>::cast(type()->AsConstant()->Value());
- Handle<JSObject> prototype = Handle<JSObject>::null();
- Handle<Map> current_map = receiver_map;
- Handle<Map> holder_map(holder()->map());
-  // Traverse the prototype chain and check the maps in the prototype chain
-  // for fast and global objects, or do a negative lookup for normal
-  // (dictionary-mode) objects.
- while (!current_map.is_identical_to(holder_map)) {
- ++depth;
-
- // Only global objects and objects that do not require access
- // checks are allowed in stubs.
- DCHECK(current_map->IsJSGlobalProxyMap() ||
- !current_map->is_access_check_needed());
-
- prototype = handle(JSObject::cast(current_map->prototype()));
- if (current_map->is_dictionary_map() &&
- !current_map->IsJSGlobalObjectMap()) {
- DCHECK(!current_map->IsJSGlobalProxyMap()); // Proxy maps are fast.
- if (!name->IsUniqueName()) {
- DCHECK(name->IsString());
- name = factory()->InternalizeString(Handle<String>::cast(name));
- }
- DCHECK(current.is_null() ||
- current->property_dictionary()->FindEntry(name) ==
- NameDictionary::kNotFound);
-
- GenerateDictionaryNegativeLookup(masm(), miss, reg, name,
- scratch1, scratch2);
-
- __ mov(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
- reg = holder_reg; // From now on the object will be in holder_reg.
- __ mov(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
- } else {
- bool in_new_space = heap()->InNewSpace(*prototype);
- // Two possible reasons for loading the prototype from the map:
- // (1) Can't store references to new space in code.
- // (2) Handler is shared for all receivers with the same prototype
- // map (but not necessarily the same prototype instance).
- bool load_prototype_from_map = in_new_space || depth == 1;
- if (depth != 1 || check == CHECK_ALL_MAPS) {
- __ CheckMap(reg, current_map, miss, DONT_DO_SMI_CHECK);
- }
-
- // Check access rights to the global object. This has to happen after
- // the map check so that we know that the object is actually a global
- // object.
- // This allows us to install generated handlers for accesses to the
- // global proxy (as opposed to using slow ICs). See corresponding code
- // in LookupForRead().
- if (current_map->IsJSGlobalProxyMap()) {
- __ CheckAccessGlobalProxy(reg, scratch1, scratch2, miss);
- } else if (current_map->IsJSGlobalObjectMap()) {
- GenerateCheckPropertyCell(
- masm(), Handle<JSGlobalObject>::cast(current), name,
- scratch2, miss);
- }
-
- if (load_prototype_from_map) {
- // Save the map in scratch1 for later.
- __ mov(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
- }
-
- reg = holder_reg; // From now on the object will be in holder_reg.
-
- if (load_prototype_from_map) {
- __ mov(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
- } else {
- __ mov(reg, prototype);
- }
- }
-
- // Go to the next object in the prototype chain.
- current = prototype;
- current_map = handle(current->map());
- }
-
- // Log the check depth.
- LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
-
- if (depth != 0 || check == CHECK_ALL_MAPS) {
- // Check the holder map.
- __ CheckMap(reg, current_map, miss, DONT_DO_SMI_CHECK);
- }
-
- // Perform security check for access to the global object.
- DCHECK(current_map->IsJSGlobalProxyMap() ||
- !current_map->is_access_check_needed());
- if (current_map->IsJSGlobalProxyMap()) {
- __ CheckAccessGlobalProxy(reg, scratch1, scratch2, miss);
- }
-
- // Return the register containing the holder.
- return reg;
-}
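
For intuition, the check sequence this frontend emits behaves like the standalone model below: walk from the receiver toward the holder, verifying at each hop that the map still matches the one recorded at compile time. `Obj` and `Map` are hypothetical stand-ins, and the dictionary-mode negative lookups, global-proxy access checks, and new-space prototype loads are elided; this sketches the technique, not the generated assembly.

    // Standalone model of the emitted prototype-chain check. Any map
    // mismatch corresponds to a jump to the miss label above.
    struct Map;
    struct Obj { const Map* map; };
    struct Map { const Obj* prototype; };  // null terminates the chain

    bool CheckPrototypeChain(const Obj* receiver, const Obj* holder,
                             const Map* const* expected_maps, int depth) {
      const Obj* current = receiver;
      for (int i = 0; i < depth; ++i) {
        if (current->map != expected_maps[i]) return false;  // miss
        current = current->map->prototype;
      }
      // The holder itself is map-checked as well (CHECK_ALL_MAPS).
      return current == holder;
    }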
-
-
-void NamedLoadHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
- if (!miss->is_unused()) {
- Label success;
- __ jmp(&success);
- __ bind(miss);
- TailCallBuiltin(masm(), MissBuiltin(kind()));
- __ bind(&success);
- }
-}
-
-
-void NamedStoreHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
- if (!miss->is_unused()) {
- Label success;
- __ jmp(&success);
- GenerateRestoreName(miss, name);
- TailCallBuiltin(masm(), MissBuiltin(kind()));
- __ bind(&success);
- }
-}
-
-
-void NamedLoadHandlerCompiler::GenerateLoadCallback(
- Register reg, Handle<ExecutableAccessorInfo> callback) {
-  // Insert additional parameters into the stack frame above the return
-  // address.
- DCHECK(!scratch3().is(reg));
- __ pop(scratch3()); // Get return address to place it below.
-
- STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 0);
- STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 1);
- STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 2);
- STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 3);
- STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 4);
- STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 5);
- __ push(receiver()); // receiver
- // Push data from ExecutableAccessorInfo.
- if (isolate()->heap()->InNewSpace(callback->data())) {
- DCHECK(!scratch2().is(reg));
- __ mov(scratch2(), Immediate(callback));
- __ push(FieldOperand(scratch2(), ExecutableAccessorInfo::kDataOffset));
- } else {
- __ push(Immediate(Handle<Object>(callback->data(), isolate())));
- }
- __ push(Immediate(isolate()->factory()->undefined_value())); // ReturnValue
- // ReturnValue default value
- __ push(Immediate(isolate()->factory()->undefined_value()));
- __ push(Immediate(reinterpret_cast<int>(isolate())));
- __ push(reg); // holder
-
- // Save a pointer to where we pushed the arguments. This will be
- // passed as the const PropertyAccessorInfo& to the C++ callback.
- __ push(esp);
-
- __ push(name()); // name
-
- __ push(scratch3()); // Restore return address.
-
- // Abi for CallApiGetter
- Register getter_address = edx;
- Address function_address = v8::ToCData<Address>(callback->getter());
- __ mov(getter_address, Immediate(function_address));
-
- CallApiGetterStub stub(isolate());
- __ TailCallStub(&stub);
-}
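
Reconstructed from the pushes above, the stack at the tail call into CallApiGetterStub looks as follows (ia32, 4-byte slots; offsets from esp). The indices in parentheses are the PropertyCallbackArguments slots asserted by the STATIC_ASSERTs:

    esp[0]   return address       (restored from scratch3)
    esp[4]   name
    esp[8]   pointer to esp[12]   (the const PropertyAccessorInfo& argument)
    esp[12]  holder               (kHolderIndex == 0)
    esp[16]  isolate              (kIsolateIndex == 1)
    esp[20]  ReturnValue default  (kReturnValueDefaultValueIndex == 2)
    esp[24]  ReturnValue          (kReturnValueOffset == 3)
    esp[28]  data                 (kDataIndex == 4)
    esp[32]  receiver             (kThisIndex == 5)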
-
-
-void NamedLoadHandlerCompiler::GenerateLoadConstant(Handle<Object> value) {
- // Return the constant value.
- __ LoadObject(eax, value);
- __ ret(0);
-}
-
-
-void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
- LookupIterator* it, Register holder_reg) {
- DCHECK(holder()->HasNamedInterceptor());
- DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined());
-
- // Compile the interceptor call, followed by inline code to load the
- // property from further up the prototype chain if the call fails.
- // Check that the maps haven't changed.
- DCHECK(holder_reg.is(receiver()) || holder_reg.is(scratch1()));
-
-  // Preserve the receiver register explicitly whenever it is different from
-  // the holder and is needed in case the interceptor returns without a
-  // result. The ACCESSOR case needs the receiver to be passed into C++ code;
-  // the FIELD case might cause a miss during the prototype check.
- bool must_perform_prototype_check =
- !holder().is_identical_to(it->GetHolder<JSObject>());
- bool must_preserve_receiver_reg =
- !receiver().is(holder_reg) &&
- (it->property_kind() == LookupIterator::ACCESSOR ||
- must_perform_prototype_check);
-
- // Save necessary data before invoking an interceptor.
- // Requires a frame to make GC aware of pushed pointers.
- {
- FrameScope frame_scope(masm(), StackFrame::INTERNAL);
-
- if (must_preserve_receiver_reg) {
- __ push(receiver());
- }
- __ push(holder_reg);
- __ push(this->name());
-
-    // Invoke the interceptor. Note: the map checks from the receiver to the
-    // interceptor's holder have already been compiled before this point
-    // (see the callers of this method).
- CompileCallLoadPropertyWithInterceptor(
- masm(), receiver(), holder_reg, this->name(), holder(),
- IC::kLoadPropertyWithInterceptorOnly);
-
-    // Check if the interceptor provided a value for the property. If so,
-    // return immediately.
- Label interceptor_failed;
- __ cmp(eax, factory()->no_interceptor_result_sentinel());
- __ j(equal, &interceptor_failed);
- frame_scope.GenerateLeaveFrame();
- __ ret(0);
-
- // Clobber registers when generating debug-code to provoke errors.
- __ bind(&interceptor_failed);
- if (FLAG_debug_code) {
- __ mov(receiver(), Immediate(BitCast<int32_t>(kZapValue)));
- __ mov(holder_reg, Immediate(BitCast<int32_t>(kZapValue)));
- __ mov(this->name(), Immediate(BitCast<int32_t>(kZapValue)));
- }
-
- __ pop(this->name());
- __ pop(holder_reg);
- if (must_preserve_receiver_reg) {
- __ pop(receiver());
- }
-
- // Leave the internal frame.
- }
-
- GenerateLoadPostInterceptor(it, holder_reg);
-}
-
-
-void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
- DCHECK(holder()->HasNamedInterceptor());
- DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined());
- // Call the runtime system to load the interceptor.
- __ pop(scratch2()); // save old return address
- PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(),
- holder());
- __ push(scratch2()); // restore old return address
-
- ExternalReference ref = ExternalReference(
- IC_Utility(IC::kLoadPropertyWithInterceptor), isolate());
- __ TailCallExternalReference(
- ref, NamedLoadHandlerCompiler::kInterceptorArgsLength, 1);
-}
-
-
-Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
- Handle<JSObject> object, Handle<Name> name,
- Handle<ExecutableAccessorInfo> callback) {
- Register holder_reg = Frontend(receiver(), name);
-
- __ pop(scratch1()); // remove the return address
- __ push(receiver());
- __ push(holder_reg);
- __ Push(callback);
- __ Push(name);
- __ push(value());
- __ push(scratch1()); // restore return address
-
- // Do tail-call to the runtime system.
- ExternalReference store_callback_property =
- ExternalReference(IC_Utility(IC::kStoreCallbackProperty), isolate());
- __ TailCallExternalReference(store_callback_property, 5, 1);
-
- // Return the generated code.
- return GetCode(kind(), Code::FAST, name);
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm)
-
-
-void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
- MacroAssembler* masm, Handle<HeapType> type, Register receiver,
- Handle<JSFunction> setter) {
- // ----------- S t a t e -------------
- // -- esp[0] : return address
- // -----------------------------------
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Save value register, so we can restore it later.
- __ push(value());
-
- if (!setter.is_null()) {
- // Call the JavaScript setter with receiver and value on the stack.
- if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
- // Swap in the global receiver.
- __ mov(receiver,
- FieldOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
- }
- __ push(receiver);
- __ push(value());
- ParameterCount actual(1);
- ParameterCount expected(setter);
- __ InvokeFunction(setter, expected, actual,
- CALL_FUNCTION, NullCallWrapper());
- } else {
- // If we generate a global code snippet for deoptimization only, remember
- // the place to continue after deoptimization.
- masm->isolate()->heap()->SetSetterStubDeoptPCOffset(masm->pc_offset());
- }
-
- // We have to return the passed value, not the return value of the setter.
- __ pop(eax);
-
- // Restore context register.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- }
- __ ret(0);
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm())
-
-
-Handle<Code> NamedStoreHandlerCompiler::CompileStoreInterceptor(
- Handle<Name> name) {
- __ pop(scratch1()); // remove the return address
- __ push(receiver());
- __ push(this->name());
- __ push(value());
- __ push(scratch1()); // restore return address
-
- // Do tail-call to the runtime system.
- ExternalReference store_ic_property = ExternalReference(
- IC_Utility(IC::kStorePropertyWithInterceptor), isolate());
- __ TailCallExternalReference(store_ic_property, 3, 1);
-
- // Return the generated code.
- return GetCode(kind(), Code::FAST, name);
-}
-
-
-Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
- MapHandleList* receiver_maps, CodeHandleList* handler_stubs,
- MapHandleList* transitioned_maps) {
- Label miss;
- __ JumpIfSmi(receiver(), &miss, Label::kNear);
- __ mov(scratch1(), FieldOperand(receiver(), HeapObject::kMapOffset));
- for (int i = 0; i < receiver_maps->length(); ++i) {
- __ cmp(scratch1(), receiver_maps->at(i));
- if (transitioned_maps->at(i).is_null()) {
- __ j(equal, handler_stubs->at(i));
- } else {
- Label next_map;
- __ j(not_equal, &next_map, Label::kNear);
- __ mov(transition_map(), Immediate(transitioned_maps->at(i)));
- __ jmp(handler_stubs->at(i), RelocInfo::CODE_TARGET);
- __ bind(&next_map);
- }
- }
- __ bind(&miss);
- TailCallBuiltin(masm(), MissBuiltin(kind()));
-
- // Return the generated code.
- return GetCode(kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC);
-}
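
Conceptually the stub compiled here is a linear dispatch over the known receiver maps, with an optional transition map handed to the handler. A minimal standalone model (hypothetical types; in the generated code the transition travels in the transition_map() register rather than as an argument):

    // Standalone model of the polymorphic keyed-store dispatch.
    struct Map;
    struct Receiver { const Map* map; };
    using Handler = void (*)(Receiver*, const Map*);

    struct Entry {
      const Map* map;
      const Map* transition;  // null when the store does not transition
      Handler handler;
    };

    void Dispatch(Receiver* r, const Entry* entries, int n,
                  void (*miss)(Receiver*)) {
      for (int i = 0; i < n; ++i) {
        if (r->map == entries[i].map) {
          entries[i].handler(r, entries[i].transition);
          return;
        }
      }
      miss(r);  // TailCallBuiltin(masm(), MissBuiltin(kind())) above
    }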
-
-
-Register* PropertyAccessCompiler::load_calling_convention() {
- // receiver, name, scratch1, scratch2, scratch3, scratch4.
- Register receiver = LoadIC::ReceiverRegister();
- Register name = LoadIC::NameRegister();
- static Register registers[] = { receiver, name, ebx, eax, edi, no_reg };
- return registers;
-}
-
-
-Register* PropertyAccessCompiler::store_calling_convention() {
- // receiver, name, scratch1, scratch2, scratch3.
- Register receiver = StoreIC::ReceiverRegister();
- Register name = StoreIC::NameRegister();
- DCHECK(ebx.is(KeyedStoreIC::MapRegister()));
- static Register registers[] = { receiver, name, ebx, edi, no_reg };
- return registers;
-}
-
-
-Register NamedStoreHandlerCompiler::value() { return StoreIC::ValueRegister(); }
-
-
-#undef __
-#define __ ACCESS_MASM(masm)
-
-
-void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
- MacroAssembler* masm, Handle<HeapType> type, Register receiver,
- Handle<JSFunction> getter) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- if (!getter.is_null()) {
- // Call the JavaScript getter with the receiver on the stack.
- if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
- // Swap in the global receiver.
- __ mov(receiver,
- FieldOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
- }
- __ push(receiver);
- ParameterCount actual(0);
- ParameterCount expected(getter);
- __ InvokeFunction(getter, expected, actual,
- CALL_FUNCTION, NullCallWrapper());
- } else {
- // If we generate a global code snippet for deoptimization only, remember
- // the place to continue after deoptimization.
- masm->isolate()->heap()->SetGetterStubDeoptPCOffset(masm->pc_offset());
- }
-
- // Restore context register.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- }
- __ ret(0);
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm())
-
-
-Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
- Handle<PropertyCell> cell, Handle<Name> name, bool is_configurable) {
- Label miss;
-
- FrontendHeader(receiver(), name, &miss);
- // Get the value from the cell.
- Register result = StoreIC::ValueRegister();
- if (masm()->serializer_enabled()) {
- __ mov(result, Immediate(cell));
- __ mov(result, FieldOperand(result, PropertyCell::kValueOffset));
- } else {
- __ mov(result, Operand::ForCell(cell));
- }
-
-  // Check for a deleted property if the property can actually be deleted.
- if (is_configurable) {
- __ cmp(result, factory()->the_hole_value());
- __ j(equal, &miss);
- } else if (FLAG_debug_code) {
- __ cmp(result, factory()->the_hole_value());
- __ Check(not_equal, kDontDeleteCellsCannotContainTheHole);
- }
-
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->named_load_global_stub(), 1);
- // The code above already loads the result into the return register.
- __ ret(0);
-
- FrontendFooter(name, &miss);
-
- // Return the generated code.
- return GetCode(kind(), Code::NORMAL, name);
-}
-
-
-Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types,
- CodeHandleList* handlers,
- Handle<Name> name,
- Code::StubType type,
- IcCheckType check) {
- Label miss;
-
- if (check == PROPERTY &&
- (kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) {
- // In case we are compiling an IC for dictionary loads and stores, just
- // check whether the name is unique.
- if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) {
- __ JumpIfNotUniqueName(this->name(), &miss);
- } else {
- __ cmp(this->name(), Immediate(name));
- __ j(not_equal, &miss);
- }
- }
-
- Label number_case;
- Label* smi_target = IncludesNumberType(types) ? &number_case : &miss;
- __ JumpIfSmi(receiver(), smi_target);
-
-  // Polymorphic keyed stores may use the map register.
- Register map_reg = scratch1();
- DCHECK(kind() != Code::KEYED_STORE_IC ||
- map_reg.is(KeyedStoreIC::MapRegister()));
- __ mov(map_reg, FieldOperand(receiver(), HeapObject::kMapOffset));
- int receiver_count = types->length();
- int number_of_handled_maps = 0;
- for (int current = 0; current < receiver_count; ++current) {
- Handle<HeapType> type = types->at(current);
- Handle<Map> map = IC::TypeToMap(*type, isolate());
- if (!map->is_deprecated()) {
- number_of_handled_maps++;
- __ cmp(map_reg, map);
- if (type->Is(HeapType::Number())) {
- DCHECK(!number_case.is_unused());
- __ bind(&number_case);
- }
- __ j(equal, handlers->at(current));
- }
- }
- DCHECK(number_of_handled_maps != 0);
-
- __ bind(&miss);
- TailCallBuiltin(masm(), MissBuiltin(kind()));
-
- // Return the generated code.
- InlineCacheState state =
- number_of_handled_maps > 1 ? POLYMORPHIC : MONOMORPHIC;
- return GetCode(kind(), type, name, state);
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm)
-
-
-void ElementHandlerCompiler::GenerateLoadDictionaryElement(
- MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- DCHECK(edx.is(LoadIC::ReceiverRegister()));
- DCHECK(ecx.is(LoadIC::NameRegister()));
- Label slow, miss;
-
-  // This stub is meant to be tail-jumped to; the receiver must already
-  // have been verified by the caller not to be a smi.
- __ JumpIfNotSmi(ecx, &miss);
- __ mov(ebx, ecx);
- __ SmiUntag(ebx);
- __ mov(eax, FieldOperand(edx, JSObject::kElementsOffset));
-
- // Push receiver on the stack to free up a register for the dictionary
- // probing.
- __ push(edx);
- __ LoadFromNumberDictionary(&slow, eax, ecx, ebx, edx, edi, eax);
- // Pop receiver before returning.
- __ pop(edx);
- __ ret(0);
-
- __ bind(&slow);
- __ pop(edx);
-
- // ----------- S t a t e -------------
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Slow);
-
- __ bind(&miss);
- // ----------- S t a t e -------------
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Miss);
-}
-
-
-#undef __
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_IA32
--- a/src/ic-inl.h
+++ /dev/null
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_IC_INL_H_
-#define V8_IC_INL_H_
-
-#include "src/ic.h"
-
-#include "src/compiler.h"
-#include "src/debug.h"
-#include "src/macro-assembler.h"
-#include "src/prototype.h"
-
-namespace v8 {
-namespace internal {
-
-
-Address IC::address() const {
- // Get the address of the call.
- Address result = Assembler::target_address_from_return_address(pc());
-
- Debug* debug = isolate()->debug();
-  // First check if any break points are active; if not, just return the
-  // address of the call.
- if (!debug->has_break_points()) return result;
-
-  // At least one break point is active; perform an additional test to ensure
-  // that break point locations are updated correctly.
- if (debug->IsDebugBreak(Assembler::target_address_at(result,
- raw_constant_pool()))) {
- // If the call site is a call to debug break then return the address in
- // the original code instead of the address in the running code. This will
- // cause the original code to be updated and keeps the breakpoint active in
- // the running code.
- Code* code = GetCode();
- Code* original_code = GetOriginalCode();
- intptr_t delta =
- original_code->instruction_start() - code->instruction_start();
- // Return the address in the original code. This is the place where
- // the call which has been overwritten by the DebugBreakXXX resides
- // and the place where the inline cache system should look.
- return result + delta;
- } else {
- // No break point here just return the address of the call.
- return result;
- }
-}
-
-
-ConstantPoolArray* IC::constant_pool() const {
- if (!FLAG_enable_ool_constant_pool) {
- return NULL;
- } else {
- Handle<ConstantPoolArray> result = raw_constant_pool_;
- Debug* debug = isolate()->debug();
-    // First check if any break points are active; if not, just return the
-    // original constant pool.
- if (!debug->has_break_points()) return *result;
-
-    // At least one break point is active; perform an additional test to
-    // ensure that break point locations are updated correctly.
- Address target = Assembler::target_address_from_return_address(pc());
- if (debug->IsDebugBreak(
- Assembler::target_address_at(target, raw_constant_pool()))) {
- // If the call site is a call to debug break then we want to return the
- // constant pool for the original code instead of the breakpointed code.
- return GetOriginalCode()->constant_pool();
- }
- return *result;
- }
-}
-
-
-ConstantPoolArray* IC::raw_constant_pool() const {
- if (FLAG_enable_ool_constant_pool) {
- return *raw_constant_pool_;
- } else {
- return NULL;
- }
-}
-
-
-Code* IC::GetTargetAtAddress(Address address,
- ConstantPoolArray* constant_pool) {
- // Get the target address of the IC.
- Address target = Assembler::target_address_at(address, constant_pool);
- // Convert target address to the code object. Code::GetCodeFromTargetAddress
- // is safe for use during GC where the map might be marked.
- Code* result = Code::GetCodeFromTargetAddress(target);
- DCHECK(result->is_inline_cache_stub());
- return result;
-}
-
-
-void IC::SetTargetAtAddress(Address address,
- Code* target,
- ConstantPoolArray* constant_pool) {
- DCHECK(target->is_inline_cache_stub() || target->is_compare_ic_stub());
- Heap* heap = target->GetHeap();
- Code* old_target = GetTargetAtAddress(address, constant_pool);
-#ifdef DEBUG
-  // STORE_IC and KEYED_STORE_IC use Code::extra_ic_state() to mark
-  // ICs as strict mode. The strictness of the IC must be preserved.
- if (old_target->kind() == Code::STORE_IC ||
- old_target->kind() == Code::KEYED_STORE_IC) {
- DCHECK(StoreIC::GetStrictMode(old_target->extra_ic_state()) ==
- StoreIC::GetStrictMode(target->extra_ic_state()));
- }
-#endif
- Assembler::set_target_address_at(
- address, constant_pool, target->instruction_start());
- if (heap->gc_state() == Heap::MARK_COMPACT) {
- heap->mark_compact_collector()->RecordCodeTargetPatch(address, target);
- } else {
- heap->incremental_marking()->RecordCodeTargetPatch(address, target);
- }
- PostPatching(address, target, old_target);
-}
-
-
-template <class TypeClass>
-JSFunction* IC::GetRootConstructor(TypeClass* type, Context* native_context) {
- if (type->Is(TypeClass::Boolean())) {
- return native_context->boolean_function();
- } else if (type->Is(TypeClass::Number())) {
- return native_context->number_function();
- } else if (type->Is(TypeClass::String())) {
- return native_context->string_function();
- } else if (type->Is(TypeClass::Symbol())) {
- return native_context->symbol_function();
- } else {
- return NULL;
- }
-}
-
-
-Handle<Map> IC::GetHandlerCacheHolder(HeapType* type, bool receiver_is_holder,
- Isolate* isolate, CacheHolderFlag* flag) {
- Handle<Map> receiver_map = TypeToMap(type, isolate);
- if (receiver_is_holder) {
- *flag = kCacheOnReceiver;
- return receiver_map;
- }
- Context* native_context = *isolate->native_context();
- JSFunction* builtin_ctor = GetRootConstructor(type, native_context);
- if (builtin_ctor != NULL) {
- *flag = kCacheOnPrototypeReceiverIsPrimitive;
- return handle(HeapObject::cast(builtin_ctor->instance_prototype())->map());
- }
- *flag = receiver_map->is_dictionary_map()
- ? kCacheOnPrototypeReceiverIsDictionary
- : kCacheOnPrototype;
- // Callers must ensure that the prototype is non-null.
- return handle(JSObject::cast(receiver_map->prototype())->map());
-}
-
-
-Handle<Map> IC::GetICCacheHolder(HeapType* type, Isolate* isolate,
- CacheHolderFlag* flag) {
- Context* native_context = *isolate->native_context();
- JSFunction* builtin_ctor = GetRootConstructor(type, native_context);
- if (builtin_ctor != NULL) {
- *flag = kCacheOnPrototype;
- return handle(builtin_ctor->initial_map());
- }
- *flag = kCacheOnReceiver;
- return TypeToMap(type, isolate);
-}
-
-
-IC::State CallIC::FeedbackToState(Handle<FixedArray> vector,
- Handle<Smi> slot) const {
- IC::State state = UNINITIALIZED;
- Object* feedback = vector->get(slot->value());
-
- if (feedback == *TypeFeedbackInfo::MegamorphicSentinel(isolate())) {
- state = GENERIC;
- } else if (feedback->IsAllocationSite() || feedback->IsJSFunction()) {
- state = MONOMORPHIC;
- } else {
- CHECK(feedback == *TypeFeedbackInfo::UninitializedSentinel(isolate()));
- }
-
- return state;
-}
-} } // namespace v8::internal
-
-#endif // V8_IC_INL_H_
--- a/src/ic.cc
+++ /dev/null
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/v8.h"
-
-#include "src/accessors.h"
-#include "src/api.h"
-#include "src/arguments.h"
-#include "src/codegen.h"
-#include "src/conversions.h"
-#include "src/execution.h"
-#include "src/ic-inl.h"
-#include "src/prototype.h"
-#include "src/runtime.h"
-#include "src/stub-cache.h"
-
-namespace v8 {
-namespace internal {
-
-char IC::TransitionMarkFromState(IC::State state) {
- switch (state) {
- case UNINITIALIZED: return '0';
- case PREMONOMORPHIC: return '.';
- case MONOMORPHIC: return '1';
-    case PROTOTYPE_FAILURE: return '^';
- case POLYMORPHIC: return 'P';
- case MEGAMORPHIC: return 'N';
- case GENERIC: return 'G';
-
-    // We never see the debugger states here, because the state is
-    // computed from the original code, not the patched code. Let
-    // these cases fall through to the unreachable code below.
- case DEBUG_STUB: break;
- // Type-vector-based ICs resolve state to one of the above.
- case DEFAULT:
- break;
- }
- UNREACHABLE();
- return 0;
-}
-
-
-const char* GetTransitionMarkModifier(KeyedAccessStoreMode mode) {
- if (mode == STORE_NO_TRANSITION_HANDLE_COW) return ".COW";
- if (mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS) {
- return ".IGNORE_OOB";
- }
- if (IsGrowStoreMode(mode)) return ".GROW";
- return "";
-}
-
-
-#ifdef DEBUG
-
-#define TRACE_GENERIC_IC(isolate, type, reason) \
- do { \
- if (FLAG_trace_ic) { \
- PrintF("[%s patching generic stub in ", type); \
- JavaScriptFrame::PrintTop(isolate, stdout, false, true); \
- PrintF(" (%s)]\n", reason); \
- } \
- } while (false)
-
-#else
-
-#define TRACE_GENERIC_IC(isolate, type, reason)
-
-#endif // DEBUG
-
-
-void IC::TraceIC(const char* type, Handle<Object> name) {
- if (FLAG_trace_ic) {
- Code* new_target = raw_target();
- State new_state = new_target->ic_state();
- TraceIC(type, name, state(), new_state);
- }
-}
-
-
-void IC::TraceIC(const char* type, Handle<Object> name, State old_state,
- State new_state) {
- if (FLAG_trace_ic) {
- Code* new_target = raw_target();
- PrintF("[%s%s in ", new_target->is_keyed_stub() ? "Keyed" : "", type);
-
- // TODO(jkummerow): Add support for "apply". The logic is roughly:
- // marker = [fp_ + kMarkerOffset];
- // if marker is smi and marker.value == INTERNAL and
- // the frame's code == builtin(Builtins::kFunctionApply):
- // then print "apply from" and advance one frame
-
- Object* maybe_function =
- Memory::Object_at(fp_ + JavaScriptFrameConstants::kFunctionOffset);
- if (maybe_function->IsJSFunction()) {
- JSFunction* function = JSFunction::cast(maybe_function);
- JavaScriptFrame::PrintFunctionAndOffset(function, function->code(), pc(),
- stdout, true);
- }
-
- ExtraICState extra_state = new_target->extra_ic_state();
- const char* modifier = "";
- if (new_target->kind() == Code::KEYED_STORE_IC) {
- modifier = GetTransitionMarkModifier(
- KeyedStoreIC::GetKeyedAccessStoreMode(extra_state));
- }
- PrintF(" (%c->%c%s)", TransitionMarkFromState(old_state),
- TransitionMarkFromState(new_state), modifier);
-#ifdef OBJECT_PRINT
- OFStream os(stdout);
- name->Print(os);
-#else
- name->ShortPrint(stdout);
-#endif
- PrintF("]\n");
- }
-}
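
Assembled from the format strings above, a --trace-ic line looks schematically like

    [KeyedStoreIC in <function+offset> (1->P.GROW)]

where the two state characters come from TransitionMarkFromState above ('0' uninitialized, '.' premonomorphic, '1' monomorphic, '^' prototype failure, 'P' polymorphic, 'N' megamorphic, 'G' generic) and the .GROW/.COW/.IGNORE_OOB modifier is emitted only for keyed stores. The function-and-offset part is a placeholder here; its exact shape comes from JavaScriptFrame::PrintFunctionAndOffset.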
-
-#define TRACE_IC(type, name) TraceIC(type, name)
-#define TRACE_VECTOR_IC(type, name, old_state, new_state) \
- TraceIC(type, name, old_state, new_state)
-
-IC::IC(FrameDepth depth, Isolate* isolate)
- : isolate_(isolate),
- target_set_(false),
- target_maps_set_(false) {
- // To improve the performance of the (much used) IC code, we unfold a few
- // levels of the stack frame iteration code. This yields a ~35% speedup when
- // running DeltaBlue and a ~25% speedup of gbemu with the '--nouse-ic' flag.
- const Address entry =
- Isolate::c_entry_fp(isolate->thread_local_top());
- Address constant_pool = NULL;
- if (FLAG_enable_ool_constant_pool) {
- constant_pool = Memory::Address_at(
- entry + ExitFrameConstants::kConstantPoolOffset);
- }
- Address* pc_address =
- reinterpret_cast<Address*>(entry + ExitFrameConstants::kCallerPCOffset);
- Address fp = Memory::Address_at(entry + ExitFrameConstants::kCallerFPOffset);
- // If there's another JavaScript frame on the stack or a
- // StubFailureTrampoline, we need to look one frame further down the stack to
- // find the frame pointer and the return address stack slot.
- if (depth == EXTRA_CALL_FRAME) {
- if (FLAG_enable_ool_constant_pool) {
- constant_pool = Memory::Address_at(
- fp + StandardFrameConstants::kConstantPoolOffset);
- }
- const int kCallerPCOffset = StandardFrameConstants::kCallerPCOffset;
- pc_address = reinterpret_cast<Address*>(fp + kCallerPCOffset);
- fp = Memory::Address_at(fp + StandardFrameConstants::kCallerFPOffset);
- }
-#ifdef DEBUG
- StackFrameIterator it(isolate);
- for (int i = 0; i < depth + 1; i++) it.Advance();
- StackFrame* frame = it.frame();
- DCHECK(fp == frame->fp() && pc_address == frame->pc_address());
-#endif
- fp_ = fp;
- if (FLAG_enable_ool_constant_pool) {
- raw_constant_pool_ = handle(
- ConstantPoolArray::cast(reinterpret_cast<Object*>(constant_pool)),
- isolate);
- }
- pc_address_ = StackFrame::ResolveReturnAddressLocation(pc_address);
- target_ = handle(raw_target(), isolate);
- state_ = target_->ic_state();
- kind_ = target_->kind();
- extra_ic_state_ = target_->extra_ic_state();
-}
-
-
-SharedFunctionInfo* IC::GetSharedFunctionInfo() const {
- // Compute the JavaScript frame for the frame pointer of this IC
- // structure. We need this to be able to find the function
- // corresponding to the frame.
- StackFrameIterator it(isolate());
- while (it.frame()->fp() != this->fp()) it.Advance();
- JavaScriptFrame* frame = JavaScriptFrame::cast(it.frame());
-  // Find the function on the stack; its shared function info gives access
-  // to both the active code for the function and the original code.
- JSFunction* function = frame->function();
- return function->shared();
-}
-
-
-Code* IC::GetCode() const {
- HandleScope scope(isolate());
- Handle<SharedFunctionInfo> shared(GetSharedFunctionInfo(), isolate());
- Code* code = shared->code();
- return code;
-}
-
-
-Code* IC::GetOriginalCode() const {
- HandleScope scope(isolate());
- Handle<SharedFunctionInfo> shared(GetSharedFunctionInfo(), isolate());
- DCHECK(Debug::HasDebugInfo(shared));
- Code* original_code = Debug::GetDebugInfo(shared)->original_code();
- DCHECK(original_code->IsCode());
- return original_code;
-}
-
-
-static void LookupForRead(LookupIterator* it) {
- for (; it->IsFound(); it->Next()) {
- switch (it->state()) {
- case LookupIterator::NOT_FOUND:
- case LookupIterator::TRANSITION:
- UNREACHABLE();
- case LookupIterator::JSPROXY:
- return;
- case LookupIterator::INTERCEPTOR: {
- // If there is a getter, return; otherwise loop to perform the lookup.
- Handle<JSObject> holder = it->GetHolder<JSObject>();
- if (!holder->GetNamedInterceptor()->getter()->IsUndefined()) {
- return;
- }
- break;
- }
- case LookupIterator::ACCESS_CHECK:
- // PropertyHandlerCompiler::CheckPrototypes() knows how to emit
- // access checks for global proxies.
- if (it->GetHolder<JSObject>()->IsJSGlobalProxy() &&
- it->HasAccess(v8::ACCESS_GET)) {
- break;
- }
- return;
- case LookupIterator::PROPERTY:
- if (it->HasProperty()) return; // Yay!
- break;
- }
- }
-}
-
-
-bool IC::TryRemoveInvalidPrototypeDependentStub(Handle<Object> receiver,
- Handle<String> name) {
- if (!IsNameCompatibleWithPrototypeFailure(name)) return false;
- Handle<Map> receiver_map = TypeToMap(*receiver_type(), isolate());
- maybe_handler_ = target()->FindHandlerForMap(*receiver_map);
-
- // The current map wasn't handled yet. There's no reason to stay monomorphic,
- // *unless* we're moving from a deprecated map to its replacement, or
- // to a more general elements kind.
- // TODO(verwaest): Check if the current map is actually what the old map
- // would transition to.
- if (maybe_handler_.is_null()) {
- if (!receiver_map->IsJSObjectMap()) return false;
- Map* first_map = FirstTargetMap();
- if (first_map == NULL) return false;
- Handle<Map> old_map(first_map);
- if (old_map->is_deprecated()) return true;
- if (IsMoreGeneralElementsKindTransition(old_map->elements_kind(),
- receiver_map->elements_kind())) {
- return true;
- }
- return false;
- }
-
- CacheHolderFlag flag;
- Handle<Map> ic_holder_map(
- GetICCacheHolder(*receiver_type(), isolate(), &flag));
-
- DCHECK(flag != kCacheOnReceiver || receiver->IsJSObject());
- DCHECK(flag != kCacheOnPrototype || !receiver->IsJSReceiver());
- DCHECK(flag != kCacheOnPrototypeReceiverIsDictionary);
-
- if (state() == MONOMORPHIC) {
- int index = ic_holder_map->IndexInCodeCache(*name, *target());
- if (index >= 0) {
- ic_holder_map->RemoveFromCodeCache(*name, *target(), index);
- }
- }
-
- if (receiver->IsGlobalObject()) {
- Handle<GlobalObject> global = Handle<GlobalObject>::cast(receiver);
- LookupIterator it(global, name, LookupIterator::CHECK_PROPERTY);
- if (!it.IsFound() || !it.HasProperty()) return false;
- Handle<PropertyCell> cell = it.GetPropertyCell();
- return cell->type()->IsConstant();
- }
-
- return true;
-}
-
-
-bool IC::IsNameCompatibleWithPrototypeFailure(Handle<Object> name) {
- if (target()->is_keyed_stub()) {
- // Determine whether the failure is due to a name failure.
- if (!name->IsName()) return false;
- Name* stub_name = target()->FindFirstName();
- if (*name != stub_name) return false;
- }
-
- return true;
-}
-
-
-void IC::UpdateState(Handle<Object> receiver, Handle<Object> name) {
- update_receiver_type(receiver);
- if (!name->IsString()) return;
- if (state() != MONOMORPHIC && state() != POLYMORPHIC) return;
- if (receiver->IsUndefined() || receiver->IsNull()) return;
-
- // Remove the target from the code cache if it became invalid
- // because of changes in the prototype chain to avoid hitting it
- // again.
- if (TryRemoveInvalidPrototypeDependentStub(receiver,
- Handle<String>::cast(name))) {
- MarkPrototypeFailure(name);
- return;
- }
-
- // The builtins object is special. It only changes when JavaScript
- // builtins are loaded lazily. It is important to keep inline
- // caches for the builtins object monomorphic. Therefore, if we get
- // an inline cache miss for the builtins object after lazily loading
- // JavaScript builtins, we return uninitialized as the state to
- // force the inline cache back to monomorphic state.
- if (receiver->IsJSBuiltinsObject()) state_ = UNINITIALIZED;
-}
-
-
-MaybeHandle<Object> IC::TypeError(const char* type,
- Handle<Object> object,
- Handle<Object> key) {
- HandleScope scope(isolate());
- Handle<Object> args[2] = { key, object };
- Handle<Object> error = isolate()->factory()->NewTypeError(
- type, HandleVector(args, 2));
- return isolate()->Throw<Object>(error);
-}
-
-
-MaybeHandle<Object> IC::ReferenceError(const char* type, Handle<Name> name) {
- HandleScope scope(isolate());
- Handle<Object> error = isolate()->factory()->NewReferenceError(
- type, HandleVector(&name, 1));
- return isolate()->Throw<Object>(error);
-}
-
-
-static void ComputeTypeInfoCountDelta(IC::State old_state, IC::State new_state,
- int* polymorphic_delta,
- int* generic_delta) {
- switch (old_state) {
- case UNINITIALIZED:
- case PREMONOMORPHIC:
- if (new_state == UNINITIALIZED || new_state == PREMONOMORPHIC) break;
- if (new_state == MONOMORPHIC || new_state == POLYMORPHIC) {
- *polymorphic_delta = 1;
- } else if (new_state == MEGAMORPHIC || new_state == GENERIC) {
- *generic_delta = 1;
- }
- break;
- case MONOMORPHIC:
- case POLYMORPHIC:
- if (new_state == MONOMORPHIC || new_state == POLYMORPHIC) break;
- *polymorphic_delta = -1;
- if (new_state == MEGAMORPHIC || new_state == GENERIC) {
- *generic_delta = 1;
- }
- break;
- case MEGAMORPHIC:
- case GENERIC:
- if (new_state == MEGAMORPHIC || new_state == GENERIC) break;
- *generic_delta = -1;
- if (new_state == MONOMORPHIC || new_state == POLYMORPHIC) {
- *polymorphic_delta = 1;
- }
- break;
- case PROTOTYPE_FAILURE:
- case DEBUG_STUB:
- case DEFAULT:
- UNREACHABLE();
- }
-}
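
The switch above is equivalent to tracking membership in three buckets: {UNINITIALIZED, PREMONOMORPHIC}, {MONOMORPHIC, POLYMORPHIC} and {MEGAMORPHIC, GENERIC}. A compact standalone restatement (hypothetical names, for intuition; it reproduces the switch, e.g. MONOMORPHIC -> MEGAMORPHIC yields polymorphic_delta == -1 and generic_delta == +1):

    // The deltas are simply "left one bucket, entered another".
    enum class Bucket { None, Typed, Generic };

    void ComputeDelta(Bucket from, Bucket to, int* typed_delta,
                      int* generic_delta) {
      if (from == to) return;
      if (from == Bucket::Typed) *typed_delta -= 1;
      if (from == Bucket::Generic) *generic_delta -= 1;
      if (to == Bucket::Typed) *typed_delta += 1;
      if (to == Bucket::Generic) *generic_delta += 1;
    }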
-
-
-void IC::OnTypeFeedbackChanged(Isolate* isolate, Address address,
- State old_state, State new_state,
- bool target_remains_ic_stub) {
- Code* host = isolate->
- inner_pointer_to_code_cache()->GetCacheEntry(address)->code;
- if (host->kind() != Code::FUNCTION) return;
-
- if (FLAG_type_info_threshold > 0 && target_remains_ic_stub &&
- // Not all Code objects have TypeFeedbackInfo.
- host->type_feedback_info()->IsTypeFeedbackInfo()) {
- int polymorphic_delta = 0; // "Polymorphic" here includes monomorphic.
- int generic_delta = 0; // "Generic" here includes megamorphic.
- ComputeTypeInfoCountDelta(old_state, new_state, &polymorphic_delta,
- &generic_delta);
- TypeFeedbackInfo* info = TypeFeedbackInfo::cast(host->type_feedback_info());
- info->change_ic_with_type_info_count(polymorphic_delta);
- info->change_ic_generic_count(generic_delta);
- }
- if (host->type_feedback_info()->IsTypeFeedbackInfo()) {
- TypeFeedbackInfo* info =
- TypeFeedbackInfo::cast(host->type_feedback_info());
- info->change_own_type_change_checksum();
- }
- host->set_profiler_ticks(0);
- isolate->runtime_profiler()->NotifyICChanged();
- // TODO(2029): When an optimized function is patched, it would
- // be nice to propagate the corresponding type information to its
- // unoptimized version for the benefit of later inlining.
-}
-
-
-void IC::PostPatching(Address address, Code* target, Code* old_target) {
- // Type vector based ICs update these statistics at a different time because
- // they don't always patch on state change.
- if (target->kind() == Code::CALL_IC) return;
-
- Isolate* isolate = target->GetHeap()->isolate();
- State old_state = UNINITIALIZED;
- State new_state = UNINITIALIZED;
- bool target_remains_ic_stub = false;
- if (old_target->is_inline_cache_stub() && target->is_inline_cache_stub()) {
- old_state = old_target->ic_state();
- new_state = target->ic_state();
- target_remains_ic_stub = true;
- }
-
- OnTypeFeedbackChanged(isolate, address, old_state, new_state,
- target_remains_ic_stub);
-}
-
-
-void IC::RegisterWeakMapDependency(Handle<Code> stub) {
- if (FLAG_collect_maps && FLAG_weak_embedded_maps_in_ic &&
- stub->CanBeWeakStub()) {
- DCHECK(!stub->is_weak_stub());
- MapHandleList maps;
- stub->FindAllMaps(&maps);
- if (maps.length() == 1 && stub->IsWeakObjectInIC(*maps.at(0))) {
- Map::AddDependentIC(maps.at(0), stub);
- stub->mark_as_weak_stub();
- if (FLAG_enable_ool_constant_pool) {
- stub->constant_pool()->set_weak_object_state(
- ConstantPoolArray::WEAK_OBJECTS_IN_IC);
- }
- }
- }
-}
-
-
-void IC::InvalidateMaps(Code* stub) {
- DCHECK(stub->is_weak_stub());
- stub->mark_as_invalidated_weak_stub();
- Isolate* isolate = stub->GetIsolate();
- Heap* heap = isolate->heap();
- Object* undefined = heap->undefined_value();
- int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
- for (RelocIterator it(stub, mode_mask); !it.done(); it.next()) {
- RelocInfo::Mode mode = it.rinfo()->rmode();
- if (mode == RelocInfo::EMBEDDED_OBJECT &&
- it.rinfo()->target_object()->IsMap()) {
- it.rinfo()->set_target_object(undefined, SKIP_WRITE_BARRIER);
- }
- }
- CpuFeatures::FlushICache(stub->instruction_start(), stub->instruction_size());
-}
-
-
-void IC::Clear(Isolate* isolate, Address address,
- ConstantPoolArray* constant_pool) {
- Code* target = GetTargetAtAddress(address, constant_pool);
-
- // Don't clear debug break inline cache as it will remove the break point.
- if (target->is_debug_stub()) return;
-
- switch (target->kind()) {
- case Code::LOAD_IC:
- return LoadIC::Clear(isolate, address, target, constant_pool);
- case Code::KEYED_LOAD_IC:
- return KeyedLoadIC::Clear(isolate, address, target, constant_pool);
- case Code::STORE_IC:
- return StoreIC::Clear(isolate, address, target, constant_pool);
- case Code::KEYED_STORE_IC:
- return KeyedStoreIC::Clear(isolate, address, target, constant_pool);
- case Code::CALL_IC:
- return CallIC::Clear(isolate, address, target, constant_pool);
- case Code::COMPARE_IC:
- return CompareIC::Clear(isolate, address, target, constant_pool);
- case Code::COMPARE_NIL_IC:
- return CompareNilIC::Clear(address, target, constant_pool);
- case Code::BINARY_OP_IC:
- case Code::TO_BOOLEAN_IC:
- // Clearing these is tricky and does not
- // make any performance difference.
- return;
- default: UNREACHABLE();
- }
-}
-
-
-void KeyedLoadIC::Clear(Isolate* isolate,
- Address address,
- Code* target,
- ConstantPoolArray* constant_pool) {
- if (IsCleared(target)) return;
- // Make sure to also clear the map used in inline fast cases. If we
- // do not clear these maps, cached code can keep objects alive
- // through the embedded maps.
- SetTargetAtAddress(address, *pre_monomorphic_stub(isolate), constant_pool);
-}
-
-
-void CallIC::Clear(Isolate* isolate,
- Address address,
- Code* target,
- ConstantPoolArray* constant_pool) {
- // Currently, CallIC doesn't have state changes.
-}
-
-
-void LoadIC::Clear(Isolate* isolate,
- Address address,
- Code* target,
- ConstantPoolArray* constant_pool) {
- if (IsCleared(target)) return;
- Code* code = PropertyICCompiler::FindPreMonomorphic(isolate, Code::LOAD_IC,
- target->extra_ic_state());
- SetTargetAtAddress(address, code, constant_pool);
-}
-
-
-void StoreIC::Clear(Isolate* isolate,
- Address address,
- Code* target,
- ConstantPoolArray* constant_pool) {
- if (IsCleared(target)) return;
- Code* code = PropertyICCompiler::FindPreMonomorphic(isolate, Code::STORE_IC,
- target->extra_ic_state());
- SetTargetAtAddress(address, code, constant_pool);
-}
-
-
-void KeyedStoreIC::Clear(Isolate* isolate,
- Address address,
- Code* target,
- ConstantPoolArray* constant_pool) {
- if (IsCleared(target)) return;
- SetTargetAtAddress(address,
- *pre_monomorphic_stub(
- isolate, StoreIC::GetStrictMode(target->extra_ic_state())),
- constant_pool);
-}
-
-
-void CompareIC::Clear(Isolate* isolate,
- Address address,
- Code* target,
- ConstantPoolArray* constant_pool) {
- DCHECK(CodeStub::GetMajorKey(target) == CodeStub::CompareIC);
- CompareIC::State handler_state;
- Token::Value op;
- ICCompareStub::DecodeKey(target->stub_key(), NULL, NULL, &handler_state, &op);
- // Only clear CompareICs that can retain objects.
- if (handler_state != KNOWN_OBJECT) return;
- SetTargetAtAddress(address, GetRawUninitialized(isolate, op), constant_pool);
- PatchInlinedSmiCode(address, DISABLE_INLINED_SMI_CHECK);
-}
-
-
-// static
-Handle<Code> KeyedLoadIC::generic_stub(Isolate* isolate) {
- if (FLAG_compiled_keyed_generic_loads) {
- return KeyedLoadGenericStub(isolate).GetCode();
- } else {
- return isolate->builtins()->KeyedLoadIC_Generic();
- }
-}
-
-
-static bool MigrateDeprecated(Handle<Object> object) {
- if (!object->IsJSObject()) return false;
- Handle<JSObject> receiver = Handle<JSObject>::cast(object);
- if (!receiver->map()->is_deprecated()) return false;
- JSObject::MigrateInstance(Handle<JSObject>::cast(object));
- return true;
-}
-
-
-MaybeHandle<Object> LoadIC::Load(Handle<Object> object, Handle<Name> name) {
-  // If the object is undefined or null, it's illegal to try to get any
-  // of its properties; throw a TypeError in that case.
- if (object->IsUndefined() || object->IsNull()) {
- return TypeError("non_object_property_load", object, name);
- }
-
- // Check if the name is trivially convertible to an index and get
- // the element or char if so.
- uint32_t index;
- if (kind() == Code::KEYED_LOAD_IC && name->AsArrayIndex(&index)) {
- // Rewrite to the generic keyed load stub.
- if (FLAG_use_ic) {
- set_target(*KeyedLoadIC::generic_stub(isolate()));
- TRACE_IC("LoadIC", name);
- TRACE_GENERIC_IC(isolate(), "LoadIC", "name as array index");
- }
- Handle<Object> result;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate(),
- result,
- Runtime::GetElementOrCharAt(isolate(), object, index),
- Object);
- return result;
- }
-
- bool use_ic = MigrateDeprecated(object) ? false : FLAG_use_ic;
-
- // Named lookup in the object.
- LookupIterator it(object, name);
- LookupForRead(&it);
-
- if (it.IsFound() || !IsUndeclaredGlobal(object)) {
- // Update inline cache and stub cache.
- if (use_ic) UpdateCaches(&it);
-
- // Get the property.
- Handle<Object> result;
- ASSIGN_RETURN_ON_EXCEPTION(isolate(), result, Object::GetProperty(&it),
- Object);
- if (it.IsFound()) {
- return result;
- } else if (!IsUndeclaredGlobal(object)) {
- LOG(isolate(), SuspectReadEvent(*name, *object));
- return result;
- }
- }
- return ReferenceError("not_defined", name);
-}
-
-
-static bool AddOneReceiverMapIfMissing(MapHandleList* receiver_maps,
- Handle<Map> new_receiver_map) {
- DCHECK(!new_receiver_map.is_null());
- for (int current = 0; current < receiver_maps->length(); ++current) {
- if (!receiver_maps->at(current).is_null() &&
- receiver_maps->at(current).is_identical_to(new_receiver_map)) {
- return false;
- }
- }
- receiver_maps->Add(new_receiver_map);
- return true;
-}
-
-
-bool IC::UpdatePolymorphicIC(Handle<Name> name, Handle<Code> code) {
- if (!code->is_handler()) return false;
- if (target()->is_keyed_stub() && state() != PROTOTYPE_FAILURE) return false;
- Handle<HeapType> type = receiver_type();
- TypeHandleList types;
- CodeHandleList handlers;
-
- TargetTypes(&types);
- int number_of_types = types.length();
- int deprecated_types = 0;
- int handler_to_overwrite = -1;
-
- for (int i = 0; i < number_of_types; i++) {
- Handle<HeapType> current_type = types.at(i);
- if (current_type->IsClass() &&
- current_type->AsClass()->Map()->is_deprecated()) {
- // Filter out deprecated maps to ensure their instances get migrated.
- ++deprecated_types;
- } else if (type->NowIs(current_type)) {
- // If the receiver type is already in the polymorphic IC, this indicates
-      // there was a prototype chain failure. In that case, just overwrite the
- // handler.
- handler_to_overwrite = i;
- } else if (handler_to_overwrite == -1 &&
- current_type->IsClass() &&
- type->IsClass() &&
- IsTransitionOfMonomorphicTarget(*current_type->AsClass()->Map(),
- *type->AsClass()->Map())) {
- handler_to_overwrite = i;
- }
- }
-
- int number_of_valid_types =
- number_of_types - deprecated_types - (handler_to_overwrite != -1);
-
- if (number_of_valid_types >= 4) return false;
- if (number_of_types == 0) return false;
- if (!target()->FindHandlers(&handlers, types.length())) return false;
-
- number_of_valid_types++;
- if (number_of_valid_types > 1 && target()->is_keyed_stub()) return false;
- Handle<Code> ic;
- if (number_of_valid_types == 1) {
- ic = PropertyICCompiler::ComputeMonomorphic(kind(), name, type, code,
- extra_ic_state());
- } else {
- if (handler_to_overwrite >= 0) {
- handlers.Set(handler_to_overwrite, code);
- if (!type->NowIs(types.at(handler_to_overwrite))) {
- types.Set(handler_to_overwrite, type);
- }
- } else {
- types.Add(type);
- handlers.Add(code);
- }
- ic = PropertyICCompiler::ComputePolymorphic(kind(), &types, &handlers,
- number_of_valid_types, name,
- extra_ic_state());
- }
- set_target(*ic);
- return true;
-}
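
Restated, the growth rule above is: overwrite the handler for an already-known type (the prototype-failure path), otherwise append, and give up once four valid types are present so the caller falls through to the megamorphic cache. A standalone sketch with a hypothetical container (the real IC embeds types and handlers in generated code, not a vector):

    #include <utility>
    #include <vector>

    template <typename Type, typename Handler>
    bool TryUpdatePolymorphic(std::vector<std::pair<Type, Handler>>* entries,
                              const Type& type, const Handler& handler) {
      for (auto& entry : *entries) {
        if (entry.first == type) {  // same type: overwrite in place
          entry.second = handler;
          return true;
        }
      }
      if (entries->size() >= 4) return false;  // number_of_valid_types >= 4
      entries->emplace_back(type, handler);
      return true;
    }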
-
-
-Handle<HeapType> IC::CurrentTypeOf(Handle<Object> object, Isolate* isolate) {
- return object->IsJSGlobalObject()
- ? HeapType::Constant(Handle<JSGlobalObject>::cast(object), isolate)
- : HeapType::NowOf(object, isolate);
-}
-
-
-Handle<Map> IC::TypeToMap(HeapType* type, Isolate* isolate) {
- if (type->Is(HeapType::Number()))
- return isolate->factory()->heap_number_map();
- if (type->Is(HeapType::Boolean())) return isolate->factory()->boolean_map();
- if (type->IsConstant()) {
- return handle(
- Handle<JSGlobalObject>::cast(type->AsConstant()->Value())->map());
- }
- DCHECK(type->IsClass());
- return type->AsClass()->Map();
-}
-
-
-template <class T>
-typename T::TypeHandle IC::MapToType(Handle<Map> map,
- typename T::Region* region) {
- if (map->instance_type() == HEAP_NUMBER_TYPE) {
- return T::Number(region);
- } else if (map->instance_type() == ODDBALL_TYPE) {
- // The only oddballs that can be recorded in ICs are booleans.
- return T::Boolean(region);
- } else {
- return T::Class(map, region);
- }
-}
-
-
-template
-Type* IC::MapToType<Type>(Handle<Map> map, Zone* zone);
-
-
-template
-Handle<HeapType> IC::MapToType<HeapType>(Handle<Map> map, Isolate* region);
-
-
-void IC::UpdateMonomorphicIC(Handle<Code> handler, Handle<Name> name) {
- DCHECK(handler->is_handler());
- Handle<Code> ic = PropertyICCompiler::ComputeMonomorphic(
- kind(), name, receiver_type(), handler, extra_ic_state());
- set_target(*ic);
-}
-
-
-void IC::CopyICToMegamorphicCache(Handle<Name> name) {
- TypeHandleList types;
- CodeHandleList handlers;
- TargetTypes(&types);
- if (!target()->FindHandlers(&handlers, types.length())) return;
- for (int i = 0; i < types.length(); i++) {
- UpdateMegamorphicCache(*types.at(i), *name, *handlers.at(i));
- }
-}
-
-
-bool IC::IsTransitionOfMonomorphicTarget(Map* source_map, Map* target_map) {
- if (source_map == NULL) return true;
- if (target_map == NULL) return false;
- ElementsKind target_elements_kind = target_map->elements_kind();
- bool more_general_transition =
- IsMoreGeneralElementsKindTransition(
- source_map->elements_kind(), target_elements_kind);
- Map* transitioned_map = more_general_transition
- ? source_map->LookupElementsTransitionMap(target_elements_kind)
- : NULL;
-
- return transitioned_map == target_map;
-}
-
-
-void IC::PatchCache(Handle<Name> name, Handle<Code> code) {
- switch (state()) {
- case UNINITIALIZED:
- case PREMONOMORPHIC:
- UpdateMonomorphicIC(code, name);
- break;
- case PROTOTYPE_FAILURE:
- case MONOMORPHIC:
- case POLYMORPHIC:
- if (!target()->is_keyed_stub() || state() == PROTOTYPE_FAILURE) {
- if (UpdatePolymorphicIC(name, code)) break;
- CopyICToMegamorphicCache(name);
- }
- set_target(*megamorphic_stub());
- // Fall through.
- case MEGAMORPHIC:
- UpdateMegamorphicCache(*receiver_type(), *name, *code);
- break;
- case DEBUG_STUB:
- break;
- case DEFAULT:
- case GENERIC:
- UNREACHABLE();
- break;
- }
-}
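
PatchCache is the IC state machine in one place. Schematically, ignoring DEBUG_STUB and the keyed-stub restrictions, the transitions are UNINITIALIZED/PREMONOMORPHIC -> MONOMORPHIC -> POLYMORPHIC -> MEGAMORPHIC, with PROTOTYPE_FAILURE re-entering the polymorphic update. A compressed standalone sketch (hypothetical names; a successful polymorphic update can also remain monomorphic when only one type stays valid):

    enum class State {
      Uninitialized, Premonomorphic, Monomorphic, Polymorphic, Megamorphic
    };

    State NextState(State state, bool polymorphic_update_succeeded) {
      switch (state) {
        case State::Uninitialized:
        case State::Premonomorphic:
          return State::Monomorphic;
        case State::Monomorphic:
        case State::Polymorphic:
          // Mirrors UpdatePolymorphicIC(): stay polymorphic while the type
          // list fits, otherwise spill into the megamorphic stub cache.
          return polymorphic_update_succeeded ? State::Polymorphic
                                              : State::Megamorphic;
        case State::Megamorphic:
          return State::Megamorphic;  // only the stub cache is updated
      }
      return state;  // unreachable
    }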
-
-
-Handle<Code> LoadIC::initialize_stub(Isolate* isolate,
- ExtraICState extra_state) {
- return PropertyICCompiler::ComputeLoad(isolate, UNINITIALIZED, extra_state);
-}
-
-
-Handle<Code> LoadIC::megamorphic_stub() {
- if (kind() == Code::LOAD_IC) {
- return PropertyICCompiler::ComputeLoad(isolate(), MEGAMORPHIC,
- extra_ic_state());
- } else {
- DCHECK_EQ(Code::KEYED_LOAD_IC, kind());
- return KeyedLoadIC::generic_stub(isolate());
- }
-}
-
-
-Handle<Code> LoadIC::pre_monomorphic_stub(Isolate* isolate,
- ExtraICState extra_state) {
- return PropertyICCompiler::ComputeLoad(isolate, PREMONOMORPHIC, extra_state);
-}
-
-
-Handle<Code> KeyedLoadIC::pre_monomorphic_stub(Isolate* isolate) {
- return isolate->builtins()->KeyedLoadIC_PreMonomorphic();
-}
-
-
-Handle<Code> LoadIC::pre_monomorphic_stub() const {
- if (kind() == Code::LOAD_IC) {
- return LoadIC::pre_monomorphic_stub(isolate(), extra_ic_state());
- } else {
- DCHECK_EQ(Code::KEYED_LOAD_IC, kind());
- return KeyedLoadIC::pre_monomorphic_stub(isolate());
- }
-}
-
-
-Handle<Code> LoadIC::SimpleFieldLoad(FieldIndex index) {
- LoadFieldStub stub(isolate(), index);
- return stub.GetCode();
-}
-
-
-void LoadIC::UpdateCaches(LookupIterator* lookup) {
- if (state() == UNINITIALIZED) {
- // This is the first time we execute this inline cache. Set the target to
-    // the premonomorphic stub to delay setting the monomorphic state.
- set_target(*pre_monomorphic_stub());
- TRACE_IC("LoadIC", lookup->name());
- return;
- }
-
- Handle<Code> code;
- if (lookup->state() == LookupIterator::JSPROXY ||
- lookup->state() == LookupIterator::ACCESS_CHECK) {
- code = slow_stub();
- } else if (!lookup->IsFound()) {
- if (kind() == Code::LOAD_IC) {
- code = NamedLoadHandlerCompiler::ComputeLoadNonexistent(lookup->name(),
- receiver_type());
- // TODO(jkummerow/verwaest): Introduce a builtin that handles this case.
- if (code.is_null()) code = slow_stub();
- } else {
- code = slow_stub();
- }
- } else {
- code = ComputeHandler(lookup);
- }
-
- PatchCache(lookup->name(), code);
- TRACE_IC("LoadIC", lookup->name());
-}
-
-
-void IC::UpdateMegamorphicCache(HeapType* type, Name* name, Code* code) {
- if (kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC) return;
- Map* map = *TypeToMap(type, isolate());
- isolate()->stub_cache()->Set(name, map, code);
-}
-
-
-Handle<Code> IC::ComputeHandler(LookupIterator* lookup, Handle<Object> value) {
- bool receiver_is_holder =
- lookup->GetReceiver().is_identical_to(lookup->GetHolder<JSObject>());
- CacheHolderFlag flag;
- Handle<Map> stub_holder_map = IC::GetHandlerCacheHolder(
- *receiver_type(), receiver_is_holder, isolate(), &flag);
-
- Handle<Code> code = PropertyHandlerCompiler::Find(
- lookup->name(), stub_holder_map, kind(), flag,
- lookup->holder_map()->is_dictionary_map() ? Code::NORMAL : Code::FAST);
- // Use the cached value if it exists, and if it is different from the
- // handler that just missed.
- if (!code.is_null()) {
- if (!maybe_handler_.is_null() &&
- !maybe_handler_.ToHandleChecked().is_identical_to(code)) {
- return code;
- }
- if (maybe_handler_.is_null()) {
- // maybe_handler_ is only populated for MONOMORPHIC and POLYMORPHIC ICs.
-      // In the MEGAMORPHIC case, check if the handler in the megamorphic stub
- // cache (which just missed) is different from the cached handler.
- if (state() == MEGAMORPHIC && lookup->GetReceiver()->IsHeapObject()) {
- Map* map = Handle<HeapObject>::cast(lookup->GetReceiver())->map();
- Code* megamorphic_cached_code =
- isolate()->stub_cache()->Get(*lookup->name(), map, code->flags());
- if (megamorphic_cached_code != *code) return code;
- } else {
- return code;
- }
- }
- }
-
- code = CompileHandler(lookup, value, flag);
- DCHECK(code->is_handler());
-
- if (code->type() != Code::NORMAL) {
- Map::UpdateCodeCache(stub_holder_map, lookup->name(), code);
- }
-
- return code;
-}
-
-
-Handle<Code> LoadIC::CompileHandler(LookupIterator* lookup,
- Handle<Object> unused,
- CacheHolderFlag cache_holder) {
- Handle<Object> receiver = lookup->GetReceiver();
- if (receiver->IsString() &&
- Name::Equals(isolate()->factory()->length_string(), lookup->name())) {
- FieldIndex index = FieldIndex::ForInObjectOffset(String::kLengthOffset);
- return SimpleFieldLoad(index);
- }
-
- if (receiver->IsStringWrapper() &&
- Name::Equals(isolate()->factory()->length_string(), lookup->name())) {
- StringLengthStub string_length_stub(isolate());
- return string_length_stub.GetCode();
- }
-
- // Use specialized code for getting prototype of functions.
- if (receiver->IsJSFunction() &&
- Name::Equals(isolate()->factory()->prototype_string(), lookup->name()) &&
- Handle<JSFunction>::cast(receiver)->should_have_prototype() &&
- !Handle<JSFunction>::cast(receiver)
- ->map()
- ->has_non_instance_prototype()) {
- Handle<Code> stub;
- FunctionPrototypeStub function_prototype_stub(isolate());
- return function_prototype_stub.GetCode();
- }
-
- Handle<HeapType> type = receiver_type();
- Handle<JSObject> holder = lookup->GetHolder<JSObject>();
- bool receiver_is_holder = receiver.is_identical_to(holder);
- // -------------- Interceptors --------------
- if (lookup->state() == LookupIterator::INTERCEPTOR) {
- DCHECK(!holder->GetNamedInterceptor()->getter()->IsUndefined());
- NamedLoadHandlerCompiler compiler(isolate(), receiver_type(), holder,
- cache_holder);
- // Perform a lookup behind the interceptor. Copy the LookupIterator since
- // the original iterator will be used to fetch the value.
- LookupIterator it(lookup);
- it.Next();
- LookupForRead(&it);
- return compiler.CompileLoadInterceptor(&it);
- }
-
- // -------------- Accessors --------------
- DCHECK(lookup->state() == LookupIterator::PROPERTY);
- if (lookup->property_kind() == LookupIterator::ACCESSOR) {
- // Use simple field loads for some well-known callback properties.
- if (receiver_is_holder) {
- DCHECK(receiver->IsJSObject());
- Handle<JSObject> js_receiver = Handle<JSObject>::cast(receiver);
- int object_offset;
- if (Accessors::IsJSObjectFieldAccessor<HeapType>(type, lookup->name(),
- &object_offset)) {
- FieldIndex index =
- FieldIndex::ForInObjectOffset(object_offset, js_receiver->map());
- return SimpleFieldLoad(index);
- }
- }
-
- Handle<Object> accessors = lookup->GetAccessors();
- if (accessors->IsExecutableAccessorInfo()) {
- Handle<ExecutableAccessorInfo> info =
- Handle<ExecutableAccessorInfo>::cast(accessors);
- if (v8::ToCData<Address>(info->getter()) == 0) return slow_stub();
- if (!ExecutableAccessorInfo::IsCompatibleReceiverType(isolate(), info,
- type)) {
- return slow_stub();
- }
- if (!holder->HasFastProperties()) return slow_stub();
- NamedLoadHandlerCompiler compiler(isolate(), receiver_type(), holder,
- cache_holder);
- return compiler.CompileLoadCallback(lookup->name(), info);
- }
- if (accessors->IsAccessorPair()) {
- Handle<Object> getter(Handle<AccessorPair>::cast(accessors)->getter(),
- isolate());
- if (!getter->IsJSFunction()) return slow_stub();
- if (!holder->HasFastProperties()) return slow_stub();
- Handle<JSFunction> function = Handle<JSFunction>::cast(getter);
- if (!receiver->IsJSObject() && !function->IsBuiltin() &&
- function->shared()->strict_mode() == SLOPPY) {
- // Calling sloppy non-builtins with a value as the receiver
- // requires boxing.
- return slow_stub();
- }
- CallOptimization call_optimization(function);
- NamedLoadHandlerCompiler compiler(isolate(), receiver_type(), holder,
- cache_holder);
- if (call_optimization.is_simple_api_call() &&
- call_optimization.IsCompatibleReceiver(receiver, holder)) {
- return compiler.CompileLoadCallback(lookup->name(), call_optimization);
- }
- return compiler.CompileLoadViaGetter(lookup->name(), function);
- }
- // TODO(dcarney): Handle correctly.
- DCHECK(accessors->IsDeclaredAccessorInfo());
- return slow_stub();
- }
-
- // -------------- Dictionary properties --------------
- DCHECK(lookup->property_kind() == LookupIterator::DATA);
- if (lookup->property_encoding() == LookupIterator::DICTIONARY) {
- if (kind() != Code::LOAD_IC) return slow_stub();
- if (holder->IsGlobalObject()) {
- NamedLoadHandlerCompiler compiler(isolate(), receiver_type(), holder,
- cache_holder);
- Handle<PropertyCell> cell = lookup->GetPropertyCell();
- Handle<Code> code = compiler.CompileLoadGlobal(cell, lookup->name(),
- lookup->IsConfigurable());
- // TODO(verwaest): Move caching of these NORMAL stubs outside as well.
- CacheHolderFlag flag;
- Handle<Map> stub_holder_map =
- GetHandlerCacheHolder(*type, receiver_is_holder, isolate(), &flag);
- Map::UpdateCodeCache(stub_holder_map, lookup->name(), code);
- return code;
- }
- // There is only one shared stub for loading normalized
- // properties. It does not traverse the prototype chain, so the
- // property must be found in the object for the stub to be
- // applicable.
- if (!receiver_is_holder) return slow_stub();
- return isolate()->builtins()->LoadIC_Normal();
- }
-
- // -------------- Fields --------------
- DCHECK(lookup->property_encoding() == LookupIterator::DESCRIPTOR);
- if (lookup->property_details().type() == FIELD) {
- FieldIndex field = lookup->GetFieldIndex();
- if (receiver_is_holder) {
- return SimpleFieldLoad(field);
- }
- NamedLoadHandlerCompiler compiler(isolate(), receiver_type(), holder,
- cache_holder);
- return compiler.CompileLoadField(lookup->name(), field);
- }
-
- // -------------- Constant properties --------------
- DCHECK(lookup->property_details().type() == CONSTANT);
- if (receiver_is_holder) {
- LoadConstantStub stub(isolate(), lookup->GetConstantIndex());
- return stub.GetCode();
- }
- NamedLoadHandlerCompiler compiler(isolate(), receiver_type(), holder,
- cache_holder);
- return compiler.CompileLoadConstant(lookup->name(),
- lookup->GetConstantIndex());
-}
-
-
-static Handle<Object> TryConvertKey(Handle<Object> key, Isolate* isolate) {
- // This helper implements a few common fast cases for converting
- // non-smi keys of keyed loads/stores to a smi or a string.
- if (key->IsHeapNumber()) {
- double value = Handle<HeapNumber>::cast(key)->value();
- if (std::isnan(value)) {
- key = isolate->factory()->nan_string();
- } else {
- int int_value = FastD2I(value);
- if (value == int_value && Smi::IsValid(int_value)) {
- key = Handle<Smi>(Smi::FromInt(int_value), isolate);
- }
- }
- } else if (key->IsUndefined()) {
- key = isolate->factory()->undefined_string();
- }
- return key;
-}
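
The smi fast case above hinges on one predicate: the double key is integral and fits in the smi range. A self-contained restatement, assuming a 31-bit smi payload (the real range depends on the platform's smi representation):

#include <cstdint>

// Assumed 31-bit smi payload range for this sketch.
constexpr double kSmiMin = -1073741824.0;  // -2^30
constexpr double kSmiMax = 1073741823.0;   //  2^30 - 1

// Returns true and sets *out when |value| is an integral double inside the
// smi range. NaN fails the range comparison, so callers can map it to the
// string "NaN", mirroring the nan_string case above.
bool TryNumberToSmi(double value, int32_t* out) {
  if (!(value >= kSmiMin && value <= kSmiMax)) return false;
  int32_t as_int = static_cast<int32_t>(value);
  if (static_cast<double>(as_int) != value) return false;  // not integral
  *out = as_int;
  return true;
}
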
-
-
-Handle<Code> KeyedLoadIC::LoadElementStub(Handle<JSObject> receiver) {
- // Don't handle megamorphic property accesses for INTERCEPTORS or CALLBACKS
- // via megamorphic stubs, since they don't have a map in their relocation info
- // and so the stubs can't be harvested for the object needed for a map check.
- if (target()->type() != Code::NORMAL) {
- TRACE_GENERIC_IC(isolate(), "KeyedIC", "non-NORMAL target type");
- return generic_stub();
- }
-
- Handle<Map> receiver_map(receiver->map(), isolate());
- MapHandleList target_receiver_maps;
- if (target().is_identical_to(string_stub())) {
- target_receiver_maps.Add(isolate()->factory()->string_map());
- } else {
- TargetMaps(&target_receiver_maps);
- }
- if (target_receiver_maps.length() == 0) {
- return PropertyICCompiler::ComputeKeyedLoadMonomorphic(receiver_map);
- }
-
- // The first time a receiver is seen that is a transitioned version of the
- // previous monomorphic receiver type, assume the new ElementsKind is the
- // monomorphic type. This benefits global arrays that only transition
- // once, and all call sites accessing them are faster if they remain
- // monomorphic. If this optimistic assumption is not true, the IC will
- // miss again and it will become polymorphic and support both the
- // untransitioned and transitioned maps.
- if (state() == MONOMORPHIC &&
- IsMoreGeneralElementsKindTransition(
- target_receiver_maps.at(0)->elements_kind(),
- receiver->GetElementsKind())) {
- return PropertyICCompiler::ComputeKeyedLoadMonomorphic(receiver_map);
- }
-
- DCHECK(state() != GENERIC);
-
- // Determine the list of receiver maps that this call site has seen,
- // adding the map that was just encountered.
- if (!AddOneReceiverMapIfMissing(&target_receiver_maps, receiver_map)) {
- // If the miss wasn't due to an unseen map, a polymorphic stub
- // won't help, use the generic stub.
- TRACE_GENERIC_IC(isolate(), "KeyedIC", "same map added twice");
- return generic_stub();
- }
-
- // If the maximum number of receiver maps has been exceeded, use the generic
- // version of the IC.
- if (target_receiver_maps.length() > kMaxKeyedPolymorphism) {
- TRACE_GENERIC_IC(isolate(), "KeyedIC", "max polymorph exceeded");
- return generic_stub();
- }
-
- return PropertyICCompiler::ComputeKeyedLoadPolymorphic(&target_receiver_maps);
-}
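
The map bookkeeping above follows a fixed progression: one map stays monomorphic, a bounded set becomes polymorphic, and a repeated or overflowing map goes generic. A standalone sketch of that decision (a limit of 4 is assumed here; the real bound is the kMaxKeyedPolymorphism constant used above):

#include <vector>

enum StubKind { MONOMORPHIC_STUB, POLYMORPHIC_STUB, GENERIC_STUB };

constexpr int kMaxKeyedPolymorphism = 4;  // assumed value for this sketch

// One miss: record the receiver map and decide which stub flavor to install.
StubKind OnMapSeen(std::vector<const void*>* seen_maps, const void* map) {
  for (const void* m : *seen_maps) {
    if (m == map) return GENERIC_STUB;  // same map missed twice: IC won't help
  }
  seen_maps->push_back(map);
  if (seen_maps->size() == 1) return MONOMORPHIC_STUB;
  if (static_cast<int>(seen_maps->size()) > kMaxKeyedPolymorphism) {
    return GENERIC_STUB;  // too many shapes seen at this site
  }
  return POLYMORPHIC_STUB;
}
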
-
-
-MaybeHandle<Object> KeyedLoadIC::Load(Handle<Object> object,
- Handle<Object> key) {
- if (MigrateDeprecated(object)) {
- Handle<Object> result;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate(),
- result,
- Runtime::GetObjectProperty(isolate(), object, key),
- Object);
- return result;
- }
-
- Handle<Object> load_handle;
- Handle<Code> stub = generic_stub();
-
- // Check for non-string values that can be converted into an
- // internalized string directly or are representable as a smi.
- key = TryConvertKey(key, isolate());
-
- if (key->IsInternalizedString() || key->IsSymbol()) {
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate(),
- load_handle,
- LoadIC::Load(object, Handle<Name>::cast(key)),
- Object);
- } else if (FLAG_use_ic && !object->IsAccessCheckNeeded()) {
- if (object->IsString() && key->IsNumber()) {
- if (state() == UNINITIALIZED) stub = string_stub();
- } else if (object->IsJSObject()) {
- Handle<JSObject> receiver = Handle<JSObject>::cast(object);
- if (receiver->elements()->map() ==
- isolate()->heap()->sloppy_arguments_elements_map()) {
- stub = sloppy_arguments_stub();
- } else if (receiver->HasIndexedInterceptor()) {
- stub = indexed_interceptor_stub();
- } else if (!Object::ToSmi(isolate(), key).is_null() &&
- (!target().is_identical_to(sloppy_arguments_stub()))) {
- stub = LoadElementStub(receiver);
- }
- }
- }
-
- if (!is_target_set()) {
- Code* generic = *generic_stub();
- if (*stub == generic) {
- TRACE_GENERIC_IC(isolate(), "KeyedLoadIC", "set generic");
- }
- set_target(*stub);
- TRACE_IC("LoadIC", key);
- }
-
- if (!load_handle.is_null()) return load_handle;
- Handle<Object> result;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate(),
- result,
- Runtime::GetObjectProperty(isolate(), object, key),
- Object);
- return result;
-}
-
-
-bool StoreIC::LookupForWrite(LookupIterator* it, Handle<Object> value,
- JSReceiver::StoreFromKeyed store_mode) {
- // Disable ICs for non-JSObjects for now.
- Handle<Object> receiver = it->GetReceiver();
- if (!receiver->IsJSObject()) return false;
- DCHECK(!Handle<JSObject>::cast(receiver)->map()->is_deprecated());
-
- for (; it->IsFound(); it->Next()) {
- switch (it->state()) {
- case LookupIterator::NOT_FOUND:
- case LookupIterator::TRANSITION:
- UNREACHABLE();
- case LookupIterator::JSPROXY:
- return false;
- case LookupIterator::INTERCEPTOR: {
- Handle<JSObject> holder = it->GetHolder<JSObject>();
- InterceptorInfo* info = holder->GetNamedInterceptor();
- if (it->HolderIsReceiverOrHiddenPrototype()) {
- if (!info->setter()->IsUndefined()) return true;
- } else if (!info->getter()->IsUndefined() ||
- !info->query()->IsUndefined()) {
- return false;
- }
- break;
- }
- case LookupIterator::ACCESS_CHECK:
- if (it->GetHolder<JSObject>()->IsAccessCheckNeeded()) return false;
- break;
- case LookupIterator::PROPERTY:
- if (!it->HasProperty()) break;
- if (it->IsReadOnly()) return false;
- if (it->property_kind() == LookupIterator::ACCESSOR) return true;
- if (it->GetHolder<Object>().is_identical_to(receiver)) {
- it->PrepareForDataProperty(value);
- // The previous receiver map might just have been deprecated,
- // so reload it.
- update_receiver_type(receiver);
- return true;
- }
-
- // Receiver != holder.
- if (receiver->IsJSGlobalProxy()) {
- PrototypeIterator iter(it->isolate(), receiver);
- return it->GetHolder<Object>().is_identical_to(
- PrototypeIterator::GetCurrent(iter));
- }
-
- it->PrepareTransitionToDataProperty(value, NONE, store_mode);
- return it->IsCacheableTransition();
- }
- }
-
- it->PrepareTransitionToDataProperty(value, NONE, store_mode);
- return it->IsCacheableTransition();
-}
-
-
-MaybeHandle<Object> StoreIC::Store(Handle<Object> object,
- Handle<Name> name,
- Handle<Object> value,
- JSReceiver::StoreFromKeyed store_mode) {
- // TODO(verwaest): Let SetProperty do the migration, since storing a property
- // might deprecate the current map again, if value does not fit.
- if (MigrateDeprecated(object) || object->IsJSProxy()) {
- Handle<Object> result;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate(), result,
- Object::SetProperty(object, name, value, strict_mode()), Object);
- return result;
- }
-
- // If the object is undefined or null it's illegal to try to set any
- // properties on it; throw a TypeError in that case.
- if (object->IsUndefined() || object->IsNull()) {
- return TypeError("non_object_property_store", object, name);
- }
-
- // Check if the given name is an array index.
- uint32_t index;
- if (name->AsArrayIndex(&index)) {
- // Ignore other stores where the receiver is not a JSObject.
- // TODO(1475): Must check prototype chains of object wrappers.
- if (!object->IsJSObject()) return value;
- Handle<JSObject> receiver = Handle<JSObject>::cast(object);
-
- Handle<Object> result;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate(),
- result,
- JSObject::SetElement(receiver, index, value, NONE, strict_mode()),
- Object);
- return value;
- }
-
- // Observed objects are always modified through the runtime.
- if (object->IsHeapObject() &&
- Handle<HeapObject>::cast(object)->map()->is_observed()) {
- Handle<Object> result;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate(), result,
- Object::SetProperty(object, name, value, strict_mode(), store_mode),
- Object);
- return result;
- }
-
- LookupIterator it(object, name);
- if (FLAG_use_ic) UpdateCaches(&it, value, store_mode);
-
- // Set the property.
- Handle<Object> result;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate(), result,
- Object::SetProperty(&it, value, strict_mode(), store_mode), Object);
- return result;
-}
-
-
-OStream& operator<<(OStream& os, const CallIC::State& s) {
- return os << "(args(" << s.arg_count() << "), "
- << (s.call_type() == CallIC::METHOD ? "METHOD" : "FUNCTION")
- << ", ";
-}
-
-
-Handle<Code> CallIC::initialize_stub(Isolate* isolate,
- int argc,
- CallType call_type) {
- CallICStub stub(isolate, State(argc, call_type));
- Handle<Code> code = stub.GetCode();
- return code;
-}
-
-
-Handle<Code> StoreIC::initialize_stub(Isolate* isolate,
- StrictMode strict_mode) {
- ExtraICState extra_state = ComputeExtraICState(strict_mode);
- Handle<Code> ic =
- PropertyICCompiler::ComputeStore(isolate, UNINITIALIZED, extra_state);
- return ic;
-}
-
-
-Handle<Code> StoreIC::megamorphic_stub() {
- return PropertyICCompiler::ComputeStore(isolate(), MEGAMORPHIC,
- extra_ic_state());
-}
-
-
-Handle<Code> StoreIC::generic_stub() const {
- return PropertyICCompiler::ComputeStore(isolate(), GENERIC, extra_ic_state());
-}
-
-
-Handle<Code> StoreIC::pre_monomorphic_stub(Isolate* isolate,
- StrictMode strict_mode) {
- ExtraICState state = ComputeExtraICState(strict_mode);
- return PropertyICCompiler::ComputeStore(isolate, PREMONOMORPHIC, state);
-}
-
-
-void StoreIC::UpdateCaches(LookupIterator* lookup, Handle<Object> value,
- JSReceiver::StoreFromKeyed store_mode) {
- if (state() == UNINITIALIZED) {
- // This is the first time we execute this inline cache. Set the target to
- // the pre-monomorphic stub to delay setting the monomorphic state.
- set_target(*pre_monomorphic_stub());
- TRACE_IC("StoreIC", lookup->name());
- return;
- }
-
- Handle<Code> code = LookupForWrite(lookup, value, store_mode)
- ? ComputeHandler(lookup, value)
- : slow_stub();
-
- PatchCache(lookup->name(), code);
- TRACE_IC("StoreIC", lookup->name());
-}
-
-
-Handle<Code> StoreIC::CompileHandler(LookupIterator* lookup,
- Handle<Object> value,
- CacheHolderFlag cache_holder) {
- DCHECK_NE(LookupIterator::JSPROXY, lookup->state());
-
- // This is currently guaranteed by checks in StoreIC::Store.
- Handle<JSObject> receiver = Handle<JSObject>::cast(lookup->GetReceiver());
- Handle<JSObject> holder = lookup->GetHolder<JSObject>();
- DCHECK(!receiver->IsAccessCheckNeeded());
-
- // -------------- Transition --------------
- if (lookup->state() == LookupIterator::TRANSITION) {
- Handle<Map> transition = lookup->transition_map();
- // Currently not handled by CompileStoreTransition.
- if (!holder->HasFastProperties()) return slow_stub();
-
- DCHECK(lookup->IsCacheableTransition());
- NamedStoreHandlerCompiler compiler(isolate(), receiver_type(), holder);
- return compiler.CompileStoreTransition(transition, lookup->name());
- }
-
- // -------------- Interceptors --------------
- if (lookup->state() == LookupIterator::INTERCEPTOR) {
- DCHECK(!holder->GetNamedInterceptor()->setter()->IsUndefined());
- NamedStoreHandlerCompiler compiler(isolate(), receiver_type(), holder);
- return compiler.CompileStoreInterceptor(lookup->name());
- }
-
- // -------------- Accessors --------------
- DCHECK(lookup->state() == LookupIterator::PROPERTY);
- if (lookup->property_kind() == LookupIterator::ACCESSOR) {
- if (!holder->HasFastProperties()) return slow_stub();
- Handle<Object> accessors = lookup->GetAccessors();
- if (accessors->IsExecutableAccessorInfo()) {
- Handle<ExecutableAccessorInfo> info =
- Handle<ExecutableAccessorInfo>::cast(accessors);
- if (v8::ToCData<Address>(info->setter()) == 0) return slow_stub();
- if (!ExecutableAccessorInfo::IsCompatibleReceiverType(isolate(), info,
- receiver_type())) {
- return slow_stub();
- }
- NamedStoreHandlerCompiler compiler(isolate(), receiver_type(), holder);
- return compiler.CompileStoreCallback(receiver, lookup->name(), info);
- } else if (accessors->IsAccessorPair()) {
- Handle<Object> setter(Handle<AccessorPair>::cast(accessors)->setter(),
- isolate());
- if (!setter->IsJSFunction()) return slow_stub();
- Handle<JSFunction> function = Handle<JSFunction>::cast(setter);
- CallOptimization call_optimization(function);
- NamedStoreHandlerCompiler compiler(isolate(), receiver_type(), holder);
- if (call_optimization.is_simple_api_call() &&
- call_optimization.IsCompatibleReceiver(receiver, holder)) {
- return compiler.CompileStoreCallback(receiver, lookup->name(),
- call_optimization);
- }
- return compiler.CompileStoreViaSetter(receiver, lookup->name(),
- Handle<JSFunction>::cast(setter));
- }
- // TODO(dcarney): Handle correctly.
- DCHECK(accessors->IsDeclaredAccessorInfo());
- return slow_stub();
- }
-
- // -------------- Dictionary properties --------------
- DCHECK(lookup->property_kind() == LookupIterator::DATA);
- if (lookup->property_encoding() == LookupIterator::DICTIONARY) {
- if (holder->IsGlobalObject()) {
- Handle<PropertyCell> cell = lookup->GetPropertyCell();
- Handle<HeapType> union_type = PropertyCell::UpdatedType(cell, value);
- StoreGlobalStub stub(isolate(), union_type->IsConstant(),
- receiver->IsJSGlobalProxy());
- Handle<Code> code = stub.GetCodeCopyFromTemplate(
- Handle<GlobalObject>::cast(holder), cell);
- // TODO(verwaest): Move caching of these NORMAL stubs outside as well.
- HeapObject::UpdateMapCodeCache(receiver, lookup->name(), code);
- return code;
- }
- DCHECK(holder.is_identical_to(receiver));
- return isolate()->builtins()->StoreIC_Normal();
- }
-
- // -------------- Fields --------------
- DCHECK(lookup->property_encoding() == LookupIterator::DESCRIPTOR);
- if (lookup->property_details().type() == FIELD) {
- bool use_stub = true;
- if (lookup->representation().IsHeapObject()) {
- // Only use a generic stub if no types need to be tracked.
- Handle<HeapType> field_type = lookup->GetFieldType();
- HeapType::Iterator<Map> it = field_type->Classes();
- use_stub = it.Done();
- }
- if (use_stub) {
- StoreFieldStub stub(isolate(), lookup->GetFieldIndex(),
- lookup->representation());
- return stub.GetCode();
- }
- NamedStoreHandlerCompiler compiler(isolate(), receiver_type(), holder);
- return compiler.CompileStoreField(lookup);
- }
-
- // -------------- Constant properties --------------
- DCHECK(lookup->property_details().type() == CONSTANT);
- return slow_stub();
-}
-
-
-Handle<Code> KeyedStoreIC::StoreElementStub(Handle<JSObject> receiver,
- KeyedAccessStoreMode store_mode) {
- // Don't handle megamorphic property accesses for INTERCEPTORS or CALLBACKS
- // via megamorphic stubs, since they don't have a map in their relocation info
- // and so the stubs can't be harvested for the object needed for a map check.
- if (target()->type() != Code::NORMAL) {
- TRACE_GENERIC_IC(isolate(), "KeyedIC", "non-NORMAL target type");
- return generic_stub();
- }
-
- Handle<Map> receiver_map(receiver->map(), isolate());
- MapHandleList target_receiver_maps;
- TargetMaps(&target_receiver_maps);
- if (target_receiver_maps.length() == 0) {
- Handle<Map> monomorphic_map =
- ComputeTransitionedMap(receiver_map, store_mode);
- store_mode = GetNonTransitioningStoreMode(store_mode);
- return PropertyICCompiler::ComputeKeyedStoreMonomorphic(
- monomorphic_map, strict_mode(), store_mode);
- }
-
- // There are several special cases where a MONOMORPHIC IC can still
- // transition to a different IC whose non-transitioning store mode handles
- // a superset of the original IC. Handle those here if the receiver map
- // hasn't changed or has transitioned to a more general kind.
- KeyedAccessStoreMode old_store_mode =
- KeyedStoreIC::GetKeyedAccessStoreMode(target()->extra_ic_state());
- Handle<Map> previous_receiver_map = target_receiver_maps.at(0);
- if (state() == MONOMORPHIC) {
- Handle<Map> transitioned_receiver_map = receiver_map;
- if (IsTransitionStoreMode(store_mode)) {
- transitioned_receiver_map =
- ComputeTransitionedMap(receiver_map, store_mode);
- }
- if ((receiver_map.is_identical_to(previous_receiver_map) &&
- IsTransitionStoreMode(store_mode)) ||
- IsTransitionOfMonomorphicTarget(*previous_receiver_map,
- *transitioned_receiver_map)) {
- // If the "old" and "new" maps are in the same elements map family, or
- // if they at least come from the same origin for a transitioning store,
- // stay MONOMORPHIC and use the map for the most generic ElementsKind.
- store_mode = GetNonTransitioningStoreMode(store_mode);
- return PropertyICCompiler::ComputeKeyedStoreMonomorphic(
- transitioned_receiver_map, strict_mode(), store_mode);
- } else if (*previous_receiver_map == receiver->map() &&
- old_store_mode == STANDARD_STORE &&
- (store_mode == STORE_AND_GROW_NO_TRANSITION ||
- store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS ||
- store_mode == STORE_NO_TRANSITION_HANDLE_COW)) {
- // A "normal" IC that handles stores can switch to a version that can
- // grow at the end of the array, handle OOB accesses or copy COW arrays
- // and still stay MONOMORPHIC.
- return PropertyICCompiler::ComputeKeyedStoreMonomorphic(
- receiver_map, strict_mode(), store_mode);
- }
- }
-
- DCHECK(state() != GENERIC);
-
- bool map_added =
- AddOneReceiverMapIfMissing(&target_receiver_maps, receiver_map);
-
- if (IsTransitionStoreMode(store_mode)) {
- Handle<Map> transitioned_receiver_map =
- ComputeTransitionedMap(receiver_map, store_mode);
- map_added |= AddOneReceiverMapIfMissing(&target_receiver_maps,
- transitioned_receiver_map);
- }
-
- if (!map_added) {
- // If the miss wasn't due to an unseen map, a polymorphic stub
- // won't help, use the generic stub.
- TRACE_GENERIC_IC(isolate(), "KeyedIC", "same map added twice");
- return generic_stub();
- }
-
- // If the maximum number of receiver maps has been exceeded, use the generic
- // version of the IC.
- if (target_receiver_maps.length() > kMaxKeyedPolymorphism) {
- TRACE_GENERIC_IC(isolate(), "KeyedIC", "max polymorph exceeded");
- return generic_stub();
- }
-
- // Make sure all polymorphic handlers have the same store mode, otherwise the
- // generic stub must be used.
- store_mode = GetNonTransitioningStoreMode(store_mode);
- if (old_store_mode != STANDARD_STORE) {
- if (store_mode == STANDARD_STORE) {
- store_mode = old_store_mode;
- } else if (store_mode != old_store_mode) {
- TRACE_GENERIC_IC(isolate(), "KeyedIC", "store mode mismatch");
- return generic_stub();
- }
- }
-
- // If the store mode isn't the standard mode, make sure that the
- // polymorphic receivers are all external arrays or all "normal" arrays.
- // Otherwise, use the generic stub.
- if (store_mode != STANDARD_STORE) {
- int external_arrays = 0;
- for (int i = 0; i < target_receiver_maps.length(); ++i) {
- if (target_receiver_maps[i]->has_external_array_elements() ||
- target_receiver_maps[i]->has_fixed_typed_array_elements()) {
- external_arrays++;
- }
- }
- if (external_arrays != 0 &&
- external_arrays != target_receiver_maps.length()) {
- TRACE_GENERIC_IC(isolate(), "KeyedIC",
- "unsupported combination of external and normal arrays");
- return generic_stub();
- }
- }
-
- return PropertyICCompiler::ComputeKeyedStorePolymorphic(
- &target_receiver_maps, store_mode, strict_mode());
-}
-
-
-Handle<Map> KeyedStoreIC::ComputeTransitionedMap(
- Handle<Map> map,
- KeyedAccessStoreMode store_mode) {
- switch (store_mode) {
- case STORE_TRANSITION_SMI_TO_OBJECT:
- case STORE_TRANSITION_DOUBLE_TO_OBJECT:
- case STORE_AND_GROW_TRANSITION_SMI_TO_OBJECT:
- case STORE_AND_GROW_TRANSITION_DOUBLE_TO_OBJECT:
- return Map::TransitionElementsTo(map, FAST_ELEMENTS);
- case STORE_TRANSITION_SMI_TO_DOUBLE:
- case STORE_AND_GROW_TRANSITION_SMI_TO_DOUBLE:
- return Map::TransitionElementsTo(map, FAST_DOUBLE_ELEMENTS);
- case STORE_TRANSITION_HOLEY_SMI_TO_OBJECT:
- case STORE_TRANSITION_HOLEY_DOUBLE_TO_OBJECT:
- case STORE_AND_GROW_TRANSITION_HOLEY_SMI_TO_OBJECT:
- case STORE_AND_GROW_TRANSITION_HOLEY_DOUBLE_TO_OBJECT:
- return Map::TransitionElementsTo(map, FAST_HOLEY_ELEMENTS);
- case STORE_TRANSITION_HOLEY_SMI_TO_DOUBLE:
- case STORE_AND_GROW_TRANSITION_HOLEY_SMI_TO_DOUBLE:
- return Map::TransitionElementsTo(map, FAST_HOLEY_DOUBLE_ELEMENTS);
- case STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS:
- DCHECK(map->has_external_array_elements());
- // Fall through
- case STORE_NO_TRANSITION_HANDLE_COW:
- case STANDARD_STORE:
- case STORE_AND_GROW_NO_TRANSITION:
- return map;
- }
- UNREACHABLE();
- return MaybeHandle<Map>().ToHandleChecked();
-}
-
-
-bool IsOutOfBoundsAccess(Handle<JSObject> receiver,
- int index) {
- if (receiver->IsJSArray()) {
- return JSArray::cast(*receiver)->length()->IsSmi() &&
- index >= Smi::cast(JSArray::cast(*receiver)->length())->value();
- }
- return index >= receiver->elements()->length();
-}
-
-
-KeyedAccessStoreMode KeyedStoreIC::GetStoreMode(Handle<JSObject> receiver,
- Handle<Object> key,
- Handle<Object> value) {
- Handle<Smi> smi_key = Object::ToSmi(isolate(), key).ToHandleChecked();
- int index = smi_key->value();
- bool oob_access = IsOutOfBoundsAccess(receiver, index);
- // Don't consider this a growing store if the store would send the receiver to
- // dictionary mode.
- bool allow_growth = receiver->IsJSArray() && oob_access &&
- !receiver->WouldConvertToSlowElements(key);
- if (allow_growth) {
- // Handle growing array in stub if necessary.
- if (receiver->HasFastSmiElements()) {
- if (value->IsHeapNumber()) {
- if (receiver->HasFastHoleyElements()) {
- return STORE_AND_GROW_TRANSITION_HOLEY_SMI_TO_DOUBLE;
- } else {
- return STORE_AND_GROW_TRANSITION_SMI_TO_DOUBLE;
- }
- }
- if (value->IsHeapObject()) {
- if (receiver->HasFastHoleyElements()) {
- return STORE_AND_GROW_TRANSITION_HOLEY_SMI_TO_OBJECT;
- } else {
- return STORE_AND_GROW_TRANSITION_SMI_TO_OBJECT;
- }
- }
- } else if (receiver->HasFastDoubleElements()) {
- if (!value->IsSmi() && !value->IsHeapNumber()) {
- if (receiver->HasFastHoleyElements()) {
- return STORE_AND_GROW_TRANSITION_HOLEY_DOUBLE_TO_OBJECT;
- } else {
- return STORE_AND_GROW_TRANSITION_DOUBLE_TO_OBJECT;
- }
- }
- }
- return STORE_AND_GROW_NO_TRANSITION;
- } else {
- // Handle only in-bounds elements accesses.
- if (receiver->HasFastSmiElements()) {
- if (value->IsHeapNumber()) {
- if (receiver->HasFastHoleyElements()) {
- return STORE_TRANSITION_HOLEY_SMI_TO_DOUBLE;
- } else {
- return STORE_TRANSITION_SMI_TO_DOUBLE;
- }
- } else if (value->IsHeapObject()) {
- if (receiver->HasFastHoleyElements()) {
- return STORE_TRANSITION_HOLEY_SMI_TO_OBJECT;
- } else {
- return STORE_TRANSITION_SMI_TO_OBJECT;
- }
- }
- } else if (receiver->HasFastDoubleElements()) {
- if (!value->IsSmi() && !value->IsHeapNumber()) {
- if (receiver->HasFastHoleyElements()) {
- return STORE_TRANSITION_HOLEY_DOUBLE_TO_OBJECT;
- } else {
- return STORE_TRANSITION_DOUBLE_TO_OBJECT;
- }
- }
- }
- if (!FLAG_trace_external_array_abuse &&
- receiver->map()->has_external_array_elements() && oob_access) {
- return STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS;
- }
- Heap* heap = receiver->GetHeap();
- if (receiver->elements()->map() == heap->fixed_cow_array_map()) {
- return STORE_NO_TRANSITION_HANDLE_COW;
- } else {
- return STANDARD_STORE;
- }
- }
-}
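
Behind the many store modes above sits one small decision: does the stored value force the receiver's elements kind to generalize? A reduced sketch, ignoring the growing, out-of-bounds, and COW cases (enum names here are illustrative; holeyness is preserved across the transition, as in the switch cases above):

enum StoredValue { VALUE_SMI, VALUE_HEAP_NUMBER, VALUE_OTHER_OBJECT };
enum Transition { NONE_NEEDED, TO_DOUBLE, TO_OBJECT };

// Smi elements generalize to double for heap numbers and to object for any
// other heap object; double elements generalize to object for non-numbers.
Transition NeededTransition(bool has_smi_elements, bool has_double_elements,
                            StoredValue v) {
  if (has_smi_elements) {
    if (v == VALUE_HEAP_NUMBER) return TO_DOUBLE;
    if (v == VALUE_OTHER_OBJECT) return TO_OBJECT;
  } else if (has_double_elements) {
    if (v == VALUE_OTHER_OBJECT) return TO_OBJECT;
  }
  return NONE_NEEDED;
}
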
-
-
-MaybeHandle<Object> KeyedStoreIC::Store(Handle<Object> object,
- Handle<Object> key,
- Handle<Object> value) {
- // TODO(verwaest): Let SetProperty do the migration, since storing a property
- // might deprecate the current map again, if value does not fit.
- if (MigrateDeprecated(object)) {
- Handle<Object> result;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate(),
- result,
- Runtime::SetObjectProperty(
- isolate(), object, key, value, strict_mode()),
- Object);
- return result;
- }
-
- // Check for non-string values that can be converted into an
- // internalized string directly or are representable as a smi.
- key = TryConvertKey(key, isolate());
-
- Handle<Object> store_handle;
- Handle<Code> stub = generic_stub();
-
- if (key->IsInternalizedString()) {
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate(),
- store_handle,
- StoreIC::Store(object,
- Handle<String>::cast(key),
- value,
- JSReceiver::MAY_BE_STORE_FROM_KEYED),
- Object);
- TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "set generic");
- set_target(*stub);
- return store_handle;
- }
-
- bool use_ic =
- FLAG_use_ic && !object->IsStringWrapper() &&
- !object->IsAccessCheckNeeded() && !object->IsJSGlobalProxy() &&
- !(object->IsJSObject() && JSObject::cast(*object)->map()->is_observed());
- if (use_ic && !object->IsSmi()) {
- // Don't use ICs for maps of the objects in Array's prototype chain. We
- // expect to be able to trap element sets to objects with those maps in
- // the runtime to enable optimization of element hole access.
- Handle<HeapObject> heap_object = Handle<HeapObject>::cast(object);
- if (heap_object->map()->IsMapInArrayPrototypeChain()) use_ic = false;
- }
-
- if (use_ic) {
- DCHECK(!object->IsAccessCheckNeeded());
-
- if (object->IsJSObject()) {
- Handle<JSObject> receiver = Handle<JSObject>::cast(object);
- bool key_is_smi_like = !Object::ToSmi(isolate(), key).is_null();
- if (receiver->elements()->map() ==
- isolate()->heap()->sloppy_arguments_elements_map()) {
- if (strict_mode() == SLOPPY) {
- stub = sloppy_arguments_stub();
- }
- } else if (key_is_smi_like &&
- !(target().is_identical_to(sloppy_arguments_stub()))) {
- // We should go generic if the receiver isn't a dictionary, but its
- // prototype chain does have dictionary elements. This ensures that
- // other non-dictionary receivers in the polymorphic case benefit
- // from fast path keyed stores.
- if (!(receiver->map()->DictionaryElementsInPrototypeChainOnly())) {
- KeyedAccessStoreMode store_mode = GetStoreMode(receiver, key, value);
- stub = StoreElementStub(receiver, store_mode);
- }
- }
- }
- }
-
- if (store_handle.is_null()) {
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate(),
- store_handle,
- Runtime::SetObjectProperty(
- isolate(), object, key, value, strict_mode()),
- Object);
- }
-
- DCHECK(!is_target_set());
- Code* generic = *generic_stub();
- if (*stub == generic) {
- TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "set generic");
- }
- DCHECK(!stub.is_null());
- set_target(*stub);
- TRACE_IC("StoreIC", key);
-
- return store_handle;
-}
-
-
-CallIC::State::State(ExtraICState extra_ic_state)
- : argc_(ArgcBits::decode(extra_ic_state)),
- call_type_(CallTypeBits::decode(extra_ic_state)) {
-}
-
-
-ExtraICState CallIC::State::GetExtraICState() const {
- ExtraICState extra_ic_state =
- ArgcBits::encode(argc_) |
- CallTypeBits::encode(call_type_);
- return extra_ic_state;
-}
-
-
-bool CallIC::DoCustomHandler(Handle<Object> receiver,
- Handle<Object> function,
- Handle<FixedArray> vector,
- Handle<Smi> slot,
- const State& state) {
- DCHECK(FLAG_use_ic && function->IsJSFunction());
-
- // Are we the array function?
- Handle<JSFunction> array_function = Handle<JSFunction>(
- isolate()->native_context()->array_function());
- if (array_function.is_identical_to(Handle<JSFunction>::cast(function))) {
- // Alter the slot.
- IC::State old_state = FeedbackToState(vector, slot);
- Object* feedback = vector->get(slot->value());
- if (!feedback->IsAllocationSite()) {
- Handle<AllocationSite> new_site =
- isolate()->factory()->NewAllocationSite();
- vector->set(slot->value(), *new_site);
- }
-
- CallIC_ArrayStub stub(isolate(), state);
- set_target(*stub.GetCode());
- Handle<String> name;
- if (array_function->shared()->name()->IsString()) {
- name = Handle<String>(String::cast(array_function->shared()->name()),
- isolate());
- }
-
- IC::State new_state = FeedbackToState(vector, slot);
- OnTypeFeedbackChanged(isolate(), address(), old_state, new_state, true);
- TRACE_VECTOR_IC("CallIC (custom handler)", name, old_state, new_state);
- return true;
- }
- return false;
-}
-
-
-void CallIC::PatchMegamorphic(Handle<Object> function,
- Handle<FixedArray> vector, Handle<Smi> slot) {
- State state(target()->extra_ic_state());
- IC::State old_state = FeedbackToState(vector, slot);
-
- // We are going generic.
- vector->set(slot->value(),
- *TypeFeedbackInfo::MegamorphicSentinel(isolate()),
- SKIP_WRITE_BARRIER);
-
- CallICStub stub(isolate(), state);
- Handle<Code> code = stub.GetCode();
- set_target(*code);
-
- Handle<Object> name = isolate()->factory()->empty_string();
- if (function->IsJSFunction()) {
- Handle<JSFunction> js_function = Handle<JSFunction>::cast(function);
- name = handle(js_function->shared()->name(), isolate());
- }
-
- IC::State new_state = FeedbackToState(vector, slot);
- OnTypeFeedbackChanged(isolate(), address(), old_state, new_state, true);
- TRACE_VECTOR_IC("CallIC", name, old_state, new_state);
-}
-
-
-void CallIC::HandleMiss(Handle<Object> receiver,
- Handle<Object> function,
- Handle<FixedArray> vector,
- Handle<Smi> slot) {
- State state(target()->extra_ic_state());
- IC::State old_state = FeedbackToState(vector, slot);
- Handle<Object> name = isolate()->factory()->empty_string();
- Object* feedback = vector->get(slot->value());
-
- // Hand-coded MISS handling is easier if CallIC slots don't contain smis.
- DCHECK(!feedback->IsSmi());
-
- if (feedback->IsJSFunction() || !function->IsJSFunction()) {
- // We are going generic.
- vector->set(slot->value(),
- *TypeFeedbackInfo::MegamorphicSentinel(isolate()),
- SKIP_WRITE_BARRIER);
- } else {
- // The feedback is either uninitialized or an allocation site.
- // It might be an allocation site because if we re-compile the full code
- // to add deoptimization support, we call with the default call-ic, and
- // merely need to patch the target to match the feedback.
- // TODO(mvstanton): the better approach is to dispense with patching
- // altogether, which is in progress.
- DCHECK(feedback == *TypeFeedbackInfo::UninitializedSentinel(isolate()) ||
- feedback->IsAllocationSite());
-
- // Do we want to install a custom handler?
- if (FLAG_use_ic &&
- DoCustomHandler(receiver, function, vector, slot, state)) {
- return;
- }
-
- vector->set(slot->value(), *function);
- }
-
- if (function->IsJSFunction()) {
- Handle<JSFunction> js_function = Handle<JSFunction>::cast(function);
- name = handle(js_function->shared()->name(), isolate());
- }
-
- IC::State new_state = FeedbackToState(vector, slot);
- OnTypeFeedbackChanged(isolate(), address(), old_state, new_state, true);
- TRACE_VECTOR_IC("CallIC", name, old_state, new_state);
-}
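
Taken together, HandleMiss and PatchMegamorphic implement a small state machine over the feedback slot. A sketch of the miss transition (state names are illustrative; the sentinel states stand in for the real TypeFeedbackInfo sentinels):

enum SlotState { UNINITIALIZED, ALLOCATION_SITE, MONOMORPHIC, MEGAMORPHIC };

// What a miss does to the slot: a slot that already held a function (the
// callee must differ, or it would have hit) or a non-function callee goes
// MEGAMORPHIC; the Array function installs an AllocationSite via the custom
// handler; any other JSFunction is remembered, making the slot MONOMORPHIC.
SlotState OnMiss(SlotState state, bool callee_is_js_function,
                 bool callee_is_array_function) {
  if (state == MONOMORPHIC || !callee_is_js_function) return MEGAMORPHIC;
  if (callee_is_array_function) return ALLOCATION_SITE;
  return MONOMORPHIC;
}
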
-
-
-#undef TRACE_IC
-
-
-// ----------------------------------------------------------------------------
-// Static IC stub generators.
-//
-
-// Used from ic-<arch>.cc.
-RUNTIME_FUNCTION(CallIC_Miss) {
- TimerEventScope<TimerEventIcMiss> timer(isolate);
- HandleScope scope(isolate);
- DCHECK(args.length() == 4);
- CallIC ic(isolate);
- Handle<Object> receiver = args.at<Object>(0);
- Handle<Object> function = args.at<Object>(1);
- Handle<FixedArray> vector = args.at<FixedArray>(2);
- Handle<Smi> slot = args.at<Smi>(3);
- ic.HandleMiss(receiver, function, vector, slot);
- return *function;
-}
-
-
-RUNTIME_FUNCTION(CallIC_Customization_Miss) {
- TimerEventScope<TimerEventIcMiss> timer(isolate);
- HandleScope scope(isolate);
- DCHECK(args.length() == 4);
- // A miss on a custom call IC always results in going megamorphic.
- CallIC ic(isolate);
- Handle<Object> function = args.at<Object>(1);
- Handle<FixedArray> vector = args.at<FixedArray>(2);
- Handle<Smi> slot = args.at<Smi>(3);
- ic.PatchMegamorphic(function, vector, slot);
- return *function;
-}
-
-
-// Used from ic-<arch>.cc.
-RUNTIME_FUNCTION(LoadIC_Miss) {
- TimerEventScope<TimerEventIcMiss> timer(isolate);
- HandleScope scope(isolate);
- DCHECK(args.length() == 2);
- LoadIC ic(IC::NO_EXTRA_FRAME, isolate);
- Handle<Object> receiver = args.at<Object>(0);
- Handle<Name> key = args.at<Name>(1);
- ic.UpdateState(receiver, key);
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, ic.Load(receiver, key));
- return *result;
-}
-
-
-// Used from ic-<arch>.cc
-RUNTIME_FUNCTION(KeyedLoadIC_Miss) {
- TimerEventScope<TimerEventIcMiss> timer(isolate);
- HandleScope scope(isolate);
- DCHECK(args.length() == 2);
- KeyedLoadIC ic(IC::NO_EXTRA_FRAME, isolate);
- Handle<Object> receiver = args.at<Object>(0);
- Handle<Object> key = args.at<Object>(1);
- ic.UpdateState(receiver, key);
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, ic.Load(receiver, key));
- return *result;
-}
-
-
-RUNTIME_FUNCTION(KeyedLoadIC_MissFromStubFailure) {
- TimerEventScope<TimerEventIcMiss> timer(isolate);
- HandleScope scope(isolate);
- DCHECK(args.length() == 2);
- KeyedLoadIC ic(IC::EXTRA_CALL_FRAME, isolate);
- Handle<Object> receiver = args.at<Object>(0);
- Handle<Object> key = args.at<Object>(1);
- ic.UpdateState(receiver, key);
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, ic.Load(receiver, key));
- return *result;
-}
-
-
-// Used from ic-<arch>.cc.
-RUNTIME_FUNCTION(StoreIC_Miss) {
- TimerEventScope<TimerEventIcMiss> timer(isolate);
- HandleScope scope(isolate);
- DCHECK(args.length() == 3);
- StoreIC ic(IC::NO_EXTRA_FRAME, isolate);
- Handle<Object> receiver = args.at<Object>(0);
- Handle<String> key = args.at<String>(1);
- ic.UpdateState(receiver, key);
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate,
- result,
- ic.Store(receiver, key, args.at<Object>(2)));
- return *result;
-}
-
-
-RUNTIME_FUNCTION(StoreIC_MissFromStubFailure) {
- TimerEventScope<TimerEventIcMiss> timer(isolate);
- HandleScope scope(isolate);
- DCHECK(args.length() == 3);
- StoreIC ic(IC::EXTRA_CALL_FRAME, isolate);
- Handle<Object> receiver = args.at<Object>(0);
- Handle<String> key = args.at<String>(1);
- ic.UpdateState(receiver, key);
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate,
- result,
- ic.Store(receiver, key, args.at<Object>(2)));
- return *result;
-}
-
-
-// ExtendStorage is called from a store inline cache when it is necessary
-// to extend the properties array of a JSObject.
-RUNTIME_FUNCTION(SharedStoreIC_ExtendStorage) {
- TimerEventScope<TimerEventIcMiss> timer(isolate);
- HandleScope shs(isolate);
- DCHECK(args.length() == 3);
-
- // Convert the parameters
- Handle<JSObject> object = args.at<JSObject>(0);
- Handle<Map> transition = args.at<Map>(1);
- Handle<Object> value = args.at<Object>(2);
-
- // Check that the object has run out of property space.
- DCHECK(object->HasFastProperties());
- DCHECK(object->map()->unused_property_fields() == 0);
-
- JSObject::MigrateToNewProperty(object, transition, value);
-
- // Return the stored value.
- return *value;
-}
-
-
-// Used from ic-<arch>.cc.
-RUNTIME_FUNCTION(KeyedStoreIC_Miss) {
- TimerEventScope<TimerEventIcMiss> timer(isolate);
- HandleScope scope(isolate);
- DCHECK(args.length() == 3);
- KeyedStoreIC ic(IC::NO_EXTRA_FRAME, isolate);
- Handle<Object> receiver = args.at<Object>(0);
- Handle<Object> key = args.at<Object>(1);
- ic.UpdateState(receiver, key);
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate,
- result,
- ic.Store(receiver, key, args.at<Object>(2)));
- return *result;
-}
-
-
-RUNTIME_FUNCTION(KeyedStoreIC_MissFromStubFailure) {
- TimerEventScope<TimerEventIcMiss> timer(isolate);
- HandleScope scope(isolate);
- DCHECK(args.length() == 3);
- KeyedStoreIC ic(IC::EXTRA_CALL_FRAME, isolate);
- Handle<Object> receiver = args.at<Object>(0);
- Handle<Object> key = args.at<Object>(1);
- ic.UpdateState(receiver, key);
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate,
- result,
- ic.Store(receiver, key, args.at<Object>(2)));
- return *result;
-}
-
-
-RUNTIME_FUNCTION(StoreIC_Slow) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 3);
- StoreIC ic(IC::NO_EXTRA_FRAME, isolate);
- Handle<Object> object = args.at<Object>(0);
- Handle<Object> key = args.at<Object>(1);
- Handle<Object> value = args.at<Object>(2);
- StrictMode strict_mode = ic.strict_mode();
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result,
- Runtime::SetObjectProperty(
- isolate, object, key, value, strict_mode));
- return *result;
-}
-
-
-RUNTIME_FUNCTION(KeyedStoreIC_Slow) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 3);
- KeyedStoreIC ic(IC::NO_EXTRA_FRAME, isolate);
- Handle<Object> object = args.at<Object>(0);
- Handle<Object> key = args.at<Object>(1);
- Handle<Object> value = args.at<Object>(2);
- StrictMode strict_mode = ic.strict_mode();
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result,
- Runtime::SetObjectProperty(
- isolate, object, key, value, strict_mode));
- return *result;
-}
-
-
-RUNTIME_FUNCTION(ElementsTransitionAndStoreIC_Miss) {
- TimerEventScope<TimerEventIcMiss> timer(isolate);
- HandleScope scope(isolate);
- DCHECK(args.length() == 4);
- KeyedStoreIC ic(IC::EXTRA_CALL_FRAME, isolate);
- Handle<Object> value = args.at<Object>(0);
- Handle<Map> map = args.at<Map>(1);
- Handle<Object> key = args.at<Object>(2);
- Handle<Object> object = args.at<Object>(3);
- StrictMode strict_mode = ic.strict_mode();
- if (object->IsJSObject()) {
- JSObject::TransitionElementsKind(Handle<JSObject>::cast(object),
- map->elements_kind());
- }
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result,
- Runtime::SetObjectProperty(
- isolate, object, key, value, strict_mode));
- return *result;
-}
-
-
-BinaryOpIC::State::State(Isolate* isolate, ExtraICState extra_ic_state)
- : isolate_(isolate) {
- op_ = static_cast<Token::Value>(
- FIRST_TOKEN + OpField::decode(extra_ic_state));
- mode_ = OverwriteModeField::decode(extra_ic_state);
- fixed_right_arg_ = Maybe<int>(
- HasFixedRightArgField::decode(extra_ic_state),
- 1 << FixedRightArgValueField::decode(extra_ic_state));
- left_kind_ = LeftKindField::decode(extra_ic_state);
- if (fixed_right_arg_.has_value) {
- right_kind_ = Smi::IsValid(fixed_right_arg_.value) ? SMI : INT32;
- } else {
- right_kind_ = RightKindField::decode(extra_ic_state);
- }
- result_kind_ = ResultKindField::decode(extra_ic_state);
- DCHECK_LE(FIRST_TOKEN, op_);
- DCHECK_LE(op_, LAST_TOKEN);
-}
-
-
-ExtraICState BinaryOpIC::State::GetExtraICState() const {
- ExtraICState extra_ic_state =
- OpField::encode(op_ - FIRST_TOKEN) |
- OverwriteModeField::encode(mode_) |
- LeftKindField::encode(left_kind_) |
- ResultKindField::encode(result_kind_) |
- HasFixedRightArgField::encode(fixed_right_arg_.has_value);
- if (fixed_right_arg_.has_value) {
- extra_ic_state = FixedRightArgValueField::update(
- extra_ic_state, WhichPowerOf2(fixed_right_arg_.value));
- } else {
- extra_ic_state = RightKindField::update(extra_ic_state, right_kind_);
- }
- return extra_ic_state;
-}
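
State(isolate, extra_ic_state) and GetExtraICState() above are inverse bit-field codecs over a single word. The sketch below shows the packing scheme with invented field widths (V8's actual layouts live in the BitField helpers), including the trick of storing a power-of-two fixed right argument as its log2, which is what WhichPowerOf2 computes above:

#include <cstdint>

// Simplified stand-in for a bit-field descriptor; widths here are invented.
struct Field {
  int shift, bits;
  constexpr uint32_t encode(uint32_t v) const { return v << shift; }
  constexpr uint32_t decode(uint32_t s) const {
    return (s >> shift) & ((1u << bits) - 1);
  }
};

constexpr Field kOp{0, 5}, kMode{5, 2}, kLeft{7, 3}, kResult{10, 3},
    kHasFixedRight{13, 1}, kRightOrLog2{14, 4};

// The last field is overloaded: the right operand's kind when there is no
// fixed right argument, otherwise log2 of the (power-of-two) argument.
uint32_t Encode(uint32_t op, uint32_t mode, uint32_t left, uint32_t result,
                bool has_fixed_right, uint32_t right_kind_or_log2) {
  return kOp.encode(op) | kMode.encode(mode) | kLeft.encode(left) |
         kResult.encode(result) | kHasFixedRight.encode(has_fixed_right) |
         kRightOrLog2.encode(right_kind_or_log2);
}
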
-
-
-// static
-void BinaryOpIC::State::GenerateAheadOfTime(
- Isolate* isolate, void (*Generate)(Isolate*, const State&)) {
- // TODO(olivf) We should investigate why adding stubs to the snapshot is so
- // expensive at runtime. When solved we should be able to add most binops to
- // the snapshot instead of hand-picking them.
- // Generated list of commonly used stubs
-#define GENERATE(op, left_kind, right_kind, result_kind, mode) \
- do { \
- State state(isolate, op, mode); \
- state.left_kind_ = left_kind; \
- state.fixed_right_arg_.has_value = false; \
- state.right_kind_ = right_kind; \
- state.result_kind_ = result_kind; \
- Generate(isolate, state); \
- } while (false)
- GENERATE(Token::ADD, INT32, INT32, INT32, NO_OVERWRITE);
- GENERATE(Token::ADD, INT32, INT32, INT32, OVERWRITE_LEFT);
- GENERATE(Token::ADD, INT32, INT32, NUMBER, NO_OVERWRITE);
- GENERATE(Token::ADD, INT32, INT32, NUMBER, OVERWRITE_LEFT);
- GENERATE(Token::ADD, INT32, NUMBER, NUMBER, NO_OVERWRITE);
- GENERATE(Token::ADD, INT32, NUMBER, NUMBER, OVERWRITE_LEFT);
- GENERATE(Token::ADD, INT32, NUMBER, NUMBER, OVERWRITE_RIGHT);
- GENERATE(Token::ADD, INT32, SMI, INT32, NO_OVERWRITE);
- GENERATE(Token::ADD, INT32, SMI, INT32, OVERWRITE_LEFT);
- GENERATE(Token::ADD, INT32, SMI, INT32, OVERWRITE_RIGHT);
- GENERATE(Token::ADD, NUMBER, INT32, NUMBER, NO_OVERWRITE);
- GENERATE(Token::ADD, NUMBER, INT32, NUMBER, OVERWRITE_LEFT);
- GENERATE(Token::ADD, NUMBER, INT32, NUMBER, OVERWRITE_RIGHT);
- GENERATE(Token::ADD, NUMBER, NUMBER, NUMBER, NO_OVERWRITE);
- GENERATE(Token::ADD, NUMBER, NUMBER, NUMBER, OVERWRITE_LEFT);
- GENERATE(Token::ADD, NUMBER, NUMBER, NUMBER, OVERWRITE_RIGHT);
- GENERATE(Token::ADD, NUMBER, SMI, NUMBER, NO_OVERWRITE);
- GENERATE(Token::ADD, NUMBER, SMI, NUMBER, OVERWRITE_LEFT);
- GENERATE(Token::ADD, NUMBER, SMI, NUMBER, OVERWRITE_RIGHT);
- GENERATE(Token::ADD, SMI, INT32, INT32, NO_OVERWRITE);
- GENERATE(Token::ADD, SMI, INT32, INT32, OVERWRITE_LEFT);
- GENERATE(Token::ADD, SMI, INT32, NUMBER, NO_OVERWRITE);
- GENERATE(Token::ADD, SMI, NUMBER, NUMBER, NO_OVERWRITE);
- GENERATE(Token::ADD, SMI, NUMBER, NUMBER, OVERWRITE_LEFT);
- GENERATE(Token::ADD, SMI, NUMBER, NUMBER, OVERWRITE_RIGHT);
- GENERATE(Token::ADD, SMI, SMI, INT32, OVERWRITE_LEFT);
- GENERATE(Token::ADD, SMI, SMI, SMI, OVERWRITE_RIGHT);
- GENERATE(Token::BIT_AND, INT32, INT32, INT32, NO_OVERWRITE);
- GENERATE(Token::BIT_AND, INT32, INT32, INT32, OVERWRITE_LEFT);
- GENERATE(Token::BIT_AND, INT32, INT32, INT32, OVERWRITE_RIGHT);
- GENERATE(Token::BIT_AND, INT32, INT32, SMI, NO_OVERWRITE);
- GENERATE(Token::BIT_AND, INT32, INT32, SMI, OVERWRITE_RIGHT);
- GENERATE(Token::BIT_AND, INT32, SMI, INT32, NO_OVERWRITE);
- GENERATE(Token::BIT_AND, INT32, SMI, INT32, OVERWRITE_RIGHT);
- GENERATE(Token::BIT_AND, INT32, SMI, SMI, NO_OVERWRITE);
- GENERATE(Token::BIT_AND, INT32, SMI, SMI, OVERWRITE_LEFT);
- GENERATE(Token::BIT_AND, INT32, SMI, SMI, OVERWRITE_RIGHT);
- GENERATE(Token::BIT_AND, NUMBER, INT32, INT32, OVERWRITE_RIGHT);
- GENERATE(Token::BIT_AND, NUMBER, SMI, SMI, NO_OVERWRITE);
- GENERATE(Token::BIT_AND, NUMBER, SMI, SMI, OVERWRITE_RIGHT);
- GENERATE(Token::BIT_AND, SMI, INT32, INT32, NO_OVERWRITE);
- GENERATE(Token::BIT_AND, SMI, INT32, SMI, OVERWRITE_RIGHT);
- GENERATE(Token::BIT_AND, SMI, NUMBER, SMI, OVERWRITE_RIGHT);
- GENERATE(Token::BIT_AND, SMI, SMI, SMI, NO_OVERWRITE);
- GENERATE(Token::BIT_AND, SMI, SMI, SMI, OVERWRITE_LEFT);
- GENERATE(Token::BIT_AND, SMI, SMI, SMI, OVERWRITE_RIGHT);
- GENERATE(Token::BIT_OR, INT32, INT32, INT32, OVERWRITE_LEFT);
- GENERATE(Token::BIT_OR, INT32, INT32, INT32, OVERWRITE_RIGHT);
- GENERATE(Token::BIT_OR, INT32, INT32, SMI, OVERWRITE_LEFT);
- GENERATE(Token::BIT_OR, INT32, SMI, INT32, NO_OVERWRITE);
- GENERATE(Token::BIT_OR, INT32, SMI, INT32, OVERWRITE_LEFT);
- GENERATE(Token::BIT_OR, INT32, SMI, INT32, OVERWRITE_RIGHT);
- GENERATE(Token::BIT_OR, INT32, SMI, SMI, NO_OVERWRITE);
- GENERATE(Token::BIT_OR, INT32, SMI, SMI, OVERWRITE_RIGHT);
- GENERATE(Token::BIT_OR, NUMBER, SMI, INT32, NO_OVERWRITE);
- GENERATE(Token::BIT_OR, NUMBER, SMI, INT32, OVERWRITE_LEFT);
- GENERATE(Token::BIT_OR, NUMBER, SMI, INT32, OVERWRITE_RIGHT);
- GENERATE(Token::BIT_OR, NUMBER, SMI, SMI, NO_OVERWRITE);
- GENERATE(Token::BIT_OR, NUMBER, SMI, SMI, OVERWRITE_LEFT);
- GENERATE(Token::BIT_OR, SMI, INT32, INT32, OVERWRITE_LEFT);
- GENERATE(Token::BIT_OR, SMI, INT32, INT32, OVERWRITE_RIGHT);
- GENERATE(Token::BIT_OR, SMI, INT32, SMI, OVERWRITE_RIGHT);
- GENERATE(Token::BIT_OR, SMI, SMI, SMI, OVERWRITE_LEFT);
- GENERATE(Token::BIT_OR, SMI, SMI, SMI, OVERWRITE_RIGHT);
- GENERATE(Token::BIT_XOR, INT32, INT32, INT32, NO_OVERWRITE);
- GENERATE(Token::BIT_XOR, INT32, INT32, INT32, OVERWRITE_LEFT);
- GENERATE(Token::BIT_XOR, INT32, INT32, INT32, OVERWRITE_RIGHT);
- GENERATE(Token::BIT_XOR, INT32, INT32, SMI, NO_OVERWRITE);
- GENERATE(Token::BIT_XOR, INT32, INT32, SMI, OVERWRITE_LEFT);
- GENERATE(Token::BIT_XOR, INT32, NUMBER, SMI, NO_OVERWRITE);
- GENERATE(Token::BIT_XOR, INT32, SMI, INT32, NO_OVERWRITE);
- GENERATE(Token::BIT_XOR, INT32, SMI, INT32, OVERWRITE_LEFT);
- GENERATE(Token::BIT_XOR, INT32, SMI, INT32, OVERWRITE_RIGHT);
- GENERATE(Token::BIT_XOR, NUMBER, INT32, INT32, NO_OVERWRITE);
- GENERATE(Token::BIT_XOR, NUMBER, SMI, INT32, NO_OVERWRITE);
- GENERATE(Token::BIT_XOR, NUMBER, SMI, SMI, NO_OVERWRITE);
- GENERATE(Token::BIT_XOR, SMI, INT32, INT32, NO_OVERWRITE);
- GENERATE(Token::BIT_XOR, SMI, INT32, INT32, OVERWRITE_LEFT);
- GENERATE(Token::BIT_XOR, SMI, INT32, SMI, OVERWRITE_LEFT);
- GENERATE(Token::BIT_XOR, SMI, SMI, SMI, NO_OVERWRITE);
- GENERATE(Token::BIT_XOR, SMI, SMI, SMI, OVERWRITE_LEFT);
- GENERATE(Token::BIT_XOR, SMI, SMI, SMI, OVERWRITE_RIGHT);
- GENERATE(Token::DIV, INT32, INT32, INT32, NO_OVERWRITE);
- GENERATE(Token::DIV, INT32, INT32, NUMBER, NO_OVERWRITE);
- GENERATE(Token::DIV, INT32, NUMBER, NUMBER, NO_OVERWRITE);
- GENERATE(Token::DIV, INT32, NUMBER, NUMBER, OVERWRITE_LEFT);
- GENERATE(Token::DIV, INT32, SMI, INT32, NO_OVERWRITE);
- GENERATE(Token::DIV, INT32, SMI, NUMBER, NO_OVERWRITE);
- GENERATE(Token::DIV, NUMBER, INT32, NUMBER, NO_OVERWRITE);
- GENERATE(Token::DIV, NUMBER, INT32, NUMBER, OVERWRITE_LEFT);
- GENERATE(Token::DIV, NUMBER, NUMBER, NUMBER, NO_OVERWRITE);
- GENERATE(Token::DIV, NUMBER, NUMBER, NUMBER, OVERWRITE_LEFT);
- GENERATE(Token::DIV, NUMBER, NUMBER, NUMBER, OVERWRITE_RIGHT);
- GENERATE(Token::DIV, NUMBER, SMI, NUMBER, NO_OVERWRITE);
- GENERATE(Token::DIV, NUMBER, SMI, NUMBER, OVERWRITE_LEFT);
- GENERATE(Token::DIV, SMI, INT32, INT32, NO_OVERWRITE);
- GENERATE(Token::DIV, SMI, INT32, NUMBER, NO_OVERWRITE);
- GENERATE(Token::DIV, SMI, INT32, NUMBER, OVERWRITE_LEFT);
- GENERATE(Token::DIV, SMI, NUMBER, NUMBER, NO_OVERWRITE);
- GENERATE(Token::DIV, SMI, NUMBER, NUMBER, OVERWRITE_LEFT);
- GENERATE(Token::DIV, SMI, NUMBER, NUMBER, OVERWRITE_RIGHT);
- GENERATE(Token::DIV, SMI, SMI, NUMBER, NO_OVERWRITE);
- GENERATE(Token::DIV, SMI, SMI, NUMBER, OVERWRITE_LEFT);
- GENERATE(Token::DIV, SMI, SMI, NUMBER, OVERWRITE_RIGHT);
- GENERATE(Token::DIV, SMI, SMI, SMI, NO_OVERWRITE);
- GENERATE(Token::DIV, SMI, SMI, SMI, OVERWRITE_LEFT);
- GENERATE(Token::DIV, SMI, SMI, SMI, OVERWRITE_RIGHT);
- GENERATE(Token::MOD, NUMBER, SMI, NUMBER, OVERWRITE_LEFT);
- GENERATE(Token::MOD, SMI, SMI, SMI, NO_OVERWRITE);
- GENERATE(Token::MOD, SMI, SMI, SMI, OVERWRITE_LEFT);
- GENERATE(Token::MUL, INT32, INT32, INT32, NO_OVERWRITE);
- GENERATE(Token::MUL, INT32, INT32, NUMBER, NO_OVERWRITE);
- GENERATE(Token::MUL, INT32, NUMBER, NUMBER, NO_OVERWRITE);
- GENERATE(Token::MUL, INT32, NUMBER, NUMBER, OVERWRITE_LEFT);
- GENERATE(Token::MUL, INT32, SMI, INT32, NO_OVERWRITE);
- GENERATE(Token::MUL, INT32, SMI, INT32, OVERWRITE_LEFT);
- GENERATE(Token::MUL, INT32, SMI, NUMBER, NO_OVERWRITE);
- GENERATE(Token::MUL, NUMBER, INT32, NUMBER, NO_OVERWRITE);
- GENERATE(Token::MUL, NUMBER, INT32, NUMBER, OVERWRITE_LEFT);
- GENERATE(Token::MUL, NUMBER, INT32, NUMBER, OVERWRITE_RIGHT);
- GENERATE(Token::MUL, NUMBER, NUMBER, NUMBER, NO_OVERWRITE);
- GENERATE(Token::MUL, NUMBER, NUMBER, NUMBER, OVERWRITE_LEFT);
- GENERATE(Token::MUL, NUMBER, SMI, NUMBER, NO_OVERWRITE);
- GENERATE(Token::MUL, NUMBER, SMI, NUMBER, OVERWRITE_LEFT);
- GENERATE(Token::MUL, NUMBER, SMI, NUMBER, OVERWRITE_RIGHT);
- GENERATE(Token::MUL, SMI, INT32, INT32, NO_OVERWRITE);
- GENERATE(Token::MUL, SMI, INT32, INT32, OVERWRITE_LEFT);
- GENERATE(Token::MUL, SMI, INT32, NUMBER, NO_OVERWRITE);
- GENERATE(Token::MUL, SMI, NUMBER, NUMBER, NO_OVERWRITE);
- GENERATE(Token::MUL, SMI, NUMBER, NUMBER, OVERWRITE_LEFT);
- GENERATE(Token::MUL, SMI, NUMBER, NUMBER, OVERWRITE_RIGHT);
- GENERATE(Token::MUL, SMI, SMI, INT32, NO_OVERWRITE);
- GENERATE(Token::MUL, SMI, SMI, NUMBER, NO_OVERWRITE);
- GENERATE(Token::MUL, SMI, SMI, NUMBER, OVERWRITE_LEFT);
- GENERATE(Token::MUL, SMI, SMI, SMI, NO_OVERWRITE);
- GENERATE(Token::MUL, SMI, SMI, SMI, OVERWRITE_LEFT);
- GENERATE(Token::MUL, SMI, SMI, SMI, OVERWRITE_RIGHT);
- GENERATE(Token::SAR, INT32, SMI, INT32, OVERWRITE_RIGHT);
- GENERATE(Token::SAR, INT32, SMI, SMI, NO_OVERWRITE);
- GENERATE(Token::SAR, INT32, SMI, SMI, OVERWRITE_RIGHT);
- GENERATE(Token::SAR, NUMBER, SMI, SMI, NO_OVERWRITE);
- GENERATE(Token::SAR, NUMBER, SMI, SMI, OVERWRITE_RIGHT);
- GENERATE(Token::SAR, SMI, SMI, SMI, OVERWRITE_LEFT);
- GENERATE(Token::SAR, SMI, SMI, SMI, OVERWRITE_RIGHT);
- GENERATE(Token::SHL, INT32, SMI, INT32, NO_OVERWRITE);
- GENERATE(Token::SHL, INT32, SMI, INT32, OVERWRITE_RIGHT);
- GENERATE(Token::SHL, INT32, SMI, SMI, NO_OVERWRITE);
- GENERATE(Token::SHL, INT32, SMI, SMI, OVERWRITE_RIGHT);
- GENERATE(Token::SHL, NUMBER, SMI, SMI, OVERWRITE_RIGHT);
- GENERATE(Token::SHL, SMI, SMI, INT32, NO_OVERWRITE);
- GENERATE(Token::SHL, SMI, SMI, INT32, OVERWRITE_LEFT);
- GENERATE(Token::SHL, SMI, SMI, INT32, OVERWRITE_RIGHT);
- GENERATE(Token::SHL, SMI, SMI, SMI, NO_OVERWRITE);
- GENERATE(Token::SHL, SMI, SMI, SMI, OVERWRITE_LEFT);
- GENERATE(Token::SHL, SMI, SMI, SMI, OVERWRITE_RIGHT);
- GENERATE(Token::SHR, INT32, SMI, SMI, NO_OVERWRITE);
- GENERATE(Token::SHR, INT32, SMI, SMI, OVERWRITE_LEFT);
- GENERATE(Token::SHR, INT32, SMI, SMI, OVERWRITE_RIGHT);
- GENERATE(Token::SHR, NUMBER, SMI, SMI, NO_OVERWRITE);
- GENERATE(Token::SHR, NUMBER, SMI, SMI, OVERWRITE_LEFT);
- GENERATE(Token::SHR, NUMBER, SMI, INT32, OVERWRITE_RIGHT);
- GENERATE(Token::SHR, SMI, SMI, SMI, NO_OVERWRITE);
- GENERATE(Token::SHR, SMI, SMI, SMI, OVERWRITE_LEFT);
- GENERATE(Token::SHR, SMI, SMI, SMI, OVERWRITE_RIGHT);
- GENERATE(Token::SUB, INT32, INT32, INT32, NO_OVERWRITE);
- GENERATE(Token::SUB, INT32, INT32, INT32, OVERWRITE_LEFT);
- GENERATE(Token::SUB, INT32, NUMBER, NUMBER, NO_OVERWRITE);
- GENERATE(Token::SUB, INT32, NUMBER, NUMBER, OVERWRITE_RIGHT);
- GENERATE(Token::SUB, INT32, SMI, INT32, OVERWRITE_LEFT);
- GENERATE(Token::SUB, INT32, SMI, INT32, OVERWRITE_RIGHT);
- GENERATE(Token::SUB, NUMBER, INT32, NUMBER, NO_OVERWRITE);
- GENERATE(Token::SUB, NUMBER, INT32, NUMBER, OVERWRITE_LEFT);
- GENERATE(Token::SUB, NUMBER, NUMBER, NUMBER, NO_OVERWRITE);
- GENERATE(Token::SUB, NUMBER, NUMBER, NUMBER, OVERWRITE_LEFT);
- GENERATE(Token::SUB, NUMBER, NUMBER, NUMBER, OVERWRITE_RIGHT);
- GENERATE(Token::SUB, NUMBER, SMI, NUMBER, NO_OVERWRITE);
- GENERATE(Token::SUB, NUMBER, SMI, NUMBER, OVERWRITE_LEFT);
- GENERATE(Token::SUB, NUMBER, SMI, NUMBER, OVERWRITE_RIGHT);
- GENERATE(Token::SUB, SMI, INT32, INT32, NO_OVERWRITE);
- GENERATE(Token::SUB, SMI, NUMBER, NUMBER, NO_OVERWRITE);
- GENERATE(Token::SUB, SMI, NUMBER, NUMBER, OVERWRITE_LEFT);
- GENERATE(Token::SUB, SMI, NUMBER, NUMBER, OVERWRITE_RIGHT);
- GENERATE(Token::SUB, SMI, SMI, SMI, NO_OVERWRITE);
- GENERATE(Token::SUB, SMI, SMI, SMI, OVERWRITE_LEFT);
- GENERATE(Token::SUB, SMI, SMI, SMI, OVERWRITE_RIGHT);
-#undef GENERATE
-#define GENERATE(op, left_kind, fixed_right_arg_value, result_kind, mode) \
- do { \
- State state(isolate, op, mode); \
- state.left_kind_ = left_kind; \
- state.fixed_right_arg_.has_value = true; \
- state.fixed_right_arg_.value = fixed_right_arg_value; \
- state.right_kind_ = SMI; \
- state.result_kind_ = result_kind; \
- Generate(isolate, state); \
- } while (false)
- GENERATE(Token::MOD, SMI, 2, SMI, NO_OVERWRITE);
- GENERATE(Token::MOD, SMI, 4, SMI, NO_OVERWRITE);
- GENERATE(Token::MOD, SMI, 4, SMI, OVERWRITE_LEFT);
- GENERATE(Token::MOD, SMI, 8, SMI, NO_OVERWRITE);
- GENERATE(Token::MOD, SMI, 16, SMI, OVERWRITE_LEFT);
- GENERATE(Token::MOD, SMI, 32, SMI, NO_OVERWRITE);
- GENERATE(Token::MOD, SMI, 2048, SMI, NO_OVERWRITE);
-#undef GENERATE
-}
-
-
-Type* BinaryOpIC::State::GetResultType(Zone* zone) const {
- Kind result_kind = result_kind_;
- if (HasSideEffects()) {
- result_kind = NONE;
- } else if (result_kind == GENERIC && op_ == Token::ADD) {
- return Type::Union(Type::Number(zone), Type::String(zone), zone);
- } else if (result_kind == NUMBER && op_ == Token::SHR) {
- return Type::Unsigned32(zone);
- }
- DCHECK_NE(GENERIC, result_kind);
- return KindToType(result_kind, zone);
-}
-
-
-OStream& operator<<(OStream& os, const BinaryOpIC::State& s) {
- os << "(" << Token::Name(s.op_);
- if (s.mode_ == OVERWRITE_LEFT)
- os << "_ReuseLeft";
- else if (s.mode_ == OVERWRITE_RIGHT)
- os << "_ReuseRight";
- if (s.CouldCreateAllocationMementos()) os << "_CreateAllocationMementos";
- os << ":" << BinaryOpIC::State::KindToString(s.left_kind_) << "*";
- if (s.fixed_right_arg_.has_value) {
- os << s.fixed_right_arg_.value;
- } else {
- os << BinaryOpIC::State::KindToString(s.right_kind_);
- }
- return os << "->" << BinaryOpIC::State::KindToString(s.result_kind_) << ")";
-}
-
-
-void BinaryOpIC::State::Update(Handle<Object> left,
- Handle<Object> right,
- Handle<Object> result) {
- ExtraICState old_extra_ic_state = GetExtraICState();
-
- left_kind_ = UpdateKind(left, left_kind_);
- right_kind_ = UpdateKind(right, right_kind_);
-
- int32_t fixed_right_arg_value = 0;
- bool has_fixed_right_arg =
- op_ == Token::MOD &&
- right->ToInt32(&fixed_right_arg_value) &&
- fixed_right_arg_value > 0 &&
- IsPowerOf2(fixed_right_arg_value) &&
- FixedRightArgValueField::is_valid(WhichPowerOf2(fixed_right_arg_value)) &&
- (left_kind_ == SMI || left_kind_ == INT32) &&
- (result_kind_ == NONE || !fixed_right_arg_.has_value);
- fixed_right_arg_ = Maybe<int32_t>(has_fixed_right_arg,
- fixed_right_arg_value);
-
- result_kind_ = UpdateKind(result, result_kind_);
-
- if (!Token::IsTruncatingBinaryOp(op_)) {
- Kind input_kind = Max(left_kind_, right_kind_);
- if (result_kind_ < input_kind && input_kind <= NUMBER) {
- result_kind_ = input_kind;
- }
- }
-
- // We don't want to distinguish INT32 and NUMBER for string add (because
- // NumberToString can't make use of this anyway).
- if (left_kind_ == STRING && right_kind_ == INT32) {
- DCHECK_EQ(STRING, result_kind_);
- DCHECK_EQ(Token::ADD, op_);
- right_kind_ = NUMBER;
- } else if (right_kind_ == STRING && left_kind_ == INT32) {
- DCHECK_EQ(STRING, result_kind_);
- DCHECK_EQ(Token::ADD, op_);
- left_kind_ = NUMBER;
- }
-
- // Reset overwrite mode unless we can actually make use of it, or may be able
- // to make use of it at some point in the future.
- if ((mode_ == OVERWRITE_LEFT && left_kind_ > NUMBER) ||
- (mode_ == OVERWRITE_RIGHT && right_kind_ > NUMBER) ||
- result_kind_ > NUMBER) {
- mode_ = NO_OVERWRITE;
- }
-
- if (old_extra_ic_state == GetExtraICState()) {
- // Tagged operations can lead to non-truncating HChanges
- if (left->IsUndefined() || left->IsBoolean()) {
- left_kind_ = GENERIC;
- } else {
- DCHECK(right->IsUndefined() || right->IsBoolean());
- right_kind_ = GENERIC;
- }
- }
-}
-
-
-BinaryOpIC::State::Kind BinaryOpIC::State::UpdateKind(Handle<Object> object,
- Kind kind) const {
- Kind new_kind = GENERIC;
- bool is_truncating = Token::IsTruncatingBinaryOp(op());
- if (object->IsBoolean() && is_truncating) {
- // Booleans will be automatically truncated by HChange.
- new_kind = INT32;
- } else if (object->IsUndefined()) {
- // Undefined will be automatically truncated by HChange.
- new_kind = is_truncating ? INT32 : NUMBER;
- } else if (object->IsSmi()) {
- new_kind = SMI;
- } else if (object->IsHeapNumber()) {
- double value = Handle<HeapNumber>::cast(object)->value();
- new_kind = IsInt32Double(value) ? INT32 : NUMBER;
- } else if (object->IsString() && op() == Token::ADD) {
- new_kind = STRING;
- }
- if (new_kind == INT32 && SmiValuesAre32Bits()) {
- new_kind = NUMBER;
- }
- if (kind != NONE &&
- ((new_kind <= NUMBER && kind > NUMBER) ||
- (new_kind > NUMBER && kind <= NUMBER))) {
- new_kind = GENERIC;
- }
- return Max(kind, new_kind);
-}
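-
-// For example, a site that has only seen 1 + 2 records SMI; a later 1.5
-// operand generalizes it to NUMBER; a string operand on ADD yields STRING;
-// and mixing a string with a previously numeric kind collapses to GENERIC,
-// since numeric and non-numeric kinds are never combined (checked above).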
-
-
-// static
-const char* BinaryOpIC::State::KindToString(Kind kind) {
- switch (kind) {
- case NONE: return "None";
- case SMI: return "Smi";
- case INT32: return "Int32";
- case NUMBER: return "Number";
- case STRING: return "String";
- case GENERIC: return "Generic";
- }
- UNREACHABLE();
- return NULL;
-}
-
-
-// static
-Type* BinaryOpIC::State::KindToType(Kind kind, Zone* zone) {
- switch (kind) {
- case NONE: return Type::None(zone);
- case SMI: return Type::SignedSmall(zone);
- case INT32: return Type::Signed32(zone);
- case NUMBER: return Type::Number(zone);
- case STRING: return Type::String(zone);
- case GENERIC: return Type::Any(zone);
- }
- UNREACHABLE();
- return NULL;
-}
-
-
-MaybeHandle<Object> BinaryOpIC::Transition(
- Handle<AllocationSite> allocation_site,
- Handle<Object> left,
- Handle<Object> right) {
- State state(isolate(), target()->extra_ic_state());
-
- // Compute the actual result using the builtin for the binary operation.
- Object* builtin = isolate()->js_builtins_object()->javascript_builtin(
- TokenToJSBuiltin(state.op()));
- Handle<JSFunction> function = handle(JSFunction::cast(builtin), isolate());
- Handle<Object> result;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate(),
- result,
- Execution::Call(isolate(), function, left, 1, &right),
- Object);
-
-  // Execution::Call can execute arbitrary JavaScript and hence can
-  // potentially update the state of this very IC, so we must refresh the
-  // stored state.
- UpdateTarget();
- // Compute the new state.
- State old_state(isolate(), target()->extra_ic_state());
- state.Update(left, right, result);
-
- // Check if we have a string operation here.
- Handle<Code> target;
- if (!allocation_site.is_null() || state.ShouldCreateAllocationMementos()) {
- // Setup the allocation site on-demand.
- if (allocation_site.is_null()) {
- allocation_site = isolate()->factory()->NewAllocationSite();
- }
-
- // Install the stub with an allocation site.
- BinaryOpICWithAllocationSiteStub stub(isolate(), state);
- target = stub.GetCodeCopyFromTemplate(allocation_site);
-
- // Sanity check the trampoline stub.
- DCHECK_EQ(*allocation_site, target->FindFirstAllocationSite());
- } else {
- // Install the generic stub.
- BinaryOpICStub stub(isolate(), state);
- target = stub.GetCode();
-
- // Sanity check the generic stub.
- DCHECK_EQ(NULL, target->FindFirstAllocationSite());
- }
- set_target(*target);
-
- if (FLAG_trace_ic) {
- OFStream os(stdout);
- os << "[BinaryOpIC" << old_state << " => " << state << " @ "
- << static_cast<void*>(*target) << " <- ";
- JavaScriptFrame::PrintTop(isolate(), stdout, false, true);
- if (!allocation_site.is_null()) {
- os << " using allocation site " << static_cast<void*>(*allocation_site);
- }
- os << "]" << endl;
- }
-
- // Patch the inlined smi code as necessary.
- if (!old_state.UseInlinedSmiCode() && state.UseInlinedSmiCode()) {
- PatchInlinedSmiCode(address(), ENABLE_INLINED_SMI_CHECK);
- } else if (old_state.UseInlinedSmiCode() && !state.UseInlinedSmiCode()) {
- PatchInlinedSmiCode(address(), DISABLE_INLINED_SMI_CHECK);
- }
-
- return result;
-}
-
-
-RUNTIME_FUNCTION(BinaryOpIC_Miss) {
- TimerEventScope<TimerEventIcMiss> timer(isolate);
- HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
- Handle<Object> left = args.at<Object>(BinaryOpICStub::kLeft);
- Handle<Object> right = args.at<Object>(BinaryOpICStub::kRight);
- BinaryOpIC ic(isolate);
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate,
- result,
- ic.Transition(Handle<AllocationSite>::null(), left, right));
- return *result;
-}
-
-
-RUNTIME_FUNCTION(BinaryOpIC_MissWithAllocationSite) {
- TimerEventScope<TimerEventIcMiss> timer(isolate);
- HandleScope scope(isolate);
- DCHECK_EQ(3, args.length());
- Handle<AllocationSite> allocation_site = args.at<AllocationSite>(
- BinaryOpWithAllocationSiteStub::kAllocationSite);
- Handle<Object> left = args.at<Object>(
- BinaryOpWithAllocationSiteStub::kLeft);
- Handle<Object> right = args.at<Object>(
- BinaryOpWithAllocationSiteStub::kRight);
- BinaryOpIC ic(isolate);
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate,
- result,
- ic.Transition(allocation_site, left, right));
- return *result;
-}
-
-
-Code* CompareIC::GetRawUninitialized(Isolate* isolate, Token::Value op) {
- ICCompareStub stub(isolate, op, UNINITIALIZED, UNINITIALIZED, UNINITIALIZED);
- Code* code = NULL;
- CHECK(stub.FindCodeInCache(&code));
- return code;
-}
-
-
-Handle<Code> CompareIC::GetUninitialized(Isolate* isolate, Token::Value op) {
- ICCompareStub stub(isolate, op, UNINITIALIZED, UNINITIALIZED, UNINITIALIZED);
- return stub.GetCode();
-}
-
-
-const char* CompareIC::GetStateName(State state) {
- switch (state) {
- case UNINITIALIZED: return "UNINITIALIZED";
- case SMI: return "SMI";
- case NUMBER: return "NUMBER";
- case INTERNALIZED_STRING: return "INTERNALIZED_STRING";
- case STRING: return "STRING";
- case UNIQUE_NAME: return "UNIQUE_NAME";
- case OBJECT: return "OBJECT";
- case KNOWN_OBJECT: return "KNOWN_OBJECT";
- case GENERIC: return "GENERIC";
- }
- UNREACHABLE();
- return NULL;
-}
-
-
-Type* CompareIC::StateToType(
- Zone* zone,
- CompareIC::State state,
- Handle<Map> map) {
- switch (state) {
- case CompareIC::UNINITIALIZED: return Type::None(zone);
- case CompareIC::SMI: return Type::SignedSmall(zone);
- case CompareIC::NUMBER: return Type::Number(zone);
- case CompareIC::STRING: return Type::String(zone);
- case CompareIC::INTERNALIZED_STRING: return Type::InternalizedString(zone);
- case CompareIC::UNIQUE_NAME: return Type::UniqueName(zone);
- case CompareIC::OBJECT: return Type::Receiver(zone);
- case CompareIC::KNOWN_OBJECT:
- return map.is_null() ? Type::Receiver(zone) : Type::Class(map, zone);
- case CompareIC::GENERIC: return Type::Any(zone);
- }
- UNREACHABLE();
- return NULL;
-}
-
-
-void CompareIC::StubInfoToType(uint32_t stub_key, Type** left_type,
- Type** right_type, Type** overall_type,
- Handle<Map> map, Zone* zone) {
- State left_state, right_state, handler_state;
- ICCompareStub::DecodeKey(stub_key, &left_state, &right_state, &handler_state,
- NULL);
- *left_type = StateToType(zone, left_state);
- *right_type = StateToType(zone, right_state);
- *overall_type = StateToType(zone, handler_state, map);
-}
-
-
-CompareIC::State CompareIC::NewInputState(State old_state,
- Handle<Object> value) {
- switch (old_state) {
- case UNINITIALIZED:
- if (value->IsSmi()) return SMI;
- if (value->IsHeapNumber()) return NUMBER;
- if (value->IsInternalizedString()) return INTERNALIZED_STRING;
- if (value->IsString()) return STRING;
- if (value->IsSymbol()) return UNIQUE_NAME;
- if (value->IsJSObject()) return OBJECT;
- break;
- case SMI:
- if (value->IsSmi()) return SMI;
- if (value->IsHeapNumber()) return NUMBER;
- break;
- case NUMBER:
- if (value->IsNumber()) return NUMBER;
- break;
- case INTERNALIZED_STRING:
- if (value->IsInternalizedString()) return INTERNALIZED_STRING;
- if (value->IsString()) return STRING;
- if (value->IsSymbol()) return UNIQUE_NAME;
- break;
- case STRING:
- if (value->IsString()) return STRING;
- break;
- case UNIQUE_NAME:
- if (value->IsUniqueName()) return UNIQUE_NAME;
- break;
- case OBJECT:
- if (value->IsJSObject()) return OBJECT;
- break;
- case GENERIC:
- break;
- case KNOWN_OBJECT:
- UNREACHABLE();
- break;
- }
- return GENERIC;
-}
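-
-// Illustrative walk through the input states: comparing 1 == 2 takes an
-// input from UNINITIALIZED to SMI; if the next miss sees 2.5 that input
-// widens to NUMBER; a string seen from the SMI state matches no transition
-// above and falls through to GENERIC.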
-
-
-CompareIC::State CompareIC::TargetState(State old_state,
- State old_left,
- State old_right,
- bool has_inlined_smi_code,
- Handle<Object> x,
- Handle<Object> y) {
- switch (old_state) {
- case UNINITIALIZED:
- if (x->IsSmi() && y->IsSmi()) return SMI;
- if (x->IsNumber() && y->IsNumber()) return NUMBER;
- if (Token::IsOrderedRelationalCompareOp(op_)) {
- // Ordered comparisons treat undefined as NaN, so the
- // NUMBER stub will do the right thing.
- if ((x->IsNumber() && y->IsUndefined()) ||
- (y->IsNumber() && x->IsUndefined())) {
- return NUMBER;
- }
- }
- if (x->IsInternalizedString() && y->IsInternalizedString()) {
- // We compare internalized strings as plain ones if we need to determine
- // the order in a non-equality compare.
- return Token::IsEqualityOp(op_) ? INTERNALIZED_STRING : STRING;
- }
- if (x->IsString() && y->IsString()) return STRING;
- if (!Token::IsEqualityOp(op_)) return GENERIC;
- if (x->IsUniqueName() && y->IsUniqueName()) return UNIQUE_NAME;
- if (x->IsJSObject() && y->IsJSObject()) {
- if (Handle<JSObject>::cast(x)->map() ==
- Handle<JSObject>::cast(y)->map()) {
- return KNOWN_OBJECT;
- } else {
- return OBJECT;
- }
- }
- return GENERIC;
- case SMI:
- return x->IsNumber() && y->IsNumber() ? NUMBER : GENERIC;
- case INTERNALIZED_STRING:
- DCHECK(Token::IsEqualityOp(op_));
- if (x->IsString() && y->IsString()) return STRING;
- if (x->IsUniqueName() && y->IsUniqueName()) return UNIQUE_NAME;
- return GENERIC;
- case NUMBER:
-      // If the failure was due to one side changing from smi to heap number,
-      // then keep the state (if the other side changed at the same time, we
-      // will get a second miss and then go to generic).
- if (old_left == SMI && x->IsHeapNumber()) return NUMBER;
- if (old_right == SMI && y->IsHeapNumber()) return NUMBER;
- return GENERIC;
- case KNOWN_OBJECT:
- DCHECK(Token::IsEqualityOp(op_));
- if (x->IsJSObject() && y->IsJSObject()) return OBJECT;
- return GENERIC;
- case STRING:
- case UNIQUE_NAME:
- case OBJECT:
- case GENERIC:
- return GENERIC;
- }
- UNREACHABLE();
- return GENERIC; // Make the compiler happy.
-}
-
-
-Code* CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
- HandleScope scope(isolate());
- State previous_left, previous_right, previous_state;
- ICCompareStub::DecodeKey(target()->stub_key(), &previous_left,
- &previous_right, &previous_state, NULL);
- State new_left = NewInputState(previous_left, x);
- State new_right = NewInputState(previous_right, y);
- State state = TargetState(previous_state, previous_left, previous_right,
- HasInlinedSmiCode(address()), x, y);
- ICCompareStub stub(isolate(), op_, new_left, new_right, state);
- if (state == KNOWN_OBJECT) {
- stub.set_known_map(
- Handle<Map>(Handle<JSObject>::cast(x)->map(), isolate()));
- }
- Handle<Code> new_target = stub.GetCode();
- set_target(*new_target);
-
- if (FLAG_trace_ic) {
- PrintF("[CompareIC in ");
- JavaScriptFrame::PrintTop(isolate(), stdout, false, true);
- PrintF(" ((%s+%s=%s)->(%s+%s=%s))#%s @ %p]\n",
- GetStateName(previous_left),
- GetStateName(previous_right),
- GetStateName(previous_state),
- GetStateName(new_left),
- GetStateName(new_right),
- GetStateName(state),
- Token::Name(op_),
- static_cast<void*>(*stub.GetCode()));
- }
-
- // Activate inlined smi code.
- if (previous_state == UNINITIALIZED) {
- PatchInlinedSmiCode(address(), ENABLE_INLINED_SMI_CHECK);
- }
-
- return *new_target;
-}
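-
-// With --trace-ic the PrintF above emits lines of the form (illustrative):
-//   [CompareIC in fn at test.js:3
-//    ((UNINITIALIZED+UNINITIALIZED=UNINITIALIZED)->(SMI+SMI=SMI))#LT @ 0x...]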
-
-
-// Used from ICCompareStub::GenerateMiss in code-stubs-<arch>.cc.
-RUNTIME_FUNCTION(CompareIC_Miss) {
- TimerEventScope<TimerEventIcMiss> timer(isolate);
- HandleScope scope(isolate);
- DCHECK(args.length() == 3);
- CompareIC ic(isolate, static_cast<Token::Value>(args.smi_at(2)));
- return ic.UpdateCaches(args.at<Object>(0), args.at<Object>(1));
-}
-
-
-void CompareNilIC::Clear(Address address,
- Code* target,
- ConstantPoolArray* constant_pool) {
- if (IsCleared(target)) return;
- ExtraICState state = target->extra_ic_state();
-
- CompareNilICStub stub(target->GetIsolate(),
- state,
- HydrogenCodeStub::UNINITIALIZED);
- stub.ClearState();
-
- Code* code = NULL;
- CHECK(stub.FindCodeInCache(&code));
-
- SetTargetAtAddress(address, code, constant_pool);
-}
-
-
-Handle<Object> CompareNilIC::DoCompareNilSlow(Isolate* isolate,
- NilValue nil,
- Handle<Object> object) {
- if (object->IsNull() || object->IsUndefined()) {
- return handle(Smi::FromInt(true), isolate);
- }
- return handle(Smi::FromInt(object->IsUndetectableObject()), isolate);
-}
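-
-// In JS terms: null and undefined compare true against either nil value, as
-// do undetectable objects (e.g. document.all); everything else compares
-// false. Note that the nil value itself is not consulted on this slow path.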
-
-
-Handle<Object> CompareNilIC::CompareNil(Handle<Object> object) {
- ExtraICState extra_ic_state = target()->extra_ic_state();
-
- CompareNilICStub stub(isolate(), extra_ic_state);
-
- // Extract the current supported types from the patched IC and calculate what
- // types must be supported as a result of the miss.
- bool already_monomorphic = stub.IsMonomorphic();
-
- stub.UpdateStatus(object);
-
- NilValue nil = stub.GetNilValue();
-
- // Find or create the specialized stub to support the new set of types.
- Handle<Code> code;
- if (stub.IsMonomorphic()) {
- Handle<Map> monomorphic_map(already_monomorphic && FirstTargetMap() != NULL
- ? FirstTargetMap()
- : HeapObject::cast(*object)->map());
- code = PropertyICCompiler::ComputeCompareNil(monomorphic_map, &stub);
- } else {
- code = stub.GetCode();
- }
- set_target(*code);
- return DoCompareNilSlow(isolate(), nil, object);
-}
-
-
-RUNTIME_FUNCTION(CompareNilIC_Miss) {
- TimerEventScope<TimerEventIcMiss> timer(isolate);
- HandleScope scope(isolate);
- Handle<Object> object = args.at<Object>(0);
- CompareNilIC ic(isolate);
- return *ic.CompareNil(object);
-}
-
-
-RUNTIME_FUNCTION(Unreachable) {
- UNREACHABLE();
- CHECK(false);
- return isolate->heap()->undefined_value();
-}
-
-
-Builtins::JavaScript BinaryOpIC::TokenToJSBuiltin(Token::Value op) {
-  switch (op) {
-    default:
-      UNREACHABLE();
-    case Token::ADD:
-      return Builtins::ADD;
-    case Token::SUB:
-      return Builtins::SUB;
-    case Token::MUL:
-      return Builtins::MUL;
-    case Token::DIV:
-      return Builtins::DIV;
-    case Token::MOD:
-      return Builtins::MOD;
-    case Token::BIT_OR:
-      return Builtins::BIT_OR;
-    case Token::BIT_AND:
-      return Builtins::BIT_AND;
-    case Token::BIT_XOR:
-      return Builtins::BIT_XOR;
-    case Token::SAR:
-      return Builtins::SAR;
-    case Token::SHR:
-      return Builtins::SHR;
-    case Token::SHL:
-      return Builtins::SHL;
-  }
-}
-
-
-Handle<Object> ToBooleanIC::ToBoolean(Handle<Object> object) {
- ToBooleanStub stub(isolate(), target()->extra_ic_state());
- bool to_boolean_value = stub.UpdateStatus(object);
- Handle<Code> code = stub.GetCode();
- set_target(*code);
- return handle(Smi::FromInt(to_boolean_value ? 1 : 0), isolate());
-}
-
-
-RUNTIME_FUNCTION(ToBooleanIC_Miss) {
- TimerEventScope<TimerEventIcMiss> timer(isolate);
- DCHECK(args.length() == 1);
- HandleScope scope(isolate);
- Handle<Object> object = args.at<Object>(0);
- ToBooleanIC ic(isolate);
- return *ic.ToBoolean(object);
-}
-
-
-static const Address IC_utilities[] = {
-#define ADDR(name) FUNCTION_ADDR(name),
- IC_UTIL_LIST(ADDR)
- NULL
-#undef ADDR
-};
-
-
-Address IC::AddressFromUtilityId(IC::UtilityId id) {
- return IC_utilities[id];
-}
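-
-// IC_UTIL_LIST(ADDR) expands to FUNCTION_ADDR(LoadIC_Miss),
-// FUNCTION_ADDR(KeyedLoadIC_Miss), ... in declaration order, so the table is
-// indexed by IC::UtilityId: IC_utilities[kLoadIC_Miss] is the entry point of
-// LoadIC_Miss. The trailing NULL is a sentinel.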
-
-
-} } // namespace v8::internal
+++ /dev/null
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_IC_H_
-#define V8_IC_H_
-
-#include "src/macro-assembler.h"
-
-namespace v8 {
-namespace internal {
-
-
-const int kMaxKeyedPolymorphism = 4;
-
-
-// IC_UTIL_LIST defines all utility functions called from generated
-// inline caching code. The argument for the macro, ICU, is the function name.
-#define IC_UTIL_LIST(ICU) \
- ICU(LoadIC_Miss) \
- ICU(KeyedLoadIC_Miss) \
- ICU(CallIC_Miss) \
- ICU(CallIC_Customization_Miss) \
- ICU(StoreIC_Miss) \
- ICU(StoreIC_Slow) \
- ICU(SharedStoreIC_ExtendStorage) \
- ICU(KeyedStoreIC_Miss) \
- ICU(KeyedStoreIC_Slow) \
- /* Utilities for IC stubs. */ \
- ICU(StoreCallbackProperty) \
- ICU(LoadPropertyWithInterceptorOnly) \
- ICU(LoadPropertyWithInterceptor) \
- ICU(LoadElementWithInterceptor) \
- ICU(StorePropertyWithInterceptor) \
- ICU(CompareIC_Miss) \
- ICU(BinaryOpIC_Miss) \
- ICU(CompareNilIC_Miss) \
- ICU(Unreachable) \
- ICU(ToBooleanIC_Miss)
-//
-// IC is the base class for LoadIC, StoreIC, KeyedLoadIC, and KeyedStoreIC.
-//
-class IC {
- public:
- // The ids for utility called from the generated code.
- enum UtilityId {
- #define CONST_NAME(name) k##name,
- IC_UTIL_LIST(CONST_NAME)
- #undef CONST_NAME
- kUtilityCount
- };
-
- // Looks up the address of the named utility.
- static Address AddressFromUtilityId(UtilityId id);
-
- // Alias the inline cache state type to make the IC code more readable.
- typedef InlineCacheState State;
-
- // The IC code is either invoked with no extra frames on the stack
- // or with a single extra frame for supporting calls.
- enum FrameDepth {
- NO_EXTRA_FRAME = 0,
- EXTRA_CALL_FRAME = 1
- };
-
- // Construct the IC structure with the given number of extra
- // JavaScript frames on the stack.
- IC(FrameDepth depth, Isolate* isolate);
- virtual ~IC() {}
-
- State state() const { return state_; }
- inline Address address() const;
-
- // Compute the current IC state based on the target stub, receiver and name.
- void UpdateState(Handle<Object> receiver, Handle<Object> name);
-
- bool IsNameCompatibleWithPrototypeFailure(Handle<Object> name);
- void MarkPrototypeFailure(Handle<Object> name) {
- DCHECK(IsNameCompatibleWithPrototypeFailure(name));
- state_ = PROTOTYPE_FAILURE;
- }
-
- // If the stub contains weak maps then this function adds the stub to
- // the dependent code array of each weak map.
- static void RegisterWeakMapDependency(Handle<Code> stub);
-
- // This function is called when a weak map in the stub is dying,
- // invalidates the stub by setting maps in it to undefined.
- static void InvalidateMaps(Code* stub);
-
- // Clear the inline cache to initial state.
- static void Clear(Isolate* isolate,
- Address address,
- ConstantPoolArray* constant_pool);
-
-#ifdef DEBUG
- bool IsLoadStub() const {
- return target()->is_load_stub() || target()->is_keyed_load_stub();
- }
-
- bool IsStoreStub() const {
- return target()->is_store_stub() || target()->is_keyed_store_stub();
- }
-
- bool IsCallStub() const {
- return target()->is_call_stub();
- }
-#endif
-
- template <class TypeClass>
- static JSFunction* GetRootConstructor(TypeClass* type,
- Context* native_context);
- static inline Handle<Map> GetHandlerCacheHolder(HeapType* type,
- bool receiver_is_holder,
- Isolate* isolate,
- CacheHolderFlag* flag);
- static inline Handle<Map> GetICCacheHolder(HeapType* type, Isolate* isolate,
- CacheHolderFlag* flag);
-
- static bool IsCleared(Code* code) {
- InlineCacheState state = code->ic_state();
- return state == UNINITIALIZED || state == PREMONOMORPHIC;
- }
-
- // Utility functions to convert maps to types and back. There are two special
- // cases:
- // - The heap_number_map is used as a marker which includes heap numbers as
- // well as smis.
- // - The oddball map is only used for booleans.
- static Handle<Map> TypeToMap(HeapType* type, Isolate* isolate);
- template <class T>
- static typename T::TypeHandle MapToType(Handle<Map> map,
- typename T::Region* region);
-
- static Handle<HeapType> CurrentTypeOf(Handle<Object> object,
- Isolate* isolate);
-
- protected:
- // Get the call-site target; used for determining the state.
- Handle<Code> target() const { return target_; }
-
- Address fp() const { return fp_; }
- Address pc() const { return *pc_address_; }
- Isolate* isolate() const { return isolate_; }
-
- // Get the shared function info of the caller.
- SharedFunctionInfo* GetSharedFunctionInfo() const;
- // Get the code object of the caller.
- Code* GetCode() const;
- // Get the original (non-breakpointed) code object of the caller.
- Code* GetOriginalCode() const;
-
- // Set the call-site target.
- void set_target(Code* code) {
-#ifdef VERIFY_HEAP
- code->VerifyEmbeddedObjectsDependency();
-#endif
- SetTargetAtAddress(address(), code, constant_pool());
- target_set_ = true;
- }
-
- bool is_target_set() { return target_set_; }
-
- char TransitionMarkFromState(IC::State state);
- void TraceIC(const char* type, Handle<Object> name);
- void TraceIC(const char* type, Handle<Object> name, State old_state,
- State new_state);
-
- MaybeHandle<Object> TypeError(const char* type,
- Handle<Object> object,
- Handle<Object> key);
- MaybeHandle<Object> ReferenceError(const char* type, Handle<Name> name);
-
- // Access the target code for the given IC address.
- static inline Code* GetTargetAtAddress(Address address,
- ConstantPoolArray* constant_pool);
- static inline void SetTargetAtAddress(Address address,
- Code* target,
- ConstantPoolArray* constant_pool);
- static void OnTypeFeedbackChanged(Isolate* isolate, Address address,
- State old_state, State new_state,
- bool target_remains_ic_stub);
- static void PostPatching(Address address, Code* target, Code* old_target);
-
- // Compute the handler either by compiling or by retrieving a cached version.
- Handle<Code> ComputeHandler(LookupIterator* lookup,
- Handle<Object> value = Handle<Code>::null());
- virtual Handle<Code> CompileHandler(LookupIterator* lookup,
- Handle<Object> value,
- CacheHolderFlag cache_holder) {
- UNREACHABLE();
- return Handle<Code>::null();
- }
-
- void UpdateMonomorphicIC(Handle<Code> handler, Handle<Name> name);
- bool UpdatePolymorphicIC(Handle<Name> name, Handle<Code> code);
- void UpdateMegamorphicCache(HeapType* type, Name* name, Code* code);
-
- void CopyICToMegamorphicCache(Handle<Name> name);
- bool IsTransitionOfMonomorphicTarget(Map* source_map, Map* target_map);
- void PatchCache(Handle<Name> name, Handle<Code> code);
- Code::Kind kind() const { return kind_; }
- Code::Kind handler_kind() const {
- if (kind_ == Code::KEYED_LOAD_IC) return Code::LOAD_IC;
- DCHECK(kind_ == Code::LOAD_IC || kind_ == Code::STORE_IC ||
- kind_ == Code::KEYED_STORE_IC);
- return kind_;
- }
- virtual Handle<Code> megamorphic_stub() {
- UNREACHABLE();
- return Handle<Code>::null();
- }
-
- bool TryRemoveInvalidPrototypeDependentStub(Handle<Object> receiver,
- Handle<String> name);
-
- ExtraICState extra_ic_state() const { return extra_ic_state_; }
- void set_extra_ic_state(ExtraICState state) {
- extra_ic_state_ = state;
- }
-
- Handle<HeapType> receiver_type() { return receiver_type_; }
- void update_receiver_type(Handle<Object> receiver) {
- receiver_type_ = CurrentTypeOf(receiver, isolate_);
- }
-
- void TargetMaps(MapHandleList* list) {
- FindTargetMaps();
- for (int i = 0; i < target_maps_.length(); i++) {
- list->Add(target_maps_.at(i));
- }
- }
-
- void TargetTypes(TypeHandleList* list) {
- FindTargetMaps();
- for (int i = 0; i < target_maps_.length(); i++) {
- list->Add(IC::MapToType<HeapType>(target_maps_.at(i), isolate_));
- }
- }
-
- Map* FirstTargetMap() {
- FindTargetMaps();
- return target_maps_.length() > 0 ? *target_maps_.at(0) : NULL;
- }
-
- protected:
- void UpdateTarget() {
- target_ = handle(raw_target(), isolate_);
- }
-
- private:
- Code* raw_target() const {
- return GetTargetAtAddress(address(), constant_pool());
- }
- inline ConstantPoolArray* constant_pool() const;
- inline ConstantPoolArray* raw_constant_pool() const;
-
- void FindTargetMaps() {
- if (target_maps_set_) return;
- target_maps_set_ = true;
- if (state_ == MONOMORPHIC) {
- Map* map = target_->FindFirstMap();
- if (map != NULL) target_maps_.Add(handle(map));
- } else if (state_ != UNINITIALIZED && state_ != PREMONOMORPHIC) {
- target_->FindAllMaps(&target_maps_);
- }
- }
-
- // Frame pointer for the frame that uses (calls) the IC.
- Address fp_;
-
- // All access to the program counter of an IC structure is indirect
- // to make the code GC safe. This feature is crucial since
- // GetProperty and SetProperty are called and they in turn might
- // invoke the garbage collector.
- Address* pc_address_;
-
- Isolate* isolate_;
-
- // The constant pool of the code which originally called the IC (which might
- // be for the breakpointed copy of the original code).
- Handle<ConstantPoolArray> raw_constant_pool_;
-
- // The original code target that missed.
- Handle<Code> target_;
- bool target_set_;
- State state_;
- Code::Kind kind_;
- Handle<HeapType> receiver_type_;
- MaybeHandle<Code> maybe_handler_;
-
- ExtraICState extra_ic_state_;
- MapHandleList target_maps_;
- bool target_maps_set_;
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(IC);
-};
-
-
-// An IC_Utility encapsulates IC::UtilityId. It exists mainly because you
-// cannot make forward declarations to an enum.
-class IC_Utility {
- public:
- explicit IC_Utility(IC::UtilityId id)
- : address_(IC::AddressFromUtilityId(id)), id_(id) {}
-
- Address address() const { return address_; }
-
- IC::UtilityId id() const { return id_; }
- private:
- Address address_;
- IC::UtilityId id_;
-};
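-
-
-// Typical use of IC_Utility in the platform-specific miss handlers (compare
-// LoadIC::GenerateMiss in src/ic/arm/ic-arm.cc):
-//
-//   ExternalReference ref =
-//       ExternalReference(IC_Utility(kLoadIC_Miss), isolate);
-//   __ TailCallExternalReference(ref, 2, 1);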
-
-
-class CallIC: public IC {
- public:
- enum CallType { METHOD, FUNCTION };
-
- class State V8_FINAL BASE_EMBEDDED {
- public:
- explicit State(ExtraICState extra_ic_state);
-
- State(int argc, CallType call_type)
- : argc_(argc), call_type_(call_type) {
- }
-
- ExtraICState GetExtraICState() const;
-
- static void GenerateAheadOfTime(
- Isolate*, void (*Generate)(Isolate*, const State&));
-
- int arg_count() const { return argc_; }
- CallType call_type() const { return call_type_; }
-
- bool CallAsMethod() const { return call_type_ == METHOD; }
-
- private:
- class ArgcBits: public BitField<int, 0, Code::kArgumentsBits> {};
- class CallTypeBits: public BitField<CallType, Code::kArgumentsBits, 1> {};
-
- const int argc_;
- const CallType call_type_;
- };
-
- explicit CallIC(Isolate* isolate)
- : IC(EXTRA_CALL_FRAME, isolate) {
- }
-
- void PatchMegamorphic(Handle<Object> function, Handle<FixedArray> vector,
- Handle<Smi> slot);
-
- void HandleMiss(Handle<Object> receiver,
- Handle<Object> function,
- Handle<FixedArray> vector,
- Handle<Smi> slot);
-
- // Returns true if a custom handler was installed.
- bool DoCustomHandler(Handle<Object> receiver,
- Handle<Object> function,
- Handle<FixedArray> vector,
- Handle<Smi> slot,
- const State& state);
-
- // Code generator routines.
- static Handle<Code> initialize_stub(Isolate* isolate,
- int argc,
- CallType call_type);
-
- static void Clear(Isolate* isolate, Address address, Code* target,
- ConstantPoolArray* constant_pool);
-
- private:
- inline IC::State FeedbackToState(Handle<FixedArray> vector,
- Handle<Smi> slot) const;
-};
-
-
-OStream& operator<<(OStream& os, const CallIC::State& s);
-
-
-class LoadIC: public IC {
- public:
- enum ParameterIndices {
- kReceiverIndex,
- kNameIndex,
- kParameterCount
- };
- static const Register ReceiverRegister();
- static const Register NameRegister();
-
-  // With the vector-ics flag there is an additional argument, and calls
-  // from Crankshaft take yet another.
- static const Register SlotRegister();
- static const Register VectorRegister();
-
- class State V8_FINAL BASE_EMBEDDED {
- public:
- explicit State(ExtraICState extra_ic_state)
- : state_(extra_ic_state) {}
-
- explicit State(ContextualMode mode)
- : state_(ContextualModeBits::encode(mode)) {}
-
- ExtraICState GetExtraICState() const { return state_; }
-
- ContextualMode contextual_mode() const {
- return ContextualModeBits::decode(state_);
- }
-
- private:
- class ContextualModeBits: public BitField<ContextualMode, 0, 1> {};
- STATIC_ASSERT(static_cast<int>(NOT_CONTEXTUAL) == 0);
-
- const ExtraICState state_;
- };
-
- static ExtraICState ComputeExtraICState(ContextualMode contextual_mode) {
- return State(contextual_mode).GetExtraICState();
- }
-
- static ContextualMode GetContextualMode(ExtraICState state) {
- return State(state).contextual_mode();
- }
-
- ContextualMode contextual_mode() const {
- return GetContextualMode(extra_ic_state());
- }
-
- explicit LoadIC(FrameDepth depth, Isolate* isolate)
- : IC(depth, isolate) {
- DCHECK(IsLoadStub());
- }
-
-  // Returns true if this IC is for contextual access to properties (i.e.
-  // access with no explicit receiver).
- bool IsUndeclaredGlobal(Handle<Object> receiver) {
- if (receiver->IsGlobalObject()) {
- return contextual_mode() == CONTEXTUAL;
- } else {
- DCHECK(contextual_mode() != CONTEXTUAL);
- return false;
- }
- }
-
- // Code generator routines.
- static void GenerateInitialize(MacroAssembler* masm) { GenerateMiss(masm); }
- static void GeneratePreMonomorphic(MacroAssembler* masm) {
- GenerateMiss(masm);
- }
- static void GenerateMiss(MacroAssembler* masm);
- static void GenerateMegamorphic(MacroAssembler* masm);
- static void GenerateNormal(MacroAssembler* masm);
- static void GenerateRuntimeGetProperty(MacroAssembler* masm);
-
- static Handle<Code> initialize_stub(Isolate* isolate,
- ExtraICState extra_state);
-
- MUST_USE_RESULT MaybeHandle<Object> Load(Handle<Object> object,
- Handle<Name> name);
-
- protected:
- void set_target(Code* code) {
- // The contextual mode must be preserved across IC patching.
- DCHECK(GetContextualMode(code->extra_ic_state()) ==
- GetContextualMode(target()->extra_ic_state()));
-
- IC::set_target(code);
- }
-
- Handle<Code> slow_stub() const {
- if (kind() == Code::LOAD_IC) {
- return isolate()->builtins()->LoadIC_Slow();
- } else {
- DCHECK_EQ(Code::KEYED_LOAD_IC, kind());
- return isolate()->builtins()->KeyedLoadIC_Slow();
- }
- }
-
- virtual Handle<Code> megamorphic_stub();
-
- // Update the inline cache and the global stub cache based on the
- // lookup result.
- void UpdateCaches(LookupIterator* lookup);
-
- virtual Handle<Code> CompileHandler(LookupIterator* lookup,
- Handle<Object> unused,
- CacheHolderFlag cache_holder);
-
- private:
- virtual Handle<Code> pre_monomorphic_stub() const;
- static Handle<Code> pre_monomorphic_stub(Isolate* isolate,
- ExtraICState extra_state);
-
- Handle<Code> SimpleFieldLoad(FieldIndex index);
-
- static void Clear(Isolate* isolate,
- Address address,
- Code* target,
- ConstantPoolArray* constant_pool);
-
- friend class IC;
-};
-
-
-class KeyedLoadIC: public LoadIC {
- public:
- explicit KeyedLoadIC(FrameDepth depth, Isolate* isolate)
- : LoadIC(depth, isolate) {
- DCHECK(target()->is_keyed_load_stub());
- }
-
- MUST_USE_RESULT MaybeHandle<Object> Load(Handle<Object> object,
- Handle<Object> key);
-
- // Code generator routines.
- static void GenerateMiss(MacroAssembler* masm);
- static void GenerateRuntimeGetProperty(MacroAssembler* masm);
- static void GenerateInitialize(MacroAssembler* masm) { GenerateMiss(masm); }
- static void GeneratePreMonomorphic(MacroAssembler* masm) {
- GenerateMiss(masm);
- }
- static void GenerateGeneric(MacroAssembler* masm);
- static void GenerateString(MacroAssembler* masm);
- static void GenerateIndexedInterceptor(MacroAssembler* masm);
- static void GenerateSloppyArguments(MacroAssembler* masm);
-
-  // Bit mask to be tested against the map's bit field for the cases in which
-  // the generic stub should go into the slow case. The access-check bit must
-  // be tested explicitly, since the generic stub does not perform map checks.
- static const int kSlowCaseBitFieldMask =
- (1 << Map::kIsAccessCheckNeeded) | (1 << Map::kHasIndexedInterceptor);
-
- static Handle<Code> generic_stub(Isolate* isolate);
- static Handle<Code> pre_monomorphic_stub(Isolate* isolate);
-
- protected:
- Handle<Code> LoadElementStub(Handle<JSObject> receiver);
- virtual Handle<Code> pre_monomorphic_stub() const {
- return pre_monomorphic_stub(isolate());
- }
-
- private:
- Handle<Code> generic_stub() const { return generic_stub(isolate()); }
- Handle<Code> indexed_interceptor_stub() {
- return isolate()->builtins()->KeyedLoadIC_IndexedInterceptor();
- }
- Handle<Code> sloppy_arguments_stub() {
- return isolate()->builtins()->KeyedLoadIC_SloppyArguments();
- }
- Handle<Code> string_stub() {
- return isolate()->builtins()->KeyedLoadIC_String();
- }
-
- static void Clear(Isolate* isolate,
- Address address,
- Code* target,
- ConstantPoolArray* constant_pool);
-
- friend class IC;
-};
-
-
-class StoreIC: public IC {
- public:
- class StrictModeState: public BitField<StrictMode, 1, 1> {};
- static ExtraICState ComputeExtraICState(StrictMode flag) {
- return StrictModeState::encode(flag);
- }
- static StrictMode GetStrictMode(ExtraICState state) {
- return StrictModeState::decode(state);
- }
-
- // For convenience, a statically declared encoding of strict mode extra
- // IC state.
- static const ExtraICState kStrictModeState =
- 1 << StrictModeState::kShift;
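-
-  // For example (assuming SLOPPY == 0 and STRICT == 1): with the bit at
-  // shift 1, ComputeExtraICState(STRICT) == 1 << 1 == kStrictModeState and
-  // ComputeExtraICState(SLOPPY) == 0.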
-
- enum ParameterIndices {
- kReceiverIndex,
- kNameIndex,
- kValueIndex,
- kParameterCount
- };
- static const Register ReceiverRegister();
- static const Register NameRegister();
- static const Register ValueRegister();
-
- StoreIC(FrameDepth depth, Isolate* isolate)
- : IC(depth, isolate) {
- DCHECK(IsStoreStub());
- }
-
- StrictMode strict_mode() const {
- return StrictModeState::decode(extra_ic_state());
- }
-
- // Code generators for stub routines. Only called once at startup.
- static void GenerateSlow(MacroAssembler* masm);
- static void GenerateInitialize(MacroAssembler* masm) { GenerateMiss(masm); }
- static void GeneratePreMonomorphic(MacroAssembler* masm) {
- GenerateMiss(masm);
- }
- static void GenerateMiss(MacroAssembler* masm);
- static void GenerateMegamorphic(MacroAssembler* masm);
- static void GenerateNormal(MacroAssembler* masm);
- static void GenerateRuntimeSetProperty(MacroAssembler* masm,
- StrictMode strict_mode);
-
- static Handle<Code> initialize_stub(Isolate* isolate,
- StrictMode strict_mode);
-
- MUST_USE_RESULT MaybeHandle<Object> Store(
- Handle<Object> object,
- Handle<Name> name,
- Handle<Object> value,
- JSReceiver::StoreFromKeyed store_mode =
- JSReceiver::CERTAINLY_NOT_STORE_FROM_KEYED);
-
- bool LookupForWrite(LookupIterator* it, Handle<Object> value,
- JSReceiver::StoreFromKeyed store_mode);
-
- protected:
- virtual Handle<Code> megamorphic_stub();
-
- // Stub accessors.
- virtual Handle<Code> generic_stub() const;
-
- virtual Handle<Code> slow_stub() const {
- return isolate()->builtins()->StoreIC_Slow();
- }
-
- virtual Handle<Code> pre_monomorphic_stub() const {
- return pre_monomorphic_stub(isolate(), strict_mode());
- }
-
- static Handle<Code> pre_monomorphic_stub(Isolate* isolate,
- StrictMode strict_mode);
-
- // Update the inline cache and the global stub cache based on the
- // lookup result.
- void UpdateCaches(LookupIterator* lookup, Handle<Object> value,
- JSReceiver::StoreFromKeyed store_mode);
- virtual Handle<Code> CompileHandler(LookupIterator* lookup,
- Handle<Object> value,
- CacheHolderFlag cache_holder);
-
- private:
- void set_target(Code* code) {
- // Strict mode must be preserved across IC patching.
- DCHECK(GetStrictMode(code->extra_ic_state()) ==
- GetStrictMode(target()->extra_ic_state()));
- IC::set_target(code);
- }
-
- static void Clear(Isolate* isolate,
- Address address,
- Code* target,
- ConstantPoolArray* constant_pool);
-
- friend class IC;
-};
-
-
-enum KeyedStoreCheckMap {
- kDontCheckMap,
- kCheckMap
-};
-
-
-enum KeyedStoreIncrementLength {
- kDontIncrementLength,
- kIncrementLength
-};
-
-
-class KeyedStoreIC: public StoreIC {
- public:
-  // ExtraICState bits (building on IC).
- class ExtraICStateKeyedAccessStoreMode:
- public BitField<KeyedAccessStoreMode, 2, 4> {}; // NOLINT
-
- static ExtraICState ComputeExtraICState(StrictMode flag,
- KeyedAccessStoreMode mode) {
- return StrictModeState::encode(flag) |
- ExtraICStateKeyedAccessStoreMode::encode(mode);
- }
-
- static KeyedAccessStoreMode GetKeyedAccessStoreMode(
- ExtraICState extra_state) {
- return ExtraICStateKeyedAccessStoreMode::decode(extra_state);
- }
-
-  // The map register isn't part of the normal call specification, but
-  // ElementsTransitionAndStoreStub, used in polymorphic keyed store
-  // stub implementations, requires it to be initialized.
- static const Register MapRegister();
-
- KeyedStoreIC(FrameDepth depth, Isolate* isolate)
- : StoreIC(depth, isolate) {
- DCHECK(target()->is_keyed_store_stub());
- }
-
- MUST_USE_RESULT MaybeHandle<Object> Store(Handle<Object> object,
- Handle<Object> name,
- Handle<Object> value);
-
- // Code generators for stub routines. Only called once at startup.
- static void GenerateInitialize(MacroAssembler* masm) { GenerateMiss(masm); }
- static void GeneratePreMonomorphic(MacroAssembler* masm) {
- GenerateMiss(masm);
- }
- static void GenerateMiss(MacroAssembler* masm);
- static void GenerateSlow(MacroAssembler* masm);
- static void GenerateRuntimeSetProperty(MacroAssembler* masm,
- StrictMode strict_mode);
- static void GenerateGeneric(MacroAssembler* masm, StrictMode strict_mode);
- static void GenerateSloppyArguments(MacroAssembler* masm);
-
- protected:
- virtual Handle<Code> pre_monomorphic_stub() const {
- return pre_monomorphic_stub(isolate(), strict_mode());
- }
- static Handle<Code> pre_monomorphic_stub(Isolate* isolate,
- StrictMode strict_mode) {
- if (strict_mode == STRICT) {
- return isolate->builtins()->KeyedStoreIC_PreMonomorphic_Strict();
- } else {
- return isolate->builtins()->KeyedStoreIC_PreMonomorphic();
- }
- }
- virtual Handle<Code> slow_stub() const {
- return isolate()->builtins()->KeyedStoreIC_Slow();
- }
- virtual Handle<Code> megamorphic_stub() {
- if (strict_mode() == STRICT) {
- return isolate()->builtins()->KeyedStoreIC_Generic_Strict();
- } else {
- return isolate()->builtins()->KeyedStoreIC_Generic();
- }
- }
-
- Handle<Code> StoreElementStub(Handle<JSObject> receiver,
- KeyedAccessStoreMode store_mode);
-
- private:
- void set_target(Code* code) {
- // Strict mode must be preserved across IC patching.
- DCHECK(GetStrictMode(code->extra_ic_state()) == strict_mode());
- IC::set_target(code);
- }
-
- // Stub accessors.
- virtual Handle<Code> generic_stub() const {
- if (strict_mode() == STRICT) {
- return isolate()->builtins()->KeyedStoreIC_Generic_Strict();
- } else {
- return isolate()->builtins()->KeyedStoreIC_Generic();
- }
- }
-
- Handle<Code> sloppy_arguments_stub() {
- return isolate()->builtins()->KeyedStoreIC_SloppyArguments();
- }
-
- static void Clear(Isolate* isolate,
- Address address,
- Code* target,
- ConstantPoolArray* constant_pool);
-
- KeyedAccessStoreMode GetStoreMode(Handle<JSObject> receiver,
- Handle<Object> key,
- Handle<Object> value);
-
- Handle<Map> ComputeTransitionedMap(Handle<Map> map,
- KeyedAccessStoreMode store_mode);
-
- friend class IC;
-};
-
-
-// Mode to overwrite BinaryExpression values.
-enum OverwriteMode { NO_OVERWRITE, OVERWRITE_LEFT, OVERWRITE_RIGHT };
-
-// Type-recording BinaryOpIC, which records the types of its inputs and
-// outputs.
-class BinaryOpIC: public IC {
- public:
- class State V8_FINAL BASE_EMBEDDED {
- public:
- State(Isolate* isolate, ExtraICState extra_ic_state);
-
- State(Isolate* isolate, Token::Value op, OverwriteMode mode)
- : op_(op), mode_(mode), left_kind_(NONE), right_kind_(NONE),
- result_kind_(NONE), isolate_(isolate) {
- DCHECK_LE(FIRST_TOKEN, op);
- DCHECK_LE(op, LAST_TOKEN);
- }
-
- InlineCacheState GetICState() const {
- if (Max(left_kind_, right_kind_) == NONE) {
- return ::v8::internal::UNINITIALIZED;
- }
- if (Max(left_kind_, right_kind_) == GENERIC) {
- return ::v8::internal::MEGAMORPHIC;
- }
- if (Min(left_kind_, right_kind_) == GENERIC) {
- return ::v8::internal::GENERIC;
- }
- return ::v8::internal::MONOMORPHIC;
- }
-
- ExtraICState GetExtraICState() const;
-
- static void GenerateAheadOfTime(
- Isolate*, void (*Generate)(Isolate*, const State&));
-
- bool CanReuseDoubleBox() const {
- return (result_kind_ > SMI && result_kind_ <= NUMBER) &&
- ((mode_ == OVERWRITE_LEFT &&
- left_kind_ > SMI && left_kind_ <= NUMBER) ||
- (mode_ == OVERWRITE_RIGHT &&
- right_kind_ > SMI && right_kind_ <= NUMBER));
- }
-
- // Returns true if the IC _could_ create allocation mementos.
- bool CouldCreateAllocationMementos() const {
- if (left_kind_ == STRING || right_kind_ == STRING) {
- DCHECK_EQ(Token::ADD, op_);
- return true;
- }
- return false;
- }
-
- // Returns true if the IC _should_ create allocation mementos.
- bool ShouldCreateAllocationMementos() const {
- return FLAG_allocation_site_pretenuring &&
- CouldCreateAllocationMementos();
- }
-
- bool HasSideEffects() const {
- return Max(left_kind_, right_kind_) == GENERIC;
- }
-
- // Returns true if the IC should enable the inline smi code (i.e. if either
- // parameter may be a smi).
- bool UseInlinedSmiCode() const {
- return KindMaybeSmi(left_kind_) || KindMaybeSmi(right_kind_);
- }
-
- static const int FIRST_TOKEN = Token::BIT_OR;
- static const int LAST_TOKEN = Token::MOD;
-
- Token::Value op() const { return op_; }
- OverwriteMode mode() const { return mode_; }
- Maybe<int> fixed_right_arg() const { return fixed_right_arg_; }
-
- Type* GetLeftType(Zone* zone) const {
- return KindToType(left_kind_, zone);
- }
- Type* GetRightType(Zone* zone) const {
- return KindToType(right_kind_, zone);
- }
- Type* GetResultType(Zone* zone) const;
-
- void Update(Handle<Object> left,
- Handle<Object> right,
- Handle<Object> result);
-
- Isolate* isolate() const { return isolate_; }
-
- private:
- friend OStream& operator<<(OStream& os, const BinaryOpIC::State& s);
-
- enum Kind { NONE, SMI, INT32, NUMBER, STRING, GENERIC };
-
- Kind UpdateKind(Handle<Object> object, Kind kind) const;
-
- static const char* KindToString(Kind kind);
- static Type* KindToType(Kind kind, Zone* zone);
- static bool KindMaybeSmi(Kind kind) {
- return (kind >= SMI && kind <= NUMBER) || kind == GENERIC;
- }
-
- // We truncate the last bit of the token.
- STATIC_ASSERT(LAST_TOKEN - FIRST_TOKEN < (1 << 4));
- class OpField: public BitField<int, 0, 4> {};
- class OverwriteModeField: public BitField<OverwriteMode, 4, 2> {};
- class ResultKindField: public BitField<Kind, 6, 3> {};
- class LeftKindField: public BitField<Kind, 9, 3> {};
- // When fixed right arg is set, we don't need to store the right kind.
- // Thus the two fields can overlap.
- class HasFixedRightArgField: public BitField<bool, 12, 1> {};
- class FixedRightArgValueField: public BitField<int, 13, 4> {};
- class RightKindField: public BitField<Kind, 13, 3> {};
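-
-    // Illustrative layout of the packed state, assuming ops are encoded
-    // relative to FIRST_TOKEN: bits 0-3 op, bits 4-5 overwrite mode, bits
-    // 6-8 result kind, bits 9-11 left kind, bit 12 has-fixed-right-arg, and
-    // bits 13+ hold either log2 of the fixed right arg (cf. WhichPowerOf2 in
-    // State::Update) or the right kind.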
-
- Token::Value op_;
- OverwriteMode mode_;
- Kind left_kind_;
- Kind right_kind_;
- Kind result_kind_;
- Maybe<int> fixed_right_arg_;
- Isolate* isolate_;
- };
-
- explicit BinaryOpIC(Isolate* isolate) : IC(EXTRA_CALL_FRAME, isolate) { }
-
- static Builtins::JavaScript TokenToJSBuiltin(Token::Value op);
-
- MaybeHandle<Object> Transition(Handle<AllocationSite> allocation_site,
- Handle<Object> left,
- Handle<Object> right) V8_WARN_UNUSED_RESULT;
-};
-
-
-OStream& operator<<(OStream& os, const BinaryOpIC::State& s);
-
-
-class CompareIC: public IC {
- public:
- // The type/state lattice is defined by the following inequations:
- // UNINITIALIZED < ...
- // ... < GENERIC
- // SMI < NUMBER
- // INTERNALIZED_STRING < STRING
- // KNOWN_OBJECT < OBJECT
- enum State {
- UNINITIALIZED,
- SMI,
- NUMBER,
- STRING,
- INTERNALIZED_STRING,
- UNIQUE_NAME, // Symbol or InternalizedString
- OBJECT, // JSObject
- KNOWN_OBJECT, // JSObject with specific map (faster check)
- GENERIC
- };
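-
-  // Example walk up the lattice: a site comparing 1 < 2 moves UNINITIALIZED
-  // -> SMI; a later 1 < 2.5 widens it to NUMBER; any non-numeric operand
-  // after that drops the stub to GENERIC (see TargetState in ic.cc).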
-
- static State NewInputState(State old_state, Handle<Object> value);
-
- static Type* StateToType(Zone* zone,
- State state,
- Handle<Map> map = Handle<Map>());
-
- static void StubInfoToType(uint32_t stub_key, Type** left_type,
- Type** right_type, Type** overall_type,
- Handle<Map> map, Zone* zone);
-
- CompareIC(Isolate* isolate, Token::Value op)
- : IC(EXTRA_CALL_FRAME, isolate), op_(op) { }
-
- // Update the inline cache for the given operands.
- Code* UpdateCaches(Handle<Object> x, Handle<Object> y);
-
-
- // Factory method for getting an uninitialized compare stub.
- static Handle<Code> GetUninitialized(Isolate* isolate, Token::Value op);
-
- // Helper function for computing the condition for a compare operation.
- static Condition ComputeCondition(Token::Value op);
-
- static const char* GetStateName(State state);
-
- private:
- static bool HasInlinedSmiCode(Address address);
-
- State TargetState(State old_state,
- State old_left,
- State old_right,
- bool has_inlined_smi_code,
- Handle<Object> x,
- Handle<Object> y);
-
- bool strict() const { return op_ == Token::EQ_STRICT; }
- Condition GetCondition() const { return ComputeCondition(op_); }
-
- static Code* GetRawUninitialized(Isolate* isolate, Token::Value op);
-
- static void Clear(Isolate* isolate,
- Address address,
- Code* target,
- ConstantPoolArray* constant_pool);
-
- Token::Value op_;
-
- friend class IC;
-};
-
-
-class CompareNilIC: public IC {
- public:
- explicit CompareNilIC(Isolate* isolate) : IC(EXTRA_CALL_FRAME, isolate) {}
-
- Handle<Object> CompareNil(Handle<Object> object);
-
- static Handle<Code> GetUninitialized();
-
- static void Clear(Address address,
- Code* target,
- ConstantPoolArray* constant_pool);
-
- static Handle<Object> DoCompareNilSlow(Isolate* isolate, NilValue nil,
- Handle<Object> object);
-};
-
-
-class ToBooleanIC: public IC {
- public:
- explicit ToBooleanIC(Isolate* isolate) : IC(EXTRA_CALL_FRAME, isolate) { }
-
- Handle<Object> ToBoolean(Handle<Object> object);
-};
-
-
-// Helper for BinaryOpIC and CompareIC.
-enum InlinedSmiCheck { ENABLE_INLINED_SMI_CHECK, DISABLE_INLINED_SMI_CHECK };
-void PatchInlinedSmiCode(Address address, InlinedSmiCheck check);
-
-DECLARE_RUNTIME_FUNCTION(KeyedLoadIC_MissFromStubFailure);
-DECLARE_RUNTIME_FUNCTION(KeyedStoreIC_MissFromStubFailure);
-DECLARE_RUNTIME_FUNCTION(UnaryOpIC_Miss);
-DECLARE_RUNTIME_FUNCTION(StoreIC_MissFromStubFailure);
-DECLARE_RUNTIME_FUNCTION(ElementsTransitionAndStoreIC_Miss);
-DECLARE_RUNTIME_FUNCTION(BinaryOpIC_Miss);
-DECLARE_RUNTIME_FUNCTION(BinaryOpIC_MissWithAllocationSite);
-DECLARE_RUNTIME_FUNCTION(CompareNilIC_Miss);
-DECLARE_RUNTIME_FUNCTION(ToBooleanIC_Miss);
-
-
-} } // namespace v8::internal
-
-#endif // V8_IC_H_
--- /dev/null
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_ARM
+
+#include "src/codegen.h"
+#include "src/ic/ic.h"
+#include "src/ic/stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+
+// ----------------------------------------------------------------------------
+// Static IC stub generators.
+//
+
+#define __ ACCESS_MASM(masm)
+
+
+static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type,
+ Label* global_object) {
+ // Register usage:
+ // type: holds the receiver instance type on entry.
+ __ cmp(type, Operand(JS_GLOBAL_OBJECT_TYPE));
+ __ b(eq, global_object);
+ __ cmp(type, Operand(JS_BUILTINS_OBJECT_TYPE));
+ __ b(eq, global_object);
+ __ cmp(type, Operand(JS_GLOBAL_PROXY_TYPE));
+ __ b(eq, global_object);
+}
+
+
+// Helper function used from LoadIC::GenerateNormal.
+//
+// elements: Property dictionary. It is not clobbered if a jump to the miss
+// label is done.
+// name: Property name. It is not clobbered if a jump to the miss label is
+// done.
+// result: Register for the result. It is only updated if a jump to the miss
+// label is not done. Can be the same as elements or name, in which case
+// that register is clobbered on the non-miss path.
+// The two scratch registers need to be different from elements, name and
+// result.
+// The generated code assumes that the receiver has slow properties,
+// is not a global object and does not have interceptors.
+static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss,
+ Register elements, Register name,
+ Register result, Register scratch1,
+ Register scratch2) {
+ // Main use of the scratch registers.
+ // scratch1: Used as temporary and to hold the capacity of the property
+ // dictionary.
+ // scratch2: Used as temporary.
+ Label done;
+
+ // Probe the dictionary.
+ NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
+ name, scratch1, scratch2);
+
+ // If probing finds an entry check that the value is a normal
+ // property.
+ __ bind(&done); // scratch2 == elements + 4 * index
+ const int kElementsStartOffset =
+ NameDictionary::kHeaderSize +
+ NameDictionary::kElementsStartIndex * kPointerSize;
+ const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
+ __ ldr(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
+ __ tst(scratch1, Operand(PropertyDetails::TypeField::kMask << kSmiTagSize));
+ __ b(ne, miss);
+
+ // Get the value at the masked, scaled index and return.
+ __ ldr(result,
+ FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize));
+}
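+
+
+// Note on the layout used above: NameDictionary entries are (key, value,
+// details) triples, so with scratch2 left by the probe such that the found
+// key sits at kElementsStartOffset, the value is one word further
+// (kElementsStartOffset + kPointerSize) and the details word two words
+// further (kDetailsOffset).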
+
+
+// Helper function used from StoreIC::GenerateNormal.
+//
+// elements: Property dictionary. It is not clobbered if a jump to the miss
+// label is done.
+// name: Property name. It is not clobbered if a jump to the miss label is
+// done.
+// value: The value to store.
+// The two scratch registers need to be different from elements, name and
+// value.
+// The generated code assumes that the receiver has slow properties,
+// is not a global object and does not have interceptors.
+static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss,
+ Register elements, Register name,
+ Register value, Register scratch1,
+ Register scratch2) {
+ // Main use of the scratch registers.
+ // scratch1: Used as temporary and to hold the capacity of the property
+ // dictionary.
+ // scratch2: Used as temporary.
+ Label done;
+
+ // Probe the dictionary.
+ NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
+ name, scratch1, scratch2);
+
+ // If probing finds an entry in the dictionary check that the value
+ // is a normal property that is not read only.
+ __ bind(&done); // scratch2 == elements + 4 * index
+ const int kElementsStartOffset =
+ NameDictionary::kHeaderSize +
+ NameDictionary::kElementsStartIndex * kPointerSize;
+ const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
+ const int kTypeAndReadOnlyMask =
+ (PropertyDetails::TypeField::kMask |
+ PropertyDetails::AttributesField::encode(READ_ONLY))
+ << kSmiTagSize;
+ __ ldr(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
+ __ tst(scratch1, Operand(kTypeAndReadOnlyMask));
+ __ b(ne, miss);
+
+ // Store the value at the masked, scaled index and return.
+ const int kValueOffset = kElementsStartOffset + kPointerSize;
+ __ add(scratch2, scratch2, Operand(kValueOffset - kHeapObjectTag));
+ __ str(value, MemOperand(scratch2));
+
+ // Update the write barrier. Make sure not to clobber the value.
+ __ mov(scratch1, value);
+ __ RecordWrite(elements, scratch2, scratch1, kLRHasNotBeenSaved,
+ kDontSaveFPRegs);
+}
+
+
+// Checks the receiver for special cases (value type, slow case bits).
+// Falls through for regular JS object.
+static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
+ Register receiver, Register map,
+ Register scratch,
+ int interceptor_bit, Label* slow) {
+ // Check that the object isn't a smi.
+ __ JumpIfSmi(receiver, slow);
+ // Get the map of the receiver.
+ __ ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ // Check bit field.
+ __ ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
+ __ tst(scratch,
+ Operand((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)));
+ __ b(ne, slow);
+ // Check that the object is some kind of JS object EXCEPT JS Value type.
+ // In the case that the object is a value-wrapper object,
+  // we enter the runtime system to make sure that indexing into string
+  // objects works as intended.
+ DCHECK(JS_OBJECT_TYPE > JS_VALUE_TYPE);
+ __ ldrb(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ __ cmp(scratch, Operand(JS_OBJECT_TYPE));
+ __ b(lt, slow);
+}
+
+
+// Loads an indexed element from a fast case array.
+// If not_fast_array is NULL, doesn't perform the elements map check.
+static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
+ Register key, Register elements,
+ Register scratch1, Register scratch2,
+ Register result, Label* not_fast_array,
+ Label* out_of_range) {
+ // Register use:
+ //
+ // receiver - holds the receiver on entry.
+ // Unchanged unless 'result' is the same register.
+ //
+ // key - holds the smi key on entry.
+ // Unchanged unless 'result' is the same register.
+ //
+ // elements - holds the elements of the receiver on exit.
+ //
+ // result - holds the result on exit if the load succeeded.
+  //             Allowed to be the same as 'receiver' or 'key'.
+ // Unchanged on bailout so 'receiver' and 'key' can be safely
+ // used by further computation.
+ //
+ // Scratch registers:
+ //
+ // scratch1 - used to hold elements map and elements length.
+ // Holds the elements map if not_fast_array branch is taken.
+ //
+ // scratch2 - used to hold the loaded value.
+
+ __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ if (not_fast_array != NULL) {
+ // Check that the object is in fast mode and writable.
+ __ ldr(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset));
+ __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
+ __ cmp(scratch1, ip);
+ __ b(ne, not_fast_array);
+ } else {
+ __ AssertFastElements(elements);
+ }
+ // Check that the key (index) is within bounds.
+ __ ldr(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
+ __ cmp(key, Operand(scratch1));
+ __ b(hs, out_of_range);
+ // Fast case: Do the load.
+ __ add(scratch1, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ ldr(scratch2, MemOperand::PointerAddressFromSmiKey(scratch1, key));
+ __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+ __ cmp(scratch2, ip);
+ // In case the loaded value is the_hole we have to consult GetProperty
+ // to ensure the prototype chain is searched.
+ __ b(eq, out_of_range);
+ __ mov(result, scratch2);
+}
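+
+
+// For example, loading a[1] from an array created as [0, , 2] finds the_hole
+// in the backing store and takes the out_of_range path, so the runtime can
+// consult the prototype chain (the element may be defined on
+// Array.prototype).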
+
+
+// Checks whether a key is an array index string or a unique name.
+// Falls through if the key is a unique name.
+static void GenerateKeyNameCheck(MacroAssembler* masm, Register key,
+ Register map, Register hash,
+ Label* index_string, Label* not_unique) {
+ // The key is not a smi.
+ Label unique;
+ // Is it a name?
+ __ CompareObjectType(key, map, hash, LAST_UNIQUE_NAME_TYPE);
+ __ b(hi, not_unique);
+ STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
+ __ b(eq, &unique);
+
+ // Is the string an array index, with cached numeric value?
+ __ ldr(hash, FieldMemOperand(key, Name::kHashFieldOffset));
+ __ tst(hash, Operand(Name::kContainsCachedArrayIndexMask));
+ __ b(eq, index_string);
+
+ // Is the string internalized? We know it's a string, so a single
+ // bit test is enough.
+ // map: key map
+ __ ldrb(hash, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ STATIC_ASSERT(kInternalizedTag == 0);
+ __ tst(hash, Operand(kIsNotInternalizedMask));
+ __ b(ne, not_unique);
+
+ __ bind(&unique);
+}
+
+
+void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
+ // The return address is in lr.
+ Register receiver = ReceiverRegister();
+ Register name = NameRegister();
+ DCHECK(receiver.is(r1));
+ DCHECK(name.is(r2));
+
+ // Probe the stub cache.
+ Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
+ Code::ComputeHandlerFlags(Code::LOAD_IC));
+ masm->isolate()->stub_cache()->GenerateProbe(masm, flags, receiver, name, r3,
+ r4, r5, r6);
+
+ // Cache miss: Jump to runtime.
+ GenerateMiss(masm);
+}
+
+
+void LoadIC::GenerateNormal(MacroAssembler* masm) {
+ Register dictionary = r0;
+ DCHECK(!dictionary.is(ReceiverRegister()));
+ DCHECK(!dictionary.is(NameRegister()));
+
+ Label slow;
+
+ __ ldr(dictionary,
+ FieldMemOperand(ReceiverRegister(), JSObject::kPropertiesOffset));
+ GenerateDictionaryLoad(masm, &slow, dictionary, NameRegister(), r0, r3, r4);
+ __ Ret();
+
+ // Dictionary load failed, go slow (but don't miss).
+ __ bind(&slow);
+ GenerateRuntimeGetProperty(masm);
+}
+
+
+// A register that isn't one of the parameters to the load IC.
+static const Register LoadIC_TempRegister() { return r3; }
+
+
+void LoadIC::GenerateMiss(MacroAssembler* masm) {
+ // The return address is in lr.
+ Isolate* isolate = masm->isolate();
+
+ __ IncrementCounter(isolate->counters()->load_miss(), 1, r3, r4);
+
+ __ mov(LoadIC_TempRegister(), ReceiverRegister());
+ __ Push(LoadIC_TempRegister(), NameRegister());
+
+ // Perform tail call to the entry.
+ ExternalReference ref = ExternalReference(IC_Utility(kLoadIC_Miss), isolate);
+ __ TailCallExternalReference(ref, 2, 1);
+}
+
+
+void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
+ // The return address is in lr.
+
+ __ mov(LoadIC_TempRegister(), ReceiverRegister());
+ __ Push(LoadIC_TempRegister(), NameRegister());
+
+ __ TailCallRuntime(Runtime::kGetProperty, 2, 1);
+}
+
+
+static MemOperand GenerateMappedArgumentsLookup(
+ MacroAssembler* masm, Register object, Register key, Register scratch1,
+ Register scratch2, Register scratch3, Label* unmapped_case,
+ Label* slow_case) {
+ Heap* heap = masm->isolate()->heap();
+
+ // Check that the receiver is a JSObject. Because of the map check
+ // later, we do not need to check for interceptors or whether it
+ // requires access checks.
+ __ JumpIfSmi(object, slow_case);
+ // Check that the object is some kind of JSObject.
+ __ CompareObjectType(object, scratch1, scratch2, FIRST_JS_RECEIVER_TYPE);
+ __ b(lt, slow_case);
+
+ // Check that the key is a positive smi.
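+ // The mask 0x80000001 tests the sign bit and the smi tag bit in a single
+ // instruction; both must be clear for a non-negative smi.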
+ __ tst(key, Operand(0x80000001));
+ __ b(ne, slow_case);
+
+ // Load the elements into scratch1 and check its map.
+ Handle<Map> arguments_map(heap->sloppy_arguments_elements_map());
+ __ ldr(scratch1, FieldMemOperand(object, JSObject::kElementsOffset));
+ __ CheckMap(scratch1, scratch2, arguments_map, slow_case, DONT_DO_SMI_CHECK);
+
+ // Check if element is in the range of mapped arguments. If not, jump
+ // to the unmapped lookup with the parameter map in scratch1.
+ __ ldr(scratch2, FieldMemOperand(scratch1, FixedArray::kLengthOffset));
+ __ sub(scratch2, scratch2, Operand(Smi::FromInt(2)));
+ __ cmp(key, Operand(scratch2));
+ __ b(cs, unmapped_case);
+
+ // Load element index and check whether it is the hole.
+ const int kOffset =
+ FixedArray::kHeaderSize + 2 * kPointerSize - kHeapObjectTag;
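+ // The first two elements of the parameter map are the context and the
+ // arguments backing store, hence the 2 * kPointerSize skipped in kOffset.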
+
+ __ mov(scratch3, Operand(kPointerSize >> 1));
+ __ mul(scratch3, key, scratch3);
+ __ add(scratch3, scratch3, Operand(kOffset));
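+ // The key is a smi (index << 1), so multiplying by kPointerSize >> 1
+ // produces index * kPointerSize.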
+
+ __ ldr(scratch2, MemOperand(scratch1, scratch3));
+ __ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex);
+ __ cmp(scratch2, scratch3);
+ __ b(eq, unmapped_case);
+
+ // Load value from context and return it. We can reuse scratch1 because
+ // we do not jump to the unmapped lookup (which requires the parameter
+ // map in scratch1).
+ __ ldr(scratch1, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
+ __ mov(scratch3, Operand(kPointerSize >> 1));
+ __ mul(scratch3, scratch2, scratch3);
+ __ add(scratch3, scratch3, Operand(Context::kHeaderSize - kHeapObjectTag));
+ return MemOperand(scratch1, scratch3);
+}
+
+
+static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
+ Register key,
+ Register parameter_map,
+ Register scratch,
+ Label* slow_case) {
+ // Element is in arguments backing store, which is referenced by the
+ // second element of the parameter_map. The parameter_map register
+ // must be loaded with the parameter map of the arguments object and is
+ // overwritten.
+ const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
+ Register backing_store = parameter_map;
+ __ ldr(backing_store, FieldMemOperand(parameter_map, kBackingStoreOffset));
+ Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
+ __ CheckMap(backing_store, scratch, fixed_array_map, slow_case,
+ DONT_DO_SMI_CHECK);
+ __ ldr(scratch, FieldMemOperand(backing_store, FixedArray::kLengthOffset));
+ __ cmp(key, Operand(scratch));
+ __ b(cs, slow_case);
+ __ mov(scratch, Operand(kPointerSize >> 1));
+ __ mul(scratch, key, scratch);
+ __ add(scratch, scratch, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ return MemOperand(backing_store, scratch);
+}
+
+
+void KeyedLoadIC::GenerateSloppyArguments(MacroAssembler* masm) {
+ // The return address is in lr.
+ Register receiver = ReceiverRegister();
+ Register key = NameRegister();
+ DCHECK(receiver.is(r1));
+ DCHECK(key.is(r2));
+
+ Label slow, notin;
+ MemOperand mapped_location = GenerateMappedArgumentsLookup(
+ masm, receiver, key, r0, r3, r4, ¬in, &slow);
+ __ ldr(r0, mapped_location);
+ __ Ret();
+ __ bind(¬in);
+ // The unmapped lookup expects that the parameter map is in r0.
+ MemOperand unmapped_location =
+ GenerateUnmappedArgumentsLookup(masm, key, r0, r3, &slow);
+ __ ldr(r0, unmapped_location);
+ __ LoadRoot(r3, Heap::kTheHoleValueRootIndex);
+ __ cmp(r0, r3);
+ __ b(eq, &slow);
+ __ Ret();
+ __ bind(&slow);
+ GenerateMiss(masm);
+}
+
+
+void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
+ Register receiver = ReceiverRegister();
+ Register key = NameRegister();
+ Register value = ValueRegister();
+ DCHECK(receiver.is(r1));
+ DCHECK(key.is(r2));
+ DCHECK(value.is(r0));
+
+ Label slow, notin;
+ MemOperand mapped_location = GenerateMappedArgumentsLookup(
+ masm, receiver, key, r3, r4, r5, ¬in, &slow);
+ __ str(value, mapped_location);
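+ // r6 = r3 + r5 is the absolute address of the slot just written; the
+ // write barrier below needs it.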
+ __ add(r6, r3, r5);
+ __ mov(r9, value);
+ __ RecordWrite(r3, r6, r9, kLRHasNotBeenSaved, kDontSaveFPRegs);
+ __ Ret();
+ __ bind(¬in);
+ // The unmapped lookup expects that the parameter map is in r3.
+ MemOperand unmapped_location =
+ GenerateUnmappedArgumentsLookup(masm, key, r3, r4, &slow);
+ __ str(value, unmapped_location);
+ __ add(r6, r3, r4);
+ __ mov(r9, value);
+ __ RecordWrite(r3, r6, r9, kLRHasNotBeenSaved, kDontSaveFPRegs);
+ __ Ret();
+ __ bind(&slow);
+ GenerateMiss(masm);
+}
+
+
+void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
+ // The return address is in lr.
+ Isolate* isolate = masm->isolate();
+
+ __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, r3, r4);
+
+ __ Push(ReceiverRegister(), NameRegister());
+
+ // Perform tail call to the entry.
+ ExternalReference ref =
+ ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate);
+
+ __ TailCallExternalReference(ref, 2, 1);
+}
+
+
+// IC register specifications
+const Register LoadIC::ReceiverRegister() { return r1; }
+const Register LoadIC::NameRegister() { return r2; }
+
+
+const Register LoadIC::SlotRegister() {
+ DCHECK(FLAG_vector_ics);
+ return r0;
+}
+
+
+const Register LoadIC::VectorRegister() {
+ DCHECK(FLAG_vector_ics);
+ return r3;
+}
+
+
+const Register StoreIC::ReceiverRegister() { return r1; }
+const Register StoreIC::NameRegister() { return r2; }
+const Register StoreIC::ValueRegister() { return r0; }
+
+
+const Register KeyedStoreIC::MapRegister() { return r3; }
+
+
+void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
+ // The return address is in lr.
+
+ __ Push(ReceiverRegister(), NameRegister());
+
+ __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
+}
+
+
+void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
+ // The return address is in lr.
+ Label slow, check_name, index_smi, index_name, property_array_property;
+ Label probe_dictionary, check_number_dictionary;
+
+ Register key = NameRegister();
+ Register receiver = ReceiverRegister();
+ DCHECK(key.is(r2));
+ DCHECK(receiver.is(r1));
+
+ Isolate* isolate = masm->isolate();
+
+ // Check that the key is a smi.
+ __ JumpIfNotSmi(key, &check_name);
+ __ bind(&index_smi);
+ // Now the key is known to be a smi. This place is also jumped to from below
+ // where a numeric string is converted to a smi.
+
+ GenerateKeyedLoadReceiverCheck(masm, receiver, r0, r3,
+ Map::kHasIndexedInterceptor, &slow);
+
+ // Check the receiver's map to see if it has fast elements.
+ __ CheckFastElements(r0, r3, &check_number_dictionary);
+
+ GenerateFastArrayLoad(masm, receiver, key, r0, r3, r4, r0, NULL, &slow);
+ __ IncrementCounter(isolate->counters()->keyed_load_generic_smi(), 1, r4, r3);
+ __ Ret();
+
+ __ bind(&check_number_dictionary);
+ __ ldr(r4, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ ldr(r3, FieldMemOperand(r4, JSObject::kMapOffset));
+
+ // Check whether the elements is a number dictionary.
+ // r3: elements map
+ // r4: elements
+ __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
+ __ cmp(r3, ip);
+ __ b(ne, &slow);
+ __ SmiUntag(r0, key);
+ __ LoadFromNumberDictionary(&slow, r4, key, r0, r0, r3, r5);
+ __ Ret();
+
+ // Slow case, key and receiver still in r2 and r1.
+ __ bind(&slow);
+ __ IncrementCounter(isolate->counters()->keyed_load_generic_slow(), 1, r4,
+ r3);
+ GenerateRuntimeGetProperty(masm);
+
+ __ bind(&check_name);
+ GenerateKeyNameCheck(masm, key, r0, r3, &index_name, &slow);
+
+ GenerateKeyedLoadReceiverCheck(masm, receiver, r0, r3,
+ Map::kHasNamedInterceptor, &slow);
+
+ // If the receiver is a fast-case object, check the keyed lookup
+ // cache. Otherwise probe the dictionary.
+ __ ldr(r3, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+ __ ldr(r4, FieldMemOperand(r3, HeapObject::kMapOffset));
+ __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
+ __ cmp(r4, ip);
+ __ b(eq, &probe_dictionary);
+
+ // Load the map of the receiver, compute the keyed lookup cache hash
+ // based on 32 bits of the map pointer and the name hash.
+ __ ldr(r0, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ mov(r3, Operand(r0, ASR, KeyedLookupCache::kMapHashShift));
+ __ ldr(r4, FieldMemOperand(key, Name::kHashFieldOffset));
+ __ eor(r3, r3, Operand(r4, ASR, Name::kHashShift));
+ int mask = KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask;
+ __ And(r3, r3, Operand(mask));
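+ // r3 now holds the bucket index:
+ // ((map >> kMapHashShift) ^ (hash_field >> kHashShift)) & mask.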
+
+ // Load the key (consisting of map and unique name) from the cache and
+ // check for match.
+ Label load_in_object_property;
+ static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
+ Label hit_on_nth_entry[kEntriesPerBucket];
+ ExternalReference cache_keys =
+ ExternalReference::keyed_lookup_cache_keys(isolate);
+
+ __ mov(r4, Operand(cache_keys));
+ __ add(r4, r4, Operand(r3, LSL, kPointerSizeLog2 + 1));
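+ // Each cache entry holds a (map, name) pair, so the bucket index is
+ // scaled by 2 * kPointerSize (LSL by kPointerSizeLog2 + 1).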
+
+ for (int i = 0; i < kEntriesPerBucket - 1; i++) {
+ Label try_next_entry;
+ // Load map and move r4 to next entry.
+ __ ldr(r5, MemOperand(r4, kPointerSize * 2, PostIndex));
+ __ cmp(r0, r5);
+ __ b(ne, &try_next_entry);
+ __ ldr(r5, MemOperand(r4, -kPointerSize)); // Load name
+ __ cmp(key, r5);
+ __ b(eq, &hit_on_nth_entry[i]);
+ __ bind(&try_next_entry);
+ }
+
+ // Last entry: Load map and move r4 to name.
+ __ ldr(r5, MemOperand(r4, kPointerSize, PostIndex));
+ __ cmp(r0, r5);
+ __ b(ne, &slow);
+ __ ldr(r5, MemOperand(r4));
+ __ cmp(key, r5);
+ __ b(ne, &slow);
+
+ // Get field offset.
+ // r0 : receiver's map
+ // r3 : lookup cache index
+ ExternalReference cache_field_offsets =
+ ExternalReference::keyed_lookup_cache_field_offsets(isolate);
+
+ // Hit on nth entry.
+ for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
+ __ bind(&hit_on_nth_entry[i]);
+ __ mov(r4, Operand(cache_field_offsets));
+ if (i != 0) {
+ __ add(r3, r3, Operand(i));
+ }
+ __ ldr(r5, MemOperand(r4, r3, LSL, kPointerSizeLog2));
+ __ ldrb(r6, FieldMemOperand(r0, Map::kInObjectPropertiesOffset));
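+ // r5 = field offset - number of in-object properties. A non-negative
+ // result means the field lives in the properties backing store.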
+ __ sub(r5, r5, r6, SetCC);
+ __ b(ge, &property_array_property);
+ if (i != 0) {
+ __ jmp(&load_in_object_property);
+ }
+ }
+
+ // Load in-object property.
+ __ bind(&load_in_object_property);
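+ // r5 is negative here (the in-object case), so adding the instance size
+ // (in words) gives the slot's word offset from the start of the object.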
+ __ ldrb(r6, FieldMemOperand(r0, Map::kInstanceSizeOffset));
+ __ add(r6, r6, r5); // Index from start of object.
+ __ sub(receiver, receiver, Operand(kHeapObjectTag)); // Remove the heap tag.
+ __ ldr(r0, MemOperand(receiver, r6, LSL, kPointerSizeLog2));
+ __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(), 1,
+ r4, r3);
+ __ Ret();
+
+ // Load property array property.
+ __ bind(&property_array_property);
+ __ ldr(receiver, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+ __ add(receiver, receiver, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ ldr(r0, MemOperand(receiver, r5, LSL, kPointerSizeLog2));
+ __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(), 1,
+ r4, r3);
+ __ Ret();
+
+ // Do a quick inline probe of the receiver's dictionary, if it
+ // exists.
+ __ bind(&probe_dictionary);
+ // r3: elements
+ __ ldr(r0, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
+ GenerateGlobalInstanceTypeCheck(masm, r0, &slow);
+ // Load the property to r0.
+ GenerateDictionaryLoad(masm, &slow, r3, key, r0, r5, r4);
+ __ IncrementCounter(isolate->counters()->keyed_load_generic_symbol(), 1, r4,
+ r3);
+ __ Ret();
+
+ __ bind(&index_name);
+ __ IndexFromHash(r3, key);
+ // Now jump to the place where smi keys are handled.
+ __ jmp(&index_smi);
+}
+
+
+void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
+ // Return address is in lr.
+ Label miss;
+
+ Register receiver = ReceiverRegister();
+ Register index = NameRegister();
+ Register scratch = r3;
+ Register result = r0;
+ DCHECK(!scratch.is(receiver) && !scratch.is(index));
+
+ StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
+ &miss, // When not a string.
+ &miss, // When not a number.
+ &miss, // When index out of range.
+ STRING_INDEX_IS_ARRAY_INDEX);
+ char_at_generator.GenerateFast(masm);
+ __ Ret();
+
+ StubRuntimeCallHelper call_helper;
+ char_at_generator.GenerateSlow(masm, call_helper);
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+}
+
+
+void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
+ // Return address is in lr.
+ Label slow;
+
+ Register receiver = ReceiverRegister();
+ Register key = NameRegister();
+ Register scratch1 = r3;
+ Register scratch2 = r4;
+ DCHECK(!scratch1.is(receiver) && !scratch1.is(key));
+ DCHECK(!scratch2.is(receiver) && !scratch2.is(key));
+
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(receiver, &slow);
+
+ // Check that the key is an array index, that is Uint32.
+ __ NonNegativeSmiTst(key);
+ __ b(ne, &slow);
+
+ // Get the map of the receiver.
+ __ ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
+
+ // Check that it has an indexed interceptor and that access checks
+ // are not enabled for this object.
+ __ ldrb(scratch2, FieldMemOperand(scratch1, Map::kBitFieldOffset));
+ __ and_(scratch2, scratch2, Operand(kSlowCaseBitFieldMask));
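+ // After masking, the check passes only if the indexed-interceptor bit is
+ // set and the access-check bit is clear.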
+ __ cmp(scratch2, Operand(1 << Map::kHasIndexedInterceptor));
+ __ b(ne, &slow);
+
+ // Everything is fine, call runtime.
+ __ Push(receiver, key); // Receiver, key.
+
+ // Perform tail call to the entry.
+ __ TailCallExternalReference(
+ ExternalReference(IC_Utility(kLoadElementWithInterceptor),
+ masm->isolate()),
+ 2, 1);
+
+ __ bind(&slow);
+ GenerateMiss(masm);
+}
+
+
+void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
+ // Push receiver, key and value for runtime call.
+ __ Push(ReceiverRegister(), NameRegister(), ValueRegister());
+
+ ExternalReference ref =
+ ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
+ __ TailCallExternalReference(ref, 3, 1);
+}
+
+
+void StoreIC::GenerateSlow(MacroAssembler* masm) {
+ // Push receiver, key and value for runtime call.
+ __ Push(ReceiverRegister(), NameRegister(), ValueRegister());
+
+ // The slow case calls into the runtime to complete the store without causing
+ // an IC miss that would otherwise cause a transition to the generic stub.
+ ExternalReference ref =
+ ExternalReference(IC_Utility(kStoreIC_Slow), masm->isolate());
+ __ TailCallExternalReference(ref, 3, 1);
+}
+
+
+void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
+ // Push receiver, key and value for runtime call.
+ __ Push(ReceiverRegister(), NameRegister(), ValueRegister());
+
+ // The slow case calls into the runtime to complete the store without causing
+ // an IC miss that would otherwise cause a transition to the generic stub.
+ ExternalReference ref =
+ ExternalReference(IC_Utility(kKeyedStoreIC_Slow), masm->isolate());
+ __ TailCallExternalReference(ref, 3, 1);
+}
+
+
+void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
+ StrictMode strict_mode) {
+ // Push receiver, key and value for runtime call.
+ __ Push(ReceiverRegister(), NameRegister(), ValueRegister());
+
+ __ mov(r0, Operand(Smi::FromInt(strict_mode))); // Strict mode.
+ __ Push(r0);
+
+ __ TailCallRuntime(Runtime::kSetProperty, 4, 1);
+}
+
+
+static void KeyedStoreGenerateGenericHelper(
+ MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
+ KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length,
+ Register value, Register key, Register receiver, Register receiver_map,
+ Register elements_map, Register elements) {
+ Label transition_smi_elements;
+ Label finish_object_store, non_double_value, transition_double_elements;
+ Label fast_double_without_map_check;
+
+ // Fast case: Do the store; the value could be either an Object or a double.
+ __ bind(fast_object);
+ Register scratch_value = r4;
+ Register address = r5;
+ if (check_map == kCheckMap) {
+ __ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
+ __ cmp(elements_map,
+ Operand(masm->isolate()->factory()->fixed_array_map()));
+ __ b(ne, fast_double);
+ }
+
+ // HOLECHECK: guards "A[i] = V"
+ // We have to go to the runtime if the current value is the hole because
+ // there may be a callback on the element.
+ Label holecheck_passed1;
+ __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ ldr(scratch_value,
+ MemOperand::PointerAddressFromSmiKey(address, key, PreIndex));
+ __ cmp(scratch_value, Operand(masm->isolate()->factory()->the_hole_value()));
+ __ b(ne, &holecheck_passed1);
+ __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value,
+ slow);
+
+ __ bind(&holecheck_passed1);
+
+ // Smi stores don't require further checks.
+ Label non_smi_value;
+ __ JumpIfNotSmi(value, &non_smi_value);
+
+ if (increment_length == kIncrementLength) {
+ // Add 1 to receiver->length.
+ __ add(scratch_value, key, Operand(Smi::FromInt(1)));
+ __ str(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ }
+ // It's irrelevant whether the array is smi-only when writing a smi.
+ __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ str(value, MemOperand::PointerAddressFromSmiKey(address, key));
+ __ Ret();
+
+ __ bind(&non_smi_value);
+ // Escape to elements kind transition case.
+ __ CheckFastObjectElements(receiver_map, scratch_value,
+ &transition_smi_elements);
+
+ // Fast elements array, store the value to the elements backing store.
+ __ bind(&finish_object_store);
+ if (increment_length == kIncrementLength) {
+ // Add 1 to receiver->length.
+ __ add(scratch_value, key, Operand(Smi::FromInt(1)));
+ __ str(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ }
+ __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ add(address, address, Operand::PointerOffsetFromSmiKey(key));
+ __ str(value, MemOperand(address));
+ // Update write barrier for the elements array address.
+ __ mov(scratch_value, value); // Preserve the value which is returned.
+ __ RecordWrite(elements, address, scratch_value, kLRHasNotBeenSaved,
+ kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+ __ Ret();
+
+ __ bind(fast_double);
+ if (check_map == kCheckMap) {
+ // Check for fast double array case. If this fails, call through to the
+ // runtime.
+ __ CompareRoot(elements_map, Heap::kFixedDoubleArrayMapRootIndex);
+ __ b(ne, slow);
+ }
+
+ // HOLECHECK: guards "A[i] double hole?"
+ // We have to see if the double version of the hole is present. If so,
+ // go to the runtime.
+ __ add(address, elements,
+ Operand((FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32)) -
+ kHeapObjectTag));
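+ // 'address' points at the upper word of each double (offset by
+ // sizeof(kHoleNanLower32)), so the single-word compare against
+ // kHoleNanUpper32 below detects the hole NaN. The smi key shifted left by
+ // kPointerSizeLog2 scales to kDoubleSize per element.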
+ __ ldr(scratch_value,
+ MemOperand(address, key, LSL, kPointerSizeLog2, PreIndex));
+ __ cmp(scratch_value, Operand(kHoleNanUpper32));
+ __ b(ne, &fast_double_without_map_check);
+ __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value,
+ slow);
+
+ __ bind(&fast_double_without_map_check);
+ __ StoreNumberToDoubleElements(value, key, elements, r3, d0,
+ &transition_double_elements);
+ if (increment_length == kIncrementLength) {
+ // Add 1 to receiver->length.
+ __ add(scratch_value, key, Operand(Smi::FromInt(1)));
+ __ str(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ }
+ __ Ret();
+
+ __ bind(&transition_smi_elements);
+ // Transition the array appropriately depending on the value type.
+ __ ldr(r4, FieldMemOperand(value, HeapObject::kMapOffset));
+ __ CompareRoot(r4, Heap::kHeapNumberMapRootIndex);
+ __ b(ne, &non_double_value);
+
+ // Value is a double. Transition FAST_SMI_ELEMENTS ->
+ // FAST_DOUBLE_ELEMENTS and complete the store.
+ __ LoadTransitionedArrayMapConditional(
+ FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, r4, slow);
+ AllocationSiteMode mode =
+ AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS);
+ ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value,
+ receiver_map, mode, slow);
+ __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ jmp(&fast_double_without_map_check);
+
+ __ bind(&non_double_value);
+ // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
+ __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS,
+ receiver_map, r4, slow);
+ mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
+ ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
+ masm, receiver, key, value, receiver_map, mode, slow);
+ __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ jmp(&finish_object_store);
+
+ __ bind(&transition_double_elements);
+ // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
+ // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS and
+ // transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
+ __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS,
+ receiver_map, r4, slow);
+ mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
+ ElementsTransitionGenerator::GenerateDoubleToObject(
+ masm, receiver, key, value, receiver_map, mode, slow);
+ __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ jmp(&finish_object_store);
+}
+
+
+void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
+ StrictMode strict_mode) {
+ // ---------- S t a t e --------------
+ // -- r0 : value
+ // -- r1 : key
+ // -- r2 : receiver
+ // -- lr : return address
+ // -----------------------------------
+ Label slow, fast_object, fast_object_grow;
+ Label fast_double, fast_double_grow;
+ Label array, extra, check_if_double_array;
+
+ // Register usage.
+ Register value = ValueRegister();
+ Register key = NameRegister();
+ Register receiver = ReceiverRegister();
+ DCHECK(receiver.is(r1));
+ DCHECK(key.is(r2));
+ DCHECK(value.is(r0));
+ Register receiver_map = r3;
+ Register elements_map = r6;
+ Register elements = r9; // Elements array of the receiver.
+ // r4 and r5 are used as general scratch registers.
+
+ // Check that the key is a smi.
+ __ JumpIfNotSmi(key, &slow);
+ // Check that the object isn't a smi.
+ __ JumpIfSmi(receiver, &slow);
+ // Get the map of the object.
+ __ ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ // Check that the receiver does not require access checks and is not observed.
+ // The generic stub does not perform map checks or handle observed objects.
+ __ ldrb(ip, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
+ __ tst(ip, Operand(1 << Map::kIsAccessCheckNeeded | 1 << Map::kIsObserved));
+ __ b(ne, &slow);
+ // Check if the object is a JS array or not.
+ __ ldrb(r4, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset));
+ __ cmp(r4, Operand(JS_ARRAY_TYPE));
+ __ b(eq, &array);
+ // Check that the object is some kind of JSObject.
+ __ cmp(r4, Operand(FIRST_JS_OBJECT_TYPE));
+ __ b(lt, &slow);
+
+ // Object case: Check key against length in the elements array.
+ __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ // Check array bounds. Both the key and the length of FixedArray are smis.
+ __ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
+ __ cmp(key, Operand(ip));
+ __ b(lo, &fast_object);
+
+ // Slow case, handle jump to runtime.
+ __ bind(&slow);
+ // Entry registers are intact.
+ // r0: value.
+ // r1: key.
+ // r2: receiver.
+ GenerateRuntimeSetProperty(masm, strict_mode);
+
+ // Extra capacity case: Check if there is extra capacity to
+ // perform the store and update the length. Used for adding one
+ // element to the array by writing to array[array.length].
+ __ bind(&extra);
+ // Condition code from comparing key and array length is still available.
+ __ b(ne, &slow); // Only support writing to array[array.length].
+ // Check for room in the elements backing store.
+ // Both the key and the length of FixedArray are smis.
+ __ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
+ __ cmp(key, Operand(ip));
+ __ b(hs, &slow);
+ __ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
+ __ cmp(elements_map, Operand(masm->isolate()->factory()->fixed_array_map()));
+ __ b(ne, &check_if_double_array);
+ __ jmp(&fast_object_grow);
+
+ __ bind(&check_if_double_array);
+ __ cmp(elements_map,
+ Operand(masm->isolate()->factory()->fixed_double_array_map()));
+ __ b(ne, &slow);
+ __ jmp(&fast_double_grow);
+
+ // Array case: Get the length and the elements array from the JS
+ // array. Check that the array is in fast mode (and writable); if it
+ // is, the length is always a smi.
+ __ bind(&array);
+ __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+
+ // Check the key against the length in the array.
+ __ ldr(ip, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ __ cmp(key, Operand(ip));
+ __ b(hs, &extra);
+
+ KeyedStoreGenerateGenericHelper(
+ masm, &fast_object, &fast_double, &slow, kCheckMap, kDontIncrementLength,
+ value, key, receiver, receiver_map, elements_map, elements);
+ KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow,
+ &slow, kDontCheckMap, kIncrementLength, value,
+ key, receiver, receiver_map, elements_map,
+ elements);
+}
+
+
+void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
+ Register receiver = ReceiverRegister();
+ Register name = NameRegister();
+ DCHECK(receiver.is(r1));
+ DCHECK(name.is(r2));
+ DCHECK(ValueRegister().is(r0));
+
+ // Get the receiver from the stack and probe the stub cache.
+ Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
+ Code::ComputeHandlerFlags(Code::STORE_IC));
+
+ masm->isolate()->stub_cache()->GenerateProbe(masm, flags, receiver, name, r3,
+ r4, r5, r6);
+
+ // Cache miss: Jump to runtime.
+ GenerateMiss(masm);
+}
+
+
+void StoreIC::GenerateMiss(MacroAssembler* masm) {
+ __ Push(ReceiverRegister(), NameRegister(), ValueRegister());
+
+ // Perform tail call to the entry.
+ ExternalReference ref =
+ ExternalReference(IC_Utility(kStoreIC_Miss), masm->isolate());
+ __ TailCallExternalReference(ref, 3, 1);
+}
+
+
+void StoreIC::GenerateNormal(MacroAssembler* masm) {
+ Label miss;
+ Register receiver = ReceiverRegister();
+ Register name = NameRegister();
+ Register value = ValueRegister();
+ Register dictionary = r3;
+ DCHECK(receiver.is(r1));
+ DCHECK(name.is(r2));
+ DCHECK(value.is(r0));
+
+ __ ldr(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+
+ GenerateDictionaryStore(masm, &miss, dictionary, name, value, r4, r5);
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->store_normal_hit(), 1, r4, r5);
+ __ Ret();
+
+ __ bind(&miss);
+ __ IncrementCounter(counters->store_normal_miss(), 1, r4, r5);
+ GenerateMiss(masm);
+}
+
+
+void StoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
+ StrictMode strict_mode) {
+ __ Push(ReceiverRegister(), NameRegister(), ValueRegister());
+
+ __ mov(r0, Operand(Smi::FromInt(strict_mode)));
+ __ Push(r0);
+
+ // Do tail-call to runtime routine.
+ __ TailCallRuntime(Runtime::kSetProperty, 4, 1);
+}
+
+
+#undef __
+
+
+Condition CompareIC::ComputeCondition(Token::Value op) {
+ switch (op) {
+ case Token::EQ_STRICT:
+ case Token::EQ:
+ return eq;
+ case Token::LT:
+ return lt;
+ case Token::GT:
+ return gt;
+ case Token::LTE:
+ return le;
+ case Token::GTE:
+ return ge;
+ default:
+ UNREACHABLE();
+ return kNoCondition;
+ }
+}
+
+
+bool CompareIC::HasInlinedSmiCode(Address address) {
+ // The address of the instruction following the call.
+ Address cmp_instruction_address =
+ Assembler::return_address_from_call_start(address);
+
+ // If the instruction following the call is not a cmp rx, #yyy, nothing
+ // was inlined.
+ Instr instr = Assembler::instr_at(cmp_instruction_address);
+ return Assembler::IsCmpImmediate(instr);
+}
+
+
+void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
+ Address cmp_instruction_address =
+ Assembler::return_address_from_call_start(address);
+
+ // If the instruction following the call is not a cmp rx, #yyy, nothing
+ // was inlined.
+ Instr instr = Assembler::instr_at(cmp_instruction_address);
+ if (!Assembler::IsCmpImmediate(instr)) {
+ return;
+ }
+
+ // The delta to the start of the map check instruction and the
+ // condition code used at the patched jump.
+ int delta = Assembler::GetCmpImmediateRawImmediate(instr);
+ delta += Assembler::GetCmpImmediateRegister(instr).code() * kOff12Mask;
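+ // The delta is split across the recording cmp: the 12-bit immediate holds
+ // the low part and the Rn register code, scaled by kOff12Mask, holds the
+ // high part.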
+ // If the delta is 0 the instruction is cmp r0, #0 which also signals that
+ // nothing was inlined.
+ if (delta == 0) {
+ return;
+ }
+
+ if (FLAG_trace_ic) {
+ PrintF("[ patching ic at %p, cmp=%p, delta=%d\n", address,
+ cmp_instruction_address, delta);
+ }
+
+ Address patch_address =
+ cmp_instruction_address - delta * Instruction::kInstrSize;
+ Instr instr_at_patch = Assembler::instr_at(patch_address);
+ Instr branch_instr =
+ Assembler::instr_at(patch_address + Instruction::kInstrSize);
+ // This is patching a conditional "jump if not smi/jump if smi" site.
+ // Enabling by changing from
+ // cmp rx, rx
+ // b eq/ne, <target>
+ // to
+ // tst rx, #kSmiTagMask
+ // b ne/eq, <target>
+ // and vice-versa to be disabled again.
+ CodePatcher patcher(patch_address, 2);
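+ // Two instructions get patched: the cmp/tst itself and the condition of
+ // the following branch.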
+ Register reg = Assembler::GetRn(instr_at_patch);
+ if (check == ENABLE_INLINED_SMI_CHECK) {
+ DCHECK(Assembler::IsCmpRegister(instr_at_patch));
+ DCHECK_EQ(Assembler::GetRn(instr_at_patch).code(),
+ Assembler::GetRm(instr_at_patch).code());
+ patcher.masm()->tst(reg, Operand(kSmiTagMask));
+ } else {
+ DCHECK(check == DISABLE_INLINED_SMI_CHECK);
+ DCHECK(Assembler::IsTstImmediate(instr_at_patch));
+ patcher.masm()->cmp(reg, reg);
+ }
+ DCHECK(Assembler::IsBranch(branch_instr));
+ if (Assembler::GetCondition(branch_instr) == eq) {
+ patcher.EmitCondition(ne);
+ } else {
+ DCHECK(Assembler::GetCondition(branch_instr) == ne);
+ patcher.EmitCondition(eq);
+ }
+}
+}
+} // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_ARM
--- /dev/null
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_ARM
+
+#include "src/ic/ic-compiler.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+
+void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
+ MacroAssembler* masm, Label* miss_label, Register receiver,
+ Handle<Name> name, Register scratch0, Register scratch1) {
+ DCHECK(name->IsUniqueName());
+ DCHECK(!receiver.is(scratch0));
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->negative_lookups(), 1, scratch0, scratch1);
+ __ IncrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
+
+ Label done;
+
+ const int kInterceptorOrAccessCheckNeededMask =
+ (1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded);
+
+ // Bail out if the receiver has a named interceptor or requires access checks.
+ Register map = scratch1;
+ __ ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ ldrb(scratch0, FieldMemOperand(map, Map::kBitFieldOffset));
+ __ tst(scratch0, Operand(kInterceptorOrAccessCheckNeededMask));
+ __ b(ne, miss_label);
+
+ // Check that receiver is a JSObject.
+ __ ldrb(scratch0, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ __ cmp(scratch0, Operand(FIRST_SPEC_OBJECT_TYPE));
+ __ b(lt, miss_label);
+
+ // Load properties array.
+ Register properties = scratch0;
+ __ ldr(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+ // Check that the properties array is a dictionary.
+ __ ldr(map, FieldMemOperand(properties, HeapObject::kMapOffset));
+ Register tmp = properties;
+ __ LoadRoot(tmp, Heap::kHashTableMapRootIndex);
+ __ cmp(map, tmp);
+ __ b(ne, miss_label);
+
+ // Restore the temporarily used register.
+ __ ldr(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+
+ NameDictionaryLookupStub::GenerateNegativeLookup(
+ masm, miss_label, &done, receiver, properties, name, scratch1);
+ __ bind(&done);
+ __ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
+}
+
+
+void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype(
+ MacroAssembler* masm, int index, Register prototype, Label* miss) {
+ Isolate* isolate = masm->isolate();
+ // Get the global function with the given index.
+ Handle<JSFunction> function(
+ JSFunction::cast(isolate->native_context()->get(index)));
+
+ // Check we're still in the same context.
+ Register scratch = prototype;
+ const int offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX);
+ __ ldr(scratch, MemOperand(cp, offset));
+ __ ldr(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
+ __ ldr(scratch, MemOperand(scratch, Context::SlotOffset(index)));
+ __ Move(ip, function);
+ __ cmp(ip, scratch);
+ __ b(ne, miss);
+
+ // Load its initial map. The global functions all have initial maps.
+ __ Move(prototype, Handle<Map>(function->initial_map()));
+ // Load the prototype from the initial map.
+ __ ldr(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset));
+}
+
+
+void NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(
+ MacroAssembler* masm, Register receiver, Register scratch1,
+ Register scratch2, Label* miss_label) {
+ __ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label);
+ __ mov(r0, scratch1);
+ __ Ret();
+}
+
+
+// Generate code to check that a global property cell is empty. Create
+// the property cell at compilation time if no cell exists for the
+// property.
+void PropertyHandlerCompiler::GenerateCheckPropertyCell(
+ MacroAssembler* masm, Handle<JSGlobalObject> global, Handle<Name> name,
+ Register scratch, Label* miss) {
+ Handle<Cell> cell = JSGlobalObject::EnsurePropertyCell(global, name);
+ DCHECK(cell->value()->IsTheHole());
+ __ mov(scratch, Operand(cell));
+ __ ldr(scratch, FieldMemOperand(scratch, Cell::kValueOffset));
+ __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+ __ cmp(scratch, ip);
+ __ b(ne, miss);
+}
+
+
+static void PushInterceptorArguments(MacroAssembler* masm, Register receiver,
+ Register holder, Register name,
+ Handle<JSObject> holder_obj) {
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0);
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsInfoIndex == 1);
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 2);
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 3);
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsLength == 4);
+ __ push(name);
+ Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
+ DCHECK(!masm->isolate()->heap()->InNewSpace(*interceptor));
+ Register scratch = name;
+ __ mov(scratch, Operand(interceptor));
+ __ push(scratch);
+ __ push(receiver);
+ __ push(holder);
+}
+
+
+static void CompileCallLoadPropertyWithInterceptor(
+ MacroAssembler* masm, Register receiver, Register holder, Register name,
+ Handle<JSObject> holder_obj, IC::UtilityId id) {
+ PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
+ __ CallExternalReference(ExternalReference(IC_Utility(id), masm->isolate()),
+ NamedLoadHandlerCompiler::kInterceptorArgsLength);
+}
+
+
+// Generate a call to the API function.
+void PropertyHandlerCompiler::GenerateFastApiCall(
+ MacroAssembler* masm, const CallOptimization& optimization,
+ Handle<Map> receiver_map, Register receiver, Register scratch_in,
+ bool is_store, int argc, Register* values) {
+ DCHECK(!receiver.is(scratch_in));
+ __ push(receiver);
+ // Write the arguments to stack frame.
+ for (int i = 0; i < argc; i++) {
+ Register arg = values[argc - 1 - i];
+ DCHECK(!receiver.is(arg));
+ DCHECK(!scratch_in.is(arg));
+ __ push(arg);
+ }
+ DCHECK(optimization.is_simple_api_call());
+
+ // Abi for CallApiFunctionStub.
+ Register callee = r0;
+ Register call_data = r4;
+ Register holder = r2;
+ Register api_function_address = r1;
+
+ // Put holder in place.
+ CallOptimization::HolderLookup holder_lookup;
+ Handle<JSObject> api_holder =
+ optimization.LookupHolderOfExpectedType(receiver_map, &holder_lookup);
+ switch (holder_lookup) {
+ case CallOptimization::kHolderIsReceiver:
+ __ Move(holder, receiver);
+ break;
+ case CallOptimization::kHolderFound:
+ __ Move(holder, api_holder);
+ break;
+ case CallOptimization::kHolderNotFound:
+ UNREACHABLE();
+ break;
+ }
+
+ Isolate* isolate = masm->isolate();
+ Handle<JSFunction> function = optimization.constant_function();
+ Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
+ Handle<Object> call_data_obj(api_call_info->data(), isolate);
+
+ // Put callee in place.
+ __ Move(callee, function);
+
+ bool call_data_undefined = false;
+ // Put call_data in place.
+ if (isolate->heap()->InNewSpace(*call_data_obj)) {
+ __ Move(call_data, api_call_info);
+ __ ldr(call_data, FieldMemOperand(call_data, CallHandlerInfo::kDataOffset));
+ } else if (call_data_obj->IsUndefined()) {
+ call_data_undefined = true;
+ __ LoadRoot(call_data, Heap::kUndefinedValueRootIndex);
+ } else {
+ __ Move(call_data, call_data_obj);
+ }
+
+ // Put api_function_address in place.
+ Address function_address = v8::ToCData<Address>(api_call_info->callback());
+ ApiFunction fun(function_address);
+ ExternalReference::Type type = ExternalReference::DIRECT_API_CALL;
+ ExternalReference ref = ExternalReference(&fun, type, masm->isolate());
+ __ mov(api_function_address, Operand(ref));
+
+ // Jump to stub.
+ CallApiFunctionStub stub(isolate, is_store, call_data_undefined, argc);
+ __ TailCallStub(&stub);
+}
+
+
+void PropertyAccessCompiler::GenerateTailCall(MacroAssembler* masm,
+ Handle<Code> code) {
+ __ Jump(code, RelocInfo::CODE_TARGET);
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm())
+
+
+void NamedStoreHandlerCompiler::GenerateRestoreName(Label* label,
+ Handle<Name> name) {
+ if (!label->is_unused()) {
+ __ bind(label);
+ __ mov(this->name(), Operand(name));
+ }
+}
+
+
+// Generate StoreTransition code; the value is passed in the r0 register.
+// When leaving generated code after success, the receiver_reg and name_reg
+// may be clobbered. Upon branch to miss_label, the receiver and name
+// registers have their original values.
+void NamedStoreHandlerCompiler::GenerateStoreTransition(
+ Handle<Map> transition, Handle<Name> name, Register receiver_reg,
+ Register storage_reg, Register value_reg, Register scratch1,
+ Register scratch2, Register scratch3, Label* miss_label, Label* slow) {
+ // r0 : value
+ Label exit;
+
+ int descriptor = transition->LastAdded();
+ DescriptorArray* descriptors = transition->instance_descriptors();
+ PropertyDetails details = descriptors->GetDetails(descriptor);
+ Representation representation = details.representation();
+ DCHECK(!representation.IsNone());
+
+ if (details.type() == CONSTANT) {
+ Handle<Object> constant(descriptors->GetValue(descriptor), isolate());
+ __ Move(scratch1, constant);
+ __ cmp(value_reg, scratch1);
+ __ b(ne, miss_label);
+ } else if (representation.IsSmi()) {
+ __ JumpIfNotSmi(value_reg, miss_label);
+ } else if (representation.IsHeapObject()) {
+ __ JumpIfSmi(value_reg, miss_label);
+ HeapType* field_type = descriptors->GetFieldType(descriptor);
+ HeapType::Iterator<Map> it = field_type->Classes();
+ if (!it.Done()) {
+ __ ldr(scratch1, FieldMemOperand(value_reg, HeapObject::kMapOffset));
+ Label do_store;
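+ // Compare the value's map against each allowed class; any match jumps to
+ // do_store, and we miss only after the last candidate fails.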
+ while (true) {
+ __ CompareMap(scratch1, it.Current(), &do_store);
+ it.Advance();
+ if (it.Done()) {
+ __ b(ne, miss_label);
+ break;
+ }
+ __ b(eq, &do_store);
+ }
+ __ bind(&do_store);
+ }
+ } else if (representation.IsDouble()) {
+ Label do_store, heap_number;
+ __ LoadRoot(scratch3, Heap::kMutableHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(storage_reg, scratch1, scratch2, scratch3, slow,
+ TAG_RESULT, MUTABLE);
+
+ __ JumpIfNotSmi(value_reg, &heap_number);
+ __ SmiUntag(scratch1, value_reg);
+ __ vmov(s0, scratch1);
+ __ vcvt_f64_s32(d0, s0);
+ __ jmp(&do_store);
+
+ __ bind(&heap_number);
+ __ CheckMap(value_reg, scratch1, Heap::kHeapNumberMapRootIndex, miss_label,
+ DONT_DO_SMI_CHECK);
+ __ vldr(d0, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
+
+ __ bind(&do_store);
+ __ vstr(d0, FieldMemOperand(storage_reg, HeapNumber::kValueOffset));
+ }
+
+ // Stub never generated for objects that require access checks.
+ DCHECK(!transition->is_access_check_needed());
+
+ // Perform map transition for the receiver if necessary.
+ if (details.type() == FIELD &&
+ Map::cast(transition->GetBackPointer())->unused_property_fields() == 0) {
+ // The properties must be extended before we can store the value.
+ // We jump to a runtime call that extends the properties array.
+ __ push(receiver_reg);
+ __ mov(r2, Operand(transition));
+ __ Push(r2, r0);
+ __ TailCallExternalReference(
+ ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage),
+ isolate()),
+ 3, 1);
+ return;
+ }
+
+ // Update the map of the object.
+ __ mov(scratch1, Operand(transition));
+ __ str(scratch1, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
+
+ // Update the write barrier for the map field.
+ __ RecordWriteField(receiver_reg, HeapObject::kMapOffset, scratch1, scratch2,
+ kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+
+ if (details.type() == CONSTANT) {
+ DCHECK(value_reg.is(r0));
+ __ Ret();
+ return;
+ }
+
+ int index = transition->instance_descriptors()->GetFieldIndex(
+ transition->LastAdded());
+
+ // Adjust for the number of properties stored in the object. Even in the
+ // face of a transition we can use the old map here because the size of the
+ // object and the number of in-object properties are not going to change.
+ index -= transition->inobject_properties();
+
+ // TODO(verwaest): Share this code as a code stub.
+ SmiCheck smi_check =
+ representation.IsTagged() ? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
+ if (index < 0) {
+ // Set the property straight into the object.
+ int offset = transition->instance_size() + (index * kPointerSize);
+ if (representation.IsDouble()) {
+ __ str(storage_reg, FieldMemOperand(receiver_reg, offset));
+ } else {
+ __ str(value_reg, FieldMemOperand(receiver_reg, offset));
+ }
+
+ if (!representation.IsSmi()) {
+ // Update the write barrier for the array address.
+ if (!representation.IsDouble()) {
+ __ mov(storage_reg, value_reg);
+ }
+ __ RecordWriteField(receiver_reg, offset, storage_reg, scratch1,
+ kLRHasNotBeenSaved, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, smi_check);
+ }
+ } else {
+ // Write to the properties array.
+ int offset = index * kPointerSize + FixedArray::kHeaderSize;
+ // Get the properties array
+ __ ldr(scratch1,
+ FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
+ if (representation.IsDouble()) {
+ __ str(storage_reg, FieldMemOperand(scratch1, offset));
+ } else {
+ __ str(value_reg, FieldMemOperand(scratch1, offset));
+ }
+
+ if (!representation.IsSmi()) {
+ // Update the write barrier for the array address.
+ if (!representation.IsDouble()) {
+ __ mov(storage_reg, value_reg);
+ }
+ __ RecordWriteField(scratch1, offset, storage_reg, receiver_reg,
+ kLRHasNotBeenSaved, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, smi_check);
+ }
+ }
+
+ // Return the value (register r0).
+ DCHECK(value_reg.is(r0));
+ __ bind(&exit);
+ __ Ret();
+}
+
+
+void NamedStoreHandlerCompiler::GenerateStoreField(LookupIterator* lookup,
+ Register value_reg,
+ Label* miss_label) {
+ DCHECK(lookup->representation().IsHeapObject());
+ __ JumpIfSmi(value_reg, miss_label);
+ HeapType::Iterator<Map> it = lookup->GetFieldType()->Classes();
+ __ ldr(scratch1(), FieldMemOperand(value_reg, HeapObject::kMapOffset));
+ Label do_store;
+ while (true) {
+ __ CompareMap(scratch1(), it.Current(), &do_store);
+ it.Advance();
+ if (it.Done()) {
+ __ b(ne, miss_label);
+ break;
+ }
+ __ b(eq, &do_store);
+ }
+ __ bind(&do_store);
+
+ StoreFieldStub stub(isolate(), lookup->GetFieldIndex(),
+ lookup->representation());
+ GenerateTailCall(masm(), stub.GetCode());
+}
+
+
+Register PropertyHandlerCompiler::CheckPrototypes(
+ Register object_reg, Register holder_reg, Register scratch1,
+ Register scratch2, Handle<Name> name, Label* miss,
+ PrototypeCheckType check) {
+ Handle<Map> receiver_map(IC::TypeToMap(*type(), isolate()));
+
+ // Make sure there's no overlap between holder and object registers.
+ DCHECK(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
+ DCHECK(!scratch2.is(object_reg) && !scratch2.is(holder_reg) &&
+ !scratch2.is(scratch1));
+
+ // Keep track of the current object in register reg.
+ Register reg = object_reg;
+ int depth = 0;
+
+ Handle<JSObject> current = Handle<JSObject>::null();
+ if (type()->IsConstant()) {
+ current = Handle<JSObject>::cast(type()->AsConstant()->Value());
+ }
+ Handle<JSObject> prototype = Handle<JSObject>::null();
+ Handle<Map> current_map = receiver_map;
+ Handle<Map> holder_map(holder()->map());
+ // Traverse the prototype chain, checking the maps for fast and global
+ // objects, or doing a negative lookup for normal objects.
+ while (!current_map.is_identical_to(holder_map)) {
+ ++depth;
+
+ // Only global objects and objects that do not require access
+ // checks are allowed in stubs.
+ DCHECK(current_map->IsJSGlobalProxyMap() ||
+ !current_map->is_access_check_needed());
+
+ prototype = handle(JSObject::cast(current_map->prototype()));
+ if (current_map->is_dictionary_map() &&
+ !current_map->IsJSGlobalObjectMap()) {
+ DCHECK(!current_map->IsJSGlobalProxyMap()); // Proxy maps are fast.
+ if (!name->IsUniqueName()) {
+ DCHECK(name->IsString());
+ name = factory()->InternalizeString(Handle<String>::cast(name));
+ }
+ DCHECK(current.is_null() ||
+ current->property_dictionary()->FindEntry(name) ==
+ NameDictionary::kNotFound);
+
+ GenerateDictionaryNegativeLookup(masm(), miss, reg, name, scratch1,
+ scratch2);
+
+ __ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
+ reg = holder_reg; // From now on the object will be in holder_reg.
+ __ ldr(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
+ } else {
+ Register map_reg = scratch1;
+ if (depth != 1 || check == CHECK_ALL_MAPS) {
+ // CheckMap implicitly loads the map of |reg| into |map_reg|.
+ __ CheckMap(reg, map_reg, current_map, miss, DONT_DO_SMI_CHECK);
+ } else {
+ __ ldr(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
+ }
+
+ // Check access rights to the global object. This has to happen after
+ // the map check so that we know that the object is actually a global
+ // object.
+ // This allows us to install generated handlers for accesses to the
+ // global proxy (as opposed to using slow ICs). See corresponding code
+ // in LookupForRead().
+ if (current_map->IsJSGlobalProxyMap()) {
+ __ CheckAccessGlobalProxy(reg, scratch2, miss);
+ } else if (current_map->IsJSGlobalObjectMap()) {
+ GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
+ name, scratch2, miss);
+ }
+
+ reg = holder_reg; // From now on the object will be in holder_reg.
+
+ // Two possible reasons for loading the prototype from the map:
+ // (1) Can't store references to new space in code.
+ // (2) Handler is shared for all receivers with the same prototype
+ // map (but not necessarily the same prototype instance).
+ bool load_prototype_from_map =
+ heap()->InNewSpace(*prototype) || depth == 1;
+ if (load_prototype_from_map) {
+ __ ldr(reg, FieldMemOperand(map_reg, Map::kPrototypeOffset));
+ } else {
+ __ mov(reg, Operand(prototype));
+ }
+ }
+
+ // Go to the next object in the prototype chain.
+ current = prototype;
+ current_map = handle(current->map());
+ }
+
+ // Log the check depth.
+ LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
+
+ if (depth != 0 || check == CHECK_ALL_MAPS) {
+ // Check the holder map.
+ __ CheckMap(reg, scratch1, current_map, miss, DONT_DO_SMI_CHECK);
+ }
+
+ // Perform security check for access to the global object.
+ DCHECK(current_map->IsJSGlobalProxyMap() ||
+ !current_map->is_access_check_needed());
+ if (current_map->IsJSGlobalProxyMap()) {
+ __ CheckAccessGlobalProxy(reg, scratch1, miss);
+ }
+
+ // Return the register containing the holder.
+ return reg;
+}
+
+
+void NamedLoadHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
+ if (!miss->is_unused()) {
+ Label success;
+ __ b(&success);
+ __ bind(miss);
+ TailCallBuiltin(masm(), MissBuiltin(kind()));
+ __ bind(&success);
+ }
+}
+
+
+void NamedStoreHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
+ if (!miss->is_unused()) {
+ Label success;
+ __ b(&success);
+ GenerateRestoreName(miss, name);
+ TailCallBuiltin(masm(), MissBuiltin(kind()));
+ __ bind(&success);
+ }
+}
+
+
+void NamedLoadHandlerCompiler::GenerateLoadConstant(Handle<Object> value) {
+ // Return the constant value.
+ __ Move(r0, value);
+ __ Ret();
+}
+
+
+void NamedLoadHandlerCompiler::GenerateLoadCallback(
+ Register reg, Handle<ExecutableAccessorInfo> callback) {
+ // Build the AccessorInfo::args_ list on the stack and push the property
+ // name below the exit frame to make the GC aware of them and store
+ // pointers to them.
+ STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 0);
+ STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 1);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 2);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 3);
+ STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 4);
+ STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 5);
+ STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 6);
+ DCHECK(!scratch2().is(reg));
+ DCHECK(!scratch3().is(reg));
+ DCHECK(!scratch4().is(reg));
+ __ push(receiver());
+ if (heap()->InNewSpace(callback->data())) {
+ __ Move(scratch3(), callback);
+ __ ldr(scratch3(),
+ FieldMemOperand(scratch3(), ExecutableAccessorInfo::kDataOffset));
+ } else {
+ __ Move(scratch3(), Handle<Object>(callback->data(), isolate()));
+ }
+ __ push(scratch3());
+ __ LoadRoot(scratch3(), Heap::kUndefinedValueRootIndex);
+ __ mov(scratch4(), scratch3());
+ __ Push(scratch3(), scratch4());
+ __ mov(scratch4(), Operand(ExternalReference::isolate_address(isolate())));
+ __ Push(scratch4(), reg);
+ __ mov(scratch2(), sp); // scratch2 = PropertyAccessorInfo::args_
+ __ push(name());
+
+ // Abi for CallApiGetter
+ Register getter_address_reg = r2;
+
+ Address getter_address = v8::ToCData<Address>(callback->getter());
+ ApiFunction fun(getter_address);
+ ExternalReference::Type type = ExternalReference::DIRECT_GETTER_CALL;
+ ExternalReference ref = ExternalReference(&fun, type, isolate());
+ __ mov(getter_address_reg, Operand(ref));
+
+ CallApiGetterStub stub(isolate());
+ __ TailCallStub(&stub);
+}
+
+
+void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
+ LookupIterator* it, Register holder_reg) {
+ DCHECK(holder()->HasNamedInterceptor());
+ DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined());
+
+ // Compile the interceptor call, followed by inline code to load the
+ // property from further up the prototype chain if the call fails.
+ // Check that the maps haven't changed.
+ DCHECK(holder_reg.is(receiver()) || holder_reg.is(scratch1()));
+
+ // Preserve the receiver register explicitly whenever it differs from the
+ // holder and is needed in case the interceptor returns without a result:
+ // the ACCESSOR case needs the receiver passed into C++ code, while the
+ // FIELD case might cause a miss during the prototype check.
+ bool must_perform_prototype_check =
+ !holder().is_identical_to(it->GetHolder<JSObject>());
+ bool must_preserve_receiver_reg =
+ !receiver().is(holder_reg) &&
+ (it->property_kind() == LookupIterator::ACCESSOR ||
+ must_perform_prototype_check);
+
+ // Save necessary data before invoking an interceptor.
+ // Requires a frame to make GC aware of pushed pointers.
+ {
+ FrameAndConstantPoolScope frame_scope(masm(), StackFrame::INTERNAL);
+ if (must_preserve_receiver_reg) {
+ __ Push(receiver(), holder_reg, this->name());
+ } else {
+ __ Push(holder_reg, this->name());
+ }
+ // Invoke the interceptor. Note: the map checks from the receiver to the
+ // interceptor's holder have been compiled before (see the caller of this
+ // method).
+ CompileCallLoadPropertyWithInterceptor(
+ masm(), receiver(), holder_reg, this->name(), holder(),
+ IC::kLoadPropertyWithInterceptorOnly);
+
+ // Check if interceptor provided a value for property. If it's
+ // the case, return immediately.
+ Label interceptor_failed;
+ __ LoadRoot(scratch1(), Heap::kNoInterceptorResultSentinelRootIndex);
+ __ cmp(r0, scratch1());
+ __ b(eq, &interceptor_failed);
+ frame_scope.GenerateLeaveFrame();
+ __ Ret();
+
+ __ bind(&interceptor_failed);
+ __ pop(this->name());
+ __ pop(holder_reg);
+ if (must_preserve_receiver_reg) {
+ __ pop(receiver());
+ }
+ // Leave the internal frame.
+ }
+
+ GenerateLoadPostInterceptor(it, holder_reg);
+}
+
+
+void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
+ // Call the runtime system to load the interceptor.
+ DCHECK(holder()->HasNamedInterceptor());
+ DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined());
+ PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(),
+ holder());
+
+ ExternalReference ref = ExternalReference(
+ IC_Utility(IC::kLoadPropertyWithInterceptor), isolate());
+ __ TailCallExternalReference(
+ ref, NamedLoadHandlerCompiler::kInterceptorArgsLength, 1);
+}
+
+
+Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
+ Handle<JSObject> object, Handle<Name> name,
+ Handle<ExecutableAccessorInfo> callback) {
+ Register holder_reg = Frontend(receiver(), name);
+
+ __ push(receiver()); // receiver
+ __ push(holder_reg);
+ __ mov(ip, Operand(callback)); // callback info
+ __ push(ip);
+ __ mov(ip, Operand(name));
+ __ Push(ip, value());
+
+ // Do tail-call to the runtime system.
+ ExternalReference store_callback_property =
+ ExternalReference(IC_Utility(IC::kStoreCallbackProperty), isolate());
+ __ TailCallExternalReference(store_callback_property, 5, 1);
+
+ // Return the generated code.
+ return GetCode(kind(), Code::FAST, name);
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm)
+
+
+void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
+ MacroAssembler* masm, Handle<HeapType> type, Register receiver,
+ Handle<JSFunction> setter) {
+ // ----------- S t a t e -------------
+ // -- lr : return address
+ // -----------------------------------
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+
+ // Save value register, so we can restore it later.
+ __ push(value());
+
+ if (!setter.is_null()) {
+ // Call the JavaScript setter with receiver and value on the stack.
+ if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
+ // Swap in the global receiver.
+ __ ldr(receiver,
+ FieldMemOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
+ }
+ __ Push(receiver, value());
+ ParameterCount actual(1);
+ ParameterCount expected(setter);
+ __ InvokeFunction(setter, expected, actual, CALL_FUNCTION,
+ NullCallWrapper());
+ } else {
+ // If we generate a global code snippet for deoptimization only, remember
+ // the place to continue after deoptimization.
+ masm->isolate()->heap()->SetSetterStubDeoptPCOffset(masm->pc_offset());
+ }
+
+ // We have to return the passed value, not the return value of the setter.
+ __ pop(r0);
+
+ // Restore context register.
+ __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ }
+ __ Ret();
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm())
+
+
+Handle<Code> NamedStoreHandlerCompiler::CompileStoreInterceptor(
+ Handle<Name> name) {
+ __ Push(receiver(), this->name(), value());
+
+ // Do tail-call to the runtime system.
+ ExternalReference store_ic_property = ExternalReference(
+ IC_Utility(IC::kStorePropertyWithInterceptor), isolate());
+ __ TailCallExternalReference(store_ic_property, 3, 1);
+
+ // Return the generated code.
+ return GetCode(kind(), Code::FAST, name);
+}
+
+
+Register* PropertyAccessCompiler::load_calling_convention() {
+ // receiver, name, scratch1, scratch2, scratch3, scratch4.
+ Register receiver = LoadIC::ReceiverRegister();
+ Register name = LoadIC::NameRegister();
+ static Register registers[] = {receiver, name, r3, r0, r4, r5};
+ return registers;
+}
+
+
+Register* PropertyAccessCompiler::store_calling_convention() {
+ // receiver, name, scratch1, scratch2, scratch3.
+ Register receiver = StoreIC::ReceiverRegister();
+ Register name = StoreIC::NameRegister();
+ DCHECK(r3.is(KeyedStoreIC::MapRegister()));
+ static Register registers[] = {receiver, name, r3, r4, r5};
+ return registers;
+}
+
+
+Register NamedStoreHandlerCompiler::value() { return StoreIC::ValueRegister(); }
+
+
+#undef __
+#define __ ACCESS_MASM(masm)
+
+
+void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
+ MacroAssembler* masm, Handle<HeapType> type, Register receiver,
+ Handle<JSFunction> getter) {
+ // ----------- S t a t e -------------
+ // -- r0 : receiver
+ // -- r2 : name
+ // -- lr : return address
+ // -----------------------------------
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+
+ if (!getter.is_null()) {
+ // Call the JavaScript getter with the receiver on the stack.
+ if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
+ // Swap in the global receiver.
+ __ ldr(receiver,
+ FieldMemOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
+ }
+ __ push(receiver);
+ ParameterCount actual(0);
+ ParameterCount expected(getter);
+ __ InvokeFunction(getter, expected, actual, CALL_FUNCTION,
+ NullCallWrapper());
+ } else {
+ // If we generate a global code snippet for deoptimization only, remember
+ // the place to continue after deoptimization.
+ masm->isolate()->heap()->SetGetterStubDeoptPCOffset(masm->pc_offset());
+ }
+
+ // Restore context register.
+ __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ }
+ __ Ret();
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm())
+
+
+Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
+ Handle<PropertyCell> cell, Handle<Name> name, bool is_configurable) {
+ Label miss;
+ FrontendHeader(receiver(), name, &miss);
+
+ // Get the value from the cell.
+ Register result = StoreIC::ValueRegister();
+ __ mov(result, Operand(cell));
+ __ ldr(result, FieldMemOperand(result, Cell::kValueOffset));
+
+ // Check for deleted property if property can actually be deleted.
+ if (is_configurable) {
+ __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+ __ cmp(result, ip);
+ __ b(eq, &miss);
+ }
+
+ Counters* counters = isolate()->counters();
+ __ IncrementCounter(counters->named_load_global_stub(), 1, r1, r3);
+ __ Ret();
+
+ FrontendFooter(name, &miss);
+
+ // Return the generated code.
+ return GetCode(kind(), Code::NORMAL, name);
+}
+
+
+Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types,
+ CodeHandleList* handlers,
+ Handle<Name> name,
+ Code::StubType type,
+ IcCheckType check) {
+ Label miss;
+
+ if (check == PROPERTY &&
+ (kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) {
+ // In case we are compiling an IC for dictionary loads and stores, just
+ // check whether the name is unique.
+ if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) {
+ __ JumpIfNotUniqueName(this->name(), &miss);
+ } else {
+ __ cmp(this->name(), Operand(name));
+ __ b(ne, &miss);
+ }
+ }
+
+ Label number_case;
+ Label* smi_target = IncludesNumberType(types) ? &number_case : &miss;
+ __ JumpIfSmi(receiver(), smi_target);
+
+ // Polymorphic keyed stores may use the map register.
+ Register map_reg = scratch1();
+ DCHECK(kind() != Code::KEYED_STORE_IC ||
+ map_reg.is(KeyedStoreIC::MapRegister()));
+
+ int receiver_count = types->length();
+ int number_of_handled_maps = 0;
+ __ ldr(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset));
+ for (int current = 0; current < receiver_count; ++current) {
+ Handle<HeapType> type = types->at(current);
+ Handle<Map> map = IC::TypeToMap(*type, isolate());
+ if (!map->is_deprecated()) {
+ number_of_handled_maps++;
+ __ mov(ip, Operand(map));
+ __ cmp(map_reg, ip);
+ if (type->Is(HeapType::Number())) {
+ DCHECK(!number_case.is_unused());
+ __ bind(&number_case);
+ }
+ __ Jump(handlers->at(current), RelocInfo::CODE_TARGET, eq);
+ }
+ }
+ DCHECK(number_of_handled_maps != 0);
+
+ __ bind(&miss);
+ TailCallBuiltin(masm(), MissBuiltin(kind()));
+
+ // Return the generated code.
+ InlineCacheState state =
+ number_of_handled_maps > 1 ? POLYMORPHIC : MONOMORPHIC;
+ return GetCode(kind(), type, name, state);
+}
+
+
+Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
+ MapHandleList* receiver_maps, CodeHandleList* handler_stubs,
+ MapHandleList* transitioned_maps) {
+ Label miss;
+ __ JumpIfSmi(receiver(), &miss);
+
+ int receiver_count = receiver_maps->length();
+ __ ldr(scratch1(), FieldMemOperand(receiver(), HeapObject::kMapOffset));
+ for (int i = 0; i < receiver_count; ++i) {
+ __ mov(ip, Operand(receiver_maps->at(i)));
+ __ cmp(scratch1(), ip);
+ if (transitioned_maps->at(i).is_null()) {
+ __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, eq);
+ } else {
+ Label next_map;
+ __ b(ne, &next_map);
+ __ mov(transition_map(), Operand(transitioned_maps->at(i)));
+ __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, al);
+ __ bind(&next_map);
+ }
+ }
+
+ __ bind(&miss);
+ TailCallBuiltin(masm(), MissBuiltin(kind()));
+
+ // Return the generated code.
+ return GetCode(kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC);
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm)
+
+
+void ElementHandlerCompiler::GenerateLoadDictionaryElement(
+ MacroAssembler* masm) {
+ // The return address is in lr.
+ Label slow, miss;
+
+ Register key = LoadIC::NameRegister();
+ Register receiver = LoadIC::ReceiverRegister();
+ DCHECK(receiver.is(r1));
+ DCHECK(key.is(r2));
+
+ __ UntagAndJumpIfNotSmi(r6, key, &miss);
+ __ ldr(r4, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ LoadFromNumberDictionary(&slow, r4, key, r0, r6, r3, r5);
+ __ Ret();
+
+ __ bind(&slow);
+ __ IncrementCounter(
+ masm->isolate()->counters()->keyed_load_external_array_slow(), 1, r2, r3);
+
+ TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Slow);
+
+ // Miss case, call the runtime.
+ __ bind(&miss);
+
+ TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Miss);
+}
+
+
+#undef __
+}
+} // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_ARM
--- /dev/null
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_ARM
+
+#include "src/codegen.h"
+#include "src/ic/stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+
+static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
+ Code::Flags flags, StubCache::Table table,
+ Register receiver, Register name,
+ // Number of the cache entry, not scaled.
+ Register offset, Register scratch, Register scratch2,
+ Register offset_scratch) {
+ ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
+ ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
+ ExternalReference map_offset(isolate->stub_cache()->map_reference(table));
+
+ uint32_t key_off_addr = reinterpret_cast<uint32_t>(key_offset.address());
+ uint32_t value_off_addr = reinterpret_cast<uint32_t>(value_offset.address());
+ uint32_t map_off_addr = reinterpret_cast<uint32_t>(map_offset.address());
+
+ // Check the relative positions of the address fields.
+ DCHECK(value_off_addr > key_off_addr);
+ DCHECK((value_off_addr - key_off_addr) % 4 == 0);
+ DCHECK((value_off_addr - key_off_addr) < (256 * 4));
+ DCHECK(map_off_addr > key_off_addr);
+ DCHECK((map_off_addr - key_off_addr) % 4 == 0);
+ DCHECK((map_off_addr - key_off_addr) < (256 * 4));
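+ // These deltas are used below as immediate offsets in the ldr
+ // instructions, so they have to be small enough to encode directly.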
+
+ Label miss;
+ Register base_addr = scratch;
+ scratch = no_reg;
+
+ // Multiply by 3 because there are 3 fields per entry (name, code, map).
+ __ add(offset_scratch, offset, Operand(offset, LSL, 1));
+
+ // Calculate the base address of the entry.
+ __ mov(base_addr, Operand(key_offset));
+ __ add(base_addr, base_addr, Operand(offset_scratch, LSL, kPointerSizeLog2));
+
+ // Check that the key in the entry matches the name.
+ __ ldr(ip, MemOperand(base_addr, 0));
+ __ cmp(name, ip);
+ __ b(ne, &miss);
+
+ // Check the map matches.
+ __ ldr(ip, MemOperand(base_addr, map_off_addr - key_off_addr));
+ __ ldr(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ cmp(ip, scratch2);
+ __ b(ne, &miss);
+
+ // Get the code entry from the cache.
+ Register code = scratch2;
+ scratch2 = no_reg;
+ __ ldr(code, MemOperand(base_addr, value_off_addr - key_off_addr));
+
+ // Check that the flags match what we're looking for.
+ Register flags_reg = base_addr;
+ base_addr = no_reg;
+ __ ldr(flags_reg, FieldMemOperand(code, Code::kFlagsOffset));
+ // The mask should be encodable as an immediate in the bic instruction
+ // so that a single instruction suffices; the DCHECK verifies this.
+
+ uint32_t mask = Code::kFlagsNotUsedInLookup;
+ DCHECK(__ ImmediateFitsAddrMode1Instruction(mask));
+ __ bic(flags_reg, flags_reg, Operand(mask));
+ __ cmp(flags_reg, Operand(flags));
+ __ b(ne, &miss);
+
+#ifdef DEBUG
+ if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
+ __ jmp(&miss);
+ } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
+ __ jmp(&miss);
+ }
+#endif
+
+ // Jump to the first instruction in the code stub.
+ __ add(pc, code, Operand(Code::kHeaderSize - kHeapObjectTag));
+
+ // Miss: fall through.
+ __ bind(&miss);
+}
+
+
+void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
+ Register receiver, Register name,
+ Register scratch, Register extra, Register extra2,
+ Register extra3) {
+ Isolate* isolate = masm->isolate();
+ Label miss;
+
+ // Make sure that the code is valid. The multiplying code in ProbeTable
+ // relies on the entry size being 12 bytes (three pointer-size fields).
+ DCHECK(sizeof(Entry) == 12);
+
+ // Make sure that the flags do not name a specific type.
+ DCHECK(Code::ExtractTypeFromFlags(flags) == 0);
+
+ // Make sure that there are no register conflicts.
+ DCHECK(!scratch.is(receiver));
+ DCHECK(!scratch.is(name));
+ DCHECK(!extra.is(receiver));
+ DCHECK(!extra.is(name));
+ DCHECK(!extra.is(scratch));
+ DCHECK(!extra2.is(receiver));
+ DCHECK(!extra2.is(name));
+ DCHECK(!extra2.is(scratch));
+ DCHECK(!extra2.is(extra));
+
+ // Check that the scratch, extra, extra2 and extra3 registers are valid.
+ DCHECK(!scratch.is(no_reg));
+ DCHECK(!extra.is(no_reg));
+ DCHECK(!extra2.is(no_reg));
+ DCHECK(!extra3.is(no_reg));
+
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1, extra2,
+ extra3);
+
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(receiver, &miss);
+
+ // Get the map of the receiver and compute the hash.
+ __ ldr(scratch, FieldMemOperand(name, Name::kHashFieldOffset));
+ __ ldr(ip, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ add(scratch, scratch, Operand(ip));
+ uint32_t mask = kPrimaryTableSize - 1;
+ // We shift out the last two bits because they are not part of the hash and
+ // they are always 01 for maps.
+ __ mov(scratch, Operand(scratch, LSR, kCacheIndexShift));
+ // Mask down the eor argument to the minimum to keep the immediate
+ // ARM-encodable.
+ __ eor(scratch, scratch, Operand((flags >> kCacheIndexShift) & mask));
+ // Prefer and_ to ubfx here because ubfx takes 2 cycles.
+ __ and_(scratch, scratch, Operand(mask));
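+ // scratch now holds the primary table index:
+ //   ((hash_field + map) >> kCacheIndexShift ^ (flags >> kCacheIndexShift))
+ //       & (kPrimaryTableSize - 1)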
+
+ // Probe the primary table.
+ ProbeTable(isolate, masm, flags, kPrimary, receiver, name, scratch, extra,
+ extra2, extra3);
+
+ // Primary miss: Compute hash for secondary probe.
+ __ sub(scratch, scratch, Operand(name, LSR, kCacheIndexShift));
+ uint32_t mask2 = kSecondaryTableSize - 1;
+ __ add(scratch, scratch, Operand((flags >> kCacheIndexShift) & mask2));
+ __ and_(scratch, scratch, Operand(mask2));
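+ // scratch now holds the secondary table index:
+ //   (primary_index - (name >> kCacheIndexShift)
+ //       + (flags >> kCacheIndexShift)) & (kSecondaryTableSize - 1)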
+
+ // Probe the secondary table.
+ ProbeTable(isolate, masm, flags, kSecondary, receiver, name, scratch, extra,
+ extra2, extra3);
+
+ // Cache miss: Fall-through and let caller handle the miss by
+ // entering the runtime system.
+ __ bind(&miss);
+ __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1, extra2,
+ extra3);
+}
+
+
+#undef __
+}
+} // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_ARM
--- /dev/null
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_ARM64
+
+#include "src/codegen.h"
+#include "src/ic/ic.h"
+#include "src/ic/stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+
+#define __ ACCESS_MASM(masm)
+
+
+// "type" holds an instance type on entry and is not clobbered.
+// The generated code branches to "global_object" if "type" is any kind of
+// global JS object.
+static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type,
+ Label* global_object) {
+ __ Cmp(type, JS_GLOBAL_OBJECT_TYPE);
+ __ Ccmp(type, JS_BUILTINS_OBJECT_TYPE, ZFlag, ne);
+ __ Ccmp(type, JS_GLOBAL_PROXY_TYPE, ZFlag, ne);
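+ // Each Ccmp above only performs its comparison while no match has been
+ // found (ne); once a comparison sets eq, the flags are forced to ZFlag
+ // (equal) and carried through, so eq here means one of the three types
+ // matched.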
+ __ B(eq, global_object);
+}
+
+
+// Helper function used from LoadIC::GenerateNormal.
+//
+// elements: Property dictionary. It is not clobbered if a jump to the miss
+// label is done.
+// name: Property name. It is not clobbered if a jump to the miss label is
+// done.
+// result: Register for the result. It is only updated if a jump to the miss
+// label is not done.
+// The scratch registers need to be different from elements, name and result.
+// The generated code assumes that the receiver has slow properties,
+// is not a global object and does not have interceptors.
+static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss,
+ Register elements, Register name,
+ Register result, Register scratch1,
+ Register scratch2) {
+ DCHECK(!AreAliased(elements, name, scratch1, scratch2));
+ DCHECK(!AreAliased(result, scratch1, scratch2));
+
+ Label done;
+
+ // Probe the dictionary.
+ NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
+ name, scratch1, scratch2);
+
+ // If probing finds an entry check that the value is a normal property.
+ __ Bind(&done);
+
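+ // Each dictionary entry is a (key, value, details) triple, so the value
+ // lies one pointer past the entry found by the lookup and the details
+ // word two pointers past it.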
+ static const int kElementsStartOffset =
+ NameDictionary::kHeaderSize +
+ NameDictionary::kElementsStartIndex * kPointerSize;
+ static const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
+ __ Ldr(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
+ __ Tst(scratch1, Smi::FromInt(PropertyDetails::TypeField::kMask));
+ __ B(ne, miss);
+
+ // Get the value at the masked, scaled index and return.
+ __ Ldr(result,
+ FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize));
+}
+
+
+// Helper function used from StoreIC::GenerateNormal.
+//
+// elements: Property dictionary. It is not clobbered if a jump to the miss
+// label is done.
+// name: Property name. It is not clobbered if a jump to the miss label is
+// done.
+// value: The value to store (never clobbered).
+//
+// The generated code assumes that the receiver has slow properties,
+// is not a global object and does not have interceptors.
+static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss,
+ Register elements, Register name,
+ Register value, Register scratch1,
+ Register scratch2) {
+ DCHECK(!AreAliased(elements, name, value, scratch1, scratch2));
+
+ Label done;
+
+ // Probe the dictionary.
+ NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
+ name, scratch1, scratch2);
+
+ // If probing finds an entry in the dictionary check that the value
+ // is a normal property that is not read only.
+ __ Bind(&done);
+
+ static const int kElementsStartOffset =
+ NameDictionary::kHeaderSize +
+ NameDictionary::kElementsStartIndex * kPointerSize;
+ static const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
+ static const int kTypeAndReadOnlyMask =
+ PropertyDetails::TypeField::kMask |
+ PropertyDetails::AttributesField::encode(READ_ONLY);
+ __ Ldrsw(scratch1, UntagSmiFieldMemOperand(scratch2, kDetailsOffset));
+ __ Tst(scratch1, kTypeAndReadOnlyMask);
+ __ B(ne, miss);
+
+ // Store the value at the masked, scaled index and return.
+ static const int kValueOffset = kElementsStartOffset + kPointerSize;
+ __ Add(scratch2, scratch2, kValueOffset - kHeapObjectTag);
+ __ Str(value, MemOperand(scratch2));
+
+ // Update the write barrier. Make sure not to clobber the value.
+ __ Mov(scratch1, value);
+ __ RecordWrite(elements, scratch2, scratch1, kLRHasNotBeenSaved,
+ kDontSaveFPRegs);
+}
+
+
+// Checks the receiver for special cases (value type, slow case bits).
+// Falls through for regular JS objects and returns the map of the
+// receiver in 'map_scratch' if the receiver is not a smi.
+static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
+ Register receiver,
+ Register map_scratch,
+ Register scratch,
+ int interceptor_bit, Label* slow) {
+ DCHECK(!AreAliased(map_scratch, scratch));
+
+ // Check that the object isn't a smi.
+ __ JumpIfSmi(receiver, slow);
+ // Get the map of the receiver.
+ __ Ldr(map_scratch, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ // Check bit field.
+ __ Ldrb(scratch, FieldMemOperand(map_scratch, Map::kBitFieldOffset));
+ __ Tbnz(scratch, Map::kIsAccessCheckNeeded, slow);
+ __ Tbnz(scratch, interceptor_bit, slow);
+
+ // Check that the object is some kind of JS object EXCEPT JS Value type.
+ // In the case that the object is a value-wrapper object, we enter the
+ // runtime system to make sure that indexing into string objects works
+ // as intended.
+ STATIC_ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
+ __ Ldrb(scratch, FieldMemOperand(map_scratch, Map::kInstanceTypeOffset));
+ __ Cmp(scratch, JS_OBJECT_TYPE);
+ __ B(lt, slow);
+}
+
+
+// Loads an indexed element from a fast case array.
+// If 'not_fast_array' is NULL, the elements map check is not performed.
+//
+// receiver - holds the receiver on entry.
+// Unchanged unless 'result' is the same register.
+//
+// key - holds the smi key on entry.
+// Unchanged unless 'result' is the same register.
+//
+// elements - holds the elements of the receiver on exit.
+//
+// elements_map - holds the elements map on exit if the not_fast_array branch is
+// taken. Otherwise, this is used as a scratch register.
+//
+// result - holds the result on exit if the load succeeded.
+// Allowed to be the same as 'receiver' or 'key'.
+// Unchanged on bailout so 'receiver' and 'key' can be safely
+// used by further computation.
+static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
+ Register key, Register elements,
+ Register elements_map, Register scratch2,
+ Register result, Label* not_fast_array,
+ Label* slow) {
+ DCHECK(!AreAliased(receiver, key, elements, elements_map, scratch2));
+
+ // Check for fast array.
+ __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ if (not_fast_array != NULL) {
+ // Check that the object is in fast mode and writable.
+ __ Ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
+ __ JumpIfNotRoot(elements_map, Heap::kFixedArrayMapRootIndex,
+ not_fast_array);
+ } else {
+ __ AssertFastElements(elements);
+ }
+
+ // The elements_map register is only used for the not_fast_array path, which
+ // was handled above. From this point onward it is a scratch register.
+ Register scratch1 = elements_map;
+
+ // Check that the key (index) is within bounds.
+ __ Ldr(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
+ __ Cmp(key, scratch1);
+ __ B(hs, slow);
+
+ // Fast case: Do the load.
+ __ Add(scratch1, elements, FixedArray::kHeaderSize - kHeapObjectTag);
+ __ SmiUntag(scratch2, key);
+ __ Ldr(scratch2, MemOperand(scratch1, scratch2, LSL, kPointerSizeLog2));
+
+ // In case the loaded value is the_hole we have to consult GetProperty
+ // to ensure the prototype chain is searched.
+ __ JumpIfRoot(scratch2, Heap::kTheHoleValueRootIndex, slow);
+
+ // Move the value to the result register.
+ // 'result' can alias with 'receiver' or 'key' but these two must be
+ // preserved if we jump to 'slow'.
+ __ Mov(result, scratch2);
+}
+
+
+// Checks whether a key is an array index string or a unique name.
+// Falls through if the key is a unique name.
+// The map of the key is returned in 'map_scratch'.
+// If the jump to 'index_string' is taken, the hash of the key is left
+// in 'hash_scratch'.
+static void GenerateKeyNameCheck(MacroAssembler* masm, Register key,
+ Register map_scratch, Register hash_scratch,
+ Label* index_string, Label* not_unique) {
+ DCHECK(!AreAliased(key, map_scratch, hash_scratch));
+
+ // Is the key a name?
+ Label unique;
+ __ JumpIfObjectType(key, map_scratch, hash_scratch, LAST_UNIQUE_NAME_TYPE,
+ not_unique, hi);
+ STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
+ __ B(eq, &unique);
+
+ // Is the string an array index with cached numeric value?
+ __ Ldr(hash_scratch.W(), FieldMemOperand(key, Name::kHashFieldOffset));
+ __ TestAndBranchIfAllClear(hash_scratch, Name::kContainsCachedArrayIndexMask,
+ index_string);
+
+ // Is the string internalized? We know it's a string, so a single bit test is
+ // enough.
+ __ Ldrb(hash_scratch, FieldMemOperand(map_scratch, Map::kInstanceTypeOffset));
+ STATIC_ASSERT(kInternalizedTag == 0);
+ __ TestAndBranchIfAnySet(hash_scratch, kIsNotInternalizedMask, not_unique);
+
+ __ Bind(&unique);
+ // Fall through if the key is a unique name.
+}
+
+
+// Neither 'object' nor 'key' is modified by this function.
+//
+// If the 'unmapped_case' or 'slow_case' exit is taken, the 'map' register is
+// left with the object's elements map. Otherwise, it is used as a scratch
+// register.
+static MemOperand GenerateMappedArgumentsLookup(MacroAssembler* masm,
+ Register object, Register key,
+ Register map, Register scratch1,
+ Register scratch2,
+ Label* unmapped_case,
+ Label* slow_case) {
+ DCHECK(!AreAliased(object, key, map, scratch1, scratch2));
+
+ Heap* heap = masm->isolate()->heap();
+
+ // Check that the receiver is a JSObject. Because of the elements
+ // map check later, we do not need to check for interceptors or
+ // whether it requires access checks.
+ __ JumpIfSmi(object, slow_case);
+ // Check that the object is some kind of JSObject.
+ __ JumpIfObjectType(object, map, scratch1, FIRST_JS_RECEIVER_TYPE, slow_case,
+ lt);
+
+ // Check that the key is a positive smi.
+ __ JumpIfNotSmi(key, slow_case);
+ __ Tbnz(key, kXSignBit, slow_case);
+
+ // Load the elements object and check its map.
+ Handle<Map> arguments_map(heap->sloppy_arguments_elements_map());
+ __ Ldr(map, FieldMemOperand(object, JSObject::kElementsOffset));
+ __ CheckMap(map, scratch1, arguments_map, slow_case, DONT_DO_SMI_CHECK);
+
+ // Check if element is in the range of mapped arguments. If not, jump
+ // to the unmapped lookup.
+ __ Ldr(scratch1, FieldMemOperand(map, FixedArray::kLengthOffset));
+ __ Sub(scratch1, scratch1, Smi::FromInt(2));
+ __ Cmp(key, scratch1);
+ __ B(hs, unmapped_case);
+
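+ // The sloppy arguments elements array stores the context at index 0, the
+ // arguments backing store at index 1 and the mapped parameter slots from
+ // index 2 onwards, hence the offset of two pointers below.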
+ // Load element index and check whether it is the hole.
+ static const int offset =
+ FixedArray::kHeaderSize + 2 * kPointerSize - kHeapObjectTag;
+
+ __ Add(scratch1, map, offset);
+ __ SmiUntag(scratch2, key);
+ __ Ldr(scratch1, MemOperand(scratch1, scratch2, LSL, kPointerSizeLog2));
+ __ JumpIfRoot(scratch1, Heap::kTheHoleValueRootIndex, unmapped_case);
+
+ // Load value from context and return it.
+ __ Ldr(scratch2, FieldMemOperand(map, FixedArray::kHeaderSize));
+ __ SmiUntag(scratch1);
+ __ Lsl(scratch1, scratch1, kPointerSizeLog2);
+ __ Add(scratch1, scratch1, Context::kHeaderSize - kHeapObjectTag);
+ // The base of the result (scratch2) is passed to RecordWrite in
+ // KeyedStoreIC::GenerateSloppyArguments and it must be a HeapObject.
+ return MemOperand(scratch2, scratch1);
+}
+
+
+// The 'parameter_map' register must be loaded with the parameter map of the
+// arguments object and is overwritten.
+static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
+ Register key,
+ Register parameter_map,
+ Register scratch,
+ Label* slow_case) {
+ DCHECK(!AreAliased(key, parameter_map, scratch));
+
+ // Element is in arguments backing store, which is referenced by the
+ // second element of the parameter_map.
+ const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
+ Register backing_store = parameter_map;
+ __ Ldr(backing_store, FieldMemOperand(parameter_map, kBackingStoreOffset));
+ Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
+ __ CheckMap(backing_store, scratch, fixed_array_map, slow_case,
+ DONT_DO_SMI_CHECK);
+ __ Ldr(scratch, FieldMemOperand(backing_store, FixedArray::kLengthOffset));
+ __ Cmp(key, scratch);
+ __ B(hs, slow_case);
+
+ __ Add(backing_store, backing_store,
+ FixedArray::kHeaderSize - kHeapObjectTag);
+ __ SmiUntag(scratch, key);
+ return MemOperand(backing_store, scratch, LSL, kPointerSizeLog2);
+}
+
+
+void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
+ // The return address is in lr.
+ Register receiver = ReceiverRegister();
+ Register name = NameRegister();
+ DCHECK(receiver.is(x1));
+ DCHECK(name.is(x2));
+
+ // Probe the stub cache.
+ Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
+ Code::ComputeHandlerFlags(Code::LOAD_IC));
+ masm->isolate()->stub_cache()->GenerateProbe(masm, flags, receiver, name, x3,
+ x4, x5, x6);
+
+ // Cache miss: Jump to runtime.
+ GenerateMiss(masm);
+}
+
+
+void LoadIC::GenerateNormal(MacroAssembler* masm) {
+ Register dictionary = x0;
+ DCHECK(!dictionary.is(ReceiverRegister()));
+ DCHECK(!dictionary.is(NameRegister()));
+ Label slow;
+
+ __ Ldr(dictionary,
+ FieldMemOperand(ReceiverRegister(), JSObject::kPropertiesOffset));
+ GenerateDictionaryLoad(masm, &slow, dictionary, NameRegister(), x0, x3, x4);
+ __ Ret();
+
+ // Dictionary load failed, go slow (but don't miss).
+ __ Bind(&slow);
+ GenerateRuntimeGetProperty(masm);
+}
+
+
+void LoadIC::GenerateMiss(MacroAssembler* masm) {
+ // The return address is in lr.
+ Isolate* isolate = masm->isolate();
+ ASM_LOCATION("LoadIC::GenerateMiss");
+
+ __ IncrementCounter(isolate->counters()->load_miss(), 1, x3, x4);
+
+ // Perform tail call to the entry.
+ __ Push(ReceiverRegister(), NameRegister());
+ ExternalReference ref = ExternalReference(IC_Utility(kLoadIC_Miss), isolate);
+ __ TailCallExternalReference(ref, 2, 1);
+}
+
+
+void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
+ // The return address is in lr.
+ __ Push(ReceiverRegister(), NameRegister());
+ __ TailCallRuntime(Runtime::kGetProperty, 2, 1);
+}
+
+
+void KeyedLoadIC::GenerateSloppyArguments(MacroAssembler* masm) {
+ // The return address is in lr.
+ Register result = x0;
+ Register receiver = ReceiverRegister();
+ Register key = NameRegister();
+ DCHECK(receiver.is(x1));
+ DCHECK(key.is(x2));
+
+ Label miss, unmapped;
+
+ Register map_scratch = x0;
+ MemOperand mapped_location = GenerateMappedArgumentsLookup(
+ masm, receiver, key, map_scratch, x3, x4, &unmapped, &miss);
+ __ Ldr(result, mapped_location);
+ __ Ret();
+
+ __ Bind(&unmapped);
+ // The parameter map is left in map_scratch when the jump to 'unmapped' is taken.
+ MemOperand unmapped_location =
+ GenerateUnmappedArgumentsLookup(masm, key, map_scratch, x3, &miss);
+ __ Ldr(result, unmapped_location);
+ __ JumpIfRoot(result, Heap::kTheHoleValueRootIndex, &miss);
+ __ Ret();
+
+ __ Bind(&miss);
+ GenerateMiss(masm);
+}
+
+
+void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
+ ASM_LOCATION("KeyedStoreIC::GenerateSloppyArguments");
+ Label slow, notin;
+ Register value = ValueRegister();
+ Register key = NameRegister();
+ Register receiver = ReceiverRegister();
+ DCHECK(receiver.is(x1));
+ DCHECK(key.is(x2));
+ DCHECK(value.is(x0));
+
+ Register map = x3;
+
+ // These registers are used by GenerateMappedArgumentsLookup to build a
+ // MemOperand. They are live for as long as the MemOperand is live.
+ Register mapped1 = x4;
+ Register mapped2 = x5;
+
+ MemOperand mapped = GenerateMappedArgumentsLookup(
+ masm, receiver, key, map, mapped1, mapped2, ¬in, &slow);
+ Operand mapped_offset = mapped.OffsetAsOperand();
+ __ Str(value, mapped);
+ __ Add(x10, mapped.base(), mapped_offset);
+ __ Mov(x11, value);
+ __ RecordWrite(mapped.base(), x10, x11, kLRHasNotBeenSaved, kDontSaveFPRegs);
+ __ Ret();
+
+ __ Bind(¬in);
+
+ // These registers are used by GenerateUnmappedArgumentsLookup to build a
+ // MemOperand. They are live for as long as the MemOperand is live.
+ Register unmapped1 = map; // This is assumed to alias 'map'.
+ Register unmapped2 = x4;
+ MemOperand unmapped =
+ GenerateUnmappedArgumentsLookup(masm, key, unmapped1, unmapped2, &slow);
+ Operand unmapped_offset = unmapped.OffsetAsOperand();
+ __ Str(value, unmapped);
+ __ Add(x10, unmapped.base(), unmapped_offset);
+ __ Mov(x11, value);
+ __ RecordWrite(unmapped.base(), x10, x11, kLRHasNotBeenSaved,
+ kDontSaveFPRegs);
+ __ Ret();
+ __ Bind(&slow);
+ GenerateMiss(masm);
+}
+
+
+void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
+ // The return address is in lr.
+ Isolate* isolate = masm->isolate();
+
+ __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, x10, x11);
+
+ __ Push(ReceiverRegister(), NameRegister());
+
+ // Perform tail call to the entry.
+ ExternalReference ref =
+ ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate);
+
+ __ TailCallExternalReference(ref, 2, 1);
+}
+
+
+// IC register specifications
+const Register LoadIC::ReceiverRegister() { return x1; }
+const Register LoadIC::NameRegister() { return x2; }
+
+const Register LoadIC::SlotRegister() {
+ DCHECK(FLAG_vector_ics);
+ return x0;
+}
+
+
+const Register LoadIC::VectorRegister() {
+ DCHECK(FLAG_vector_ics);
+ return x3;
+}
+
+
+const Register StoreIC::ReceiverRegister() { return x1; }
+const Register StoreIC::NameRegister() { return x2; }
+const Register StoreIC::ValueRegister() { return x0; }
+
+
+const Register KeyedStoreIC::MapRegister() { return x3; }
+
+
+void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
+ // The return address is in lr.
+ __ Push(ReceiverRegister(), NameRegister());
+ __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
+}
+
+
+static void GenerateKeyedLoadWithSmiKey(MacroAssembler* masm, Register key,
+ Register receiver, Register scratch1,
+ Register scratch2, Register scratch3,
+ Register scratch4, Register scratch5,
+ Label* slow) {
+ DCHECK(!AreAliased(key, receiver, scratch1, scratch2, scratch3, scratch4,
+ scratch5));
+
+ Isolate* isolate = masm->isolate();
+ Label check_number_dictionary;
+ // If we can load the value, it should be returned in x0.
+ Register result = x0;
+
+ GenerateKeyedLoadReceiverCheck(masm, receiver, scratch1, scratch2,
+ Map::kHasIndexedInterceptor, slow);
+
+ // Check the receiver's map to see if it has fast elements.
+ __ CheckFastElements(scratch1, scratch2, &check_number_dictionary);
+
+ GenerateFastArrayLoad(masm, receiver, key, scratch3, scratch2, scratch1,
+ result, NULL, slow);
+ __ IncrementCounter(isolate->counters()->keyed_load_generic_smi(), 1,
+ scratch1, scratch2);
+ __ Ret();
+
+ __ Bind(&check_number_dictionary);
+ __ Ldr(scratch3, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ Ldr(scratch2, FieldMemOperand(scratch3, JSObject::kMapOffset));
+
+ // Check whether we have a number dictionary.
+ __ JumpIfNotRoot(scratch2, Heap::kHashTableMapRootIndex, slow);
+
+ __ LoadFromNumberDictionary(slow, scratch3, key, result, scratch1, scratch2,
+ scratch4, scratch5);
+ __ Ret();
+}
+
+static void GenerateKeyedLoadWithNameKey(MacroAssembler* masm, Register key,
+ Register receiver, Register scratch1,
+ Register scratch2, Register scratch3,
+ Register scratch4, Register scratch5,
+ Label* slow) {
+ DCHECK(!AreAliased(key, receiver, scratch1, scratch2, scratch3, scratch4,
+ scratch5));
+
+ Isolate* isolate = masm->isolate();
+ Label probe_dictionary, property_array_property;
+ // If we can load the value, it should be returned in x0.
+ Register result = x0;
+
+ GenerateKeyedLoadReceiverCheck(masm, receiver, scratch1, scratch2,
+ Map::kHasNamedInterceptor, slow);
+
+ // If the receiver is a fast-case object, check the keyed lookup cache.
+ // Otherwise probe the dictionary.
+ __ Ldr(scratch2, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+ __ Ldr(scratch3, FieldMemOperand(scratch2, HeapObject::kMapOffset));
+ __ JumpIfRoot(scratch3, Heap::kHashTableMapRootIndex, &probe_dictionary);
+
+ // We keep the map of the receiver in scratch1.
+ Register receiver_map = scratch1;
+
+ // Load the map of the receiver, compute the keyed lookup cache hash
+ // based on 32 bits of the map pointer and the name hash.
+ __ Ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ Mov(scratch2, Operand(receiver_map, ASR, KeyedLookupCache::kMapHashShift));
+ __ Ldr(scratch3.W(), FieldMemOperand(key, Name::kHashFieldOffset));
+ __ Eor(scratch2, scratch2, Operand(scratch3, ASR, Name::kHashShift));
+ int mask = KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask;
+ __ And(scratch2, scratch2, mask);
+
+ // Load the key (consisting of map and unique name) from the cache and
+ // check for match.
+ Label load_in_object_property;
+ static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
+ Label hit_on_nth_entry[kEntriesPerBucket];
+ ExternalReference cache_keys =
+ ExternalReference::keyed_lookup_cache_keys(isolate);
+
+ __ Mov(scratch3, cache_keys);
+ __ Add(scratch3, scratch3, Operand(scratch2, LSL, kPointerSizeLog2 + 1));
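+ // Each cache key is a (map, name) pair, i.e. two pointers, so the bucket
+ // index is scaled by 2 * kPointerSize (LSL kPointerSizeLog2 + 1).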
+
+ for (int i = 0; i < kEntriesPerBucket - 1; i++) {
+ Label try_next_entry;
+ // Load the map and advance scratch3 to the next entry.
+ __ Ldr(scratch4, MemOperand(scratch3, kPointerSize * 2, PostIndex));
+ __ Cmp(receiver_map, scratch4);
+ __ B(ne, &try_next_entry);
+ __ Ldr(scratch4, MemOperand(scratch3, -kPointerSize)); // Load name
+ __ Cmp(key, scratch4);
+ __ B(eq, &hit_on_nth_entry[i]);
+ __ Bind(&try_next_entry);
+ }
+
+ // Last entry.
+ __ Ldr(scratch4, MemOperand(scratch3, kPointerSize, PostIndex));
+ __ Cmp(receiver_map, scratch4);
+ __ B(ne, slow);
+ __ Ldr(scratch4, MemOperand(scratch3));
+ __ Cmp(key, scratch4);
+ __ B(ne, slow);
+
+ // Get field offset.
+ ExternalReference cache_field_offsets =
+ ExternalReference::keyed_lookup_cache_field_offsets(isolate);
+
+ // Hit on nth entry.
+ for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
+ __ Bind(&hit_on_nth_entry[i]);
+ __ Mov(scratch3, cache_field_offsets);
+ if (i != 0) {
+ __ Add(scratch2, scratch2, i);
+ }
+ __ Ldr(scratch4.W(), MemOperand(scratch3, scratch2, LSL, 2));
+ __ Ldrb(scratch5,
+ FieldMemOperand(receiver_map, Map::kInObjectPropertiesOffset));
+ __ Subs(scratch4, scratch4, scratch5);
+ __ B(ge, &property_array_property);
+ if (i != 0) {
+ __ B(&load_in_object_property);
+ }
+ }
+
+ // Load in-object property.
+ __ Bind(&load_in_object_property);
+ __ Ldrb(scratch5, FieldMemOperand(receiver_map, Map::kInstanceSizeOffset));
+ __ Add(scratch5, scratch5, scratch4); // Index from start of object.
+ __ Sub(receiver, receiver, kHeapObjectTag); // Remove the heap tag.
+ __ Ldr(result, MemOperand(receiver, scratch5, LSL, kPointerSizeLog2));
+ __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(), 1,
+ scratch1, scratch2);
+ __ Ret();
+
+ // Load property array property.
+ __ Bind(&property_array_property);
+ __ Ldr(scratch1, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+ __ Add(scratch1, scratch1, FixedArray::kHeaderSize - kHeapObjectTag);
+ __ Ldr(result, MemOperand(scratch1, scratch4, LSL, kPointerSizeLog2));
+ __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(), 1,
+ scratch1, scratch2);
+ __ Ret();
+
+ // Do a quick inline probe of the receiver's dictionary, if it exists.
+ __ Bind(&probe_dictionary);
+ __ Ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ Ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
+ GenerateGlobalInstanceTypeCheck(masm, scratch1, slow);
+ // Load the property.
+ GenerateDictionaryLoad(masm, slow, scratch2, key, result, scratch1, scratch3);
+ __ IncrementCounter(isolate->counters()->keyed_load_generic_symbol(), 1,
+ scratch1, scratch2);
+ __ Ret();
+}
+
+
+void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
+ // The return address is in lr.
+ Label slow, check_name, index_smi, index_name;
+
+ Register key = NameRegister();
+ Register receiver = ReceiverRegister();
+ DCHECK(key.is(x2));
+ DCHECK(receiver.is(x1));
+
+ __ JumpIfNotSmi(key, &check_name);
+ __ Bind(&index_smi);
+ // Now the key is known to be a smi. This place is also jumped to from below
+ // where a numeric string is converted to a smi.
+ GenerateKeyedLoadWithSmiKey(masm, key, receiver, x7, x3, x4, x5, x6, &slow);
+
+ // Slow case.
+ __ Bind(&slow);
+ __ IncrementCounter(masm->isolate()->counters()->keyed_load_generic_slow(), 1,
+ x4, x3);
+ GenerateRuntimeGetProperty(masm);
+
+ __ Bind(&check_name);
+ GenerateKeyNameCheck(masm, key, x0, x3, &index_name, &slow);
+
+ GenerateKeyedLoadWithNameKey(masm, key, receiver, x7, x3, x4, x5, x6, &slow);
+
+ __ Bind(&index_name);
+ __ IndexFromHash(x3, key);
+ // Now jump to the place where smi keys are handled.
+ __ B(&index_smi);
+}
+
+
+void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
+ // Return address is in lr.
+ Label miss;
+
+ Register receiver = ReceiverRegister();
+ Register index = NameRegister();
+ Register result = x0;
+ Register scratch = x3;
+ DCHECK(!scratch.is(receiver) && !scratch.is(index));
+
+ StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
+ &miss, // When not a string.
+ &miss, // When not a number.
+ &miss, // When index out of range.
+ STRING_INDEX_IS_ARRAY_INDEX);
+ char_at_generator.GenerateFast(masm);
+ __ Ret();
+
+ StubRuntimeCallHelper call_helper;
+ char_at_generator.GenerateSlow(masm, call_helper);
+
+ __ Bind(&miss);
+ GenerateMiss(masm);
+}
+
+
+void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
+ // Return address is in lr.
+ Label slow;
+
+ Register receiver = ReceiverRegister();
+ Register key = NameRegister();
+ Register scratch1 = x3;
+ Register scratch2 = x4;
+ DCHECK(!AreAliased(scratch1, scratch2, receiver, key));
+
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(receiver, &slow);
+
+ // Check that the key is an array index, that is Uint32.
+ __ TestAndBranchIfAnySet(key, kSmiTagMask | kSmiSignMask, &slow);
+
+ // Get the map of the receiver.
+ Register map = scratch1;
+ __ Ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+
+ // Check that it has indexed interceptor and access checks
+ // are not enabled for this object.
+ __ Ldrb(scratch2, FieldMemOperand(map, Map::kBitFieldOffset));
+ DCHECK(kSlowCaseBitFieldMask == ((1 << Map::kIsAccessCheckNeeded) |
+ (1 << Map::kHasIndexedInterceptor)));
+ __ Tbnz(scratch2, Map::kIsAccessCheckNeeded, &slow);
+ __ Tbz(scratch2, Map::kHasIndexedInterceptor, &slow);
+
+ // Everything is fine, call runtime.
+ __ Push(receiver, key);
+ __ TailCallExternalReference(
+ ExternalReference(IC_Utility(kLoadElementWithInterceptor),
+ masm->isolate()),
+ 2, 1);
+
+ __ Bind(&slow);
+ GenerateMiss(masm);
+}
+
+
+void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
+ ASM_LOCATION("KeyedStoreIC::GenerateMiss");
+
+ // Push receiver, key and value for runtime call.
+ __ Push(ReceiverRegister(), NameRegister(), ValueRegister());
+
+ ExternalReference ref =
+ ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
+ __ TailCallExternalReference(ref, 3, 1);
+}
+
+
+void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
+ ASM_LOCATION("KeyedStoreIC::GenerateSlow");
+
+ // Push receiver, key and value for runtime call.
+ __ Push(ReceiverRegister(), NameRegister(), ValueRegister());
+
+ // The slow case calls into the runtime to complete the store without causing
+ // an IC miss that would otherwise cause a transition to the generic stub.
+ ExternalReference ref =
+ ExternalReference(IC_Utility(kKeyedStoreIC_Slow), masm->isolate());
+ __ TailCallExternalReference(ref, 3, 1);
+}
+
+
+void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
+ StrictMode strict_mode) {
+ ASM_LOCATION("KeyedStoreIC::GenerateRuntimeSetProperty");
+
+ // Push receiver, key and value for runtime call.
+ __ Push(ReceiverRegister(), NameRegister(), ValueRegister());
+
+ // Push strict_mode for runtime call.
+ __ Mov(x10, Smi::FromInt(strict_mode));
+ __ Push(x10);
+
+ __ TailCallRuntime(Runtime::kSetProperty, 4, 1);
+}
+
+
+static void KeyedStoreGenerateGenericHelper(
+ MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
+ KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length,
+ Register value, Register key, Register receiver, Register receiver_map,
+ Register elements_map, Register elements) {
+ DCHECK(!AreAliased(value, key, receiver, receiver_map, elements_map, elements,
+ x10, x11));
+
+ Label transition_smi_elements;
+ Label transition_double_elements;
+ Label fast_double_without_map_check;
+ Label non_double_value;
+ Label finish_store;
+
+ __ Bind(fast_object);
+ if (check_map == kCheckMap) {
+ __ Ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
+ __ Cmp(elements_map,
+ Operand(masm->isolate()->factory()->fixed_array_map()));
+ __ B(ne, fast_double);
+ }
+
+ // HOLECHECK: guards "A[i] = V"
+ // We have to go to the runtime if the current value is the hole because there
+ // may be a callback on the element.
+ Label holecheck_passed;
+ __ Add(x10, elements, FixedArray::kHeaderSize - kHeapObjectTag);
+ __ Add(x10, x10, Operand::UntagSmiAndScale(key, kPointerSizeLog2));
+ __ Ldr(x11, MemOperand(x10));
+ __ JumpIfNotRoot(x11, Heap::kTheHoleValueRootIndex, &holecheck_passed);
+ __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, x10, slow);
+ __ bind(&holecheck_passed);
+
+ // Smi stores don't require further checks.
+ __ JumpIfSmi(value, &finish_store);
+
+ // Escape to elements kind transition case.
+ __ CheckFastObjectElements(receiver_map, x10, &transition_smi_elements);
+
+ __ Bind(&finish_store);
+ if (increment_length == kIncrementLength) {
+ // Add 1 to receiver->length.
+ __ Add(x10, key, Smi::FromInt(1));
+ __ Str(x10, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ }
+
+ Register address = x11;
+ __ Add(address, elements, FixedArray::kHeaderSize - kHeapObjectTag);
+ __ Add(address, address, Operand::UntagSmiAndScale(key, kPointerSizeLog2));
+ __ Str(value, MemOperand(address));
+
+ Label dont_record_write;
+ __ JumpIfSmi(value, &dont_record_write);
+
+ // Update write barrier for the elements array address.
+ __ Mov(x10, value); // Preserve the value which is returned.
+ __ RecordWrite(elements, address, x10, kLRHasNotBeenSaved, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+
+ __ Bind(&dont_record_write);
+ __ Ret();
+
+
+ __ Bind(fast_double);
+ if (check_map == kCheckMap) {
+ // Check for fast double array case. If this fails, call through to the
+ // runtime.
+ __ JumpIfNotRoot(elements_map, Heap::kFixedDoubleArrayMapRootIndex, slow);
+ }
+
+ // HOLECHECK: guards "A[i] double hole?"
+ // We have to see if the double version of the hole is present. If so, go to
+ // the runtime.
+ __ Add(x10, elements, FixedDoubleArray::kHeaderSize - kHeapObjectTag);
+ __ Add(x10, x10, Operand::UntagSmiAndScale(key, kPointerSizeLog2));
+ __ Ldr(x11, MemOperand(x10));
+ __ CompareAndBranch(x11, kHoleNanInt64, ne, &fast_double_without_map_check);
+ __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, x10, slow);
+
+ __ Bind(&fast_double_without_map_check);
+ __ StoreNumberToDoubleElements(value, key, elements, x10, d0,
+ &transition_double_elements);
+ if (increment_length == kIncrementLength) {
+ // Add 1 to receiver->length.
+ __ Add(x10, key, Smi::FromInt(1));
+ __ Str(x10, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ }
+ __ Ret();
+
+
+ __ Bind(&transition_smi_elements);
+ // Transition the array appropriately depending on the value type.
+ __ Ldr(x10, FieldMemOperand(value, HeapObject::kMapOffset));
+ __ JumpIfNotRoot(x10, Heap::kHeapNumberMapRootIndex, &non_double_value);
+
+ // Value is a double. Transition FAST_SMI_ELEMENTS ->
+ // FAST_DOUBLE_ELEMENTS and complete the store.
+ __ LoadTransitionedArrayMapConditional(
+ FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, x10, x11, slow);
+ AllocationSiteMode mode =
+ AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS);
+ ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value,
+ receiver_map, mode, slow);
+ __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ B(&fast_double_without_map_check);
+
+ __ Bind(&non_double_value);
+ // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS.
+ __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS,
+ receiver_map, x10, x11, slow);
+
+ mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
+ ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
+ masm, receiver, key, value, receiver_map, mode, slow);
+
+ __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ B(&finish_store);
+
+ __ Bind(&transition_double_elements);
+ // Elements are FAST_DOUBLE_ELEMENTS, but the value is an Object that is
+ // not a HeapNumber. Make sure that the receiver is an array with
+ // FAST_ELEMENTS and transition the array from FAST_DOUBLE_ELEMENTS to
+ // FAST_ELEMENTS.
+ __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS,
+ receiver_map, x10, x11, slow);
+ mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
+ ElementsTransitionGenerator::GenerateDoubleToObject(
+ masm, receiver, key, value, receiver_map, mode, slow);
+ __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ B(&finish_store);
+}
+
+
+void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
+ StrictMode strict_mode) {
+ ASM_LOCATION("KeyedStoreIC::GenerateGeneric");
+ Label slow;
+ Label array;
+ Label fast_object;
+ Label extra;
+ Label fast_object_grow;
+ Label fast_double_grow;
+ Label fast_double;
+
+ Register value = ValueRegister();
+ Register key = NameRegister();
+ Register receiver = ReceiverRegister();
+ DCHECK(receiver.is(x1));
+ DCHECK(key.is(x2));
+ DCHECK(value.is(x0));
+
+ Register receiver_map = x3;
+ Register elements = x4;
+ Register elements_map = x5;
+
+ __ JumpIfNotSmi(key, &slow);
+ __ JumpIfSmi(receiver, &slow);
+ __ Ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+
+ // Check that the receiver does not require access checks and is not observed.
+ // The generic stub does not perform map checks or handle observed objects.
+ __ Ldrb(x10, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
+ __ TestAndBranchIfAnySet(
+ x10, (1 << Map::kIsAccessCheckNeeded) | (1 << Map::kIsObserved), &slow);
+
+ // Check if the object is a JS array or not.
+ Register instance_type = x10;
+ __ CompareInstanceType(receiver_map, instance_type, JS_ARRAY_TYPE);
+ __ B(eq, &array);
+ // Check that the object is some kind of JSObject.
+ __ Cmp(instance_type, FIRST_JS_OBJECT_TYPE);
+ __ B(lt, &slow);
+
+ // Object case: Check key against length in the elements array.
+ __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ // Check array bounds. Both the key and the length of FixedArray are smis.
+ __ Ldrsw(x10, UntagSmiFieldMemOperand(elements, FixedArray::kLengthOffset));
+ __ Cmp(x10, Operand::UntagSmi(key));
+ __ B(hi, &fast_object);
+
+
+ __ Bind(&slow);
+ // Slow case, handle jump to runtime.
+ // Live values:
+ // x0: value
+ // x1: key
+ // x2: receiver
+ GenerateRuntimeSetProperty(masm, strict_mode);
+
+
+ __ Bind(&extra);
+ // Extra capacity case: Check if there is extra capacity to
+ // perform the store and update the length. Used for adding one
+ // element to the array by writing to array[array.length].
+
+ // Check for room in the elements backing store.
+ // Both the key and the length of FixedArray are smis.
+ __ Ldrsw(x10, UntagSmiFieldMemOperand(elements, FixedArray::kLengthOffset));
+ __ Cmp(x10, Operand::UntagSmi(key));
+ __ B(ls, &slow);
+
+ __ Ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
+ __ Cmp(elements_map, Operand(masm->isolate()->factory()->fixed_array_map()));
+ __ B(eq, &fast_object_grow);
+ __ Cmp(elements_map,
+ Operand(masm->isolate()->factory()->fixed_double_array_map()));
+ __ B(eq, &fast_double_grow);
+ __ B(&slow);
+
+
+ __ Bind(&array);
+ // Array case: Get the length and the elements array from the JS
+ // array. Check that the array is in fast mode (and writable); if it
+ // is, the length is always a smi.
+
+ __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+
+ // Check the key against the length in the array.
+ __ Ldrsw(x10, UntagSmiFieldMemOperand(receiver, JSArray::kLengthOffset));
+ __ Cmp(x10, Operand::UntagSmi(key));
+ __ B(eq, &extra); // We can handle the case where we are appending 1 element.
+ __ B(lo, &slow);
+
+ KeyedStoreGenerateGenericHelper(
+ masm, &fast_object, &fast_double, &slow, kCheckMap, kDontIncrementLength,
+ value, key, receiver, receiver_map, elements_map, elements);
+ KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow,
+ &slow, kDontCheckMap, kIncrementLength, value,
+ key, receiver, receiver_map, elements_map,
+ elements);
+}
+
+
+void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
+ Register receiver = ReceiverRegister();
+ Register name = NameRegister();
+ DCHECK(!AreAliased(receiver, name, ValueRegister(), x3, x4, x5, x6));
+
+ // Probe the stub cache.
+ Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
+ Code::ComputeHandlerFlags(Code::STORE_IC));
+ masm->isolate()->stub_cache()->GenerateProbe(masm, flags, receiver, name, x3,
+ x4, x5, x6);
+
+ // Cache miss: Jump to runtime.
+ GenerateMiss(masm);
+}
+
+
+void StoreIC::GenerateMiss(MacroAssembler* masm) {
+ __ Push(ReceiverRegister(), NameRegister(), ValueRegister());
+
+ // Tail call to the entry.
+ ExternalReference ref =
+ ExternalReference(IC_Utility(kStoreIC_Miss), masm->isolate());
+ __ TailCallExternalReference(ref, 3, 1);
+}
+
+
+void StoreIC::GenerateNormal(MacroAssembler* masm) {
+ Label miss;
+ Register value = ValueRegister();
+ Register receiver = ReceiverRegister();
+ Register name = NameRegister();
+ Register dictionary = x3;
+ DCHECK(!AreAliased(value, receiver, name, x3, x4, x5));
+
+ __ Ldr(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+
+ GenerateDictionaryStore(masm, &miss, dictionary, name, value, x4, x5);
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->store_normal_hit(), 1, x4, x5);
+ __ Ret();
+
+ // Cache miss: Jump to runtime.
+ __ Bind(&miss);
+ __ IncrementCounter(counters->store_normal_miss(), 1, x4, x5);
+ GenerateMiss(masm);
+}
+
+
+void StoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
+ StrictMode strict_mode) {
+ ASM_LOCATION("StoreIC::GenerateRuntimeSetProperty");
+
+ __ Push(ReceiverRegister(), NameRegister(), ValueRegister());
+
+ __ Mov(x10, Smi::FromInt(strict_mode));
+ __ Push(x10);
+
+ // Do tail-call to runtime routine.
+ __ TailCallRuntime(Runtime::kSetProperty, 4, 1);
+}
+
+
+void StoreIC::GenerateSlow(MacroAssembler* masm) {
+ // ---------- S t a t e --------------
+ // -- x0 : value
+ // -- x1 : receiver
+ // -- x2 : name
+ // -- lr : return address
+ // -----------------------------------
+
+ // Push receiver, name and value for runtime call.
+ __ Push(ReceiverRegister(), NameRegister(), ValueRegister());
+
+ // The slow case calls into the runtime to complete the store without causing
+ // an IC miss that would otherwise cause a transition to the generic stub.
+ ExternalReference ref =
+ ExternalReference(IC_Utility(kStoreIC_Slow), masm->isolate());
+ __ TailCallExternalReference(ref, 3, 1);
+}
+
+
+Condition CompareIC::ComputeCondition(Token::Value op) {
+ switch (op) {
+ case Token::EQ_STRICT:
+ case Token::EQ:
+ return eq;
+ case Token::LT:
+ return lt;
+ case Token::GT:
+ return gt;
+ case Token::LTE:
+ return le;
+ case Token::GTE:
+ return ge;
+ default:
+ UNREACHABLE();
+ return al;
+ }
+}
+
+
+bool CompareIC::HasInlinedSmiCode(Address address) {
+ // The address of the instruction following the call.
+ Address info_address = Assembler::return_address_from_call_start(address);
+
+ InstructionSequence* patch_info = InstructionSequence::At(info_address);
+ return patch_info->IsInlineData();
+}
+
+
+// Activate a SMI fast-path by patching the instructions generated by
+// JumpPatchSite::EmitJumpIf(Not)Smi(), using the information encoded by
+// JumpPatchSite::EmitPatchInfo().
+void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
+ // The patch information is encoded in the instruction stream using
+ // instructions which have no side effects, so we can safely execute them.
+ // The patch information is encoded directly after the call to the helper
+ // function which is requesting this patch operation.
+ Address info_address = Assembler::return_address_from_call_start(address);
+ InlineSmiCheckInfo info(info_address);
+
+ // Check and decode the patch information instruction.
+ if (!info.HasSmiCheck()) {
+ return;
+ }
+
+ if (FLAG_trace_ic) {
+ PrintF("[ Patching ic at %p, marker=%p, SMI check=%p\n", address,
+ info_address, reinterpret_cast<void*>(info.SmiCheck()));
+ }
+
+ // Patch and activate code generated by JumpPatchSite::EmitJumpIfNotSmi()
+ // and JumpPatchSite::EmitJumpIfSmi().
+ // Changing
+ // tb(n)z xzr, #0, <target>
+ // to
+ // tb(!n)z test_reg, #0, <target>
+ Instruction* to_patch = info.SmiCheck();
+ PatchingAssembler patcher(to_patch, 1);
+ DCHECK(to_patch->IsTestBranch());
+ DCHECK(to_patch->ImmTestBranchBit5() == 0);
+ DCHECK(to_patch->ImmTestBranchBit40() == 0);
+
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagMask == 1);
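+ // With kSmiTag == 0 and kSmiTagMask == 1, bit 0 distinguishes smis from
+ // heap objects, so patching a single test-bit instruction is enough.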
+
+ int branch_imm = to_patch->ImmTestBranch();
+ Register smi_reg;
+ if (check == ENABLE_INLINED_SMI_CHECK) {
+ DCHECK(to_patch->Rt() == xzr.code());
+ smi_reg = info.SmiRegister();
+ } else {
+ DCHECK(check == DISABLE_INLINED_SMI_CHECK);
+ DCHECK(to_patch->Rt() != xzr.code());
+ smi_reg = xzr;
+ }
+
+ if (to_patch->Mask(TestBranchMask) == TBZ) {
+ // This is JumpIfNotSmi(smi_reg, branch_imm).
+ patcher.tbnz(smi_reg, 0, branch_imm);
+ } else {
+ DCHECK(to_patch->Mask(TestBranchMask) == TBNZ);
+ // This is JumpIfSmi(smi_reg, branch_imm).
+ patcher.tbz(smi_reg, 0, branch_imm);
+ }
+}
+}
+} // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_ARM64
--- /dev/null
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_ARM64
+
+#include "src/ic/ic-compiler.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+
+void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
+ MacroAssembler* masm, Label* miss_label, Register receiver,
+ Handle<Name> name, Register scratch0, Register scratch1) {
+ DCHECK(!AreAliased(receiver, scratch0, scratch1));
+ DCHECK(name->IsUniqueName());
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->negative_lookups(), 1, scratch0, scratch1);
+ __ IncrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
+
+ Label done;
+
+ const int kInterceptorOrAccessCheckNeededMask =
+ (1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded);
+
+ // Bail out if the receiver has a named interceptor or requires access checks.
+ Register map = scratch1;
+ __ Ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ Ldrb(scratch0, FieldMemOperand(map, Map::kBitFieldOffset));
+ __ Tst(scratch0, kInterceptorOrAccessCheckNeededMask);
+ __ B(ne, miss_label);
+
+ // Check that receiver is a JSObject.
+ __ Ldrb(scratch0, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ __ Cmp(scratch0, FIRST_SPEC_OBJECT_TYPE);
+ __ B(lt, miss_label);
+
+ // Load properties array.
+ Register properties = scratch0;
+ __ Ldr(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+ // Check that the properties array is a dictionary.
+ __ Ldr(map, FieldMemOperand(properties, HeapObject::kMapOffset));
+ __ JumpIfNotRoot(map, Heap::kHashTableMapRootIndex, miss_label);
+
+ NameDictionaryLookupStub::GenerateNegativeLookup(
+ masm, miss_label, &done, receiver, properties, name, scratch1);
+ __ Bind(&done);
+ __ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
+}
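+
+// In effect the fast path above performs (a sketch in C++ terms):
+//   if (map->has_named_interceptor() || map->is_access_check_needed()) miss;
+//   if (map->instance_type() < FIRST_SPEC_OBJECT_TYPE) miss;
+//   if (properties->map() != heap->hash_table_map()) miss;
+//   if (dictionary.FindEntry(name) != kNotFound) miss;  // via the stub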
+
+
+void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype(
+ MacroAssembler* masm, int index, Register prototype, Label* miss) {
+ Isolate* isolate = masm->isolate();
+ // Get the global function with the given index.
+ Handle<JSFunction> function(
+ JSFunction::cast(isolate->native_context()->get(index)));
+
+ // Check we're still in the same context.
+ Register scratch = prototype;
+ __ Ldr(scratch, GlobalObjectMemOperand());
+ __ Ldr(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
+ __ Ldr(scratch, ContextMemOperand(scratch, index));
+ __ Cmp(scratch, Operand(function));
+ __ B(ne, miss);
+
+ // Load its initial map. The global functions all have initial maps.
+ __ Mov(prototype, Operand(Handle<Map>(function->initial_map())));
+ // Load the prototype from the initial map.
+ __ Ldr(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset));
+}
+
+
+void NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(
+ MacroAssembler* masm, Register receiver, Register scratch1,
+ Register scratch2, Label* miss_label) {
+ __ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label);
+  // TryGetFunctionPrototype can't put the result directly in x0 because the
+  // three input registers can't alias and we call this function from
+  // LoadIC::GenerateFunctionPrototype, where receiver is x0. So we explicitly
+  // move the result into x0.
+ __ Mov(x0, scratch1);
+ __ Ret();
+}
+
+
+// Generate code to check that a global property cell is empty. Create
+// the property cell at compilation time if no cell exists for the
+// property.
+void PropertyHandlerCompiler::GenerateCheckPropertyCell(
+ MacroAssembler* masm, Handle<JSGlobalObject> global, Handle<Name> name,
+ Register scratch, Label* miss) {
+ Handle<Cell> cell = JSGlobalObject::EnsurePropertyCell(global, name);
+ DCHECK(cell->value()->IsTheHole());
+ __ Mov(scratch, Operand(cell));
+ __ Ldr(scratch, FieldMemOperand(scratch, Cell::kValueOffset));
+ __ JumpIfNotRoot(scratch, Heap::kTheHoleValueRootIndex, miss);
+}
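+
+// A sketch of the generated check: if (cell->value() != the_hole) goto miss;
+// the DCHECK above guarantees the cell holds the hole at compile time.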
+
+
+static void PushInterceptorArguments(MacroAssembler* masm, Register receiver,
+ Register holder, Register name,
+ Handle<JSObject> holder_obj) {
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0);
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsInfoIndex == 1);
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 2);
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 3);
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsLength == 4);
+
+ __ Push(name);
+ Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
+ DCHECK(!masm->isolate()->heap()->InNewSpace(*interceptor));
+ Register scratch = name;
+ __ Mov(scratch, Operand(interceptor));
+ __ Push(scratch, receiver, holder);
+}
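+
+// Stack after these pushes (a sketch): holder on top, then receiver, then the
+// interceptor info, with name deepest, matching the kInterceptorArgs*Index
+// ordering asserted above.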
+
+
+static void CompileCallLoadPropertyWithInterceptor(
+ MacroAssembler* masm, Register receiver, Register holder, Register name,
+ Handle<JSObject> holder_obj, IC::UtilityId id) {
+ PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
+
+ __ CallExternalReference(ExternalReference(IC_Utility(id), masm->isolate()),
+ NamedLoadHandlerCompiler::kInterceptorArgsLength);
+}
+
+
+// Generate a call to an API function.
+void PropertyHandlerCompiler::GenerateFastApiCall(
+ MacroAssembler* masm, const CallOptimization& optimization,
+ Handle<Map> receiver_map, Register receiver, Register scratch,
+ bool is_store, int argc, Register* values) {
+ DCHECK(!AreAliased(receiver, scratch));
+
+ MacroAssembler::PushPopQueue queue(masm);
+ queue.Queue(receiver);
+ // Write the arguments to the stack frame.
+ for (int i = 0; i < argc; i++) {
+ Register arg = values[argc - 1 - i];
+ DCHECK(!AreAliased(receiver, scratch, arg));
+ queue.Queue(arg);
+ }
+ queue.PushQueued();
+
+ DCHECK(optimization.is_simple_api_call());
+
+ // Abi for CallApiFunctionStub.
+ Register callee = x0;
+ Register call_data = x4;
+ Register holder = x2;
+ Register api_function_address = x1;
+
+ // Put holder in place.
+ CallOptimization::HolderLookup holder_lookup;
+ Handle<JSObject> api_holder =
+ optimization.LookupHolderOfExpectedType(receiver_map, &holder_lookup);
+ switch (holder_lookup) {
+ case CallOptimization::kHolderIsReceiver:
+ __ Mov(holder, receiver);
+ break;
+ case CallOptimization::kHolderFound:
+ __ LoadObject(holder, api_holder);
+ break;
+ case CallOptimization::kHolderNotFound:
+ UNREACHABLE();
+ break;
+ }
+
+ Isolate* isolate = masm->isolate();
+ Handle<JSFunction> function = optimization.constant_function();
+ Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
+ Handle<Object> call_data_obj(api_call_info->data(), isolate);
+
+ // Put callee in place.
+ __ LoadObject(callee, function);
+
+ bool call_data_undefined = false;
+ // Put call_data in place.
+ if (isolate->heap()->InNewSpace(*call_data_obj)) {
+ __ LoadObject(call_data, api_call_info);
+ __ Ldr(call_data, FieldMemOperand(call_data, CallHandlerInfo::kDataOffset));
+ } else if (call_data_obj->IsUndefined()) {
+ call_data_undefined = true;
+ __ LoadRoot(call_data, Heap::kUndefinedValueRootIndex);
+ } else {
+ __ LoadObject(call_data, call_data_obj);
+ }
+
+ // Put api_function_address in place.
+ Address function_address = v8::ToCData<Address>(api_call_info->callback());
+ ApiFunction fun(function_address);
+ ExternalReference ref = ExternalReference(
+ &fun, ExternalReference::DIRECT_API_CALL, masm->isolate());
+ __ Mov(api_function_address, ref);
+
+ // Jump to stub.
+ CallApiFunctionStub stub(isolate, is_store, call_data_undefined, argc);
+ __ TailCallStub(&stub);
+}
+
+
+void PropertyAccessCompiler::GenerateTailCall(MacroAssembler* masm,
+ Handle<Code> code) {
+ __ Jump(code, RelocInfo::CODE_TARGET);
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm())
+
+
+void NamedStoreHandlerCompiler::GenerateRestoreName(Label* label,
+ Handle<Name> name) {
+ if (!label->is_unused()) {
+ __ Bind(label);
+ __ Mov(this->name(), Operand(name));
+ }
+}
+
+
+// Generate StoreTransition code; the value is passed in the x0 register.
+// When leaving generated code after success, the receiver_reg and storage_reg
+// may be clobbered. Upon branch to miss_label, the receiver and name registers
+// have their original values.
+void NamedStoreHandlerCompiler::GenerateStoreTransition(
+ Handle<Map> transition, Handle<Name> name, Register receiver_reg,
+ Register storage_reg, Register value_reg, Register scratch1,
+ Register scratch2, Register scratch3, Label* miss_label, Label* slow) {
+ Label exit;
+
+ DCHECK(!AreAliased(receiver_reg, storage_reg, value_reg, scratch1, scratch2,
+ scratch3));
+
+ // We don't need scratch3.
+ scratch3 = NoReg;
+
+ int descriptor = transition->LastAdded();
+ DescriptorArray* descriptors = transition->instance_descriptors();
+ PropertyDetails details = descriptors->GetDetails(descriptor);
+ Representation representation = details.representation();
+ DCHECK(!representation.IsNone());
+
+ if (details.type() == CONSTANT) {
+ Handle<Object> constant(descriptors->GetValue(descriptor), isolate());
+ __ LoadObject(scratch1, constant);
+ __ Cmp(value_reg, scratch1);
+ __ B(ne, miss_label);
+ } else if (representation.IsSmi()) {
+ __ JumpIfNotSmi(value_reg, miss_label);
+ } else if (representation.IsHeapObject()) {
+ __ JumpIfSmi(value_reg, miss_label);
+ HeapType* field_type = descriptors->GetFieldType(descriptor);
+ HeapType::Iterator<Map> it = field_type->Classes();
+ if (!it.Done()) {
+ __ Ldr(scratch1, FieldMemOperand(value_reg, HeapObject::kMapOffset));
+ Label do_store;
+ while (true) {
+ __ CompareMap(scratch1, it.Current());
+ it.Advance();
+ if (it.Done()) {
+ __ B(ne, miss_label);
+ break;
+ }
+ __ B(eq, &do_store);
+ }
+ __ Bind(&do_store);
+ }
+ } else if (representation.IsDouble()) {
+ UseScratchRegisterScope temps(masm());
+ DoubleRegister temp_double = temps.AcquireD();
+ __ SmiUntagToDouble(temp_double, value_reg, kSpeculativeUntag);
+
+ Label do_store;
+ __ JumpIfSmi(value_reg, &do_store);
+
+ __ CheckMap(value_reg, scratch1, Heap::kHeapNumberMapRootIndex, miss_label,
+ DONT_DO_SMI_CHECK);
+ __ Ldr(temp_double, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
+
+ __ Bind(&do_store);
+ __ AllocateHeapNumber(storage_reg, slow, scratch1, scratch2, temp_double,
+ NoReg, MUTABLE);
+ }
+
+ // Stub never generated for objects that require access checks.
+ DCHECK(!transition->is_access_check_needed());
+
+ // Perform map transition for the receiver if necessary.
+ if (details.type() == FIELD &&
+ Map::cast(transition->GetBackPointer())->unused_property_fields() == 0) {
+ // The properties must be extended before we can store the value.
+ // We jump to a runtime call that extends the properties array.
+ __ Mov(scratch1, Operand(transition));
+ __ Push(receiver_reg, scratch1, value_reg);
+ __ TailCallExternalReference(
+ ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage),
+ isolate()),
+ 3, 1);
+ return;
+ }
+
+ // Update the map of the object.
+ __ Mov(scratch1, Operand(transition));
+ __ Str(scratch1, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
+
+ // Update the write barrier for the map field.
+ __ RecordWriteField(receiver_reg, HeapObject::kMapOffset, scratch1, scratch2,
+ kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+
+ if (details.type() == CONSTANT) {
+ DCHECK(value_reg.is(x0));
+ __ Ret();
+ return;
+ }
+
+ int index = transition->instance_descriptors()->GetFieldIndex(
+ transition->LastAdded());
+
+ // Adjust for the number of properties stored in the object. Even in the
+ // face of a transition we can use the old map here because the size of the
+ // object and the number of in-object properties is not going to change.
+ index -= transition->inobject_properties();
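+
+  // After this adjustment (a sketch of the convention used below):
+  //   index < 0   in-object field at instance_size + index * kPointerSize
+  //   index >= 0  slot index into the out-of-object properties array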
+
+ // TODO(verwaest): Share this code as a code stub.
+ SmiCheck smi_check =
+ representation.IsTagged() ? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
+ Register prop_reg = representation.IsDouble() ? storage_reg : value_reg;
+ if (index < 0) {
+ // Set the property straight into the object.
+ int offset = transition->instance_size() + (index * kPointerSize);
+ __ Str(prop_reg, FieldMemOperand(receiver_reg, offset));
+
+ if (!representation.IsSmi()) {
+ // Update the write barrier for the array address.
+ if (!representation.IsDouble()) {
+ __ Mov(storage_reg, value_reg);
+ }
+ __ RecordWriteField(receiver_reg, offset, storage_reg, scratch1,
+ kLRHasNotBeenSaved, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, smi_check);
+ }
+ } else {
+ // Write to the properties array.
+ int offset = index * kPointerSize + FixedArray::kHeaderSize;
+    // Get the properties array.
+ __ Ldr(scratch1,
+ FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
+ __ Str(prop_reg, FieldMemOperand(scratch1, offset));
+
+ if (!representation.IsSmi()) {
+ // Update the write barrier for the array address.
+ if (!representation.IsDouble()) {
+ __ Mov(storage_reg, value_reg);
+ }
+ __ RecordWriteField(scratch1, offset, storage_reg, receiver_reg,
+ kLRHasNotBeenSaved, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, smi_check);
+ }
+ }
+
+ __ Bind(&exit);
+ // Return the value (register x0).
+ DCHECK(value_reg.is(x0));
+ __ Ret();
+}
+
+
+void NamedStoreHandlerCompiler::GenerateStoreField(LookupIterator* lookup,
+ Register value_reg,
+ Label* miss_label) {
+ DCHECK(lookup->representation().IsHeapObject());
+ __ JumpIfSmi(value_reg, miss_label);
+ HeapType::Iterator<Map> it = lookup->GetFieldType()->Classes();
+ __ Ldr(scratch1(), FieldMemOperand(value_reg, HeapObject::kMapOffset));
+ Label do_store;
+ while (true) {
+ __ CompareMap(scratch1(), it.Current());
+ it.Advance();
+ if (it.Done()) {
+ __ B(ne, miss_label);
+ break;
+ }
+ __ B(eq, &do_store);
+ }
+ __ Bind(&do_store);
+
+ StoreFieldStub stub(isolate(), lookup->GetFieldIndex(),
+ lookup->representation());
+ GenerateTailCall(masm(), stub.GetCode());
+}
+
+
+Register PropertyHandlerCompiler::CheckPrototypes(
+ Register object_reg, Register holder_reg, Register scratch1,
+ Register scratch2, Handle<Name> name, Label* miss,
+ PrototypeCheckType check) {
+ Handle<Map> receiver_map(IC::TypeToMap(*type(), isolate()));
+
+ // object_reg and holder_reg registers can alias.
+ DCHECK(!AreAliased(object_reg, scratch1, scratch2));
+ DCHECK(!AreAliased(holder_reg, scratch1, scratch2));
+
+ // Keep track of the current object in register reg.
+ Register reg = object_reg;
+ int depth = 0;
+
+ Handle<JSObject> current = Handle<JSObject>::null();
+ if (type()->IsConstant()) {
+ current = Handle<JSObject>::cast(type()->AsConstant()->Value());
+ }
+ Handle<JSObject> prototype = Handle<JSObject>::null();
+ Handle<Map> current_map = receiver_map;
+ Handle<Map> holder_map(holder()->map());
+ // Traverse the prototype chain and check the maps in the prototype chain for
+ // fast and global objects or do negative lookup for normal objects.
+ while (!current_map.is_identical_to(holder_map)) {
+ ++depth;
+
+ // Only global objects and objects that do not require access
+ // checks are allowed in stubs.
+ DCHECK(current_map->IsJSGlobalProxyMap() ||
+ !current_map->is_access_check_needed());
+
+ prototype = handle(JSObject::cast(current_map->prototype()));
+ if (current_map->is_dictionary_map() &&
+ !current_map->IsJSGlobalObjectMap()) {
+ DCHECK(!current_map->IsJSGlobalProxyMap()); // Proxy maps are fast.
+ if (!name->IsUniqueName()) {
+ DCHECK(name->IsString());
+ name = factory()->InternalizeString(Handle<String>::cast(name));
+ }
+ DCHECK(current.is_null() || (current->property_dictionary()->FindEntry(
+ name) == NameDictionary::kNotFound));
+
+ GenerateDictionaryNegativeLookup(masm(), miss, reg, name, scratch1,
+ scratch2);
+
+ __ Ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
+ reg = holder_reg; // From now on the object will be in holder_reg.
+ __ Ldr(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
+ } else {
+ // Two possible reasons for loading the prototype from the map:
+ // (1) Can't store references to new space in code.
+ // (2) Handler is shared for all receivers with the same prototype
+ // map (but not necessarily the same prototype instance).
+ bool load_prototype_from_map =
+ heap()->InNewSpace(*prototype) || depth == 1;
+ Register map_reg = scratch1;
+ __ Ldr(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
+
+ if (depth != 1 || check == CHECK_ALL_MAPS) {
+ __ CheckMap(map_reg, current_map, miss, DONT_DO_SMI_CHECK);
+ }
+
+ // Check access rights to the global object. This has to happen after
+ // the map check so that we know that the object is actually a global
+ // object.
+ // This allows us to install generated handlers for accesses to the
+ // global proxy (as opposed to using slow ICs). See corresponding code
+ // in LookupForRead().
+ if (current_map->IsJSGlobalProxyMap()) {
+ UseScratchRegisterScope temps(masm());
+ __ CheckAccessGlobalProxy(reg, scratch2, temps.AcquireX(), miss);
+ } else if (current_map->IsJSGlobalObjectMap()) {
+ GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
+ name, scratch2, miss);
+ }
+
+ reg = holder_reg; // From now on the object will be in holder_reg.
+
+ if (load_prototype_from_map) {
+ __ Ldr(reg, FieldMemOperand(map_reg, Map::kPrototypeOffset));
+ } else {
+ __ Mov(reg, Operand(prototype));
+ }
+ }
+
+ // Go to the next object in the prototype chain.
+ current = prototype;
+ current_map = handle(current->map());
+ }
+
+ // Log the check depth.
+ LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
+
+ if (depth != 0 || check == CHECK_ALL_MAPS) {
+ // Check the holder map.
+ __ CheckMap(reg, scratch1, current_map, miss, DONT_DO_SMI_CHECK);
+ }
+
+ // Perform security check for access to the global object.
+ DCHECK(current_map->IsJSGlobalProxyMap() ||
+ !current_map->is_access_check_needed());
+ if (current_map->IsJSGlobalProxyMap()) {
+ __ CheckAccessGlobalProxy(reg, scratch1, scratch2, miss);
+ }
+
+ // Return the register containing the holder.
+ return reg;
+}
+
+
+void NamedLoadHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
+ if (!miss->is_unused()) {
+ Label success;
+ __ B(&success);
+
+ __ Bind(miss);
+ TailCallBuiltin(masm(), MissBuiltin(kind()));
+
+ __ Bind(&success);
+ }
+}
+
+
+void NamedStoreHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
+ if (!miss->is_unused()) {
+ Label success;
+ __ B(&success);
+
+ GenerateRestoreName(miss, name);
+ TailCallBuiltin(masm(), MissBuiltin(kind()));
+
+ __ Bind(&success);
+ }
+}
+
+
+void NamedLoadHandlerCompiler::GenerateLoadConstant(Handle<Object> value) {
+ // Return the constant value.
+ __ LoadObject(x0, value);
+ __ Ret();
+}
+
+
+void NamedLoadHandlerCompiler::GenerateLoadCallback(
+ Register reg, Handle<ExecutableAccessorInfo> callback) {
+ DCHECK(!AreAliased(scratch2(), scratch3(), scratch4(), reg));
+
+  // Build the ExecutableAccessorInfo::args_ list on the stack and push the
+  // property name below the exit frame to make the GC aware of them and to
+  // store pointers to them.
+ STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 0);
+ STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 1);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 2);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 3);
+ STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 4);
+ STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 5);
+ STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 6);
+
+ __ Push(receiver());
+
+ if (heap()->InNewSpace(callback->data())) {
+ __ Mov(scratch3(), Operand(callback));
+ __ Ldr(scratch3(),
+ FieldMemOperand(scratch3(), ExecutableAccessorInfo::kDataOffset));
+ } else {
+ __ Mov(scratch3(), Operand(Handle<Object>(callback->data(), isolate())));
+ }
+ __ LoadRoot(scratch4(), Heap::kUndefinedValueRootIndex);
+ __ Mov(scratch2(), Operand(ExternalReference::isolate_address(isolate())));
+ __ Push(scratch3(), scratch4(), scratch4(), scratch2(), reg, name());
+
+ Register args_addr = scratch2();
+ __ Add(args_addr, __ StackPointer(), kPointerSize);
+
+ // Stack at this point:
+ // sp[40] callback data
+ // sp[32] undefined
+ // sp[24] undefined
+ // sp[16] isolate
+ // args_addr -> sp[8] reg
+ // sp[0] name
+
+ // Abi for CallApiGetter.
+ Register getter_address_reg = x2;
+
+ // Set up the call.
+ Address getter_address = v8::ToCData<Address>(callback->getter());
+ ApiFunction fun(getter_address);
+ ExternalReference::Type type = ExternalReference::DIRECT_GETTER_CALL;
+ ExternalReference ref = ExternalReference(&fun, type, isolate());
+ __ Mov(getter_address_reg, ref);
+
+ CallApiGetterStub stub(isolate());
+ __ TailCallStub(&stub);
+}
+
+
+void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
+ LookupIterator* it, Register holder_reg) {
+ DCHECK(!AreAliased(receiver(), this->name(), scratch1(), scratch2(),
+ scratch3()));
+ DCHECK(holder()->HasNamedInterceptor());
+ DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined());
+
+ // Compile the interceptor call, followed by inline code to load the
+ // property from further up the prototype chain if the call fails.
+ // Check that the maps haven't changed.
+ DCHECK(holder_reg.is(receiver()) || holder_reg.is(scratch1()));
+
+  // Explicitly preserve the receiver register whenever it is different from
+  // the holder and is needed should the interceptor return without a result:
+  // the ACCESSOR case needs the receiver to be passed into C++ code, and the
+  // FIELD case might cause a miss during the prototype check.
+ bool must_perform_prototype_check =
+ !holder().is_identical_to(it->GetHolder<JSObject>());
+ bool must_preserve_receiver_reg =
+ !receiver().is(holder_reg) &&
+ (it->property_kind() == LookupIterator::ACCESSOR ||
+ must_perform_prototype_check);
+
+ // Save necessary data before invoking an interceptor.
+ // Requires a frame to make GC aware of pushed pointers.
+ {
+ FrameScope frame_scope(masm(), StackFrame::INTERNAL);
+ if (must_preserve_receiver_reg) {
+ __ Push(receiver(), holder_reg, this->name());
+ } else {
+ __ Push(holder_reg, this->name());
+ }
+    // Invoke an interceptor. Note: the map checks from the receiver to the
+    // interceptor's holder have been compiled before (see the caller of this
+    // method).
+ CompileCallLoadPropertyWithInterceptor(
+ masm(), receiver(), holder_reg, this->name(), holder(),
+ IC::kLoadPropertyWithInterceptorOnly);
+
+ // Check if interceptor provided a value for property. If it's
+ // the case, return immediately.
+ Label interceptor_failed;
+ __ JumpIfRoot(x0, Heap::kNoInterceptorResultSentinelRootIndex,
+ &interceptor_failed);
+ frame_scope.GenerateLeaveFrame();
+ __ Ret();
+
+ __ Bind(&interceptor_failed);
+ if (must_preserve_receiver_reg) {
+ __ Pop(this->name(), holder_reg, receiver());
+ } else {
+ __ Pop(this->name(), holder_reg);
+ }
+ // Leave the internal frame.
+ }
+
+ GenerateLoadPostInterceptor(it, holder_reg);
+}
+
+
+void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
+ // Call the runtime system to load the interceptor.
+ DCHECK(holder()->HasNamedInterceptor());
+ DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined());
+ PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(),
+ holder());
+
+ ExternalReference ref = ExternalReference(
+ IC_Utility(IC::kLoadPropertyWithInterceptor), isolate());
+ __ TailCallExternalReference(
+ ref, NamedLoadHandlerCompiler::kInterceptorArgsLength, 1);
+}
+
+
+Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
+ Handle<JSObject> object, Handle<Name> name,
+ Handle<ExecutableAccessorInfo> callback) {
+ ASM_LOCATION("NamedStoreHandlerCompiler::CompileStoreCallback");
+ Register holder_reg = Frontend(receiver(), name);
+
+ // Stub never generated for non-global objects that require access checks.
+ DCHECK(holder()->IsJSGlobalProxy() || !holder()->IsAccessCheckNeeded());
+
+ // receiver() and holder_reg can alias.
+ DCHECK(!AreAliased(receiver(), scratch1(), scratch2(), value()));
+ DCHECK(!AreAliased(holder_reg, scratch1(), scratch2(), value()));
+ __ Mov(scratch1(), Operand(callback));
+ __ Mov(scratch2(), Operand(name));
+ __ Push(receiver(), holder_reg, scratch1(), scratch2(), value());
+
+ // Do tail-call to the runtime system.
+ ExternalReference store_callback_property =
+ ExternalReference(IC_Utility(IC::kStoreCallbackProperty), isolate());
+ __ TailCallExternalReference(store_callback_property, 5, 1);
+
+ // Return the generated code.
+ return GetCode(kind(), Code::FAST, name);
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm)
+
+
+void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
+ MacroAssembler* masm, Handle<HeapType> type, Register receiver,
+ Handle<JSFunction> setter) {
+ // ----------- S t a t e -------------
+ // -- lr : return address
+ // -----------------------------------
+ Label miss;
+
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Save value register, so we can restore it later.
+ __ Push(value());
+
+ if (!setter.is_null()) {
+ // Call the JavaScript setter with receiver and value on the stack.
+ if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
+ // Swap in the global receiver.
+ __ Ldr(receiver,
+ FieldMemOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
+ }
+ __ Push(receiver, value());
+ ParameterCount actual(1);
+ ParameterCount expected(setter);
+ __ InvokeFunction(setter, expected, actual, CALL_FUNCTION,
+ NullCallWrapper());
+ } else {
+ // If we generate a global code snippet for deoptimization only, remember
+ // the place to continue after deoptimization.
+ masm->isolate()->heap()->SetSetterStubDeoptPCOffset(masm->pc_offset());
+ }
+
+ // We have to return the passed value, not the return value of the setter.
+ __ Pop(x0);
+
+ // Restore context register.
+ __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ }
+ __ Ret();
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm())
+
+
+Handle<Code> NamedStoreHandlerCompiler::CompileStoreInterceptor(
+ Handle<Name> name) {
+ Label miss;
+
+ ASM_LOCATION("NamedStoreHandlerCompiler::CompileStoreInterceptor");
+
+ __ Push(receiver(), this->name(), value());
+
+ // Do tail-call to the runtime system.
+ ExternalReference store_ic_property = ExternalReference(
+ IC_Utility(IC::kStorePropertyWithInterceptor), isolate());
+ __ TailCallExternalReference(store_ic_property, 3, 1);
+
+ // Return the generated code.
+ return GetCode(kind(), Code::FAST, name);
+}
+
+
+// TODO(all): The so-called scratch registers are significant in some cases.
+// For example, PropertyAccessCompiler::keyed_store_calling_convention()[3]
+// (x3) is actually used for KeyedStoreCompiler::transition_map(). We should
+// verify which registers are actually scratch registers, and which are
+// important. For now, we use the same assignments as ARM to remain on the
+// safe side.
+
+Register* PropertyAccessCompiler::load_calling_convention() {
+ // receiver, name, scratch1, scratch2, scratch3, scratch4.
+ Register receiver = LoadIC::ReceiverRegister();
+ Register name = LoadIC::NameRegister();
+ static Register registers[] = {receiver, name, x3, x0, x4, x5};
+ return registers;
+}
+
+
+Register* PropertyAccessCompiler::store_calling_convention() {
+ // receiver, value, scratch1, scratch2, scratch3.
+ Register receiver = StoreIC::ReceiverRegister();
+ Register name = StoreIC::NameRegister();
+ DCHECK(x3.is(KeyedStoreIC::MapRegister()));
+ static Register registers[] = {receiver, name, x3, x4, x5};
+ return registers;
+}
+
+
+Register NamedStoreHandlerCompiler::value() { return StoreIC::ValueRegister(); }
+
+
+#undef __
+#define __ ACCESS_MASM(masm)
+
+void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
+ MacroAssembler* masm, Handle<HeapType> type, Register receiver,
+ Handle<JSFunction> getter) {
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ if (!getter.is_null()) {
+ // Call the JavaScript getter with the receiver on the stack.
+ if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
+ // Swap in the global receiver.
+ __ Ldr(receiver,
+ FieldMemOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
+ }
+ __ Push(receiver);
+ ParameterCount actual(0);
+ ParameterCount expected(getter);
+ __ InvokeFunction(getter, expected, actual, CALL_FUNCTION,
+ NullCallWrapper());
+ } else {
+ // If we generate a global code snippet for deoptimization only, remember
+ // the place to continue after deoptimization.
+ masm->isolate()->heap()->SetGetterStubDeoptPCOffset(masm->pc_offset());
+ }
+
+ // Restore context register.
+ __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ }
+ __ Ret();
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm())
+
+
+Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
+ Handle<PropertyCell> cell, Handle<Name> name, bool is_configurable) {
+ Label miss;
+ FrontendHeader(receiver(), name, &miss);
+
+ // Get the value from the cell.
+ Register result = StoreIC::ValueRegister();
+ __ Mov(result, Operand(cell));
+ __ Ldr(result, FieldMemOperand(result, Cell::kValueOffset));
+
+ // Check for deleted property if property can actually be deleted.
+ if (is_configurable) {
+ __ JumpIfRoot(result, Heap::kTheHoleValueRootIndex, &miss);
+ }
+
+ Counters* counters = isolate()->counters();
+ __ IncrementCounter(counters->named_load_global_stub(), 1, x1, x3);
+ __ Ret();
+
+ FrontendFooter(name, &miss);
+
+ // Return the generated code.
+ return GetCode(kind(), Code::NORMAL, name);
+}
+
+
+Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types,
+ CodeHandleList* handlers,
+ Handle<Name> name,
+ Code::StubType type,
+ IcCheckType check) {
+ Label miss;
+
+ if (check == PROPERTY &&
+ (kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) {
+ // In case we are compiling an IC for dictionary loads and stores, just
+ // check whether the name is unique.
+ if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) {
+ __ JumpIfNotUniqueName(this->name(), &miss);
+ } else {
+ __ CompareAndBranch(this->name(), Operand(name), ne, &miss);
+ }
+ }
+
+ Label number_case;
+ Label* smi_target = IncludesNumberType(types) ? &number_case : &miss;
+ __ JumpIfSmi(receiver(), smi_target);
+
+  // Polymorphic keyed stores may use the map register.
+ Register map_reg = scratch1();
+ DCHECK(kind() != Code::KEYED_STORE_IC ||
+ map_reg.is(KeyedStoreIC::MapRegister()));
+ __ Ldr(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset));
+ int receiver_count = types->length();
+ int number_of_handled_maps = 0;
+ for (int current = 0; current < receiver_count; ++current) {
+ Handle<HeapType> type = types->at(current);
+ Handle<Map> map = IC::TypeToMap(*type, isolate());
+ if (!map->is_deprecated()) {
+ number_of_handled_maps++;
+ Label try_next;
+ __ Cmp(map_reg, Operand(map));
+ __ B(ne, &try_next);
+ if (type->Is(HeapType::Number())) {
+ DCHECK(!number_case.is_unused());
+ __ Bind(&number_case);
+ }
+ __ Jump(handlers->at(current), RelocInfo::CODE_TARGET);
+ __ Bind(&try_next);
+ }
+ }
+ DCHECK(number_of_handled_maps != 0);
+
+ __ Bind(&miss);
+ TailCallBuiltin(masm(), MissBuiltin(kind()));
+
+ // Return the generated code.
+ InlineCacheState state =
+ (number_of_handled_maps > 1) ? POLYMORPHIC : MONOMORPHIC;
+ return GetCode(kind(), type, name, state);
+}
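+
+// The dispatch compiled above is effectively (a sketch):
+//   map = receiver->map();
+//   for each non-deprecated (map_i, handler_i): if (map == map_i) tail-call
+//     handler_i (with smi receivers routed to the Number handler, if any);
+//   otherwise tail-call the miss builtin.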
+
+
+Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
+ MapHandleList* receiver_maps, CodeHandleList* handler_stubs,
+ MapHandleList* transitioned_maps) {
+ Label miss;
+
+ ASM_LOCATION("PropertyICCompiler::CompileStorePolymorphic");
+
+ __ JumpIfSmi(receiver(), &miss);
+
+ int receiver_count = receiver_maps->length();
+ __ Ldr(scratch1(), FieldMemOperand(receiver(), HeapObject::kMapOffset));
+ for (int i = 0; i < receiver_count; i++) {
+ __ Cmp(scratch1(), Operand(receiver_maps->at(i)));
+
+ Label skip;
+ __ B(&skip, ne);
+ if (!transitioned_maps->at(i).is_null()) {
+ // This argument is used by the handler stub. For example, see
+ // ElementsTransitionGenerator::GenerateMapChangeElementsTransition.
+ __ Mov(transition_map(), Operand(transitioned_maps->at(i)));
+ }
+ __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET);
+ __ Bind(&skip);
+ }
+
+ __ Bind(&miss);
+ TailCallBuiltin(masm(), MissBuiltin(kind()));
+
+ return GetCode(kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC);
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm)
+
+void ElementHandlerCompiler::GenerateLoadDictionaryElement(
+ MacroAssembler* masm) {
+ // The return address is in lr.
+ Label slow, miss;
+
+ Register result = x0;
+ Register key = LoadIC::NameRegister();
+ Register receiver = LoadIC::ReceiverRegister();
+ DCHECK(receiver.is(x1));
+ DCHECK(key.is(x2));
+
+ __ JumpIfNotSmi(key, &miss);
+ __ Ldr(x4, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ LoadFromNumberDictionary(&slow, x4, key, result, x7, x3, x5, x6);
+ __ Ret();
+
+ __ Bind(&slow);
+ __ IncrementCounter(
+ masm->isolate()->counters()->keyed_load_external_array_slow(), 1, x4, x3);
+ TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Slow);
+
+ // Miss case, call the runtime.
+ __ Bind(&miss);
+ TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Miss);
+}
+
+#undef __
+}
+} // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_ARM64
--- /dev/null
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_ARM64
+
+#include "src/codegen.h"
+#include "src/ic/stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+
+#define __ ACCESS_MASM(masm)
+
+
+// Probe the primary or secondary table.
+// If the entry is found in the cache, the generated code jumps to the first
+// instruction of the stub in the cache.
+// If there is a miss, the code falls through.
+//
+// 'receiver', 'name' and 'offset' registers are preserved on miss.
+static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
+ Code::Flags flags, StubCache::Table table,
+ Register receiver, Register name, Register offset,
+ Register scratch, Register scratch2, Register scratch3) {
+ // Some code below relies on the fact that the Entry struct contains
+ // 3 pointers (name, code, map).
+ STATIC_ASSERT(sizeof(StubCache::Entry) == (3 * kPointerSize));
+
+ ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
+ ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
+ ExternalReference map_offset(isolate->stub_cache()->map_reference(table));
+
+ uintptr_t key_off_addr = reinterpret_cast<uintptr_t>(key_offset.address());
+ uintptr_t value_off_addr =
+ reinterpret_cast<uintptr_t>(value_offset.address());
+ uintptr_t map_off_addr = reinterpret_cast<uintptr_t>(map_offset.address());
+
+ Label miss;
+
+ DCHECK(!AreAliased(name, offset, scratch, scratch2, scratch3));
+
+ // Multiply by 3 because there are 3 fields per entry.
+ __ Add(scratch3, offset, Operand(offset, LSL, 1));
+
+ // Calculate the base address of the entry.
+ __ Mov(scratch, key_offset);
+ __ Add(scratch, scratch, Operand(scratch3, LSL, kPointerSizeLog2));
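+  // scratch now holds the address of the entry's key slot, i.e. (sketch)
+  //   key_base + (offset * 3) * kPointerSize.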
+
+ // Check that the key in the entry matches the name.
+ __ Ldr(scratch2, MemOperand(scratch));
+ __ Cmp(name, scratch2);
+ __ B(ne, &miss);
+
+ // Check the map matches.
+ __ Ldr(scratch2, MemOperand(scratch, map_off_addr - key_off_addr));
+ __ Ldr(scratch3, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ Cmp(scratch2, scratch3);
+ __ B(ne, &miss);
+
+ // Get the code entry from the cache.
+ __ Ldr(scratch, MemOperand(scratch, value_off_addr - key_off_addr));
+
+ // Check that the flags match what we're looking for.
+ __ Ldr(scratch2.W(), FieldMemOperand(scratch, Code::kFlagsOffset));
+ __ Bic(scratch2.W(), scratch2.W(), Code::kFlagsNotUsedInLookup);
+ __ Cmp(scratch2.W(), flags);
+ __ B(ne, &miss);
+
+#ifdef DEBUG
+ if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
+ __ B(&miss);
+ } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
+ __ B(&miss);
+ }
+#endif
+
+ // Jump to the first instruction in the code stub.
+ __ Add(scratch, scratch, Code::kHeaderSize - kHeapObjectTag);
+ __ Br(scratch);
+
+ // Miss: fall through.
+ __ Bind(&miss);
+}
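+
+// Each probe therefore amounts to (a sketch in C++ terms):
+//   Entry* entry = &table[hash];
+//   if (entry->key != name) miss;
+//   if (entry->map != receiver->map()) miss;
+//   if ((entry->value->flags() & ~kFlagsNotUsedInLookup) != flags) miss;
+//   goto entry->value->instruction_start();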
+
+
+void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
+ Register receiver, Register name,
+ Register scratch, Register extra, Register extra2,
+ Register extra3) {
+ Isolate* isolate = masm->isolate();
+ Label miss;
+
+  // Make sure the flags do not name a specific type.
+ DCHECK(Code::ExtractTypeFromFlags(flags) == 0);
+
+ // Make sure that there are no register conflicts.
+ DCHECK(!AreAliased(receiver, name, scratch, extra, extra2, extra3));
+
+  // Make sure the extra, extra2 and extra3 registers are valid.
+ DCHECK(!extra.is(no_reg));
+ DCHECK(!extra2.is(no_reg));
+ DCHECK(!extra3.is(no_reg));
+
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1, extra2,
+ extra3);
+
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(receiver, &miss);
+
+ // Compute the hash for primary table.
+ __ Ldr(scratch, FieldMemOperand(name, Name::kHashFieldOffset));
+ __ Ldr(extra, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ Add(scratch, scratch, extra);
+ __ Eor(scratch, scratch, flags);
+ // We shift out the last two bits because they are not part of the hash.
+ __ Ubfx(scratch, scratch, kCacheIndexShift,
+ CountTrailingZeros(kPrimaryTableSize, 64));
+
+ // Probe the primary table.
+ ProbeTable(isolate, masm, flags, kPrimary, receiver, name, scratch, extra,
+ extra2, extra3);
+
+ // Primary miss: Compute hash for secondary table.
+ __ Sub(scratch, scratch, Operand(name, LSR, kCacheIndexShift));
+ __ Add(scratch, scratch, flags >> kCacheIndexShift);
+ __ And(scratch, scratch, kSecondaryTableSize - 1);
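+  // In C++ terms the two probes use roughly (a sketch; the canonical
+  // definitions live in StubCache):
+  //   primary   = (((name->hash() + map) ^ flags) >> kCacheIndexShift) &
+  //               (kPrimaryTableSize - 1)
+  //   secondary = (primary - (name >> kCacheIndexShift) +
+  //                (flags >> kCacheIndexShift)) & (kSecondaryTableSize - 1)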
+
+ // Probe the secondary table.
+ ProbeTable(isolate, masm, flags, kSecondary, receiver, name, scratch, extra,
+ extra2, extra3);
+
+ // Cache miss: Fall-through and let caller handle the miss by
+ // entering the runtime system.
+ __ Bind(&miss);
+ __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1, extra2,
+ extra3);
+}
+}
+} // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_ARM64
--- /dev/null
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_IA32
+
+#include "src/ic/ic-compiler.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
+ MacroAssembler* masm, Label* miss_label, Register receiver,
+ Handle<Name> name, Register scratch0, Register scratch1) {
+ DCHECK(name->IsUniqueName());
+ DCHECK(!receiver.is(scratch0));
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->negative_lookups(), 1);
+ __ IncrementCounter(counters->negative_lookups_miss(), 1);
+
+ __ mov(scratch0, FieldOperand(receiver, HeapObject::kMapOffset));
+
+ const int kInterceptorOrAccessCheckNeededMask =
+ (1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded);
+
+ // Bail out if the receiver has a named interceptor or requires access checks.
+ __ test_b(FieldOperand(scratch0, Map::kBitFieldOffset),
+ kInterceptorOrAccessCheckNeededMask);
+ __ j(not_zero, miss_label);
+
+ // Check that receiver is a JSObject.
+ __ CmpInstanceType(scratch0, FIRST_SPEC_OBJECT_TYPE);
+ __ j(below, miss_label);
+
+ // Load properties array.
+ Register properties = scratch0;
+ __ mov(properties, FieldOperand(receiver, JSObject::kPropertiesOffset));
+
+ // Check that the properties array is a dictionary.
+ __ cmp(FieldOperand(properties, HeapObject::kMapOffset),
+ Immediate(masm->isolate()->factory()->hash_table_map()));
+ __ j(not_equal, miss_label);
+
+ Label done;
+ NameDictionaryLookupStub::GenerateNegativeLookup(masm, miss_label, &done,
+ properties, name, scratch1);
+ __ bind(&done);
+ __ DecrementCounter(counters->negative_lookups_miss(), 1);
+}
+
+
+void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype(
+ MacroAssembler* masm, int index, Register prototype, Label* miss) {
+ // Get the global function with the given index.
+ Handle<JSFunction> function(
+ JSFunction::cast(masm->isolate()->native_context()->get(index)));
+ // Check we're still in the same context.
+ Register scratch = prototype;
+ const int offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX);
+ __ mov(scratch, Operand(esi, offset));
+ __ mov(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));
+ __ cmp(Operand(scratch, Context::SlotOffset(index)), function);
+ __ j(not_equal, miss);
+
+ // Load its initial map. The global functions all have initial maps.
+ __ Move(prototype, Immediate(Handle<Map>(function->initial_map())));
+ // Load the prototype from the initial map.
+ __ mov(prototype, FieldOperand(prototype, Map::kPrototypeOffset));
+}
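+
+// A sketch of the check above: bail out to 'miss' unless
+// native_context->get(index) == *function, i.e. unless the global function
+// this stub was specialized for is still the current one.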
+
+
+void NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(
+ MacroAssembler* masm, Register receiver, Register scratch1,
+ Register scratch2, Label* miss_label) {
+ __ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label);
+ __ mov(eax, scratch1);
+ __ ret(0);
+}
+
+
+static void PushInterceptorArguments(MacroAssembler* masm, Register receiver,
+ Register holder, Register name,
+ Handle<JSObject> holder_obj) {
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0);
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsInfoIndex == 1);
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 2);
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 3);
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsLength == 4);
+ __ push(name);
+ Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
+ DCHECK(!masm->isolate()->heap()->InNewSpace(*interceptor));
+ Register scratch = name;
+ __ mov(scratch, Immediate(interceptor));
+ __ push(scratch);
+ __ push(receiver);
+ __ push(holder);
+}
+
+
+static void CompileCallLoadPropertyWithInterceptor(
+ MacroAssembler* masm, Register receiver, Register holder, Register name,
+ Handle<JSObject> holder_obj, IC::UtilityId id) {
+ PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
+ __ CallExternalReference(ExternalReference(IC_Utility(id), masm->isolate()),
+ NamedLoadHandlerCompiler::kInterceptorArgsLength);
+}
+
+
+// Generate a call to an API function.
+// This function uses push() to generate smaller, faster code than
+// the version above. It is an optimization that will be removed
+// when api call ICs are generated in hydrogen.
+void PropertyHandlerCompiler::GenerateFastApiCall(
+ MacroAssembler* masm, const CallOptimization& optimization,
+ Handle<Map> receiver_map, Register receiver, Register scratch_in,
+ bool is_store, int argc, Register* values) {
+ // Copy return value.
+ __ pop(scratch_in);
+ // receiver
+ __ push(receiver);
+ // Write the arguments to stack frame.
+ for (int i = 0; i < argc; i++) {
+ Register arg = values[argc - 1 - i];
+ DCHECK(!receiver.is(arg));
+ DCHECK(!scratch_in.is(arg));
+ __ push(arg);
+ }
+ __ push(scratch_in);
+ // Stack now matches JSFunction abi.
+ DCHECK(optimization.is_simple_api_call());
+
+ // Abi for CallApiFunctionStub.
+ Register callee = eax;
+ Register call_data = ebx;
+ Register holder = ecx;
+ Register api_function_address = edx;
+ Register scratch = edi; // scratch_in is no longer valid.
+
+ // Put holder in place.
+ CallOptimization::HolderLookup holder_lookup;
+ Handle<JSObject> api_holder =
+ optimization.LookupHolderOfExpectedType(receiver_map, &holder_lookup);
+ switch (holder_lookup) {
+ case CallOptimization::kHolderIsReceiver:
+ __ Move(holder, receiver);
+ break;
+ case CallOptimization::kHolderFound:
+ __ LoadHeapObject(holder, api_holder);
+ break;
+ case CallOptimization::kHolderNotFound:
+ UNREACHABLE();
+ break;
+ }
+
+ Isolate* isolate = masm->isolate();
+ Handle<JSFunction> function = optimization.constant_function();
+ Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
+ Handle<Object> call_data_obj(api_call_info->data(), isolate);
+
+ // Put callee in place.
+ __ LoadHeapObject(callee, function);
+
+ bool call_data_undefined = false;
+ // Put call_data in place.
+ if (isolate->heap()->InNewSpace(*call_data_obj)) {
+ __ mov(scratch, api_call_info);
+ __ mov(call_data, FieldOperand(scratch, CallHandlerInfo::kDataOffset));
+ } else if (call_data_obj->IsUndefined()) {
+ call_data_undefined = true;
+ __ mov(call_data, Immediate(isolate->factory()->undefined_value()));
+ } else {
+ __ mov(call_data, call_data_obj);
+ }
+
+ // Put api_function_address in place.
+ Address function_address = v8::ToCData<Address>(api_call_info->callback());
+ __ mov(api_function_address, Immediate(function_address));
+
+ // Jump to stub.
+ CallApiFunctionStub stub(isolate, is_store, call_data_undefined, argc);
+ __ TailCallStub(&stub);
+}
+
+
+// Generate code to check that a global property cell is empty. Create
+// the property cell at compilation time if no cell exists for the
+// property.
+void PropertyHandlerCompiler::GenerateCheckPropertyCell(
+ MacroAssembler* masm, Handle<JSGlobalObject> global, Handle<Name> name,
+ Register scratch, Label* miss) {
+ Handle<PropertyCell> cell = JSGlobalObject::EnsurePropertyCell(global, name);
+ DCHECK(cell->value()->IsTheHole());
+ Handle<Oddball> the_hole = masm->isolate()->factory()->the_hole_value();
+ if (masm->serializer_enabled()) {
+ __ mov(scratch, Immediate(cell));
+ __ cmp(FieldOperand(scratch, PropertyCell::kValueOffset),
+ Immediate(the_hole));
+ } else {
+ __ cmp(Operand::ForCell(cell), Immediate(the_hole));
+ }
+ __ j(not_equal, miss);
+}
+
+
+void PropertyAccessCompiler::GenerateTailCall(MacroAssembler* masm,
+ Handle<Code> code) {
+ __ jmp(code, RelocInfo::CODE_TARGET);
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm())
+
+
+void NamedStoreHandlerCompiler::GenerateRestoreName(Label* label,
+ Handle<Name> name) {
+ if (!label->is_unused()) {
+ __ bind(label);
+ __ mov(this->name(), Immediate(name));
+ }
+}
+
+
+// receiver_reg is preserved on jumps to miss_label, but may be destroyed if
+// the store is successful.
+void NamedStoreHandlerCompiler::GenerateStoreTransition(
+ Handle<Map> transition, Handle<Name> name, Register receiver_reg,
+ Register storage_reg, Register value_reg, Register scratch1,
+ Register scratch2, Register unused, Label* miss_label, Label* slow) {
+ int descriptor = transition->LastAdded();
+ DescriptorArray* descriptors = transition->instance_descriptors();
+ PropertyDetails details = descriptors->GetDetails(descriptor);
+ Representation representation = details.representation();
+ DCHECK(!representation.IsNone());
+
+ if (details.type() == CONSTANT) {
+ Handle<Object> constant(descriptors->GetValue(descriptor), isolate());
+ __ CmpObject(value_reg, constant);
+ __ j(not_equal, miss_label);
+ } else if (representation.IsSmi()) {
+ __ JumpIfNotSmi(value_reg, miss_label);
+ } else if (representation.IsHeapObject()) {
+ __ JumpIfSmi(value_reg, miss_label);
+ HeapType* field_type = descriptors->GetFieldType(descriptor);
+ HeapType::Iterator<Map> it = field_type->Classes();
+ if (!it.Done()) {
+ Label do_store;
+ while (true) {
+ __ CompareMap(value_reg, it.Current());
+ it.Advance();
+ if (it.Done()) {
+ __ j(not_equal, miss_label);
+ break;
+ }
+ __ j(equal, &do_store, Label::kNear);
+ }
+ __ bind(&do_store);
+ }
+ } else if (representation.IsDouble()) {
+ Label do_store, heap_number;
+ __ AllocateHeapNumber(storage_reg, scratch1, scratch2, slow, MUTABLE);
+
+ __ JumpIfNotSmi(value_reg, &heap_number);
+ __ SmiUntag(value_reg);
+ __ Cvtsi2sd(xmm0, value_reg);
+ __ SmiTag(value_reg);
+ __ jmp(&do_store);
+
+ __ bind(&heap_number);
+ __ CheckMap(value_reg, isolate()->factory()->heap_number_map(), miss_label,
+ DONT_DO_SMI_CHECK);
+ __ movsd(xmm0, FieldOperand(value_reg, HeapNumber::kValueOffset));
+
+ __ bind(&do_store);
+ __ movsd(FieldOperand(storage_reg, HeapNumber::kValueOffset), xmm0);
+ }
+
+ // Stub never generated for objects that require access checks.
+ DCHECK(!transition->is_access_check_needed());
+
+ // Perform map transition for the receiver if necessary.
+ if (details.type() == FIELD &&
+ Map::cast(transition->GetBackPointer())->unused_property_fields() == 0) {
+ // The properties must be extended before we can store the value.
+ // We jump to a runtime call that extends the properties array.
+ __ pop(scratch1); // Return address.
+ __ push(receiver_reg);
+ __ push(Immediate(transition));
+ __ push(value_reg);
+ __ push(scratch1);
+ __ TailCallExternalReference(
+ ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage),
+ isolate()),
+ 3, 1);
+ return;
+ }
+
+ // Update the map of the object.
+ __ mov(scratch1, Immediate(transition));
+ __ mov(FieldOperand(receiver_reg, HeapObject::kMapOffset), scratch1);
+
+ // Update the write barrier for the map field.
+ __ RecordWriteField(receiver_reg, HeapObject::kMapOffset, scratch1, scratch2,
+ kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+
+ if (details.type() == CONSTANT) {
+ DCHECK(value_reg.is(eax));
+ __ ret(0);
+ return;
+ }
+
+ int index = transition->instance_descriptors()->GetFieldIndex(
+ transition->LastAdded());
+
+ // Adjust for the number of properties stored in the object. Even in the
+ // face of a transition we can use the old map here because the size of the
+ // object and the number of in-object properties is not going to change.
+ index -= transition->inobject_properties();
+
+ SmiCheck smi_check =
+ representation.IsTagged() ? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
+ // TODO(verwaest): Share this code as a code stub.
+ if (index < 0) {
+ // Set the property straight into the object.
+ int offset = transition->instance_size() + (index * kPointerSize);
+ if (representation.IsDouble()) {
+ __ mov(FieldOperand(receiver_reg, offset), storage_reg);
+ } else {
+ __ mov(FieldOperand(receiver_reg, offset), value_reg);
+ }
+
+ if (!representation.IsSmi()) {
+ // Update the write barrier for the array address.
+ if (!representation.IsDouble()) {
+ __ mov(storage_reg, value_reg);
+ }
+ __ RecordWriteField(receiver_reg, offset, storage_reg, scratch1,
+ kDontSaveFPRegs, EMIT_REMEMBERED_SET, smi_check);
+ }
+ } else {
+ // Write to the properties array.
+ int offset = index * kPointerSize + FixedArray::kHeaderSize;
+ // Get the properties array (optimistically).
+ __ mov(scratch1, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
+ if (representation.IsDouble()) {
+ __ mov(FieldOperand(scratch1, offset), storage_reg);
+ } else {
+ __ mov(FieldOperand(scratch1, offset), value_reg);
+ }
+
+ if (!representation.IsSmi()) {
+ // Update the write barrier for the array address.
+ if (!representation.IsDouble()) {
+ __ mov(storage_reg, value_reg);
+ }
+ __ RecordWriteField(scratch1, offset, storage_reg, receiver_reg,
+ kDontSaveFPRegs, EMIT_REMEMBERED_SET, smi_check);
+ }
+ }
+
+ // Return the value (register eax).
+ DCHECK(value_reg.is(eax));
+ __ ret(0);
+}
+
+
+void NamedStoreHandlerCompiler::GenerateStoreField(LookupIterator* lookup,
+ Register value_reg,
+ Label* miss_label) {
+ DCHECK(lookup->representation().IsHeapObject());
+ __ JumpIfSmi(value_reg, miss_label);
+ HeapType::Iterator<Map> it = lookup->GetFieldType()->Classes();
+ Label do_store;
+ while (true) {
+ __ CompareMap(value_reg, it.Current());
+ it.Advance();
+ if (it.Done()) {
+ __ j(not_equal, miss_label);
+ break;
+ }
+ __ j(equal, &do_store, Label::kNear);
+ }
+ __ bind(&do_store);
+
+ StoreFieldStub stub(isolate(), lookup->GetFieldIndex(),
+ lookup->representation());
+ GenerateTailCall(masm(), stub.GetCode());
+}
+
+
+Register PropertyHandlerCompiler::CheckPrototypes(
+ Register object_reg, Register holder_reg, Register scratch1,
+ Register scratch2, Handle<Name> name, Label* miss,
+ PrototypeCheckType check) {
+ Handle<Map> receiver_map(IC::TypeToMap(*type(), isolate()));
+
+ // Make sure there's no overlap between holder and object registers.
+ DCHECK(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
+ DCHECK(!scratch2.is(object_reg) && !scratch2.is(holder_reg) &&
+ !scratch2.is(scratch1));
+
+ // Keep track of the current object in register reg.
+ Register reg = object_reg;
+ int depth = 0;
+
+ Handle<JSObject> current = Handle<JSObject>::null();
+ if (type()->IsConstant())
+ current = Handle<JSObject>::cast(type()->AsConstant()->Value());
+ Handle<JSObject> prototype = Handle<JSObject>::null();
+ Handle<Map> current_map = receiver_map;
+ Handle<Map> holder_map(holder()->map());
+ // Traverse the prototype chain and check the maps in the prototype chain for
+ // fast and global objects or do negative lookup for normal objects.
+ while (!current_map.is_identical_to(holder_map)) {
+ ++depth;
+
+ // Only global objects and objects that do not require access
+ // checks are allowed in stubs.
+ DCHECK(current_map->IsJSGlobalProxyMap() ||
+ !current_map->is_access_check_needed());
+
+ prototype = handle(JSObject::cast(current_map->prototype()));
+ if (current_map->is_dictionary_map() &&
+ !current_map->IsJSGlobalObjectMap()) {
+ DCHECK(!current_map->IsJSGlobalProxyMap()); // Proxy maps are fast.
+ if (!name->IsUniqueName()) {
+ DCHECK(name->IsString());
+ name = factory()->InternalizeString(Handle<String>::cast(name));
+ }
+ DCHECK(current.is_null() ||
+ current->property_dictionary()->FindEntry(name) ==
+ NameDictionary::kNotFound);
+
+ GenerateDictionaryNegativeLookup(masm(), miss, reg, name, scratch1,
+ scratch2);
+
+ __ mov(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
+ reg = holder_reg; // From now on the object will be in holder_reg.
+ __ mov(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
+ } else {
+ bool in_new_space = heap()->InNewSpace(*prototype);
+ // Two possible reasons for loading the prototype from the map:
+ // (1) Can't store references to new space in code.
+ // (2) Handler is shared for all receivers with the same prototype
+ // map (but not necessarily the same prototype instance).
+ bool load_prototype_from_map = in_new_space || depth == 1;
+ if (depth != 1 || check == CHECK_ALL_MAPS) {
+ __ CheckMap(reg, current_map, miss, DONT_DO_SMI_CHECK);
+ }
+
+ // Check access rights to the global object. This has to happen after
+ // the map check so that we know that the object is actually a global
+ // object.
+ // This allows us to install generated handlers for accesses to the
+ // global proxy (as opposed to using slow ICs). See corresponding code
+ // in LookupForRead().
+ if (current_map->IsJSGlobalProxyMap()) {
+ __ CheckAccessGlobalProxy(reg, scratch1, scratch2, miss);
+ } else if (current_map->IsJSGlobalObjectMap()) {
+ GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
+ name, scratch2, miss);
+ }
+
+ if (load_prototype_from_map) {
+ // Save the map in scratch1 for later.
+ __ mov(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
+ }
+
+ reg = holder_reg; // From now on the object will be in holder_reg.
+
+ if (load_prototype_from_map) {
+ __ mov(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
+ } else {
+ __ mov(reg, prototype);
+ }
+ }
+
+ // Go to the next object in the prototype chain.
+ current = prototype;
+ current_map = handle(current->map());
+ }
+
+ // Log the check depth.
+ LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
+
+ if (depth != 0 || check == CHECK_ALL_MAPS) {
+ // Check the holder map.
+ __ CheckMap(reg, current_map, miss, DONT_DO_SMI_CHECK);
+ }
+
+ // Perform security check for access to the global object.
+ DCHECK(current_map->IsJSGlobalProxyMap() ||
+ !current_map->is_access_check_needed());
+ if (current_map->IsJSGlobalProxyMap()) {
+ __ CheckAccessGlobalProxy(reg, scratch1, scratch2, miss);
+ }
+
+ // Return the register containing the holder.
+ return reg;
+}
+
+
+void NamedLoadHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
+ if (!miss->is_unused()) {
+ Label success;
+ __ jmp(&success);
+ __ bind(miss);
+ TailCallBuiltin(masm(), MissBuiltin(kind()));
+ __ bind(&success);
+ }
+}
+
+
+void NamedStoreHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
+ if (!miss->is_unused()) {
+ Label success;
+ __ jmp(&success);
+ GenerateRestoreName(miss, name);
+ TailCallBuiltin(masm(), MissBuiltin(kind()));
+ __ bind(&success);
+ }
+}
+
+
+void NamedLoadHandlerCompiler::GenerateLoadCallback(
+ Register reg, Handle<ExecutableAccessorInfo> callback) {
+ // Insert additional parameters into the stack frame above return address.
+ DCHECK(!scratch3().is(reg));
+ __ pop(scratch3()); // Get return address to place it below.
+
+ STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 0);
+ STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 1);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 2);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 3);
+ STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 4);
+ STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 5);
+ __ push(receiver()); // receiver
+ // Push data from ExecutableAccessorInfo.
+ if (isolate()->heap()->InNewSpace(callback->data())) {
+ DCHECK(!scratch2().is(reg));
+ __ mov(scratch2(), Immediate(callback));
+ __ push(FieldOperand(scratch2(), ExecutableAccessorInfo::kDataOffset));
+ } else {
+ __ push(Immediate(Handle<Object>(callback->data(), isolate())));
+ }
+ __ push(Immediate(isolate()->factory()->undefined_value())); // ReturnValue
+ // ReturnValue default value
+ __ push(Immediate(isolate()->factory()->undefined_value()));
+ __ push(Immediate(reinterpret_cast<int>(isolate())));
+ __ push(reg); // holder
+
+ // Save a pointer to where we pushed the arguments. This will be
+ // passed as the const PropertyAccessorInfo& to the C++ callback.
+ __ push(esp);
+
+ __ push(name()); // name
+
+ __ push(scratch3()); // Restore return address.
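+
+  // Stack at this point (a sketch; compare the PropertyCallbackArguments
+  // indices asserted above):
+  //   esp[0]  return address
+  //   esp[4]  name
+  //   esp[8]  pointer to esp[12] (the arguments pointer saved above)
+  //   esp[12] holder
+  //   esp[16] isolate
+  //   esp[20] ReturnValue default value
+  //   esp[24] ReturnValue
+  //   esp[28] callback data
+  //   esp[32] receiver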
+
+ // Abi for CallApiGetter
+ Register getter_address = edx;
+ Address function_address = v8::ToCData<Address>(callback->getter());
+ __ mov(getter_address, Immediate(function_address));
+
+ CallApiGetterStub stub(isolate());
+ __ TailCallStub(&stub);
+}
+
+
+void NamedLoadHandlerCompiler::GenerateLoadConstant(Handle<Object> value) {
+ // Return the constant value.
+ __ LoadObject(eax, value);
+ __ ret(0);
+}
+
+
+void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
+ LookupIterator* it, Register holder_reg) {
+ DCHECK(holder()->HasNamedInterceptor());
+ DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined());
+
+ // Compile the interceptor call, followed by inline code to load the
+ // property from further up the prototype chain if the call fails.
+ // Check that the maps haven't changed.
+ DCHECK(holder_reg.is(receiver()) || holder_reg.is(scratch1()));
+
+  // Preserve the receiver register explicitly whenever it differs from the
+  // holder and is needed in case the interceptor returns without a result.
+  // The ACCESSOR case needs the receiver passed into C++ code; the FIELD
+  // case might cause a miss during the prototype check.
+ bool must_perform_prototype_check =
+ !holder().is_identical_to(it->GetHolder<JSObject>());
+ bool must_preserve_receiver_reg =
+ !receiver().is(holder_reg) &&
+ (it->property_kind() == LookupIterator::ACCESSOR ||
+ must_perform_prototype_check);
+
+ // Save necessary data before invoking an interceptor.
+ // Requires a frame to make GC aware of pushed pointers.
+ {
+ FrameScope frame_scope(masm(), StackFrame::INTERNAL);
+
+ if (must_preserve_receiver_reg) {
+ __ push(receiver());
+ }
+ __ push(holder_reg);
+ __ push(this->name());
+
+    // Invoke the interceptor. Note: map checks from the receiver to the
+    // interceptor's holder have already been compiled (see the caller of
+    // this method).
+ CompileCallLoadPropertyWithInterceptor(
+ masm(), receiver(), holder_reg, this->name(), holder(),
+ IC::kLoadPropertyWithInterceptorOnly);
+
+ // Check if interceptor provided a value for property. If it's
+ // the case, return immediately.
+ Label interceptor_failed;
+ __ cmp(eax, factory()->no_interceptor_result_sentinel());
+ __ j(equal, &interceptor_failed);
+ frame_scope.GenerateLeaveFrame();
+ __ ret(0);
+
+ // Clobber registers when generating debug-code to provoke errors.
+ __ bind(&interceptor_failed);
+ if (FLAG_debug_code) {
+ __ mov(receiver(), Immediate(BitCast<int32_t>(kZapValue)));
+ __ mov(holder_reg, Immediate(BitCast<int32_t>(kZapValue)));
+ __ mov(this->name(), Immediate(BitCast<int32_t>(kZapValue)));
+ }
+
+ __ pop(this->name());
+ __ pop(holder_reg);
+ if (must_preserve_receiver_reg) {
+ __ pop(receiver());
+ }
+
+ // Leave the internal frame.
+ }
+
+ GenerateLoadPostInterceptor(it, holder_reg);
+}
+
+
+void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
+ DCHECK(holder()->HasNamedInterceptor());
+ DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined());
+ // Call the runtime system to load the interceptor.
+ __ pop(scratch2()); // save old return address
+ PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(),
+ holder());
+ __ push(scratch2()); // restore old return address
+
+ ExternalReference ref = ExternalReference(
+ IC_Utility(IC::kLoadPropertyWithInterceptor), isolate());
+ __ TailCallExternalReference(
+ ref, NamedLoadHandlerCompiler::kInterceptorArgsLength, 1);
+}
+
+
+Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
+ Handle<JSObject> object, Handle<Name> name,
+ Handle<ExecutableAccessorInfo> callback) {
+ Register holder_reg = Frontend(receiver(), name);
+
+ __ pop(scratch1()); // remove the return address
+ __ push(receiver());
+ __ push(holder_reg);
+ __ Push(callback);
+ __ Push(name);
+ __ push(value());
+ __ push(scratch1()); // restore return address
+
+ // Do tail-call to the runtime system.
+ ExternalReference store_callback_property =
+ ExternalReference(IC_Utility(IC::kStoreCallbackProperty), isolate());
+ __ TailCallExternalReference(store_callback_property, 5, 1);
+
+ // Return the generated code.
+ return GetCode(kind(), Code::FAST, name);
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm)
+
+
+void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
+ MacroAssembler* masm, Handle<HeapType> type, Register receiver,
+ Handle<JSFunction> setter) {
+ // ----------- S t a t e -------------
+ // -- esp[0] : return address
+ // -----------------------------------
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Save value register, so we can restore it later.
+ __ push(value());
+
+ if (!setter.is_null()) {
+ // Call the JavaScript setter with receiver and value on the stack.
+ if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
+ // Swap in the global receiver.
+ __ mov(receiver,
+ FieldOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
+ }
+ __ push(receiver);
+ __ push(value());
+ ParameterCount actual(1);
+ ParameterCount expected(setter);
+ __ InvokeFunction(setter, expected, actual, CALL_FUNCTION,
+ NullCallWrapper());
+ } else {
+ // If we generate a global code snippet for deoptimization only, remember
+ // the place to continue after deoptimization.
+ masm->isolate()->heap()->SetSetterStubDeoptPCOffset(masm->pc_offset());
+ }
+
+ // We have to return the passed value, not the return value of the setter.
+ __ pop(eax);
+
+ // Restore context register.
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ }
+ __ ret(0);
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm())
+
+
+Handle<Code> NamedStoreHandlerCompiler::CompileStoreInterceptor(
+ Handle<Name> name) {
+ __ pop(scratch1()); // remove the return address
+ __ push(receiver());
+ __ push(this->name());
+ __ push(value());
+ __ push(scratch1()); // restore return address
+
+ // Do tail-call to the runtime system.
+ ExternalReference store_ic_property = ExternalReference(
+ IC_Utility(IC::kStorePropertyWithInterceptor), isolate());
+ __ TailCallExternalReference(store_ic_property, 3, 1);
+
+ // Return the generated code.
+ return GetCode(kind(), Code::FAST, name);
+}
+
+
+Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
+ MapHandleList* receiver_maps, CodeHandleList* handler_stubs,
+ MapHandleList* transitioned_maps) {
+ Label miss;
+ __ JumpIfSmi(receiver(), &miss, Label::kNear);
+ __ mov(scratch1(), FieldOperand(receiver(), HeapObject::kMapOffset));
+ for (int i = 0; i < receiver_maps->length(); ++i) {
+ __ cmp(scratch1(), receiver_maps->at(i));
+ if (transitioned_maps->at(i).is_null()) {
+ __ j(equal, handler_stubs->at(i));
+ } else {
+ Label next_map;
+ __ j(not_equal, &next_map, Label::kNear);
+ __ mov(transition_map(), Immediate(transitioned_maps->at(i)));
+ __ jmp(handler_stubs->at(i), RelocInfo::CODE_TARGET);
+ __ bind(&next_map);
+ }
+ }
+ __ bind(&miss);
+ TailCallBuiltin(masm(), MissBuiltin(kind()));
+
+ // Return the generated code.
+ return GetCode(kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC);
+}
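+// Conceptually, the dispatch emitted above behaves like this sketch
+// (illustrative C++ only, not the generated code):
+//   Map* map = receiver->map();
+//   for (int i = 0; i < receiver_maps->length(); i++) {
+//     if (map == *receiver_maps->at(i)) {
+//       if (!transitioned_maps->at(i).is_null())
+//         transition_map = *transitioned_maps->at(i);  // elements transition
+//       goto handler_stubs[i];
+//     }
+//   }
+//   goto miss;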
+
+
+Register* PropertyAccessCompiler::load_calling_convention() {
+ // receiver, name, scratch1, scratch2, scratch3, scratch4.
+ Register receiver = LoadIC::ReceiverRegister();
+ Register name = LoadIC::NameRegister();
+ static Register registers[] = {receiver, name, ebx, eax, edi, no_reg};
+ return registers;
+}
+
+
+Register* PropertyAccessCompiler::store_calling_convention() {
+ // receiver, name, scratch1, scratch2, scratch3.
+ Register receiver = StoreIC::ReceiverRegister();
+ Register name = StoreIC::NameRegister();
+ DCHECK(ebx.is(KeyedStoreIC::MapRegister()));
+ static Register registers[] = {receiver, name, ebx, edi, no_reg};
+ return registers;
+}
+
+
+Register NamedStoreHandlerCompiler::value() { return StoreIC::ValueRegister(); }
+
+
+#undef __
+#define __ ACCESS_MASM(masm)
+
+
+void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
+ MacroAssembler* masm, Handle<HeapType> type, Register receiver,
+ Handle<JSFunction> getter) {
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ if (!getter.is_null()) {
+ // Call the JavaScript getter with the receiver on the stack.
+ if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
+ // Swap in the global receiver.
+ __ mov(receiver,
+ FieldOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
+ }
+ __ push(receiver);
+ ParameterCount actual(0);
+ ParameterCount expected(getter);
+ __ InvokeFunction(getter, expected, actual, CALL_FUNCTION,
+ NullCallWrapper());
+ } else {
+ // If we generate a global code snippet for deoptimization only, remember
+ // the place to continue after deoptimization.
+ masm->isolate()->heap()->SetGetterStubDeoptPCOffset(masm->pc_offset());
+ }
+
+ // Restore context register.
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ }
+ __ ret(0);
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm())
+
+
+Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
+ Handle<PropertyCell> cell, Handle<Name> name, bool is_configurable) {
+ Label miss;
+
+ FrontendHeader(receiver(), name, &miss);
+ // Get the value from the cell.
+ Register result = StoreIC::ValueRegister();
+ if (masm()->serializer_enabled()) {
+ __ mov(result, Immediate(cell));
+ __ mov(result, FieldOperand(result, PropertyCell::kValueOffset));
+ } else {
+ __ mov(result, Operand::ForCell(cell));
+ }
+
+ // Check for deleted property if property can actually be deleted.
+ if (is_configurable) {
+ __ cmp(result, factory()->the_hole_value());
+ __ j(equal, &miss);
+ } else if (FLAG_debug_code) {
+ __ cmp(result, factory()->the_hole_value());
+ __ Check(not_equal, kDontDeleteCellsCannotContainTheHole);
+ }
+
+ Counters* counters = isolate()->counters();
+ __ IncrementCounter(counters->named_load_global_stub(), 1);
+ // The code above already loads the result into the return register.
+ __ ret(0);
+
+ FrontendFooter(name, &miss);
+
+ // Return the generated code.
+ return GetCode(kind(), Code::NORMAL, name);
+}
+
+
+Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types,
+ CodeHandleList* handlers,
+ Handle<Name> name,
+ Code::StubType type,
+ IcCheckType check) {
+ Label miss;
+
+ if (check == PROPERTY &&
+ (kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) {
+ // In case we are compiling an IC for dictionary loads and stores, just
+ // check whether the name is unique.
+ if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) {
+ __ JumpIfNotUniqueName(this->name(), &miss);
+ } else {
+ __ cmp(this->name(), Immediate(name));
+ __ j(not_equal, &miss);
+ }
+ }
+
+ Label number_case;
+ Label* smi_target = IncludesNumberType(types) ? &number_case : &miss;
+ __ JumpIfSmi(receiver(), smi_target);
+
+  // Polymorphic keyed stores may use the map register.
+ Register map_reg = scratch1();
+ DCHECK(kind() != Code::KEYED_STORE_IC ||
+ map_reg.is(KeyedStoreIC::MapRegister()));
+ __ mov(map_reg, FieldOperand(receiver(), HeapObject::kMapOffset));
+ int receiver_count = types->length();
+ int number_of_handled_maps = 0;
+ for (int current = 0; current < receiver_count; ++current) {
+ Handle<HeapType> type = types->at(current);
+ Handle<Map> map = IC::TypeToMap(*type, isolate());
+ if (!map->is_deprecated()) {
+ number_of_handled_maps++;
+ __ cmp(map_reg, map);
+ if (type->Is(HeapType::Number())) {
+ DCHECK(!number_case.is_unused());
+ __ bind(&number_case);
+ }
+ __ j(equal, handlers->at(current));
+ }
+ }
+ DCHECK(number_of_handled_maps != 0);
+
+ __ bind(&miss);
+ TailCallBuiltin(masm(), MissBuiltin(kind()));
+
+ // Return the generated code.
+ InlineCacheState state =
+ number_of_handled_maps > 1 ? POLYMORPHIC : MONOMORPHIC;
+ return GetCode(kind(), type, name, state);
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm)
+
+
+void ElementHandlerCompiler::GenerateLoadDictionaryElement(
+ MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- ecx : key
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+ DCHECK(edx.is(LoadIC::ReceiverRegister()));
+ DCHECK(ecx.is(LoadIC::NameRegister()));
+ Label slow, miss;
+
+  // This stub is meant to be tail-jumped to; the receiver must already
+  // have been verified by the caller not to be a smi.
+ __ JumpIfNotSmi(ecx, &miss);
+ __ mov(ebx, ecx);
+ __ SmiUntag(ebx);
+ __ mov(eax, FieldOperand(edx, JSObject::kElementsOffset));
+
+ // Push receiver on the stack to free up a register for the dictionary
+ // probing.
+ __ push(edx);
+ __ LoadFromNumberDictionary(&slow, eax, ecx, ebx, edx, edi, eax);
+ // Pop receiver before returning.
+ __ pop(edx);
+ __ ret(0);
+
+ __ bind(&slow);
+ __ pop(edx);
+
+ // ----------- S t a t e -------------
+ // -- ecx : key
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+ TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Slow);
+
+ __ bind(&miss);
+ // ----------- S t a t e -------------
+ // -- ecx : key
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+ TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Miss);
+}
+
+
+#undef __
+}
+} // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_IA32
--- /dev/null
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_IA32
+
+#include "src/codegen.h"
+#include "src/ic/ic.h"
+#include "src/ic/stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+// ----------------------------------------------------------------------------
+// Static IC stub generators.
+//
+
+#define __ ACCESS_MASM(masm)
+
+
+static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type,
+ Label* global_object) {
+ // Register usage:
+ // type: holds the receiver instance type on entry.
+ __ cmp(type, JS_GLOBAL_OBJECT_TYPE);
+ __ j(equal, global_object);
+ __ cmp(type, JS_BUILTINS_OBJECT_TYPE);
+ __ j(equal, global_object);
+ __ cmp(type, JS_GLOBAL_PROXY_TYPE);
+ __ j(equal, global_object);
+}
+
+
+// Helper function used to load a property from a dictionary backing
+// storage. This function may fail to load a property even though it is
+// in the dictionary, so code at miss_label must always call a backup
+// property load that is complete. This function is safe to call if
+// name is not internalized, and will jump to the miss_label in that
+// case. The generated code assumes that the receiver has slow
+// properties, is not a global object and does not have interceptors.
+static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss_label,
+ Register elements, Register name,
+ Register r0, Register r1, Register result) {
+ // Register use:
+ //
+ // elements - holds the property dictionary on entry and is unchanged.
+ //
+ // name - holds the name of the property on entry and is unchanged.
+ //
+ // Scratch registers:
+ //
+ // r0 - used for the index into the property dictionary
+ //
+ // r1 - used to hold the capacity of the property dictionary.
+ //
+ // result - holds the result on exit.
+
+ Label done;
+
+ // Probe the dictionary.
+ NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss_label, &done,
+ elements, name, r0, r1);
+
+ // If probing finds an entry in the dictionary, r0 contains the
+ // index into the dictionary. Check that the value is a normal
+ // property.
+ __ bind(&done);
+ const int kElementsStartOffset =
+ NameDictionary::kHeaderSize +
+ NameDictionary::kElementsStartIndex * kPointerSize;
+ const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
+ __ test(Operand(elements, r0, times_4, kDetailsOffset - kHeapObjectTag),
+ Immediate(PropertyDetails::TypeField::kMask << kSmiTagSize));
+ __ j(not_zero, miss_label);
+
+ // Get the value at the masked, scaled index.
+ const int kValueOffset = kElementsStartOffset + kPointerSize;
+ __ mov(result, Operand(elements, r0, times_4, kValueOffset - kHeapObjectTag));
+}
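+// Layout assumed by the offsets above: each NameDictionary entry occupies
+// three consecutive pointers starting at kElementsStartOffset,
+//   [key, value, details]
+// so the value lives one pointer past the entry start and the details word
+// two pointers past it. The details word is a smi, which is why the type
+// field test above shifts its mask left by kSmiTagSize.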
+
+
+// Helper function used to store a property to a dictionary backing
+// storage. This function may fail to store a property even though it
+// is in the dictionary, so code at miss_label must always call a
+// backup property store that is complete. This function is safe to
+// call if name is not internalized, and will jump to the miss_label in
+// that case. The generated code assumes that the receiver has slow
+// properties, is not a global object and does not have interceptors.
+static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss_label,
+ Register elements, Register name,
+ Register value, Register r0, Register r1) {
+ // Register use:
+ //
+ // elements - holds the property dictionary on entry and is clobbered.
+ //
+ // name - holds the name of the property on entry and is unchanged.
+ //
+ // value - holds the value to store and is unchanged.
+ //
+ // r0 - used for index into the property dictionary and is clobbered.
+ //
+ // r1 - used to hold the capacity of the property dictionary and is clobbered.
+ Label done;
+
+ // Probe the dictionary.
+ NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss_label, &done,
+ elements, name, r0, r1);
+
+ // If probing finds an entry in the dictionary, r0 contains the
+ // index into the dictionary. Check that the value is a normal
+ // property that is not read only.
+ __ bind(&done);
+ const int kElementsStartOffset =
+ NameDictionary::kHeaderSize +
+ NameDictionary::kElementsStartIndex * kPointerSize;
+ const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
+ const int kTypeAndReadOnlyMask =
+ (PropertyDetails::TypeField::kMask |
+ PropertyDetails::AttributesField::encode(READ_ONLY))
+ << kSmiTagSize;
+ __ test(Operand(elements, r0, times_4, kDetailsOffset - kHeapObjectTag),
+ Immediate(kTypeAndReadOnlyMask));
+ __ j(not_zero, miss_label);
+
+ // Store the value at the masked, scaled index.
+ const int kValueOffset = kElementsStartOffset + kPointerSize;
+ __ lea(r0, Operand(elements, r0, times_4, kValueOffset - kHeapObjectTag));
+ __ mov(Operand(r0, 0), value);
+
+ // Update write barrier. Make sure not to clobber the value.
+ __ mov(r1, value);
+ __ RecordWrite(elements, r0, r1, kDontSaveFPRegs);
+}
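+// The details test above accepts only plain writable data properties: the
+// entry's PropertyDetails smi must have all type bits zero (a NORMAL
+// property in this encoding) and the READ_ONLY attribute bit clear. Anding
+// with kTypeAndReadOnlyMask and branching on not_zero rejects everything
+// else -- e.g. callbacks and read-only fields -- which must take the miss
+// path.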
+
+
+// Checks the receiver for special cases (value type, slow case bits).
+// Falls through for regular JS object.
+static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
+ Register receiver, Register map,
+ int interceptor_bit, Label* slow) {
+ // Register use:
+ // receiver - holds the receiver and is unchanged.
+ // Scratch registers:
+ // map - used to hold the map of the receiver.
+
+ // Check that the object isn't a smi.
+ __ JumpIfSmi(receiver, slow);
+
+ // Get the map of the receiver.
+ __ mov(map, FieldOperand(receiver, HeapObject::kMapOffset));
+
+ // Check bit field.
+ __ test_b(FieldOperand(map, Map::kBitFieldOffset),
+ (1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit));
+ __ j(not_zero, slow);
+ // Check that the object is some kind of JS object EXCEPT JS Value type.
+ // In the case that the object is a value-wrapper object,
+ // we enter the runtime system to make sure that indexing
+ // into string objects works as intended.
+ DCHECK(JS_OBJECT_TYPE > JS_VALUE_TYPE);
+
+ __ CmpInstanceType(map, JS_OBJECT_TYPE);
+ __ j(below, slow);
+}
+
+
+// Loads an indexed element from a fast case array.
+// If not_fast_array is NULL, the elements map check is skipped.
+static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
+ Register key, Register scratch,
+ Register result, Label* not_fast_array,
+ Label* out_of_range) {
+ // Register use:
+ // receiver - holds the receiver and is unchanged.
+ // key - holds the key and is unchanged (must be a smi).
+ // Scratch registers:
+ // scratch - used to hold elements of the receiver and the loaded value.
+ // result - holds the result on exit if the load succeeds and
+ // we fall through.
+
+ __ mov(scratch, FieldOperand(receiver, JSObject::kElementsOffset));
+ if (not_fast_array != NULL) {
+ // Check that the object is in fast mode and writable.
+ __ CheckMap(scratch, masm->isolate()->factory()->fixed_array_map(),
+ not_fast_array, DONT_DO_SMI_CHECK);
+ } else {
+ __ AssertFastElements(scratch);
+ }
+ // Check that the key (index) is within bounds.
+ __ cmp(key, FieldOperand(scratch, FixedArray::kLengthOffset));
+ __ j(above_equal, out_of_range);
+ // Fast case: Do the load.
+ STATIC_ASSERT((kPointerSize == 4) && (kSmiTagSize == 1) && (kSmiTag == 0));
+ __ mov(scratch, FieldOperand(scratch, key, times_2, FixedArray::kHeaderSize));
+ __ cmp(scratch, Immediate(masm->isolate()->factory()->the_hole_value()));
+ // In case the loaded value is the_hole we have to consult GetProperty
+ // to ensure the prototype chain is searched.
+ __ j(equal, out_of_range);
+ if (!result.is(scratch)) {
+ __ mov(result, scratch);
+ }
+}
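+// Worked example of the scaling above: a smi key stores 2 * index (one tag
+// bit, kSmiTag == 0), so with times_2 scaling the operand evaluates to
+//   elements + (2 * index) * 2 + FixedArray::kHeaderSize - kHeapObjectTag
+// which is the address of element `index` with 4-byte pointers -- exactly
+// the precondition the STATIC_ASSERT above pins down.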
+
+
+// Checks whether a key is an array index string or a unique name.
+// Falls through if the key is a unique name.
+static void GenerateKeyNameCheck(MacroAssembler* masm, Register key,
+ Register map, Register hash,
+ Label* index_string, Label* not_unique) {
+ // Register use:
+ // key - holds the key and is unchanged. Assumed to be non-smi.
+ // Scratch registers:
+ // map - used to hold the map of the key.
+ // hash - used to hold the hash of the key.
+ Label unique;
+ __ CmpObjectType(key, LAST_UNIQUE_NAME_TYPE, map);
+ __ j(above, not_unique);
+ STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
+ __ j(equal, &unique);
+
+ // Is the string an array index, with cached numeric value?
+ __ mov(hash, FieldOperand(key, Name::kHashFieldOffset));
+ __ test(hash, Immediate(Name::kContainsCachedArrayIndexMask));
+ __ j(zero, index_string);
+
+ // Is the string internalized? We already know it's a string so a single
+ // bit test is enough.
+ STATIC_ASSERT(kNotInternalizedTag != 0);
+ __ test_b(FieldOperand(map, Map::kInstanceTypeOffset),
+ kIsNotInternalizedMask);
+ __ j(not_zero, not_unique);
+
+ __ bind(&unique);
+}
+
+
+static Operand GenerateMappedArgumentsLookup(
+ MacroAssembler* masm, Register object, Register key, Register scratch1,
+ Register scratch2, Label* unmapped_case, Label* slow_case) {
+ Heap* heap = masm->isolate()->heap();
+ Factory* factory = masm->isolate()->factory();
+
+ // Check that the receiver is a JSObject. Because of the elements
+ // map check later, we do not need to check for interceptors or
+ // whether it requires access checks.
+ __ JumpIfSmi(object, slow_case);
+ // Check that the object is some kind of JSObject.
+ __ CmpObjectType(object, FIRST_JS_RECEIVER_TYPE, scratch1);
+ __ j(below, slow_case);
+
+  // Check that the key is a non-negative smi.
+ __ test(key, Immediate(0x80000001));
+ __ j(not_zero, slow_case);
+
+ // Load the elements into scratch1 and check its map.
+ Handle<Map> arguments_map(heap->sloppy_arguments_elements_map());
+ __ mov(scratch1, FieldOperand(object, JSObject::kElementsOffset));
+ __ CheckMap(scratch1, arguments_map, slow_case, DONT_DO_SMI_CHECK);
+
+ // Check if element is in the range of mapped arguments. If not, jump
+ // to the unmapped lookup with the parameter map in scratch1.
+ __ mov(scratch2, FieldOperand(scratch1, FixedArray::kLengthOffset));
+ __ sub(scratch2, Immediate(Smi::FromInt(2)));
+ __ cmp(key, scratch2);
+ __ j(above_equal, unmapped_case);
+
+ // Load element index and check whether it is the hole.
+ const int kHeaderSize = FixedArray::kHeaderSize + 2 * kPointerSize;
+ __ mov(scratch2,
+ FieldOperand(scratch1, key, times_half_pointer_size, kHeaderSize));
+ __ cmp(scratch2, factory->the_hole_value());
+ __ j(equal, unmapped_case);
+
+ // Load value from context and return it. We can reuse scratch1 because
+ // we do not jump to the unmapped lookup (which requires the parameter
+ // map in scratch1).
+ const int kContextOffset = FixedArray::kHeaderSize;
+ __ mov(scratch1, FieldOperand(scratch1, kContextOffset));
+ return FieldOperand(scratch1, scratch2, times_half_pointer_size,
+ Context::kHeaderSize);
+}
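+// The offsets above assume the standard sloppy-arguments parameter map
+// layout: a FixedArray whose first two elements are the context and the
+// arguments backing store, followed by one slot per mapped parameter,
+//   [0] context, [1] backing store, [2 + i] context index for parameter i
+// hence the Smi::FromInt(2) length bias and the 2 * kPointerSize skipped by
+// kHeaderSize when indexing the mapped entries.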
+
+
+static Operand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
+ Register key,
+ Register parameter_map,
+ Register scratch,
+ Label* slow_case) {
+ // Element is in arguments backing store, which is referenced by the
+ // second element of the parameter_map.
+ const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
+ Register backing_store = parameter_map;
+ __ mov(backing_store, FieldOperand(parameter_map, kBackingStoreOffset));
+ Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
+ __ CheckMap(backing_store, fixed_array_map, slow_case, DONT_DO_SMI_CHECK);
+ __ mov(scratch, FieldOperand(backing_store, FixedArray::kLengthOffset));
+ __ cmp(key, scratch);
+ __ j(greater_equal, slow_case);
+ return FieldOperand(backing_store, key, times_half_pointer_size,
+ FixedArray::kHeaderSize);
+}
+
+
+void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
+ // The return address is on the stack.
+ Label slow, check_name, index_smi, index_name, property_array_property;
+ Label probe_dictionary, check_number_dictionary;
+
+ Register receiver = ReceiverRegister();
+ Register key = NameRegister();
+ DCHECK(receiver.is(edx));
+ DCHECK(key.is(ecx));
+
+ // Check that the key is a smi.
+ __ JumpIfNotSmi(key, &check_name);
+ __ bind(&index_smi);
+ // Now the key is known to be a smi. This place is also jumped to from
+ // where a numeric string is converted to a smi.
+
+ GenerateKeyedLoadReceiverCheck(masm, receiver, eax,
+ Map::kHasIndexedInterceptor, &slow);
+
+ // Check the receiver's map to see if it has fast elements.
+ __ CheckFastElements(eax, &check_number_dictionary);
+
+ GenerateFastArrayLoad(masm, receiver, key, eax, eax, NULL, &slow);
+ Isolate* isolate = masm->isolate();
+ Counters* counters = isolate->counters();
+ __ IncrementCounter(counters->keyed_load_generic_smi(), 1);
+ __ ret(0);
+
+ __ bind(&check_number_dictionary);
+ __ mov(ebx, key);
+ __ SmiUntag(ebx);
+ __ mov(eax, FieldOperand(receiver, JSObject::kElementsOffset));
+
+  // Check whether the elements object is a number dictionary.
+ // ebx: untagged index
+ // eax: elements
+ __ CheckMap(eax, isolate->factory()->hash_table_map(), &slow,
+ DONT_DO_SMI_CHECK);
+ Label slow_pop_receiver;
+ // Push receiver on the stack to free up a register for the dictionary
+ // probing.
+ __ push(receiver);
+ __ LoadFromNumberDictionary(&slow_pop_receiver, eax, key, ebx, edx, edi, eax);
+ // Pop receiver before returning.
+ __ pop(receiver);
+ __ ret(0);
+
+ __ bind(&slow_pop_receiver);
+ // Pop the receiver from the stack and jump to runtime.
+ __ pop(receiver);
+
+ __ bind(&slow);
+ // Slow case: jump to runtime.
+ __ IncrementCounter(counters->keyed_load_generic_slow(), 1);
+ GenerateRuntimeGetProperty(masm);
+
+ __ bind(&check_name);
+ GenerateKeyNameCheck(masm, key, eax, ebx, &index_name, &slow);
+
+ GenerateKeyedLoadReceiverCheck(masm, receiver, eax, Map::kHasNamedInterceptor,
+ &slow);
+
+ // If the receiver is a fast-case object, check the keyed lookup
+ // cache. Otherwise probe the dictionary.
+ __ mov(ebx, FieldOperand(receiver, JSObject::kPropertiesOffset));
+ __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
+ Immediate(isolate->factory()->hash_table_map()));
+ __ j(equal, &probe_dictionary);
+
+ // The receiver's map is still in eax, compute the keyed lookup cache hash
+ // based on 32 bits of the map pointer and the string hash.
+ if (FLAG_debug_code) {
+ __ cmp(eax, FieldOperand(receiver, HeapObject::kMapOffset));
+ __ Check(equal, kMapIsNoLongerInEax);
+ }
+ __ mov(ebx, eax); // Keep the map around for later.
+ __ shr(eax, KeyedLookupCache::kMapHashShift);
+ __ mov(edi, FieldOperand(key, String::kHashFieldOffset));
+ __ shr(edi, String::kHashShift);
+ __ xor_(eax, edi);
+ __ and_(eax, KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask);
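+  // In scalar terms the probe index just computed is roughly (a sketch
+  // mirroring the assembly above, not a specification):
+  //   index = ((map_word >> kMapHashShift) ^ (hash_field >> kHashShift)) &
+  //           (kCapacityMask & kHashMask);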
+
+  // Load the key (consisting of map and internalized string) from the cache
+  // and check for a match.
+ Label load_in_object_property;
+ static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
+ Label hit_on_nth_entry[kEntriesPerBucket];
+ ExternalReference cache_keys =
+ ExternalReference::keyed_lookup_cache_keys(masm->isolate());
+
+ for (int i = 0; i < kEntriesPerBucket - 1; i++) {
+ Label try_next_entry;
+ __ mov(edi, eax);
+ __ shl(edi, kPointerSizeLog2 + 1);
+ if (i != 0) {
+ __ add(edi, Immediate(kPointerSize * i * 2));
+ }
+ __ cmp(ebx, Operand::StaticArray(edi, times_1, cache_keys));
+ __ j(not_equal, &try_next_entry);
+ __ add(edi, Immediate(kPointerSize));
+ __ cmp(key, Operand::StaticArray(edi, times_1, cache_keys));
+ __ j(equal, &hit_on_nth_entry[i]);
+ __ bind(&try_next_entry);
+ }
+
+ __ lea(edi, Operand(eax, 1));
+ __ shl(edi, kPointerSizeLog2 + 1);
+ __ add(edi, Immediate(kPointerSize * (kEntriesPerBucket - 1) * 2));
+ __ cmp(ebx, Operand::StaticArray(edi, times_1, cache_keys));
+ __ j(not_equal, &slow);
+ __ add(edi, Immediate(kPointerSize));
+ __ cmp(key, Operand::StaticArray(edi, times_1, cache_keys));
+ __ j(not_equal, &slow);
+
+ // Get field offset.
+ // ebx : receiver's map
+ // eax : lookup cache index
+ ExternalReference cache_field_offsets =
+ ExternalReference::keyed_lookup_cache_field_offsets(masm->isolate());
+
+ // Hit on nth entry.
+ for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
+ __ bind(&hit_on_nth_entry[i]);
+ if (i != 0) {
+ __ add(eax, Immediate(i));
+ }
+ __ mov(edi,
+ Operand::StaticArray(eax, times_pointer_size, cache_field_offsets));
+ __ movzx_b(eax, FieldOperand(ebx, Map::kInObjectPropertiesOffset));
+ __ sub(edi, eax);
+ __ j(above_equal, &property_array_property);
+ if (i != 0) {
+ __ jmp(&load_in_object_property);
+ }
+ }
+
+ // Load in-object property.
+ __ bind(&load_in_object_property);
+ __ movzx_b(eax, FieldOperand(ebx, Map::kInstanceSizeOffset));
+ __ add(eax, edi);
+ __ mov(eax, FieldOperand(receiver, eax, times_pointer_size, 0));
+ __ IncrementCounter(counters->keyed_load_generic_lookup_cache(), 1);
+ __ ret(0);
+
+ // Load property array property.
+ __ bind(&property_array_property);
+ __ mov(eax, FieldOperand(receiver, JSObject::kPropertiesOffset));
+ __ mov(eax,
+ FieldOperand(eax, edi, times_pointer_size, FixedArray::kHeaderSize));
+ __ IncrementCounter(counters->keyed_load_generic_lookup_cache(), 1);
+ __ ret(0);
+
+ // Do a quick inline probe of the receiver's dictionary, if it
+ // exists.
+ __ bind(&probe_dictionary);
+
+ __ mov(eax, FieldOperand(receiver, JSObject::kMapOffset));
+ __ movzx_b(eax, FieldOperand(eax, Map::kInstanceTypeOffset));
+ GenerateGlobalInstanceTypeCheck(masm, eax, &slow);
+
+ GenerateDictionaryLoad(masm, &slow, ebx, key, eax, edi, eax);
+ __ IncrementCounter(counters->keyed_load_generic_symbol(), 1);
+ __ ret(0);
+
+ __ bind(&index_name);
+ __ IndexFromHash(ebx, key);
+ // Now jump to the place where smi keys are handled.
+ __ jmp(&index_smi);
+}
+
+
+void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
+ // Return address is on the stack.
+ Label miss;
+
+ Register receiver = ReceiverRegister();
+ Register index = NameRegister();
+ Register scratch = ebx;
+ DCHECK(!scratch.is(receiver) && !scratch.is(index));
+ Register result = eax;
+ DCHECK(!result.is(scratch));
+
+ StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
+ &miss, // When not a string.
+ &miss, // When not a number.
+ &miss, // When index out of range.
+ STRING_INDEX_IS_ARRAY_INDEX);
+ char_at_generator.GenerateFast(masm);
+ __ ret(0);
+
+ StubRuntimeCallHelper call_helper;
+ char_at_generator.GenerateSlow(masm, call_helper);
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+}
+
+
+void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
+ // Return address is on the stack.
+ Label slow;
+
+ Register receiver = ReceiverRegister();
+ Register key = NameRegister();
+ Register scratch = eax;
+ DCHECK(!scratch.is(receiver) && !scratch.is(key));
+
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(receiver, &slow);
+
+  // Check that the key is an array index, that is, a Uint32.
+ __ test(key, Immediate(kSmiTagMask | kSmiSignMask));
+ __ j(not_zero, &slow);
+
+ // Get the map of the receiver.
+ __ mov(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
+
+ // Check that it has indexed interceptor and access checks
+ // are not enabled for this object.
+ __ movzx_b(scratch, FieldOperand(scratch, Map::kBitFieldOffset));
+ __ and_(scratch, Immediate(kSlowCaseBitFieldMask));
+ __ cmp(scratch, Immediate(1 << Map::kHasIndexedInterceptor));
+ __ j(not_zero, &slow);
+
+ // Everything is fine, call runtime.
+ __ pop(scratch);
+ __ push(receiver); // receiver
+ __ push(key); // key
+ __ push(scratch); // return address
+
+ // Perform tail call to the entry.
+ ExternalReference ref = ExternalReference(
+ IC_Utility(kLoadElementWithInterceptor), masm->isolate());
+ __ TailCallExternalReference(ref, 2, 1);
+
+ __ bind(&slow);
+ GenerateMiss(masm);
+}
+
+
+void KeyedLoadIC::GenerateSloppyArguments(MacroAssembler* masm) {
+ // The return address is on the stack.
+ Register receiver = ReceiverRegister();
+ Register key = NameRegister();
+ DCHECK(receiver.is(edx));
+ DCHECK(key.is(ecx));
+
+ Label slow, notin;
+ Factory* factory = masm->isolate()->factory();
+ Operand mapped_location = GenerateMappedArgumentsLookup(
+ masm, receiver, key, ebx, eax, ¬in, &slow);
+ __ mov(eax, mapped_location);
+ __ Ret();
+ __ bind(¬in);
+ // The unmapped lookup expects that the parameter map is in ebx.
+ Operand unmapped_location =
+ GenerateUnmappedArgumentsLookup(masm, key, ebx, eax, &slow);
+ __ cmp(unmapped_location, factory->the_hole_value());
+ __ j(equal, &slow);
+ __ mov(eax, unmapped_location);
+ __ Ret();
+ __ bind(&slow);
+ GenerateMiss(masm);
+}
+
+
+void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
+ // Return address is on the stack.
+ Label slow, notin;
+ Register receiver = ReceiverRegister();
+ Register name = NameRegister();
+ Register value = ValueRegister();
+ DCHECK(receiver.is(edx));
+ DCHECK(name.is(ecx));
+ DCHECK(value.is(eax));
+
+ Operand mapped_location = GenerateMappedArgumentsLookup(
+ masm, receiver, name, ebx, edi, ¬in, &slow);
+ __ mov(mapped_location, value);
+ __ lea(ecx, mapped_location);
+ __ mov(edx, value);
+ __ RecordWrite(ebx, ecx, edx, kDontSaveFPRegs);
+ __ Ret();
+ __ bind(¬in);
+ // The unmapped lookup expects that the parameter map is in ebx.
+ Operand unmapped_location =
+ GenerateUnmappedArgumentsLookup(masm, name, ebx, edi, &slow);
+ __ mov(unmapped_location, value);
+ __ lea(edi, unmapped_location);
+ __ mov(edx, value);
+ __ RecordWrite(ebx, edi, edx, kDontSaveFPRegs);
+ __ Ret();
+ __ bind(&slow);
+ GenerateMiss(masm);
+}
+
+
+static void KeyedStoreGenerateGenericHelper(
+ MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
+ KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length) {
+ Label transition_smi_elements;
+ Label finish_object_store, non_double_value, transition_double_elements;
+ Label fast_double_without_map_check;
+ Register receiver = KeyedStoreIC::ReceiverRegister();
+ Register key = KeyedStoreIC::NameRegister();
+ Register value = KeyedStoreIC::ValueRegister();
+ DCHECK(receiver.is(edx));
+ DCHECK(key.is(ecx));
+ DCHECK(value.is(eax));
+ // key is a smi.
+ // ebx: FixedArray receiver->elements
+ // edi: receiver map
+  // Fast case: Do the store; the value could be either an Object or a double.
+ __ bind(fast_object);
+ if (check_map == kCheckMap) {
+ __ mov(edi, FieldOperand(ebx, HeapObject::kMapOffset));
+ __ cmp(edi, masm->isolate()->factory()->fixed_array_map());
+ __ j(not_equal, fast_double);
+ }
+
+ // HOLECHECK: guards "A[i] = V"
+  // We have to go to the runtime if the current value is the hole because
+  // there may be a callback on the element.
+ Label holecheck_passed1;
+ __ cmp(FixedArrayElementOperand(ebx, key),
+ masm->isolate()->factory()->the_hole_value());
+ __ j(not_equal, &holecheck_passed1);
+ __ JumpIfDictionaryInPrototypeChain(receiver, ebx, edi, slow);
+ __ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset));
+
+ __ bind(&holecheck_passed1);
+
+ // Smi stores don't require further checks.
+ Label non_smi_value;
+ __ JumpIfNotSmi(value, &non_smi_value);
+ if (increment_length == kIncrementLength) {
+ // Add 1 to receiver->length.
+ __ add(FieldOperand(receiver, JSArray::kLengthOffset),
+ Immediate(Smi::FromInt(1)));
+ }
+ // It's irrelevant whether array is smi-only or not when writing a smi.
+ __ mov(FixedArrayElementOperand(ebx, key), value);
+ __ ret(0);
+
+ __ bind(&non_smi_value);
+ // Escape to elements kind transition case.
+ __ mov(edi, FieldOperand(receiver, HeapObject::kMapOffset));
+ __ CheckFastObjectElements(edi, &transition_smi_elements);
+
+ // Fast elements array, store the value to the elements backing store.
+ __ bind(&finish_object_store);
+ if (increment_length == kIncrementLength) {
+ // Add 1 to receiver->length.
+ __ add(FieldOperand(receiver, JSArray::kLengthOffset),
+ Immediate(Smi::FromInt(1)));
+ }
+ __ mov(FixedArrayElementOperand(ebx, key), value);
+ // Update write barrier for the elements array address.
+ __ mov(edx, value); // Preserve the value which is returned.
+ __ RecordWriteArray(ebx, edx, key, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ __ ret(0);
+
+ __ bind(fast_double);
+ if (check_map == kCheckMap) {
+ // Check for fast double array case. If this fails, call through to the
+ // runtime.
+ __ cmp(edi, masm->isolate()->factory()->fixed_double_array_map());
+ __ j(not_equal, slow);
+ // If the value is a number, store it as a double in the FastDoubleElements
+ // array.
+ }
+
+ // HOLECHECK: guards "A[i] double hole?"
+  // We have to see if the double version of the hole is present. If so,
+  // go to the runtime.
+ uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
+ __ cmp(FieldOperand(ebx, key, times_4, offset), Immediate(kHoleNanUpper32));
+ __ j(not_equal, &fast_double_without_map_check);
+ __ JumpIfDictionaryInPrototypeChain(receiver, ebx, edi, slow);
+ __ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset));
+
+ __ bind(&fast_double_without_map_check);
+ __ StoreNumberToDoubleElements(value, ebx, key, edi, xmm0,
+ &transition_double_elements);
+ if (increment_length == kIncrementLength) {
+ // Add 1 to receiver->length.
+ __ add(FieldOperand(receiver, JSArray::kLengthOffset),
+ Immediate(Smi::FromInt(1)));
+ }
+ __ ret(0);
+
+ __ bind(&transition_smi_elements);
+ __ mov(ebx, FieldOperand(receiver, HeapObject::kMapOffset));
+
+ // Transition the array appropriately depending on the value type.
+ __ CheckMap(value, masm->isolate()->factory()->heap_number_map(),
+ &non_double_value, DONT_DO_SMI_CHECK);
+
+ // Value is a double. Transition FAST_SMI_ELEMENTS -> FAST_DOUBLE_ELEMENTS
+ // and complete the store.
+ __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
+ FAST_DOUBLE_ELEMENTS, ebx, edi, slow);
+ AllocationSiteMode mode =
+ AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS);
+ ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value,
+ ebx, mode, slow);
+ __ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset));
+ __ jmp(&fast_double_without_map_check);
+
+ __ bind(&non_double_value);
+ // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
+ __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS, ebx,
+ edi, slow);
+ mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
+ ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
+ masm, receiver, key, value, ebx, mode, slow);
+ __ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset));
+ __ jmp(&finish_object_store);
+
+ __ bind(&transition_double_elements);
+ // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
+  // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS
+  // and transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
+ __ mov(ebx, FieldOperand(receiver, HeapObject::kMapOffset));
+ __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS,
+ ebx, edi, slow);
+ mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
+ ElementsTransitionGenerator::GenerateDoubleToObject(masm, receiver, key,
+ value, ebx, mode, slow);
+ __ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset));
+ __ jmp(&finish_object_store);
+}
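+// Summary of the transitions handled above (a recap of the paths, not new
+// behavior): storing a non-smi into FAST_SMI_ELEMENTS transitions the array
+// to FAST_DOUBLE_ELEMENTS when the value is a heap number and to
+// FAST_ELEMENTS otherwise; storing a non-number into FAST_DOUBLE_ELEMENTS
+// transitions to FAST_ELEMENTS. Each transition rewrites the backing store
+// and then rejoins the matching fast-store path.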
+
+
+void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
+ StrictMode strict_mode) {
+ // Return address is on the stack.
+ Label slow, fast_object, fast_object_grow;
+ Label fast_double, fast_double_grow;
+ Label array, extra, check_if_double_array;
+ Register receiver = ReceiverRegister();
+ Register key = NameRegister();
+ DCHECK(receiver.is(edx));
+ DCHECK(key.is(ecx));
+
+ // Check that the object isn't a smi.
+ __ JumpIfSmi(receiver, &slow);
+ // Get the map from the receiver.
+ __ mov(edi, FieldOperand(receiver, HeapObject::kMapOffset));
+ // Check that the receiver does not require access checks and is not observed.
+ // The generic stub does not perform map checks or handle observed objects.
+ __ test_b(FieldOperand(edi, Map::kBitFieldOffset),
+ 1 << Map::kIsAccessCheckNeeded | 1 << Map::kIsObserved);
+ __ j(not_zero, &slow);
+ // Check that the key is a smi.
+ __ JumpIfNotSmi(key, &slow);
+ __ CmpInstanceType(edi, JS_ARRAY_TYPE);
+ __ j(equal, &array);
+ // Check that the object is some kind of JSObject.
+ __ CmpInstanceType(edi, FIRST_JS_OBJECT_TYPE);
+ __ j(below, &slow);
+
+ // Object case: Check key against length in the elements array.
+ // Key is a smi.
+ // edi: receiver map
+ __ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset));
+ // Check array bounds. Both the key and the length of FixedArray are smis.
+ __ cmp(key, FieldOperand(ebx, FixedArray::kLengthOffset));
+ __ j(below, &fast_object);
+
+ // Slow case: call runtime.
+ __ bind(&slow);
+ GenerateRuntimeSetProperty(masm, strict_mode);
+
+ // Extra capacity case: Check if there is extra capacity to
+ // perform the store and update the length. Used for adding one
+ // element to the array by writing to array[array.length].
+ __ bind(&extra);
+ // receiver is a JSArray.
+ // key is a smi.
+ // ebx: receiver->elements, a FixedArray
+ // edi: receiver map
+ // flags: compare (key, receiver.length())
+  // Do not leave holes in the array:
+ __ j(not_equal, &slow);
+ __ cmp(key, FieldOperand(ebx, FixedArray::kLengthOffset));
+ __ j(above_equal, &slow);
+ __ mov(edi, FieldOperand(ebx, HeapObject::kMapOffset));
+ __ cmp(edi, masm->isolate()->factory()->fixed_array_map());
+ __ j(not_equal, &check_if_double_array);
+ __ jmp(&fast_object_grow);
+
+ __ bind(&check_if_double_array);
+ __ cmp(edi, masm->isolate()->factory()->fixed_double_array_map());
+ __ j(not_equal, &slow);
+ __ jmp(&fast_double_grow);
+
+ // Array case: Get the length and the elements array from the JS
+ // array. Check that the array is in fast mode (and writable); if it
+ // is the length is always a smi.
+ __ bind(&array);
+ // receiver is a JSArray.
+ // key is a smi.
+ // edi: receiver map
+ __ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset));
+
+ // Check the key against the length in the array and fall through to the
+ // common store code.
+ __ cmp(key, FieldOperand(receiver, JSArray::kLengthOffset)); // Compare smis.
+ __ j(above_equal, &extra);
+
+ KeyedStoreGenerateGenericHelper(masm, &fast_object, &fast_double, &slow,
+ kCheckMap, kDontIncrementLength);
+ KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow,
+ &slow, kDontCheckMap, kIncrementLength);
+}
+
+
+void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
+ // The return address is on the stack.
+ Register receiver = ReceiverRegister();
+ Register name = NameRegister();
+ DCHECK(receiver.is(edx));
+ DCHECK(name.is(ecx));
+
+ // Probe the stub cache.
+ Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
+ Code::ComputeHandlerFlags(Code::LOAD_IC));
+ masm->isolate()->stub_cache()->GenerateProbe(masm, flags, receiver, name, ebx,
+ eax);
+
+ // Cache miss: Jump to runtime.
+ GenerateMiss(masm);
+}
+
+
+void LoadIC::GenerateNormal(MacroAssembler* masm) {
+ Register dictionary = eax;
+ DCHECK(!dictionary.is(ReceiverRegister()));
+ DCHECK(!dictionary.is(NameRegister()));
+
+ Label slow;
+
+ __ mov(dictionary,
+ FieldOperand(ReceiverRegister(), JSObject::kPropertiesOffset));
+ GenerateDictionaryLoad(masm, &slow, dictionary, NameRegister(), edi, ebx,
+ eax);
+ __ ret(0);
+
+ // Dictionary load failed, go slow (but don't miss).
+ __ bind(&slow);
+ GenerateRuntimeGetProperty(masm);
+}
+
+
+static void LoadIC_PushArgs(MacroAssembler* masm) {
+ Register receiver = LoadIC::ReceiverRegister();
+ Register name = LoadIC::NameRegister();
+ DCHECK(!ebx.is(receiver) && !ebx.is(name));
+
+ __ pop(ebx);
+ __ push(receiver);
+ __ push(name);
+ __ push(ebx);
+}
+
+
+void LoadIC::GenerateMiss(MacroAssembler* masm) {
+ // Return address is on the stack.
+ __ IncrementCounter(masm->isolate()->counters()->load_miss(), 1);
+
+ LoadIC_PushArgs(masm);
+
+ // Perform tail call to the entry.
+ ExternalReference ref =
+ ExternalReference(IC_Utility(kLoadIC_Miss), masm->isolate());
+ __ TailCallExternalReference(ref, 2, 1);
+}
+
+
+void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
+ // Return address is on the stack.
+ LoadIC_PushArgs(masm);
+
+ // Perform tail call to the entry.
+ __ TailCallRuntime(Runtime::kGetProperty, 2, 1);
+}
+
+
+void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
+ // Return address is on the stack.
+ __ IncrementCounter(masm->isolate()->counters()->keyed_load_miss(), 1);
+
+ LoadIC_PushArgs(masm);
+
+ // Perform tail call to the entry.
+ ExternalReference ref =
+ ExternalReference(IC_Utility(kKeyedLoadIC_Miss), masm->isolate());
+ __ TailCallExternalReference(ref, 2, 1);
+}
+
+
+// IC register specifications
+const Register LoadIC::ReceiverRegister() { return edx; }
+const Register LoadIC::NameRegister() { return ecx; }
+
+
+const Register LoadIC::SlotRegister() {
+ DCHECK(FLAG_vector_ics);
+ return eax;
+}
+
+
+const Register LoadIC::VectorRegister() {
+ DCHECK(FLAG_vector_ics);
+ return ebx;
+}
+
+
+const Register StoreIC::ReceiverRegister() { return edx; }
+const Register StoreIC::NameRegister() { return ecx; }
+const Register StoreIC::ValueRegister() { return eax; }
+
+
+const Register KeyedStoreIC::MapRegister() { return ebx; }
+
+
+void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
+ // Return address is on the stack.
+ LoadIC_PushArgs(masm);
+
+ // Perform tail call to the entry.
+ __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
+}
+
+
+void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
+ // Return address is on the stack.
+ Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
+ Code::ComputeHandlerFlags(Code::STORE_IC));
+ masm->isolate()->stub_cache()->GenerateProbe(masm, flags, ReceiverRegister(),
+ NameRegister(), ebx, no_reg);
+
+ // Cache miss: Jump to runtime.
+ GenerateMiss(masm);
+}
+
+
+static void StoreIC_PushArgs(MacroAssembler* masm) {
+ Register receiver = StoreIC::ReceiverRegister();
+ Register name = StoreIC::NameRegister();
+ Register value = StoreIC::ValueRegister();
+
+ DCHECK(!ebx.is(receiver) && !ebx.is(name) && !ebx.is(value));
+
+ __ pop(ebx);
+ __ push(receiver);
+ __ push(name);
+ __ push(value);
+ __ push(ebx);
+}
+
+
+void StoreIC::GenerateMiss(MacroAssembler* masm) {
+ // Return address is on the stack.
+ StoreIC_PushArgs(masm);
+
+ // Perform tail call to the entry.
+ ExternalReference ref =
+ ExternalReference(IC_Utility(kStoreIC_Miss), masm->isolate());
+ __ TailCallExternalReference(ref, 3, 1);
+}
+
+
+void StoreIC::GenerateNormal(MacroAssembler* masm) {
+ Label restore_miss;
+ Register receiver = ReceiverRegister();
+ Register name = NameRegister();
+ Register value = ValueRegister();
+ Register dictionary = ebx;
+
+ __ mov(dictionary, FieldOperand(receiver, JSObject::kPropertiesOffset));
+
+ // A lot of registers are needed for storing to slow case
+ // objects. Push and restore receiver but rely on
+ // GenerateDictionaryStore preserving the value and name.
+ __ push(receiver);
+ GenerateDictionaryStore(masm, &restore_miss, dictionary, name, value,
+ receiver, edi);
+ __ Drop(1);
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->store_normal_hit(), 1);
+ __ ret(0);
+
+ __ bind(&restore_miss);
+ __ pop(receiver);
+ __ IncrementCounter(counters->store_normal_miss(), 1);
+ GenerateMiss(masm);
+}
+
+
+void StoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
+ StrictMode strict_mode) {
+ // Return address is on the stack.
+ DCHECK(!ebx.is(ReceiverRegister()) && !ebx.is(NameRegister()) &&
+ !ebx.is(ValueRegister()));
+ __ pop(ebx);
+ __ push(ReceiverRegister());
+ __ push(NameRegister());
+ __ push(ValueRegister());
+ __ push(Immediate(Smi::FromInt(strict_mode)));
+ __ push(ebx); // return address
+
+ // Do tail-call to runtime routine.
+ __ TailCallRuntime(Runtime::kSetProperty, 4, 1);
+}
+
+
+void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
+ StrictMode strict_mode) {
+ // Return address is on the stack.
+ DCHECK(!ebx.is(ReceiverRegister()) && !ebx.is(NameRegister()) &&
+ !ebx.is(ValueRegister()));
+ __ pop(ebx);
+ __ push(ReceiverRegister());
+ __ push(NameRegister());
+ __ push(ValueRegister());
+ __ push(Immediate(Smi::FromInt(strict_mode)));
+ __ push(ebx); // return address
+
+ // Do tail-call to runtime routine.
+ __ TailCallRuntime(Runtime::kSetProperty, 4, 1);
+}
+
+
+void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
+ // Return address is on the stack.
+ StoreIC_PushArgs(masm);
+
+ // Do tail-call to runtime routine.
+ ExternalReference ref =
+ ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
+ __ TailCallExternalReference(ref, 3, 1);
+}
+
+
+void StoreIC::GenerateSlow(MacroAssembler* masm) {
+ // Return address is on the stack.
+ StoreIC_PushArgs(masm);
+
+ // Do tail-call to runtime routine.
+ ExternalReference ref(IC_Utility(kStoreIC_Slow), masm->isolate());
+ __ TailCallExternalReference(ref, 3, 1);
+}
+
+
+void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
+ // Return address is on the stack.
+ StoreIC_PushArgs(masm);
+
+ // Do tail-call to runtime routine.
+ ExternalReference ref(IC_Utility(kKeyedStoreIC_Slow), masm->isolate());
+ __ TailCallExternalReference(ref, 3, 1);
+}
+
+
+#undef __
+
+
+Condition CompareIC::ComputeCondition(Token::Value op) {
+ switch (op) {
+ case Token::EQ_STRICT:
+ case Token::EQ:
+ return equal;
+ case Token::LT:
+ return less;
+ case Token::GT:
+ return greater;
+ case Token::LTE:
+ return less_equal;
+ case Token::GTE:
+ return greater_equal;
+ default:
+ UNREACHABLE();
+ return no_condition;
+ }
+}
+
+
+bool CompareIC::HasInlinedSmiCode(Address address) {
+ // The address of the instruction following the call.
+ Address test_instruction_address =
+ address + Assembler::kCallTargetAddressOffset;
+
+  // If the instruction following the call is not a test al instruction,
+  // nothing was inlined.
+ return *test_instruction_address == Assembler::kTestAlByte;
+}
+
+
+void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
+ // The address of the instruction following the call.
+ Address test_instruction_address =
+ address + Assembler::kCallTargetAddressOffset;
+
+  // If the instruction following the call is not a test al instruction,
+  // nothing was inlined.
+ if (*test_instruction_address != Assembler::kTestAlByte) {
+ DCHECK(*test_instruction_address == Assembler::kNopByte);
+ return;
+ }
+
+ Address delta_address = test_instruction_address + 1;
+ // The delta to the start of the map check instruction and the
+  // condition code used at the patched jump.
+ uint8_t delta = *reinterpret_cast<uint8_t*>(delta_address);
+ if (FLAG_trace_ic) {
+ PrintF("[ patching ic at %p, test=%p, delta=%d\n", address,
+ test_instruction_address, delta);
+ }
+
+ // Patch with a short conditional jump. Enabling means switching from a short
+ // jump-if-carry/not-carry to jump-if-zero/not-zero, whereas disabling is the
+ // reverse operation of that.
+ Address jmp_address = test_instruction_address - delta;
+ DCHECK((check == ENABLE_INLINED_SMI_CHECK)
+ ? (*jmp_address == Assembler::kJncShortOpcode ||
+ *jmp_address == Assembler::kJcShortOpcode)
+ : (*jmp_address == Assembler::kJnzShortOpcode ||
+ *jmp_address == Assembler::kJzShortOpcode));
+ Condition cc =
+ (check == ENABLE_INLINED_SMI_CHECK)
+ ? (*jmp_address == Assembler::kJncShortOpcode ? not_zero : zero)
+ : (*jmp_address == Assembler::kJnzShortOpcode ? not_carry : carry);
+ *jmp_address = static_cast<byte>(Assembler::kJccShortPrefix | cc);
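+  // Byte layout assumed by the patching above (derived from the offsets
+  // used here; illustrative rather than an encoding reference):
+  //   jmp_address (= test_addr - delta): 2-byte short jcc over the inlined
+  //     check -- jc/jnc while the check is disabled, jz/jnz while enabled
+  //   ...                                the inlined smi-check sequence
+  //   call to the IC stub                ends at test_addr
+  //   test_addr:                         test al, imm8 -- the 0xA8 opcode
+  //     (kTestAlByte) marks that a check was inlined; the imm8 is `delta`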
+}
+}
+} // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_IA32
--- /dev/null
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_IA32
+
+#include "src/codegen.h"
+#include "src/ic/stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+
+static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
+ Code::Flags flags, StubCache::Table table, Register name,
+ Register receiver,
+                       // Number of the cache entry, pointer-size scaled.
+ Register offset, Register extra) {
+ ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
+ ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
+ ExternalReference map_offset(isolate->stub_cache()->map_reference(table));
+
+ Label miss;
+
+ // Multiply by 3 because there are 3 fields per entry (name, code, map).
+ __ lea(offset, Operand(offset, offset, times_2, 0));
+
+ if (extra.is_valid()) {
+ // Get the code entry from the cache.
+ __ mov(extra, Operand::StaticArray(offset, times_1, value_offset));
+
+ // Check that the key in the entry matches the name.
+ __ cmp(name, Operand::StaticArray(offset, times_1, key_offset));
+ __ j(not_equal, &miss);
+
+ // Check the map matches.
+ __ mov(offset, Operand::StaticArray(offset, times_1, map_offset));
+ __ cmp(offset, FieldOperand(receiver, HeapObject::kMapOffset));
+ __ j(not_equal, &miss);
+
+ // Check that the flags match what we're looking for.
+ __ mov(offset, FieldOperand(extra, Code::kFlagsOffset));
+ __ and_(offset, ~Code::kFlagsNotUsedInLookup);
+ __ cmp(offset, flags);
+ __ j(not_equal, &miss);
+
+#ifdef DEBUG
+ if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
+ __ jmp(&miss);
+ } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
+ __ jmp(&miss);
+ }
+#endif
+
+ // Jump to the first instruction in the code stub.
+ __ add(extra, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ jmp(extra);
+
+ __ bind(&miss);
+ } else {
+ // Save the offset on the stack.
+ __ push(offset);
+
+ // Check that the key in the entry matches the name.
+ __ cmp(name, Operand::StaticArray(offset, times_1, key_offset));
+ __ j(not_equal, &miss);
+
+ // Check the map matches.
+ __ mov(offset, Operand::StaticArray(offset, times_1, map_offset));
+ __ cmp(offset, FieldOperand(receiver, HeapObject::kMapOffset));
+ __ j(not_equal, &miss);
+
+ // Restore offset register.
+ __ mov(offset, Operand(esp, 0));
+
+ // Get the code entry from the cache.
+ __ mov(offset, Operand::StaticArray(offset, times_1, value_offset));
+
+ // Check that the flags match what we're looking for.
+ __ mov(offset, FieldOperand(offset, Code::kFlagsOffset));
+ __ and_(offset, ~Code::kFlagsNotUsedInLookup);
+ __ cmp(offset, flags);
+ __ j(not_equal, &miss);
+
+#ifdef DEBUG
+ if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
+ __ jmp(&miss);
+ } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
+ __ jmp(&miss);
+ }
+#endif
+
+ // Restore offset and re-load code entry from cache.
+ __ pop(offset);
+ __ mov(offset, Operand::StaticArray(offset, times_1, value_offset));
+
+ // Jump to the first instruction in the code stub.
+ __ add(offset, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ jmp(offset);
+
+ // Pop at miss.
+ __ bind(&miss);
+ __ pop(offset);
+ }
+}
+
+
+void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
+ Register receiver, Register name,
+ Register scratch, Register extra, Register extra2,
+ Register extra3) {
+ Label miss;
+
+ // Assert that code is valid. The multiplying code relies on the entry size
+ // being 12.
+ DCHECK(sizeof(Entry) == 12);
+
+ // Assert the flags do not name a specific type.
+ DCHECK(Code::ExtractTypeFromFlags(flags) == 0);
+
+ // Assert that there are no register conflicts.
+ DCHECK(!scratch.is(receiver));
+ DCHECK(!scratch.is(name));
+ DCHECK(!extra.is(receiver));
+ DCHECK(!extra.is(name));
+ DCHECK(!extra.is(scratch));
+
+ // Assert scratch and extra registers are valid, and extra2/3 are unused.
+ DCHECK(!scratch.is(no_reg));
+ DCHECK(extra2.is(no_reg));
+ DCHECK(extra3.is(no_reg));
+
+ Register offset = scratch;
+ scratch = no_reg;
+
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1);
+
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(receiver, &miss);
+
+ // Get the map of the receiver and compute the hash.
+ __ mov(offset, FieldOperand(name, Name::kHashFieldOffset));
+ __ add(offset, FieldOperand(receiver, HeapObject::kMapOffset));
+ __ xor_(offset, flags);
+  // We mask out the last two bits because they are not part of the hash and
+  // they are always 01 for maps. The same masking is applied by the 'and'
+  // instructions below.
+ __ and_(offset, (kPrimaryTableSize - 1) << kCacheIndexShift);
+ // ProbeTable expects the offset to be pointer scaled, which it is, because
+ // the heap object tag size is 2 and the pointer size log 2 is also 2.
+ DCHECK(kCacheIndexShift == kPointerSizeLog2);
+
+ // Probe the primary table.
+ ProbeTable(isolate(), masm, flags, kPrimary, name, receiver, offset, extra);
+
+ // Primary miss: Compute hash for secondary probe.
+ __ mov(offset, FieldOperand(name, Name::kHashFieldOffset));
+ __ add(offset, FieldOperand(receiver, HeapObject::kMapOffset));
+ __ xor_(offset, flags);
+ __ and_(offset, (kPrimaryTableSize - 1) << kCacheIndexShift);
+ __ sub(offset, name);
+ __ add(offset, Immediate(flags));
+ __ and_(offset, (kSecondaryTableSize - 1) << kCacheIndexShift);
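+  // I.e. the secondary offset is derived from the primary one, roughly
+  // (again a sketch): (primary - name + flags), masked with
+  // (kSecondaryTableSize - 1) << kCacheIndexShift.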
+
+ // Probe the secondary table.
+ ProbeTable(isolate(), masm, flags, kSecondary, name, receiver, offset, extra);
+
+ // Cache miss: Fall-through and let caller handle the miss by
+ // entering the runtime system.
+ __ bind(&miss);
+ __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1);
+}
+
+
+#undef __
+}
+} // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_IA32
--- /dev/null
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/ic/ic-inl.h"
+#include "src/ic/ic-compiler.h"
+
+
+namespace v8 {
+namespace internal {
+
+
+Handle<Code> PropertyICCompiler::Find(Handle<Name> name,
+ Handle<Map> stub_holder, Code::Kind kind,
+ ExtraICState extra_state,
+ CacheHolderFlag cache_holder) {
+ Code::Flags flags =
+ Code::ComputeMonomorphicFlags(kind, extra_state, cache_holder);
+ Object* probe = stub_holder->FindInCodeCache(*name, flags);
+ if (probe->IsCode()) return handle(Code::cast(probe));
+ return Handle<Code>::null();
+}
+
+
+Handle<Code> PropertyHandlerCompiler::Find(Handle<Name> name,
+ Handle<Map> stub_holder,
+ Code::Kind kind,
+ CacheHolderFlag cache_holder,
+ Code::StubType type) {
+ Code::Flags flags = Code::ComputeHandlerFlags(kind, type, cache_holder);
+ Object* probe = stub_holder->FindInCodeCache(*name, flags);
+ if (probe->IsCode()) return handle(Code::cast(probe));
+ return Handle<Code>::null();
+}
+
+
+Handle<Code> PropertyICCompiler::ComputeMonomorphic(
+ Code::Kind kind, Handle<Name> name, Handle<HeapType> type,
+ Handle<Code> handler, ExtraICState extra_ic_state) {
+ Isolate* isolate = name->GetIsolate();
+ if (handler.is_identical_to(isolate->builtins()->LoadIC_Normal()) ||
+ handler.is_identical_to(isolate->builtins()->StoreIC_Normal())) {
+ name = isolate->factory()->normal_ic_symbol();
+ }
+
+ CacheHolderFlag flag;
+ Handle<Map> stub_holder = IC::GetICCacheHolder(*type, isolate, &flag);
+
+ Handle<Code> ic;
+ // There are multiple string maps that all use the same prototype. That
+ // prototype cannot hold multiple handlers, one for each of the string maps,
+ // for a single name. Hence, turn off caching of the IC.
+ bool can_be_cached = !type->Is(HeapType::String());
+ if (can_be_cached) {
+ ic = Find(name, stub_holder, kind, extra_ic_state, flag);
+ if (!ic.is_null()) return ic;
+ }
+
+#ifdef DEBUG
+ if (kind == Code::KEYED_STORE_IC) {
+ DCHECK(STANDARD_STORE ==
+ KeyedStoreIC::GetKeyedAccessStoreMode(extra_ic_state));
+ }
+#endif
+
+ PropertyICCompiler ic_compiler(isolate, kind, extra_ic_state, flag);
+ ic = ic_compiler.CompileMonomorphic(type, handler, name, PROPERTY);
+
+ if (can_be_cached) Map::UpdateCodeCache(stub_holder, name, ic);
+ return ic;
+}
+
+
+Handle<Code> NamedLoadHandlerCompiler::ComputeLoadNonexistent(
+ Handle<Name> name, Handle<HeapType> type) {
+ Isolate* isolate = name->GetIsolate();
+ Handle<Map> receiver_map = IC::TypeToMap(*type, isolate);
+ if (receiver_map->prototype()->IsNull()) {
+ // TODO(jkummerow/verwaest): If there is no prototype and the property
+ // is nonexistent, introduce a builtin to handle this (fast properties
+ // -> return undefined, dictionary properties -> do negative lookup).
+ return Handle<Code>();
+ }
+ CacheHolderFlag flag;
+ Handle<Map> stub_holder_map =
+ IC::GetHandlerCacheHolder(*type, false, isolate, &flag);
+
+ // If no dictionary mode objects are present in the prototype chain, the load
+ // nonexistent IC stub can be shared for all names for a given map and we use
+ // the empty string for the map cache in that case. If there are dictionary
+ // mode objects involved, we need to do negative lookups in the stub and
+ // therefore the stub will be specific to the name.
+ Handle<Name> cache_name =
+ receiver_map->is_dictionary_map()
+ ? name
+ : Handle<Name>::cast(isolate->factory()->nonexistent_symbol());
+ Handle<Map> current_map = stub_holder_map;
+ Handle<JSObject> last(JSObject::cast(receiver_map->prototype()));
+ while (true) {
+ if (current_map->is_dictionary_map()) cache_name = name;
+ if (current_map->prototype()->IsNull()) break;
+ last = handle(JSObject::cast(current_map->prototype()));
+ current_map = handle(last->map());
+ }
+  // Compile the stub that is either shared for all names or
+  // name-specific if there are dictionary mode objects involved.
+ Handle<Code> handler = PropertyHandlerCompiler::Find(
+ cache_name, stub_holder_map, Code::LOAD_IC, flag, Code::FAST);
+ if (!handler.is_null()) return handler;
+
+ NamedLoadHandlerCompiler compiler(isolate, type, last, flag);
+ handler = compiler.CompileLoadNonexistent(cache_name);
+ Map::UpdateCodeCache(stub_holder_map, cache_name, handler);
+ return handler;
+}
+
+
+Handle<Code> PropertyICCompiler::ComputeKeyedLoadMonomorphic(
+ Handle<Map> receiver_map) {
+ Isolate* isolate = receiver_map->GetIsolate();
+ Code::Flags flags = Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC);
+ Handle<Name> name = isolate->factory()->KeyedLoadMonomorphic_string();
+
+ Handle<Object> probe(receiver_map->FindInCodeCache(*name, flags), isolate);
+ if (probe->IsCode()) return Handle<Code>::cast(probe);
+
+ ElementsKind elements_kind = receiver_map->elements_kind();
+ Handle<Code> stub;
+ if (receiver_map->has_fast_elements() ||
+ receiver_map->has_external_array_elements() ||
+ receiver_map->has_fixed_typed_array_elements()) {
+ stub = LoadFastElementStub(isolate,
+ receiver_map->instance_type() == JS_ARRAY_TYPE,
+ elements_kind).GetCode();
+ } else {
+ stub = FLAG_compiled_keyed_dictionary_loads
+ ? LoadDictionaryElementStub(isolate).GetCode()
+ : LoadDictionaryElementPlatformStub(isolate).GetCode();
+ }
+ PropertyICCompiler compiler(isolate, Code::KEYED_LOAD_IC);
+ Handle<Code> code =
+ compiler.CompileMonomorphic(HeapType::Class(receiver_map, isolate), stub,
+ isolate->factory()->empty_string(), ELEMENT);
+
+ Map::UpdateCodeCache(receiver_map, name, code);
+ return code;
+}
+
+
+Handle<Code> PropertyICCompiler::ComputeKeyedStoreMonomorphic(
+ Handle<Map> receiver_map, StrictMode strict_mode,
+ KeyedAccessStoreMode store_mode) {
+ Isolate* isolate = receiver_map->GetIsolate();
+ ExtraICState extra_state =
+ KeyedStoreIC::ComputeExtraICState(strict_mode, store_mode);
+ Code::Flags flags =
+ Code::ComputeMonomorphicFlags(Code::KEYED_STORE_IC, extra_state);
+
+ DCHECK(store_mode == STANDARD_STORE ||
+ store_mode == STORE_AND_GROW_NO_TRANSITION ||
+ store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS ||
+ store_mode == STORE_NO_TRANSITION_HANDLE_COW);
+
+ Handle<String> name = isolate->factory()->KeyedStoreMonomorphic_string();
+ Handle<Object> probe(receiver_map->FindInCodeCache(*name, flags), isolate);
+ if (probe->IsCode()) return Handle<Code>::cast(probe);
+
+ PropertyICCompiler compiler(isolate, Code::KEYED_STORE_IC, extra_state);
+ Handle<Code> code =
+ compiler.CompileKeyedStoreMonomorphic(receiver_map, store_mode);
+
+ Map::UpdateCodeCache(receiver_map, name, code);
+ DCHECK(KeyedStoreIC::GetKeyedAccessStoreMode(code->extra_ic_state()) ==
+ store_mode);
+ return code;
+}
+
+
+#define CALL_LOGGER_TAG(kind, type) (Logger::KEYED_##type)
+
+
+Code* PropertyICCompiler::FindPreMonomorphic(Isolate* isolate, Code::Kind kind,
+ ExtraICState state) {
+ Code::Flags flags = Code::ComputeFlags(kind, PREMONOMORPHIC, state);
+ UnseededNumberDictionary* dictionary =
+ isolate->heap()->non_monomorphic_cache();
+ int entry = dictionary->FindEntry(isolate, flags);
+ DCHECK(entry != -1);
+ Object* code = dictionary->ValueAt(entry);
+  // This might be called during the marking phase of the collector,
+  // hence the unchecked cast.
+ return reinterpret_cast<Code*>(code);
+}
+
+
+static void FillCache(Isolate* isolate, Handle<Code> code) {
+ Handle<UnseededNumberDictionary> dictionary = UnseededNumberDictionary::Set(
+ isolate->factory()->non_monomorphic_cache(), code->flags(), code);
+ isolate->heap()->public_set_non_monomorphic_cache(*dictionary);
+}
+
+
+Handle<Code> PropertyICCompiler::ComputeLoad(Isolate* isolate,
+ InlineCacheState ic_state,
+ ExtraICState extra_state) {
+ Code::Flags flags = Code::ComputeFlags(Code::LOAD_IC, ic_state, extra_state);
+ Handle<UnseededNumberDictionary> cache =
+ isolate->factory()->non_monomorphic_cache();
+ int entry = cache->FindEntry(isolate, flags);
+ if (entry != -1) return Handle<Code>(Code::cast(cache->ValueAt(entry)));
+
+ PropertyICCompiler compiler(isolate, Code::LOAD_IC);
+ Handle<Code> code;
+ if (ic_state == UNINITIALIZED) {
+ code = compiler.CompileLoadInitialize(flags);
+ } else if (ic_state == PREMONOMORPHIC) {
+ code = compiler.CompileLoadPreMonomorphic(flags);
+ } else if (ic_state == MEGAMORPHIC) {
+ code = compiler.CompileLoadMegamorphic(flags);
+ } else {
+ UNREACHABLE();
+ }
+ FillCache(isolate, code);
+ return code;
+}
+
+
+Handle<Code> PropertyICCompiler::ComputeStore(Isolate* isolate,
+ InlineCacheState ic_state,
+ ExtraICState extra_state) {
+ Code::Flags flags = Code::ComputeFlags(Code::STORE_IC, ic_state, extra_state);
+ Handle<UnseededNumberDictionary> cache =
+ isolate->factory()->non_monomorphic_cache();
+ int entry = cache->FindEntry(isolate, flags);
+ if (entry != -1) return Handle<Code>(Code::cast(cache->ValueAt(entry)));
+
+ PropertyICCompiler compiler(isolate, Code::STORE_IC);
+ Handle<Code> code;
+ if (ic_state == UNINITIALIZED) {
+ code = compiler.CompileStoreInitialize(flags);
+ } else if (ic_state == PREMONOMORPHIC) {
+ code = compiler.CompileStorePreMonomorphic(flags);
+ } else if (ic_state == GENERIC) {
+ code = compiler.CompileStoreGeneric(flags);
+ } else if (ic_state == MEGAMORPHIC) {
+ code = compiler.CompileStoreMegamorphic(flags);
+ } else {
+ UNREACHABLE();
+ }
+
+ FillCache(isolate, code);
+ return code;
+}
+
+
+Handle<Code> PropertyICCompiler::ComputeCompareNil(Handle<Map> receiver_map,
+ CompareNilICStub* stub) {
+ Isolate* isolate = receiver_map->GetIsolate();
+ Handle<String> name(isolate->heap()->empty_string());
+ if (!receiver_map->is_dictionary_map()) {
+ Handle<Code> cached_ic =
+ Find(name, receiver_map, Code::COMPARE_NIL_IC, stub->GetExtraICState());
+ if (!cached_ic.is_null()) return cached_ic;
+ }
+
+ Code::FindAndReplacePattern pattern;
+ pattern.Add(isolate->factory()->meta_map(), receiver_map);
+ Handle<Code> ic = stub->GetCodeCopy(pattern);
+
+ if (!receiver_map->is_dictionary_map()) {
+ Map::UpdateCodeCache(receiver_map, name, ic);
+ }
+
+ return ic;
+}
+
+
+// TODO(verwaest): Change this method so it takes in a TypeHandleList.
+Handle<Code> PropertyICCompiler::ComputeKeyedLoadPolymorphic(
+ MapHandleList* receiver_maps) {
+ Isolate* isolate = receiver_maps->at(0)->GetIsolate();
+ Code::Flags flags = Code::ComputeFlags(Code::KEYED_LOAD_IC, POLYMORPHIC);
+ Handle<PolymorphicCodeCache> cache =
+ isolate->factory()->polymorphic_code_cache();
+ Handle<Object> probe = cache->Lookup(receiver_maps, flags);
+ if (probe->IsCode()) return Handle<Code>::cast(probe);
+
+ TypeHandleList types(receiver_maps->length());
+ for (int i = 0; i < receiver_maps->length(); i++) {
+ types.Add(HeapType::Class(receiver_maps->at(i), isolate));
+ }
+ CodeHandleList handlers(receiver_maps->length());
+ ElementHandlerCompiler compiler(isolate);
+ compiler.CompileElementHandlers(receiver_maps, &handlers);
+ PropertyICCompiler ic_compiler(isolate, Code::KEYED_LOAD_IC);
+ Handle<Code> code = ic_compiler.CompilePolymorphic(
+ &types, &handlers, isolate->factory()->empty_string(), Code::NORMAL,
+ ELEMENT);
+
+ isolate->counters()->keyed_load_polymorphic_stubs()->Increment();
+
+ PolymorphicCodeCache::Update(cache, receiver_maps, flags, code);
+ return code;
+}
+
+
+Handle<Code> PropertyICCompiler::ComputePolymorphic(
+ Code::Kind kind, TypeHandleList* types, CodeHandleList* handlers,
+ int valid_types, Handle<Name> name, ExtraICState extra_ic_state) {
+ Handle<Code> handler = handlers->at(0);
+ Code::StubType type = valid_types == 1 ? handler->type() : Code::NORMAL;
+ DCHECK(kind == Code::LOAD_IC || kind == Code::STORE_IC);
+ PropertyICCompiler ic_compiler(name->GetIsolate(), kind, extra_ic_state);
+ return ic_compiler.CompilePolymorphic(types, handlers, name, type, PROPERTY);
+}
+
+
+Handle<Code> PropertyICCompiler::ComputeKeyedStorePolymorphic(
+ MapHandleList* receiver_maps, KeyedAccessStoreMode store_mode,
+ StrictMode strict_mode) {
+ Isolate* isolate = receiver_maps->at(0)->GetIsolate();
+ DCHECK(store_mode == STANDARD_STORE ||
+ store_mode == STORE_AND_GROW_NO_TRANSITION ||
+ store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS ||
+ store_mode == STORE_NO_TRANSITION_HANDLE_COW);
+ Handle<PolymorphicCodeCache> cache =
+ isolate->factory()->polymorphic_code_cache();
+ ExtraICState extra_state =
+ KeyedStoreIC::ComputeExtraICState(strict_mode, store_mode);
+ Code::Flags flags =
+ Code::ComputeFlags(Code::KEYED_STORE_IC, POLYMORPHIC, extra_state);
+ Handle<Object> probe = cache->Lookup(receiver_maps, flags);
+ if (probe->IsCode()) return Handle<Code>::cast(probe);
+
+ PropertyICCompiler compiler(isolate, Code::KEYED_STORE_IC, extra_state);
+ Handle<Code> code =
+ compiler.CompileKeyedStorePolymorphic(receiver_maps, store_mode);
+ PolymorphicCodeCache::Update(cache, receiver_maps, flags, code);
+ return code;
+}
+
+
+Handle<Code> PropertyICCompiler::CompileLoadInitialize(Code::Flags flags) {
+ LoadIC::GenerateInitialize(masm());
+ Handle<Code> code = GetCodeWithFlags(flags, "CompileLoadInitialize");
+ PROFILE(isolate(), CodeCreateEvent(Logger::LOAD_INITIALIZE_TAG, *code, 0));
+ return code;
+}
+
+
+Handle<Code> PropertyICCompiler::CompileLoadPreMonomorphic(Code::Flags flags) {
+ LoadIC::GeneratePreMonomorphic(masm());
+ Handle<Code> code = GetCodeWithFlags(flags, "CompileLoadPreMonomorphic");
+ PROFILE(isolate(),
+ CodeCreateEvent(Logger::LOAD_PREMONOMORPHIC_TAG, *code, 0));
+ return code;
+}
+
+
+Handle<Code> PropertyICCompiler::CompileLoadMegamorphic(Code::Flags flags) {
+ LoadIC::GenerateMegamorphic(masm());
+ Handle<Code> code = GetCodeWithFlags(flags, "CompileLoadMegamorphic");
+ PROFILE(isolate(), CodeCreateEvent(Logger::LOAD_MEGAMORPHIC_TAG, *code, 0));
+ return code;
+}
+
+
+Handle<Code> PropertyICCompiler::CompileStoreInitialize(Code::Flags flags) {
+ StoreIC::GenerateInitialize(masm());
+ Handle<Code> code = GetCodeWithFlags(flags, "CompileStoreInitialize");
+ PROFILE(isolate(), CodeCreateEvent(Logger::STORE_INITIALIZE_TAG, *code, 0));
+ return code;
+}
+
+
+Handle<Code> PropertyICCompiler::CompileStorePreMonomorphic(Code::Flags flags) {
+ StoreIC::GeneratePreMonomorphic(masm());
+ Handle<Code> code = GetCodeWithFlags(flags, "CompileStorePreMonomorphic");
+ PROFILE(isolate(),
+ CodeCreateEvent(Logger::STORE_PREMONOMORPHIC_TAG, *code, 0));
+ return code;
+}
+
+
+Handle<Code> PropertyICCompiler::CompileStoreGeneric(Code::Flags flags) {
+ ExtraICState extra_state = Code::ExtractExtraICStateFromFlags(flags);
+ StrictMode strict_mode = StoreIC::GetStrictMode(extra_state);
+ StoreIC::GenerateRuntimeSetProperty(masm(), strict_mode);
+ Handle<Code> code = GetCodeWithFlags(flags, "CompileStoreGeneric");
+ PROFILE(isolate(), CodeCreateEvent(Logger::STORE_GENERIC_TAG, *code, 0));
+ return code;
+}
+
+
+Handle<Code> PropertyICCompiler::CompileStoreMegamorphic(Code::Flags flags) {
+ StoreIC::GenerateMegamorphic(masm());
+ Handle<Code> code = GetCodeWithFlags(flags, "CompileStoreMegamorphic");
+ PROFILE(isolate(), CodeCreateEvent(Logger::STORE_MEGAMORPHIC_TAG, *code, 0));
+ return code;
+}
+
+
+#undef CALL_LOGGER_TAG
+
+
+Handle<Code> PropertyAccessCompiler::GetCodeWithFlags(Code::Flags flags,
+ const char* name) {
+ // Create code object in the heap.
+ CodeDesc desc;
+ masm()->GetCode(&desc);
+ Handle<Code> code = factory()->NewCode(desc, flags, masm()->CodeObject());
+ if (code->IsCodeStubOrIC()) code->set_stub_key(CodeStub::NoCacheKey());
+#ifdef ENABLE_DISASSEMBLER
+ if (FLAG_print_code_stubs) {
+ OFStream os(stdout);
+ code->Disassemble(name, os);
+ }
+#endif
+ return code;
+}
+
+
+Handle<Code> PropertyAccessCompiler::GetCodeWithFlags(Code::Flags flags,
+ Handle<Name> name) {
+ return (FLAG_print_code_stubs && !name.is_null() && name->IsString())
+ ? GetCodeWithFlags(flags,
+ Handle<String>::cast(name)->ToCString().get())
+ : GetCodeWithFlags(flags, NULL);
+}
+
+
+#define __ ACCESS_MASM(masm())
+
+
+Register NamedLoadHandlerCompiler::FrontendHeader(Register object_reg,
+ Handle<Name> name,
+ Label* miss) {
+ PrototypeCheckType check_type = CHECK_ALL_MAPS;
+ int function_index = -1;
+ if (type()->Is(HeapType::String())) {
+ function_index = Context::STRING_FUNCTION_INDEX;
+ } else if (type()->Is(HeapType::Symbol())) {
+ function_index = Context::SYMBOL_FUNCTION_INDEX;
+ } else if (type()->Is(HeapType::Number())) {
+ function_index = Context::NUMBER_FUNCTION_INDEX;
+ } else if (type()->Is(HeapType::Boolean())) {
+ function_index = Context::BOOLEAN_FUNCTION_INDEX;
+ } else {
+ check_type = SKIP_RECEIVER;
+ }
+
+ if (check_type == CHECK_ALL_MAPS) {
+ GenerateDirectLoadGlobalFunctionPrototype(masm(), function_index,
+ scratch1(), miss);
+ Object* function = isolate()->native_context()->get(function_index);
+ Object* prototype = JSFunction::cast(function)->instance_prototype();
+ set_type_for_object(handle(prototype, isolate()));
+ object_reg = scratch1();
+ }
+
+ // Check that the maps starting from the prototype haven't changed.
+ return CheckPrototypes(object_reg, scratch1(), scratch2(), scratch3(), name,
+ miss, check_type);
+}
+
+
+// Frontend for store uses the name register. It has to be restored before a
+// miss.
+Register NamedStoreHandlerCompiler::FrontendHeader(Register object_reg,
+ Handle<Name> name,
+ Label* miss) {
+ return CheckPrototypes(object_reg, this->name(), scratch1(), scratch2(), name,
+ miss, SKIP_RECEIVER);
+}
+
+
+bool PropertyICCompiler::IncludesNumberType(TypeHandleList* types) {
+ for (int i = 0; i < types->length(); ++i) {
+ if (types->at(i)->Is(HeapType::Number())) return true;
+ }
+ return false;
+}
+
+
+Register PropertyHandlerCompiler::Frontend(Register object_reg,
+ Handle<Name> name) {
+ Label miss;
+ Register reg = FrontendHeader(object_reg, name, &miss);
+ FrontendFooter(name, &miss);
+ return reg;
+}
+
+
+void PropertyHandlerCompiler::NonexistentFrontendHeader(Handle<Name> name,
+ Label* miss,
+ Register scratch1,
+ Register scratch2) {
+ Register holder_reg;
+ Handle<Map> last_map;
+ if (holder().is_null()) {
+ holder_reg = receiver();
+ last_map = IC::TypeToMap(*type(), isolate());
+ // If |type| has null as its prototype, |holder()| is
+ // Handle<JSObject>::null().
+ DCHECK(last_map->prototype() == isolate()->heap()->null_value());
+ } else {
+ holder_reg = FrontendHeader(receiver(), name, miss);
+ last_map = handle(holder()->map());
+ }
+
+ if (last_map->is_dictionary_map()) {
+ if (last_map->IsJSGlobalObjectMap()) {
+ Handle<JSGlobalObject> global =
+ holder().is_null()
+ ? Handle<JSGlobalObject>::cast(type()->AsConstant()->Value())
+ : Handle<JSGlobalObject>::cast(holder());
+ GenerateCheckPropertyCell(masm(), global, name, scratch1, miss);
+ } else {
+ if (!name->IsUniqueName()) {
+ DCHECK(name->IsString());
+ name = factory()->InternalizeString(Handle<String>::cast(name));
+ }
+ DCHECK(holder().is_null() ||
+ holder()->property_dictionary()->FindEntry(name) ==
+ NameDictionary::kNotFound);
+ GenerateDictionaryNegativeLookup(masm(), miss, holder_reg, name, scratch1,
+ scratch2);
+ }
+ }
+}
+
+
+Handle<Code> NamedLoadHandlerCompiler::CompileLoadField(Handle<Name> name,
+ FieldIndex field) {
+ Register reg = Frontend(receiver(), name);
+ __ Move(receiver(), reg);
+ LoadFieldStub stub(isolate(), field);
+ GenerateTailCall(masm(), stub.GetCode());
+ return GetCode(kind(), Code::FAST, name);
+}
+
+
+Handle<Code> NamedLoadHandlerCompiler::CompileLoadConstant(Handle<Name> name,
+ int constant_index) {
+ Register reg = Frontend(receiver(), name);
+ __ Move(receiver(), reg);
+ LoadConstantStub stub(isolate(), constant_index);
+ GenerateTailCall(masm(), stub.GetCode());
+ return GetCode(kind(), Code::FAST, name);
+}
+
+
+Handle<Code> NamedLoadHandlerCompiler::CompileLoadNonexistent(
+ Handle<Name> name) {
+ Label miss;
+ NonexistentFrontendHeader(name, &miss, scratch2(), scratch3());
+ GenerateLoadConstant(isolate()->factory()->undefined_value());
+ FrontendFooter(name, &miss);
+ return GetCode(kind(), Code::FAST, name);
+}
+
+
+Handle<Code> NamedLoadHandlerCompiler::CompileLoadCallback(
+ Handle<Name> name, Handle<ExecutableAccessorInfo> callback) {
+ Register reg = Frontend(receiver(), name);
+ GenerateLoadCallback(reg, callback);
+ return GetCode(kind(), Code::FAST, name);
+}
+
+
+Handle<Code> NamedLoadHandlerCompiler::CompileLoadCallback(
+ Handle<Name> name, const CallOptimization& call_optimization) {
+ DCHECK(call_optimization.is_simple_api_call());
+ Frontend(receiver(), name);
+ Handle<Map> receiver_map = IC::TypeToMap(*type(), isolate());
+ GenerateFastApiCall(masm(), call_optimization, receiver_map, receiver(),
+ scratch1(), false, 0, NULL);
+ return GetCode(kind(), Code::FAST, name);
+}
+
+
+Handle<Code> NamedLoadHandlerCompiler::CompileLoadInterceptor(
+ LookupIterator* it) {
+  // So far the most popular follow-ups for interceptor loads are FIELD and
+  // ExecutableAccessorInfo, so inline only those. Other cases may be added
+  // later.
+ bool inline_followup = it->state() == LookupIterator::PROPERTY;
+ if (inline_followup) {
+ switch (it->property_kind()) {
+ case LookupIterator::DATA:
+ inline_followup = it->property_details().type() == FIELD;
+ break;
+ case LookupIterator::ACCESSOR: {
+ Handle<Object> accessors = it->GetAccessors();
+ inline_followup = accessors->IsExecutableAccessorInfo();
+ if (!inline_followup) break;
+ Handle<ExecutableAccessorInfo> info =
+ Handle<ExecutableAccessorInfo>::cast(accessors);
+ inline_followup = info->getter() != NULL &&
+ ExecutableAccessorInfo::IsCompatibleReceiverType(
+ isolate(), info, type());
+ }
+ }
+ }
+
+ Register reg = Frontend(receiver(), it->name());
+ if (inline_followup) {
+    // TODO(368): Compile in the whole chain: all the interceptors in
+    // prototypes and the ultimate answer.
+ GenerateLoadInterceptorWithFollowup(it, reg);
+ } else {
+ GenerateLoadInterceptor(reg);
+ }
+ return GetCode(kind(), Code::FAST, it->name());
+}
+
+
+void NamedLoadHandlerCompiler::GenerateLoadPostInterceptor(
+ LookupIterator* it, Register interceptor_reg) {
+ Handle<JSObject> real_named_property_holder(it->GetHolder<JSObject>());
+
+ set_type_for_object(holder());
+ set_holder(real_named_property_holder);
+ Register reg = Frontend(interceptor_reg, it->name());
+
+ switch (it->property_kind()) {
+ case LookupIterator::DATA: {
+ DCHECK_EQ(FIELD, it->property_details().type());
+ __ Move(receiver(), reg);
+ LoadFieldStub stub(isolate(), it->GetFieldIndex());
+ GenerateTailCall(masm(), stub.GetCode());
+ break;
+ }
+ case LookupIterator::ACCESSOR:
+ Handle<ExecutableAccessorInfo> info =
+ Handle<ExecutableAccessorInfo>::cast(it->GetAccessors());
+ DCHECK_NE(NULL, info->getter());
+ GenerateLoadCallback(reg, info);
+ }
+}
+
+
+Handle<Code> PropertyICCompiler::CompileMonomorphic(Handle<HeapType> type,
+ Handle<Code> handler,
+ Handle<Name> name,
+ IcCheckType check) {
+ TypeHandleList types(1);
+ CodeHandleList handlers(1);
+ types.Add(type);
+ handlers.Add(handler);
+ Code::StubType stub_type = handler->type();
+ return CompilePolymorphic(&types, &handlers, name, stub_type, check);
+}
+
+
+Handle<Code> NamedLoadHandlerCompiler::CompileLoadViaGetter(
+ Handle<Name> name, Handle<JSFunction> getter) {
+ Frontend(receiver(), name);
+ GenerateLoadViaGetter(masm(), type(), receiver(), getter);
+ return GetCode(kind(), Code::FAST, name);
+}
+
+
+// TODO(verwaest): Cleanup. holder() is actually the receiver.
+Handle<Code> NamedStoreHandlerCompiler::CompileStoreTransition(
+ Handle<Map> transition, Handle<Name> name) {
+ Label miss, slow;
+
+ // Ensure no transitions to deprecated maps are followed.
+ __ CheckMapDeprecated(transition, scratch1(), &miss);
+
+ // Check that we are allowed to write this.
+ bool is_nonexistent = holder()->map() == transition->GetBackPointer();
+ if (is_nonexistent) {
+ // Find the top object.
+ Handle<JSObject> last;
+ PrototypeIterator iter(isolate(), holder());
+ while (!iter.IsAtEnd()) {
+ last = Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter));
+ iter.Advance();
+ }
+ if (!last.is_null()) set_holder(last);
+ NonexistentFrontendHeader(name, &miss, scratch1(), scratch2());
+ } else {
+ FrontendHeader(receiver(), name, &miss);
+ DCHECK(holder()->HasFastProperties());
+ }
+
+ GenerateStoreTransition(transition, name, receiver(), this->name(), value(),
+ scratch1(), scratch2(), scratch3(), &miss, &slow);
+
+ GenerateRestoreName(&miss, name);
+ TailCallBuiltin(masm(), MissBuiltin(kind()));
+
+ GenerateRestoreName(&slow, name);
+ TailCallBuiltin(masm(), SlowBuiltin(kind()));
+ return GetCode(kind(), Code::FAST, name);
+}
+
+
+Handle<Code> NamedStoreHandlerCompiler::CompileStoreField(LookupIterator* it) {
+ Label miss;
+ GenerateStoreField(it, value(), &miss);
+ __ bind(&miss);
+ TailCallBuiltin(masm(), MissBuiltin(kind()));
+ return GetCode(kind(), Code::FAST, it->name());
+}
+
+
+Handle<Code> NamedStoreHandlerCompiler::CompileStoreViaSetter(
+ Handle<JSObject> object, Handle<Name> name, Handle<JSFunction> setter) {
+ Frontend(receiver(), name);
+ GenerateStoreViaSetter(masm(), type(), receiver(), setter);
+
+ return GetCode(kind(), Code::FAST, name);
+}
+
+
+Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
+ Handle<JSObject> object, Handle<Name> name,
+ const CallOptimization& call_optimization) {
+ Frontend(receiver(), name);
+ Register values[] = {value()};
+ GenerateFastApiCall(masm(), call_optimization, handle(object->map()),
+ receiver(), scratch1(), true, 1, values);
+ return GetCode(kind(), Code::FAST, name);
+}
+
+
+Handle<Code> PropertyICCompiler::CompileKeyedStoreMonomorphic(
+ Handle<Map> receiver_map, KeyedAccessStoreMode store_mode) {
+ ElementsKind elements_kind = receiver_map->elements_kind();
+ bool is_jsarray = receiver_map->instance_type() == JS_ARRAY_TYPE;
+ Handle<Code> stub;
+ if (receiver_map->has_fast_elements() ||
+ receiver_map->has_external_array_elements() ||
+ receiver_map->has_fixed_typed_array_elements()) {
+ stub = StoreFastElementStub(isolate(), is_jsarray, elements_kind,
+ store_mode).GetCode();
+ } else {
+ stub = StoreElementStub(isolate(), is_jsarray, elements_kind, store_mode)
+ .GetCode();
+ }
+
+ __ DispatchMap(receiver(), scratch1(), receiver_map, stub, DO_SMI_CHECK);
+
+ TailCallBuiltin(masm(), Builtins::kKeyedStoreIC_Miss);
+
+ return GetCode(kind(), Code::NORMAL, factory()->empty_string());
+}
+
+
+#undef __
+
+
+void PropertyAccessCompiler::TailCallBuiltin(MacroAssembler* masm,
+ Builtins::Name name) {
+ Handle<Code> code(masm->isolate()->builtins()->builtin(name));
+ GenerateTailCall(masm, code);
+}
+
+
+Register* PropertyAccessCompiler::GetCallingConvention(Code::Kind kind) {
+ if (kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC) {
+ return load_calling_convention();
+ }
+ DCHECK(kind == Code::STORE_IC || kind == Code::KEYED_STORE_IC);
+ return store_calling_convention();
+}
+
+
+Handle<Code> PropertyICCompiler::GetCode(Code::Kind kind, Code::StubType type,
+ Handle<Name> name,
+ InlineCacheState state) {
+ Code::Flags flags =
+ Code::ComputeFlags(kind, state, extra_ic_state_, type, cache_holder());
+ Handle<Code> code = GetCodeWithFlags(flags, name);
+ IC::RegisterWeakMapDependency(code);
+ PROFILE(isolate(), CodeCreateEvent(log_kind(code), *code, *name));
+ return code;
+}
+
+
+Handle<Code> PropertyHandlerCompiler::GetCode(Code::Kind kind,
+ Code::StubType type,
+ Handle<Name> name) {
+ Code::Flags flags = Code::ComputeHandlerFlags(kind, type, cache_holder());
+ Handle<Code> code = GetCodeWithFlags(flags, name);
+ PROFILE(isolate(), CodeCreateEvent(Logger::STUB_TAG, *code, *name));
+ return code;
+}
+
+
+void ElementHandlerCompiler::CompileElementHandlers(
+ MapHandleList* receiver_maps, CodeHandleList* handlers) {
+ for (int i = 0; i < receiver_maps->length(); ++i) {
+ Handle<Map> receiver_map = receiver_maps->at(i);
+ Handle<Code> cached_stub;
+
+ if ((receiver_map->instance_type() & kNotStringTag) == 0) {
+ cached_stub = isolate()->builtins()->KeyedLoadIC_String();
+ } else if (receiver_map->instance_type() < FIRST_JS_RECEIVER_TYPE) {
+ cached_stub = isolate()->builtins()->KeyedLoadIC_Slow();
+ } else {
+ bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
+ ElementsKind elements_kind = receiver_map->elements_kind();
+
+ if (IsFastElementsKind(elements_kind) ||
+ IsExternalArrayElementsKind(elements_kind) ||
+ IsFixedTypedArrayElementsKind(elements_kind)) {
+ cached_stub = LoadFastElementStub(isolate(), is_js_array, elements_kind)
+ .GetCode();
+ } else if (elements_kind == SLOPPY_ARGUMENTS_ELEMENTS) {
+ cached_stub = isolate()->builtins()->KeyedLoadIC_SloppyArguments();
+ } else {
+ DCHECK(elements_kind == DICTIONARY_ELEMENTS);
+ cached_stub = LoadDictionaryElementStub(isolate()).GetCode();
+ }
+ }
+
+ handlers->Add(cached_stub);
+ }
+}
+
+
+Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
+ MapHandleList* receiver_maps, KeyedAccessStoreMode store_mode) {
+ // Collect MONOMORPHIC stubs for all |receiver_maps|.
+ CodeHandleList handlers(receiver_maps->length());
+ MapHandleList transitioned_maps(receiver_maps->length());
+ for (int i = 0; i < receiver_maps->length(); ++i) {
+ Handle<Map> receiver_map(receiver_maps->at(i));
+ Handle<Code> cached_stub;
+ Handle<Map> transitioned_map =
+ receiver_map->FindTransitionedMap(receiver_maps);
+
+    // TODO(mvstanton): The code below is doing pessimistic elements
+    // transitions. I would like to stop doing that and rely on Allocation Site
+    // Tracking to do a better job of ensuring the data types are what they
+    // need to be. Not all the pieces are in place yet; until they are,
+    // pessimistic elements transitions remain important for performance.
+ bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
+ ElementsKind elements_kind = receiver_map->elements_kind();
+ if (!transitioned_map.is_null()) {
+ cached_stub =
+ ElementsTransitionAndStoreStub(isolate(), elements_kind,
+ transitioned_map->elements_kind(),
+ is_js_array, store_mode).GetCode();
+ } else if (receiver_map->instance_type() < FIRST_JS_RECEIVER_TYPE) {
+ cached_stub = isolate()->builtins()->KeyedStoreIC_Slow();
+ } else {
+ if (receiver_map->has_fast_elements() ||
+ receiver_map->has_external_array_elements() ||
+ receiver_map->has_fixed_typed_array_elements()) {
+ cached_stub = StoreFastElementStub(isolate(), is_js_array,
+ elements_kind, store_mode).GetCode();
+ } else {
+ cached_stub = StoreElementStub(isolate(), is_js_array, elements_kind,
+ store_mode).GetCode();
+ }
+ }
+ DCHECK(!cached_stub.is_null());
+ handlers.Add(cached_stub);
+ transitioned_maps.Add(transitioned_map);
+ }
+
+ Handle<Code> code = CompileKeyedStorePolymorphic(receiver_maps, &handlers,
+ &transitioned_maps);
+ isolate()->counters()->keyed_store_polymorphic_stubs()->Increment();
+ PROFILE(isolate(), CodeCreateEvent(log_kind(code), *code, 0));
+ return code;
+}
+
+
+void ElementHandlerCompiler::GenerateStoreDictionaryElement(
+ MacroAssembler* masm) {
+ KeyedStoreIC::GenerateSlow(masm);
+}
+
+
+CallOptimization::CallOptimization(Handle<JSFunction> function) {
+ Initialize(function);
+}
+
+
+Handle<JSObject> CallOptimization::LookupHolderOfExpectedType(
+ Handle<Map> object_map, HolderLookup* holder_lookup) const {
+ DCHECK(is_simple_api_call());
+ if (!object_map->IsJSObjectMap()) {
+ *holder_lookup = kHolderNotFound;
+ return Handle<JSObject>::null();
+ }
+ if (expected_receiver_type_.is_null() ||
+ expected_receiver_type_->IsTemplateFor(*object_map)) {
+ *holder_lookup = kHolderIsReceiver;
+ return Handle<JSObject>::null();
+ }
+ while (true) {
+ if (!object_map->prototype()->IsJSObject()) break;
+ Handle<JSObject> prototype(JSObject::cast(object_map->prototype()));
+ if (!prototype->map()->is_hidden_prototype()) break;
+ object_map = handle(prototype->map());
+ if (expected_receiver_type_->IsTemplateFor(*object_map)) {
+ *holder_lookup = kHolderFound;
+ return prototype;
+ }
+ }
+ *holder_lookup = kHolderNotFound;
+ return Handle<JSObject>::null();
+}
+
+
+bool CallOptimization::IsCompatibleReceiver(Handle<Object> receiver,
+ Handle<JSObject> holder) const {
+ DCHECK(is_simple_api_call());
+ if (!receiver->IsJSObject()) return false;
+ Handle<Map> map(JSObject::cast(*receiver)->map());
+ HolderLookup holder_lookup;
+ Handle<JSObject> api_holder = LookupHolderOfExpectedType(map, &holder_lookup);
+ switch (holder_lookup) {
+ case kHolderNotFound:
+ return false;
+ case kHolderIsReceiver:
+ return true;
+ case kHolderFound:
+ if (api_holder.is_identical_to(holder)) return true;
+ // Check if holder is in prototype chain of api_holder.
+ {
+ JSObject* object = *api_holder;
+ while (true) {
+ Object* prototype = object->map()->prototype();
+ if (!prototype->IsJSObject()) return false;
+ if (prototype == *holder) return true;
+ object = JSObject::cast(prototype);
+ }
+ }
+ break;
+ }
+ UNREACHABLE();
+ return false;
+}
+
+
+void CallOptimization::Initialize(Handle<JSFunction> function) {
+ constant_function_ = Handle<JSFunction>::null();
+ is_simple_api_call_ = false;
+ expected_receiver_type_ = Handle<FunctionTemplateInfo>::null();
+ api_call_info_ = Handle<CallHandlerInfo>::null();
+
+ if (function.is_null() || !function->is_compiled()) return;
+
+ constant_function_ = function;
+ AnalyzePossibleApiFunction(function);
+}
+
+
+void CallOptimization::AnalyzePossibleApiFunction(Handle<JSFunction> function) {
+ if (!function->shared()->IsApiFunction()) return;
+ Handle<FunctionTemplateInfo> info(function->shared()->get_api_func_data());
+
+ // Require a C++ callback.
+ if (info->call_code()->IsUndefined()) return;
+ api_call_info_ =
+ Handle<CallHandlerInfo>(CallHandlerInfo::cast(info->call_code()));
+
+ // Accept signatures that either have no restrictions at all or
+ // only have restrictions on the receiver.
+ if (!info->signature()->IsUndefined()) {
+ Handle<SignatureInfo> signature =
+ Handle<SignatureInfo>(SignatureInfo::cast(info->signature()));
+ if (!signature->args()->IsUndefined()) return;
+ if (!signature->receiver()->IsUndefined()) {
+ expected_receiver_type_ = Handle<FunctionTemplateInfo>(
+ FunctionTemplateInfo::cast(signature->receiver()));
+ }
+ }
+
+ is_simple_api_call_ = true;
+}
+}
+} // namespace v8::internal
--- /dev/null
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_IC_IC_COMPILER_H_
+#define V8_IC_IC_COMPILER_H_
+
+#include "src/code-stubs.h"
+#include "src/macro-assembler.h"
+#include "src/objects.h"
+
+namespace v8 {
+namespace internal {
+
+
+class CallOptimization;
+class SmallMapList;
+class StubCache;
+
+
+enum PrototypeCheckType { CHECK_ALL_MAPS, SKIP_RECEIVER };
+enum IcCheckType { ELEMENT, PROPERTY };
+
+
+class PropertyAccessCompiler BASE_EMBEDDED {
+ public:
+ static Builtins::Name MissBuiltin(Code::Kind kind) {
+ switch (kind) {
+ case Code::LOAD_IC:
+ return Builtins::kLoadIC_Miss;
+ case Code::STORE_IC:
+ return Builtins::kStoreIC_Miss;
+ case Code::KEYED_LOAD_IC:
+ return Builtins::kKeyedLoadIC_Miss;
+ case Code::KEYED_STORE_IC:
+ return Builtins::kKeyedStoreIC_Miss;
+ default:
+ UNREACHABLE();
+ }
+ return Builtins::kLoadIC_Miss;
+ }
+
+ static void TailCallBuiltin(MacroAssembler* masm, Builtins::Name name);
+
+ protected:
+ PropertyAccessCompiler(Isolate* isolate, Code::Kind kind,
+ CacheHolderFlag cache_holder)
+ : registers_(GetCallingConvention(kind)),
+ kind_(kind),
+ cache_holder_(cache_holder),
+ isolate_(isolate),
+ masm_(isolate, NULL, 256) {}
+
+ Code::Kind kind() const { return kind_; }
+ CacheHolderFlag cache_holder() const { return cache_holder_; }
+ MacroAssembler* masm() { return &masm_; }
+ Isolate* isolate() const { return isolate_; }
+ Heap* heap() const { return isolate()->heap(); }
+ Factory* factory() const { return isolate()->factory(); }
+
+ Register receiver() const { return registers_[0]; }
+ Register name() const { return registers_[1]; }
+ Register scratch1() const { return registers_[2]; }
+ Register scratch2() const { return registers_[3]; }
+ Register scratch3() const { return registers_[4]; }
+
+ // Calling convention between indexed store IC and handler.
+ Register transition_map() const { return scratch1(); }
+
+ static Register* GetCallingConvention(Code::Kind);
+ static Register* load_calling_convention();
+ static Register* store_calling_convention();
+ static Register* keyed_store_calling_convention();
+
+ Register* registers_;
+
+ static void GenerateTailCall(MacroAssembler* masm, Handle<Code> code);
+
+ Handle<Code> GetCodeWithFlags(Code::Flags flags, const char* name);
+ Handle<Code> GetCodeWithFlags(Code::Flags flags, Handle<Name> name);
+
+ private:
+ Code::Kind kind_;
+ CacheHolderFlag cache_holder_;
+
+ Isolate* isolate_;
+ MacroAssembler masm_;
+};
+
+
+class PropertyICCompiler : public PropertyAccessCompiler {
+ public:
+ // Finds the Code object stored in the Heap::non_monomorphic_cache().
+ static Code* FindPreMonomorphic(Isolate* isolate, Code::Kind kind,
+ ExtraICState extra_ic_state);
+
+ // Named
+ static Handle<Code> ComputeLoad(Isolate* isolate, InlineCacheState ic_state,
+ ExtraICState extra_state);
+ static Handle<Code> ComputeStore(Isolate* isolate, InlineCacheState ic_state,
+ ExtraICState extra_state);
+
+ static Handle<Code> ComputeMonomorphic(Code::Kind kind, Handle<Name> name,
+ Handle<HeapType> type,
+ Handle<Code> handler,
+ ExtraICState extra_ic_state);
+ static Handle<Code> ComputePolymorphic(Code::Kind kind, TypeHandleList* types,
+ CodeHandleList* handlers,
+ int number_of_valid_maps,
+ Handle<Name> name,
+ ExtraICState extra_ic_state);
+
+ // Keyed
+ static Handle<Code> ComputeKeyedLoadMonomorphic(Handle<Map> receiver_map);
+
+ static Handle<Code> ComputeKeyedStoreMonomorphic(
+ Handle<Map> receiver_map, StrictMode strict_mode,
+ KeyedAccessStoreMode store_mode);
+ static Handle<Code> ComputeKeyedLoadPolymorphic(MapHandleList* receiver_maps);
+ static Handle<Code> ComputeKeyedStorePolymorphic(
+ MapHandleList* receiver_maps, KeyedAccessStoreMode store_mode,
+ StrictMode strict_mode);
+
+ // Compare nil
+ static Handle<Code> ComputeCompareNil(Handle<Map> receiver_map,
+ CompareNilICStub* stub);
+
+
+ private:
+ PropertyICCompiler(Isolate* isolate, Code::Kind kind,
+ ExtraICState extra_ic_state = kNoExtraICState,
+ CacheHolderFlag cache_holder = kCacheOnReceiver)
+ : PropertyAccessCompiler(isolate, kind, cache_holder),
+ extra_ic_state_(extra_ic_state) {}
+
+ static Handle<Code> Find(Handle<Name> name, Handle<Map> stub_holder_map,
+ Code::Kind kind,
+ ExtraICState extra_ic_state = kNoExtraICState,
+ CacheHolderFlag cache_holder = kCacheOnReceiver);
+
+ Handle<Code> CompileLoadInitialize(Code::Flags flags);
+ Handle<Code> CompileLoadPreMonomorphic(Code::Flags flags);
+ Handle<Code> CompileLoadMegamorphic(Code::Flags flags);
+ Handle<Code> CompileStoreInitialize(Code::Flags flags);
+ Handle<Code> CompileStorePreMonomorphic(Code::Flags flags);
+ Handle<Code> CompileStoreGeneric(Code::Flags flags);
+ Handle<Code> CompileStoreMegamorphic(Code::Flags flags);
+
+ Handle<Code> CompileMonomorphic(Handle<HeapType> type, Handle<Code> handler,
+ Handle<Name> name, IcCheckType check);
+ Handle<Code> CompilePolymorphic(TypeHandleList* types,
+ CodeHandleList* handlers, Handle<Name> name,
+ Code::StubType type, IcCheckType check);
+
+ Handle<Code> CompileKeyedStoreMonomorphic(Handle<Map> receiver_map,
+ KeyedAccessStoreMode store_mode);
+ Handle<Code> CompileKeyedStorePolymorphic(MapHandleList* receiver_maps,
+ KeyedAccessStoreMode store_mode);
+ Handle<Code> CompileKeyedStorePolymorphic(MapHandleList* receiver_maps,
+ CodeHandleList* handler_stubs,
+ MapHandleList* transitioned_maps);
+
+ bool IncludesNumberType(TypeHandleList* types);
+
+ Handle<Code> GetCode(Code::Kind kind, Code::StubType type, Handle<Name> name,
+ InlineCacheState state = MONOMORPHIC);
+
+ Logger::LogEventsAndTags log_kind(Handle<Code> code) {
+ if (kind() == Code::LOAD_IC) {
+ return code->ic_state() == MONOMORPHIC ? Logger::LOAD_IC_TAG
+ : Logger::LOAD_POLYMORPHIC_IC_TAG;
+ } else if (kind() == Code::KEYED_LOAD_IC) {
+ return code->ic_state() == MONOMORPHIC
+ ? Logger::KEYED_LOAD_IC_TAG
+ : Logger::KEYED_LOAD_POLYMORPHIC_IC_TAG;
+ } else if (kind() == Code::STORE_IC) {
+ return code->ic_state() == MONOMORPHIC ? Logger::STORE_IC_TAG
+ : Logger::STORE_POLYMORPHIC_IC_TAG;
+ } else {
+ DCHECK_EQ(Code::KEYED_STORE_IC, kind());
+ return code->ic_state() == MONOMORPHIC
+ ? Logger::KEYED_STORE_IC_TAG
+ : Logger::KEYED_STORE_POLYMORPHIC_IC_TAG;
+ }
+ }
+
+ const ExtraICState extra_ic_state_;
+};
+
+
+class PropertyHandlerCompiler : public PropertyAccessCompiler {
+ public:
+ static Handle<Code> Find(Handle<Name> name, Handle<Map> map, Code::Kind kind,
+ CacheHolderFlag cache_holder, Code::StubType type);
+
+ protected:
+ PropertyHandlerCompiler(Isolate* isolate, Code::Kind kind,
+ Handle<HeapType> type, Handle<JSObject> holder,
+ CacheHolderFlag cache_holder)
+ : PropertyAccessCompiler(isolate, kind, cache_holder),
+ type_(type),
+ holder_(holder) {}
+
+ virtual ~PropertyHandlerCompiler() {}
+
+ virtual Register FrontendHeader(Register object_reg, Handle<Name> name,
+ Label* miss) {
+ UNREACHABLE();
+ return receiver();
+ }
+
+ virtual void FrontendFooter(Handle<Name> name, Label* miss) { UNREACHABLE(); }
+
+ Register Frontend(Register object_reg, Handle<Name> name);
+ void NonexistentFrontendHeader(Handle<Name> name, Label* miss,
+ Register scratch1, Register scratch2);
+
+ // TODO(verwaest): Make non-static.
+ static void GenerateFastApiCall(MacroAssembler* masm,
+ const CallOptimization& optimization,
+ Handle<Map> receiver_map, Register receiver,
+ Register scratch, bool is_store, int argc,
+ Register* values);
+
+ // Helper function used to check that the dictionary doesn't contain
+ // the property. This function may return false negatives, so miss_label
+ // must always call a backup property check that is complete.
+ // This function is safe to call if the receiver has fast properties.
+ // Name must be unique and receiver must be a heap object.
+ static void GenerateDictionaryNegativeLookup(MacroAssembler* masm,
+ Label* miss_label,
+ Register receiver,
+ Handle<Name> name, Register r0,
+ Register r1);
+
+ // Generate code to check that a global property cell is empty. Create
+ // the property cell at compilation time if no cell exists for the
+ // property.
+ static void GenerateCheckPropertyCell(MacroAssembler* masm,
+ Handle<JSGlobalObject> global,
+ Handle<Name> name, Register scratch,
+ Label* miss);
+
+  // Generates code that verifies that the property holder has not changed:
+  // it checks the maps of objects in the prototype chain for fast and global
+  // objects (or does a negative lookup for slow objects), ensures that the
+  // property cells for global objects are still empty, and checks that the
+  // map of the holder has not changed. If necessary the function also
+  // generates code for a security check in case of global object holders.
+  // This helps to make sure that the current IC is still valid.
+ //
+ // The scratch and holder registers are always clobbered, but the object
+  // register is only clobbered if it is the same as the holder register. The
+ // function returns a register containing the holder - either object_reg or
+ // holder_reg.
+ Register CheckPrototypes(Register object_reg, Register holder_reg,
+ Register scratch1, Register scratch2,
+ Handle<Name> name, Label* miss,
+ PrototypeCheckType check = CHECK_ALL_MAPS);
+
+ Handle<Code> GetCode(Code::Kind kind, Code::StubType type, Handle<Name> name);
+ void set_type_for_object(Handle<Object> object) {
+ type_ = IC::CurrentTypeOf(object, isolate());
+ }
+ void set_holder(Handle<JSObject> holder) { holder_ = holder; }
+ Handle<HeapType> type() const { return type_; }
+ Handle<JSObject> holder() const { return holder_; }
+
+ private:
+ Handle<HeapType> type_;
+ Handle<JSObject> holder_;
+};
+
+
+class NamedLoadHandlerCompiler : public PropertyHandlerCompiler {
+ public:
+ NamedLoadHandlerCompiler(Isolate* isolate, Handle<HeapType> type,
+ Handle<JSObject> holder,
+ CacheHolderFlag cache_holder)
+ : PropertyHandlerCompiler(isolate, Code::LOAD_IC, type, holder,
+ cache_holder) {}
+
+ virtual ~NamedLoadHandlerCompiler() {}
+
+ Handle<Code> CompileLoadField(Handle<Name> name, FieldIndex index);
+
+ Handle<Code> CompileLoadCallback(Handle<Name> name,
+ Handle<ExecutableAccessorInfo> callback);
+
+ Handle<Code> CompileLoadCallback(Handle<Name> name,
+ const CallOptimization& call_optimization);
+
+ Handle<Code> CompileLoadConstant(Handle<Name> name, int constant_index);
+
+ // The LookupIterator is used to perform a lookup behind the interceptor. If
+ // the iterator points to a LookupIterator::PROPERTY, its access will be
+ // inlined.
+ Handle<Code> CompileLoadInterceptor(LookupIterator* it);
+
+ Handle<Code> CompileLoadViaGetter(Handle<Name> name,
+ Handle<JSFunction> getter);
+
+ Handle<Code> CompileLoadGlobal(Handle<PropertyCell> cell, Handle<Name> name,
+ bool is_configurable);
+
+ // Static interface
+ static Handle<Code> ComputeLoadNonexistent(Handle<Name> name,
+ Handle<HeapType> type);
+
+ static void GenerateLoadViaGetter(MacroAssembler* masm, Handle<HeapType> type,
+ Register receiver,
+ Handle<JSFunction> getter);
+
+ static void GenerateLoadViaGetterForDeopt(MacroAssembler* masm) {
+ GenerateLoadViaGetter(masm, Handle<HeapType>::null(), no_reg,
+ Handle<JSFunction>());
+ }
+
+ static void GenerateLoadFunctionPrototype(MacroAssembler* masm,
+ Register receiver,
+ Register scratch1,
+ Register scratch2,
+ Label* miss_label);
+
+ // These constants describe the structure of the interceptor arguments on the
+ // stack. The arguments are pushed by the (platform-specific)
+ // PushInterceptorArguments and read by LoadPropertyWithInterceptorOnly and
+ // LoadWithInterceptor.
+ static const int kInterceptorArgsNameIndex = 0;
+ static const int kInterceptorArgsInfoIndex = 1;
+ static const int kInterceptorArgsThisIndex = 2;
+ static const int kInterceptorArgsHolderIndex = 3;
+ static const int kInterceptorArgsLength = 4;
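+  // A sketch of the resulting stack layout, derived from the indices above:
+  //   args[kInterceptorArgsNameIndex]:   the property name
+  //   args[kInterceptorArgsInfoIndex]:   the interceptor info
+  //   args[kInterceptorArgsThisIndex]:   the receiver ("this")
+  //   args[kInterceptorArgsHolderIndex]: the holder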
+
+ protected:
+ virtual Register FrontendHeader(Register object_reg, Handle<Name> name,
+ Label* miss);
+
+ virtual void FrontendFooter(Handle<Name> name, Label* miss);
+
+ private:
+ Handle<Code> CompileLoadNonexistent(Handle<Name> name);
+ void GenerateLoadConstant(Handle<Object> value);
+ void GenerateLoadCallback(Register reg,
+ Handle<ExecutableAccessorInfo> callback);
+ void GenerateLoadCallback(const CallOptimization& call_optimization,
+ Handle<Map> receiver_map);
+ void GenerateLoadInterceptor(Register holder_reg);
+ void GenerateLoadInterceptorWithFollowup(LookupIterator* it,
+ Register holder_reg);
+ void GenerateLoadPostInterceptor(LookupIterator* it, Register reg);
+
+ // Generates prototype loading code that uses the objects from the
+ // context we were in when this function was called. If the context
+ // has changed, a jump to miss is performed. This ties the generated
+ // code to a particular context and so must not be used in cases
+ // where the generated code is not allowed to have references to
+ // objects from a context.
+ static void GenerateDirectLoadGlobalFunctionPrototype(MacroAssembler* masm,
+ int index,
+ Register prototype,
+ Label* miss);
+
+
+ Register scratch4() { return registers_[5]; }
+};
+
+
+class NamedStoreHandlerCompiler : public PropertyHandlerCompiler {
+ public:
+ explicit NamedStoreHandlerCompiler(Isolate* isolate, Handle<HeapType> type,
+ Handle<JSObject> holder)
+ : PropertyHandlerCompiler(isolate, Code::STORE_IC, type, holder,
+ kCacheOnReceiver) {}
+
+ virtual ~NamedStoreHandlerCompiler() {}
+
+ Handle<Code> CompileStoreTransition(Handle<Map> transition,
+ Handle<Name> name);
+ Handle<Code> CompileStoreField(LookupIterator* it);
+ Handle<Code> CompileStoreCallback(Handle<JSObject> object, Handle<Name> name,
+ Handle<ExecutableAccessorInfo> callback);
+ Handle<Code> CompileStoreCallback(Handle<JSObject> object, Handle<Name> name,
+ const CallOptimization& call_optimization);
+ Handle<Code> CompileStoreViaSetter(Handle<JSObject> object, Handle<Name> name,
+ Handle<JSFunction> setter);
+ Handle<Code> CompileStoreInterceptor(Handle<Name> name);
+
+ static void GenerateStoreViaSetter(MacroAssembler* masm,
+ Handle<HeapType> type, Register receiver,
+ Handle<JSFunction> setter);
+
+ static void GenerateStoreViaSetterForDeopt(MacroAssembler* masm) {
+ GenerateStoreViaSetter(masm, Handle<HeapType>::null(), no_reg,
+ Handle<JSFunction>());
+ }
+
+ protected:
+ virtual Register FrontendHeader(Register object_reg, Handle<Name> name,
+ Label* miss);
+
+ virtual void FrontendFooter(Handle<Name> name, Label* miss);
+ void GenerateRestoreName(Label* label, Handle<Name> name);
+
+ private:
+ void GenerateStoreTransition(Handle<Map> transition, Handle<Name> name,
+ Register receiver_reg, Register name_reg,
+ Register value_reg, Register scratch1,
+ Register scratch2, Register scratch3,
+ Label* miss_label, Label* slow);
+
+ void GenerateStoreField(LookupIterator* lookup, Register value_reg,
+ Label* miss_label);
+
+ static Builtins::Name SlowBuiltin(Code::Kind kind) {
+ switch (kind) {
+ case Code::STORE_IC:
+ return Builtins::kStoreIC_Slow;
+ case Code::KEYED_STORE_IC:
+ return Builtins::kKeyedStoreIC_Slow;
+ default:
+ UNREACHABLE();
+ }
+ return Builtins::kStoreIC_Slow;
+ }
+
+ static Register value();
+};
+
+
+class ElementHandlerCompiler : public PropertyHandlerCompiler {
+ public:
+ explicit ElementHandlerCompiler(Isolate* isolate)
+ : PropertyHandlerCompiler(isolate, Code::KEYED_LOAD_IC,
+ Handle<HeapType>::null(),
+ Handle<JSObject>::null(), kCacheOnReceiver) {}
+
+ virtual ~ElementHandlerCompiler() {}
+
+ void CompileElementHandlers(MapHandleList* receiver_maps,
+ CodeHandleList* handlers);
+
+ static void GenerateLoadDictionaryElement(MacroAssembler* masm);
+ static void GenerateStoreDictionaryElement(MacroAssembler* masm);
+};
+
+
+// Holds information about possible function call optimizations.
+class CallOptimization BASE_EMBEDDED {
+ public:
+ explicit CallOptimization(Handle<JSFunction> function);
+
+ bool is_constant_call() const { return !constant_function_.is_null(); }
+
+ Handle<JSFunction> constant_function() const {
+ DCHECK(is_constant_call());
+ return constant_function_;
+ }
+
+ bool is_simple_api_call() const { return is_simple_api_call_; }
+
+ Handle<FunctionTemplateInfo> expected_receiver_type() const {
+ DCHECK(is_simple_api_call());
+ return expected_receiver_type_;
+ }
+
+ Handle<CallHandlerInfo> api_call_info() const {
+ DCHECK(is_simple_api_call());
+ return api_call_info_;
+ }
+
+ enum HolderLookup { kHolderNotFound, kHolderIsReceiver, kHolderFound };
+ Handle<JSObject> LookupHolderOfExpectedType(
+ Handle<Map> receiver_map, HolderLookup* holder_lookup) const;
+
+ // Check if the api holder is between the receiver and the holder.
+ bool IsCompatibleReceiver(Handle<Object> receiver,
+ Handle<JSObject> holder) const;
+
+ private:
+ void Initialize(Handle<JSFunction> function);
+
+ // Determines whether the given function can be called using the
+ // fast api call builtin.
+ void AnalyzePossibleApiFunction(Handle<JSFunction> function);
+
+ Handle<JSFunction> constant_function_;
+ bool is_simple_api_call_;
+ Handle<FunctionTemplateInfo> expected_receiver_type_;
+ Handle<CallHandlerInfo> api_call_info_;
+};
+}
+} // namespace v8::internal
+
+#endif // V8_IC_IC_COMPILER_H_
--- /dev/null
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_IC_INL_H_
+#define V8_IC_INL_H_
+
+#include "src/ic/ic.h"
+
+#include "src/compiler.h"
+#include "src/debug.h"
+#include "src/macro-assembler.h"
+#include "src/prototype.h"
+
+namespace v8 {
+namespace internal {
+
+
+Address IC::address() const {
+ // Get the address of the call.
+ Address result = Assembler::target_address_from_return_address(pc());
+
+ Debug* debug = isolate()->debug();
+  // First check if any break points are active; if not, just return the
+  // address of the call.
+ if (!debug->has_break_points()) return result;
+
+  // At least one break point is active; perform an additional test to ensure
+  // that break point locations are updated correctly.
+ if (debug->IsDebugBreak(
+ Assembler::target_address_at(result, raw_constant_pool()))) {
+ // If the call site is a call to debug break then return the address in
+ // the original code instead of the address in the running code. This will
+    // cause the original code to be updated and keep the breakpoint active in
+ // the running code.
+ Code* code = GetCode();
+ Code* original_code = GetOriginalCode();
+ intptr_t delta =
+ original_code->instruction_start() - code->instruction_start();
+ // Return the address in the original code. This is the place where
+ // the call which has been overwritten by the DebugBreakXXX resides
+ // and the place where the inline cache system should look.
+ return result + delta;
+ } else {
+ // No break point here just return the address of the call.
+ return result;
+ }
+}
+
+
+ConstantPoolArray* IC::constant_pool() const {
+ if (!FLAG_enable_ool_constant_pool) {
+ return NULL;
+ } else {
+ Handle<ConstantPoolArray> result = raw_constant_pool_;
+ Debug* debug = isolate()->debug();
+    // First check if any break points are active; if not, just return the
+    // original constant pool.
+ if (!debug->has_break_points()) return *result;
+
+    // At least one break point is active; perform an additional test to
+    // ensure that break point locations are updated correctly.
+ Address target = Assembler::target_address_from_return_address(pc());
+ if (debug->IsDebugBreak(
+ Assembler::target_address_at(target, raw_constant_pool()))) {
+ // If the call site is a call to debug break then we want to return the
+ // constant pool for the original code instead of the breakpointed code.
+ return GetOriginalCode()->constant_pool();
+ }
+ return *result;
+ }
+}
+
+
+ConstantPoolArray* IC::raw_constant_pool() const {
+ if (FLAG_enable_ool_constant_pool) {
+ return *raw_constant_pool_;
+ } else {
+ return NULL;
+ }
+}
+
+
+Code* IC::GetTargetAtAddress(Address address,
+ ConstantPoolArray* constant_pool) {
+ // Get the target address of the IC.
+ Address target = Assembler::target_address_at(address, constant_pool);
+ // Convert target address to the code object. Code::GetCodeFromTargetAddress
+ // is safe for use during GC where the map might be marked.
+ Code* result = Code::GetCodeFromTargetAddress(target);
+ DCHECK(result->is_inline_cache_stub());
+ return result;
+}
+
+
+void IC::SetTargetAtAddress(Address address, Code* target,
+ ConstantPoolArray* constant_pool) {
+ DCHECK(target->is_inline_cache_stub() || target->is_compare_ic_stub());
+ Heap* heap = target->GetHeap();
+ Code* old_target = GetTargetAtAddress(address, constant_pool);
+#ifdef DEBUG
+ // STORE_IC and KEYED_STORE_IC use Code::extra_ic_state() to mark
+  // ICs as strict mode. The strictness of the IC must be preserved.
+ if (old_target->kind() == Code::STORE_IC ||
+ old_target->kind() == Code::KEYED_STORE_IC) {
+ DCHECK(StoreIC::GetStrictMode(old_target->extra_ic_state()) ==
+ StoreIC::GetStrictMode(target->extra_ic_state()));
+ }
+#endif
+ Assembler::set_target_address_at(address, constant_pool,
+ target->instruction_start());
+ if (heap->gc_state() == Heap::MARK_COMPACT) {
+ heap->mark_compact_collector()->RecordCodeTargetPatch(address, target);
+ } else {
+ heap->incremental_marking()->RecordCodeTargetPatch(address, target);
+ }
+ PostPatching(address, target, old_target);
+}
+
+
+void IC::set_target(Code* code) {
+#ifdef VERIFY_HEAP
+ code->VerifyEmbeddedObjectsDependency();
+#endif
+ SetTargetAtAddress(address(), code, constant_pool());
+ target_set_ = true;
+}
+
+
+void LoadIC::set_target(Code* code) {
+ // The contextual mode must be preserved across IC patching.
+ DCHECK(GetContextualMode(code->extra_ic_state()) ==
+ GetContextualMode(target()->extra_ic_state()));
+
+ IC::set_target(code);
+}
+
+
+void StoreIC::set_target(Code* code) {
+ // Strict mode must be preserved across IC patching.
+ DCHECK(GetStrictMode(code->extra_ic_state()) ==
+ GetStrictMode(target()->extra_ic_state()));
+ IC::set_target(code);
+}
+
+
+void KeyedStoreIC::set_target(Code* code) {
+ // Strict mode must be preserved across IC patching.
+ DCHECK(GetStrictMode(code->extra_ic_state()) == strict_mode());
+ IC::set_target(code);
+}
+
+
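+// Reads the Code object currently installed at this IC's call site.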
+Code* IC::raw_target() const {
+ return GetTargetAtAddress(address(), constant_pool());
+}
+
+void IC::UpdateTarget() { target_ = handle(raw_target(), isolate_); }
+
+
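+// Returns the builtin constructor whose instance prototype carries the
+// methods for the given primitive type (Boolean, Number, String or Symbol),
+// or NULL if the type has no root constructor.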
+template <class TypeClass>
+JSFunction* IC::GetRootConstructor(TypeClass* type, Context* native_context) {
+ if (type->Is(TypeClass::Boolean())) {
+ return native_context->boolean_function();
+ } else if (type->Is(TypeClass::Number())) {
+ return native_context->number_function();
+ } else if (type->Is(TypeClass::String())) {
+ return native_context->string_function();
+ } else if (type->Is(TypeClass::Symbol())) {
+ return native_context->symbol_function();
+ } else {
+ return NULL;
+ }
+}
+
+
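+// Picks the map on which a handler for this access should be cached: the
+// receiver map itself when the receiver is the holder, the builtin
+// constructor's prototype map for primitive receivers, and otherwise the map
+// of the receiver's prototype.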
+Handle<Map> IC::GetHandlerCacheHolder(HeapType* type, bool receiver_is_holder,
+ Isolate* isolate, CacheHolderFlag* flag) {
+ Handle<Map> receiver_map = TypeToMap(type, isolate);
+ if (receiver_is_holder) {
+ *flag = kCacheOnReceiver;
+ return receiver_map;
+ }
+ Context* native_context = *isolate->native_context();
+ JSFunction* builtin_ctor = GetRootConstructor(type, native_context);
+ if (builtin_ctor != NULL) {
+ *flag = kCacheOnPrototypeReceiverIsPrimitive;
+ return handle(HeapObject::cast(builtin_ctor->instance_prototype())->map());
+ }
+ *flag = receiver_map->is_dictionary_map()
+ ? kCacheOnPrototypeReceiverIsDictionary
+ : kCacheOnPrototype;
+ // Callers must ensure that the prototype is non-null.
+ return handle(JSObject::cast(receiver_map->prototype())->map());
+}
+
+
+Handle<Map> IC::GetICCacheHolder(HeapType* type, Isolate* isolate,
+ CacheHolderFlag* flag) {
+ Context* native_context = *isolate->native_context();
+ JSFunction* builtin_ctor = GetRootConstructor(type, native_context);
+ if (builtin_ctor != NULL) {
+ *flag = kCacheOnPrototype;
+ return handle(builtin_ctor->initial_map());
+ }
+ *flag = kCacheOnReceiver;
+ return TypeToMap(type, isolate);
+}
+
+
+IC::State CallIC::FeedbackToState(Handle<FixedArray> vector,
+ Handle<Smi> slot) const {
+ IC::State state = UNINITIALIZED;
+ Object* feedback = vector->get(slot->value());
+
+ if (feedback == *TypeFeedbackInfo::MegamorphicSentinel(isolate())) {
+ state = GENERIC;
+ } else if (feedback->IsAllocationSite() || feedback->IsJSFunction()) {
+ state = MONOMORPHIC;
+ } else {
+ CHECK(feedback == *TypeFeedbackInfo::UninitializedSentinel(isolate()));
+ }
+
+ return state;
+}
+}
+}  // namespace v8::internal
+
+#endif  // V8_IC_INL_H_
--- /dev/null
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/accessors.h"
+#include "src/api.h"
+#include "src/arguments.h"
+#include "src/codegen.h"
+#include "src/conversions.h"
+#include "src/execution.h"
+#include "src/ic/ic-inl.h"
+#include "src/ic/ic-compiler.h"
+#include "src/ic/stub-cache.h"
+#include "src/prototype.h"
+#include "src/runtime.h"
+
+namespace v8 {
+namespace internal {
+
+char IC::TransitionMarkFromState(IC::State state) {
+ switch (state) {
+ case UNINITIALIZED:
+ return '0';
+ case PREMONOMORPHIC:
+ return '.';
+ case MONOMORPHIC:
+ return '1';
+ case PROTOTYPE_FAILURE:
+ return '^';
+ case POLYMORPHIC:
+ return 'P';
+ case MEGAMORPHIC:
+ return 'N';
+ case GENERIC:
+ return 'G';
+
+ // We never see the debugger states here, because the state is
+ // computed from the original code - not the patched code. Let
+ // these cases fall through to the unreachable code below.
+ case DEBUG_STUB:
+ break;
+ // Type-vector-based ICs resolve state to one of the above.
+ case DEFAULT:
+ break;
+ }
+ UNREACHABLE();
+ return 0;
+}
+
+
+const char* GetTransitionMarkModifier(KeyedAccessStoreMode mode) {
+ if (mode == STORE_NO_TRANSITION_HANDLE_COW) return ".COW";
+ if (mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS) {
+ return ".IGNORE_OOB";
+ }
+ if (IsGrowStoreMode(mode)) return ".GROW";
+ return "";
+}
+
+
+#ifdef DEBUG
+
+#define TRACE_GENERIC_IC(isolate, type, reason) \
+ do { \
+ if (FLAG_trace_ic) { \
+ PrintF("[%s patching generic stub in ", type); \
+ JavaScriptFrame::PrintTop(isolate, stdout, false, true); \
+ PrintF(" (%s)]\n", reason); \
+ } \
+ } while (false)
+
+#else
+
+#define TRACE_GENERIC_IC(isolate, type, reason)
+
+#endif // DEBUG
+
+
+void IC::TraceIC(const char* type, Handle<Object> name) {
+ if (FLAG_trace_ic) {
+ Code* new_target = raw_target();
+ State new_state = new_target->ic_state();
+ TraceIC(type, name, state(), new_state);
+ }
+}
+
+
+void IC::TraceIC(const char* type, Handle<Object> name, State old_state,
+ State new_state) {
+ if (FLAG_trace_ic) {
+ Code* new_target = raw_target();
+ PrintF("[%s%s in ", new_target->is_keyed_stub() ? "Keyed" : "", type);
+
+ // TODO(jkummerow): Add support for "apply". The logic is roughly:
+ // marker = [fp_ + kMarkerOffset];
+ // if marker is smi and marker.value == INTERNAL and
+ // the frame's code == builtin(Builtins::kFunctionApply):
+ // then print "apply from" and advance one frame
+
+ Object* maybe_function =
+ Memory::Object_at(fp_ + JavaScriptFrameConstants::kFunctionOffset);
+ if (maybe_function->IsJSFunction()) {
+ JSFunction* function = JSFunction::cast(maybe_function);
+ JavaScriptFrame::PrintFunctionAndOffset(function, function->code(), pc(),
+ stdout, true);
+ }
+
+ ExtraICState extra_state = new_target->extra_ic_state();
+ const char* modifier = "";
+ if (new_target->kind() == Code::KEYED_STORE_IC) {
+ modifier = GetTransitionMarkModifier(
+ KeyedStoreIC::GetKeyedAccessStoreMode(extra_state));
+ }
+ PrintF(" (%c->%c%s)", TransitionMarkFromState(old_state),
+ TransitionMarkFromState(new_state), modifier);
+#ifdef OBJECT_PRINT
+ OFStream os(stdout);
+ name->Print(os);
+#else
+ name->ShortPrint(stdout);
+#endif
+ PrintF("]\n");
+ }
+}
+
+#define TRACE_IC(type, name) TraceIC(type, name)
+#define TRACE_VECTOR_IC(type, name, old_state, new_state) \
+ TraceIC(type, name, old_state, new_state)
+
+IC::IC(FrameDepth depth, Isolate* isolate)
+ : isolate_(isolate), target_set_(false), target_maps_set_(false) {
+ // To improve the performance of the (much used) IC code, we unfold a few
+ // levels of the stack frame iteration code. This yields a ~35% speedup when
+ // running DeltaBlue and a ~25% speedup of gbemu with the '--nouse-ic' flag.
+ const Address entry = Isolate::c_entry_fp(isolate->thread_local_top());
+ Address constant_pool = NULL;
+ if (FLAG_enable_ool_constant_pool) {
+ constant_pool =
+ Memory::Address_at(entry + ExitFrameConstants::kConstantPoolOffset);
+ }
+ Address* pc_address =
+ reinterpret_cast<Address*>(entry + ExitFrameConstants::kCallerPCOffset);
+ Address fp = Memory::Address_at(entry + ExitFrameConstants::kCallerFPOffset);
+ // If there's another JavaScript frame on the stack or a
+ // StubFailureTrampoline, we need to look one frame further down the stack to
+ // find the frame pointer and the return address stack slot.
+ if (depth == EXTRA_CALL_FRAME) {
+ if (FLAG_enable_ool_constant_pool) {
+ constant_pool =
+ Memory::Address_at(fp + StandardFrameConstants::kConstantPoolOffset);
+ }
+ const int kCallerPCOffset = StandardFrameConstants::kCallerPCOffset;
+ pc_address = reinterpret_cast<Address*>(fp + kCallerPCOffset);
+ fp = Memory::Address_at(fp + StandardFrameConstants::kCallerFPOffset);
+ }
+#ifdef DEBUG
+ StackFrameIterator it(isolate);
+ for (int i = 0; i < depth + 1; i++) it.Advance();
+ StackFrame* frame = it.frame();
+ DCHECK(fp == frame->fp() && pc_address == frame->pc_address());
+#endif
+ fp_ = fp;
+ if (FLAG_enable_ool_constant_pool) {
+ raw_constant_pool_ = handle(
+ ConstantPoolArray::cast(reinterpret_cast<Object*>(constant_pool)),
+ isolate);
+ }
+ pc_address_ = StackFrame::ResolveReturnAddressLocation(pc_address);
+ target_ = handle(raw_target(), isolate);
+ state_ = target_->ic_state();
+ kind_ = target_->kind();
+ extra_ic_state_ = target_->extra_ic_state();
+}
+
+
+SharedFunctionInfo* IC::GetSharedFunctionInfo() const {
+ // Compute the JavaScript frame for the frame pointer of this IC
+ // structure. We need this to be able to find the function
+ // corresponding to the frame.
+ StackFrameIterator it(isolate());
+ while (it.frame()->fp() != this->fp()) it.Advance();
+ JavaScriptFrame* frame = JavaScriptFrame::cast(it.frame());
+ // Find the function on the stack and both the active code for the
+ // function and the original code.
+ JSFunction* function = frame->function();
+ return function->shared();
+}
+
+
+Code* IC::GetCode() const {
+ HandleScope scope(isolate());
+ Handle<SharedFunctionInfo> shared(GetSharedFunctionInfo(), isolate());
+ Code* code = shared->code();
+ return code;
+}
+
+
+Code* IC::GetOriginalCode() const {
+ HandleScope scope(isolate());
+ Handle<SharedFunctionInfo> shared(GetSharedFunctionInfo(), isolate());
+ DCHECK(Debug::HasDebugInfo(shared));
+ Code* original_code = Debug::GetDebugInfo(shared)->original_code();
+ DCHECK(original_code->IsCode());
+ return original_code;
+}
+
+
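+// Advances the iterator to the entity a load should observe. Stops at
+// proxies, at interceptors that define a getter, at access checks that
+// cannot be handled inline (anything but an accessible global proxy), and at
+// the first property that is actually present.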
+static void LookupForRead(LookupIterator* it) {
+ for (; it->IsFound(); it->Next()) {
+ switch (it->state()) {
+ case LookupIterator::NOT_FOUND:
+ case LookupIterator::TRANSITION:
+ UNREACHABLE();
+ case LookupIterator::JSPROXY:
+ return;
+ case LookupIterator::INTERCEPTOR: {
+ // If there is a getter, return; otherwise loop to perform the lookup.
+ Handle<JSObject> holder = it->GetHolder<JSObject>();
+ if (!holder->GetNamedInterceptor()->getter()->IsUndefined()) {
+ return;
+ }
+ break;
+ }
+ case LookupIterator::ACCESS_CHECK:
+ // PropertyHandlerCompiler::CheckPrototypes() knows how to emit
+ // access checks for global proxies.
+ if (it->GetHolder<JSObject>()->IsJSGlobalProxy() &&
+ it->HasAccess(v8::ACCESS_GET)) {
+ break;
+ }
+ return;
+ case LookupIterator::PROPERTY:
+ if (it->HasProperty()) return; // Yay!
+ break;
+ }
+ }
+}
+
+
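+// Returns true when the miss appears to be caused by a prototype chain
+// change (or a deprecated/elements-kind map transition) rather than a
+// genuinely new receiver shape, so the IC can be repatched in place instead
+// of widening its state. Also evicts the stale monomorphic handler from the
+// code cache.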
+bool IC::TryRemoveInvalidPrototypeDependentStub(Handle<Object> receiver,
+ Handle<String> name) {
+ if (!IsNameCompatibleWithPrototypeFailure(name)) return false;
+ Handle<Map> receiver_map = TypeToMap(*receiver_type(), isolate());
+ maybe_handler_ = target()->FindHandlerForMap(*receiver_map);
+
+ // The current map wasn't handled yet. There's no reason to stay monomorphic,
+ // *unless* we're moving from a deprecated map to its replacement, or
+ // to a more general elements kind.
+ // TODO(verwaest): Check if the current map is actually what the old map
+ // would transition to.
+ if (maybe_handler_.is_null()) {
+ if (!receiver_map->IsJSObjectMap()) return false;
+ Map* first_map = FirstTargetMap();
+ if (first_map == NULL) return false;
+ Handle<Map> old_map(first_map);
+ if (old_map->is_deprecated()) return true;
+ if (IsMoreGeneralElementsKindTransition(old_map->elements_kind(),
+ receiver_map->elements_kind())) {
+ return true;
+ }
+ return false;
+ }
+
+ CacheHolderFlag flag;
+ Handle<Map> ic_holder_map(
+ GetICCacheHolder(*receiver_type(), isolate(), &flag));
+
+ DCHECK(flag != kCacheOnReceiver || receiver->IsJSObject());
+ DCHECK(flag != kCacheOnPrototype || !receiver->IsJSReceiver());
+ DCHECK(flag != kCacheOnPrototypeReceiverIsDictionary);
+
+ if (state() == MONOMORPHIC) {
+ int index = ic_holder_map->IndexInCodeCache(*name, *target());
+ if (index >= 0) {
+ ic_holder_map->RemoveFromCodeCache(*name, *target(), index);
+ }
+ }
+
+ if (receiver->IsGlobalObject()) {
+ Handle<GlobalObject> global = Handle<GlobalObject>::cast(receiver);
+ LookupIterator it(global, name, LookupIterator::CHECK_PROPERTY);
+ if (!it.IsFound() || !it.HasProperty()) return false;
+ Handle<PropertyCell> cell = it.GetPropertyCell();
+ return cell->type()->IsConstant();
+ }
+
+ return true;
+}
+
+
+bool IC::IsNameCompatibleWithPrototypeFailure(Handle<Object> name) {
+ if (target()->is_keyed_stub()) {
+ // Determine whether the failure is due to a name failure.
+ if (!name->IsName()) return false;
+ Name* stub_name = target()->FindFirstName();
+ if (*name != stub_name) return false;
+ }
+
+ return true;
+}
+
+
+void IC::UpdateState(Handle<Object> receiver, Handle<Object> name) {
+ update_receiver_type(receiver);
+ if (!name->IsString()) return;
+ if (state() != MONOMORPHIC && state() != POLYMORPHIC) return;
+ if (receiver->IsUndefined() || receiver->IsNull()) return;
+
+ // Remove the target from the code cache if it became invalid
+ // because of changes in the prototype chain to avoid hitting it
+ // again.
+ if (TryRemoveInvalidPrototypeDependentStub(receiver,
+ Handle<String>::cast(name))) {
+ MarkPrototypeFailure(name);
+ return;
+ }
+
+ // The builtins object is special. It only changes when JavaScript
+ // builtins are loaded lazily. It is important to keep inline
+ // caches for the builtins object monomorphic. Therefore, if we get
+ // an inline cache miss for the builtins object after lazily loading
+ // JavaScript builtins, we return uninitialized as the state to
+ // force the inline cache back to monomorphic state.
+ if (receiver->IsJSBuiltinsObject()) state_ = UNINITIALIZED;
+}
+
+
+MaybeHandle<Object> IC::TypeError(const char* type, Handle<Object> object,
+ Handle<Object> key) {
+ HandleScope scope(isolate());
+ Handle<Object> args[2] = {key, object};
+ Handle<Object> error =
+ isolate()->factory()->NewTypeError(type, HandleVector(args, 2));
+ return isolate()->Throw<Object>(error);
+}
+
+
+MaybeHandle<Object> IC::ReferenceError(const char* type, Handle<Name> name) {
+ HandleScope scope(isolate());
+ Handle<Object> error =
+ isolate()->factory()->NewReferenceError(type, HandleVector(&name, 1));
+ return isolate()->Throw<Object>(error);
+}
+
+
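+// Computes how a transition from old_state to new_state changes the host
+// function's counters of polymorphic (including monomorphic) and generic
+// (including megamorphic) ICs.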
+static void ComputeTypeInfoCountDelta(IC::State old_state, IC::State new_state,
+ int* polymorphic_delta,
+ int* generic_delta) {
+ switch (old_state) {
+ case UNINITIALIZED:
+ case PREMONOMORPHIC:
+ if (new_state == UNINITIALIZED || new_state == PREMONOMORPHIC) break;
+ if (new_state == MONOMORPHIC || new_state == POLYMORPHIC) {
+ *polymorphic_delta = 1;
+ } else if (new_state == MEGAMORPHIC || new_state == GENERIC) {
+ *generic_delta = 1;
+ }
+ break;
+ case MONOMORPHIC:
+ case POLYMORPHIC:
+ if (new_state == MONOMORPHIC || new_state == POLYMORPHIC) break;
+ *polymorphic_delta = -1;
+ if (new_state == MEGAMORPHIC || new_state == GENERIC) {
+ *generic_delta = 1;
+ }
+ break;
+ case MEGAMORPHIC:
+ case GENERIC:
+ if (new_state == MEGAMORPHIC || new_state == GENERIC) break;
+ *generic_delta = -1;
+ if (new_state == MONOMORPHIC || new_state == POLYMORPHIC) {
+ *polymorphic_delta = 1;
+ }
+ break;
+ case PROTOTYPE_FAILURE:
+ case DEBUG_STUB:
+ case DEFAULT:
+ UNREACHABLE();
+ }
+}
+
+
+void IC::OnTypeFeedbackChanged(Isolate* isolate, Address address,
+ State old_state, State new_state,
+ bool target_remains_ic_stub) {
+ Code* host =
+ isolate->inner_pointer_to_code_cache()->GetCacheEntry(address)->code;
+ if (host->kind() != Code::FUNCTION) return;
+
+ if (FLAG_type_info_threshold > 0 && target_remains_ic_stub &&
+ // Not all Code objects have TypeFeedbackInfo.
+ host->type_feedback_info()->IsTypeFeedbackInfo()) {
+ int polymorphic_delta = 0; // "Polymorphic" here includes monomorphic.
+ int generic_delta = 0; // "Generic" here includes megamorphic.
+ ComputeTypeInfoCountDelta(old_state, new_state, &polymorphic_delta,
+ &generic_delta);
+ TypeFeedbackInfo* info = TypeFeedbackInfo::cast(host->type_feedback_info());
+ info->change_ic_with_type_info_count(polymorphic_delta);
+ info->change_ic_generic_count(generic_delta);
+ }
+ if (host->type_feedback_info()->IsTypeFeedbackInfo()) {
+ TypeFeedbackInfo* info = TypeFeedbackInfo::cast(host->type_feedback_info());
+ info->change_own_type_change_checksum();
+ }
+ host->set_profiler_ticks(0);
+ isolate->runtime_profiler()->NotifyICChanged();
+ // TODO(2029): When an optimized function is patched, it would
+ // be nice to propagate the corresponding type information to its
+ // unoptimized version for the benefit of later inlining.
+}
+
+
+void IC::PostPatching(Address address, Code* target, Code* old_target) {
+  // Type-vector-based ICs update these statistics at a different time because
+  // they don't always patch on state change.
+ if (target->kind() == Code::CALL_IC) return;
+
+ Isolate* isolate = target->GetHeap()->isolate();
+ State old_state = UNINITIALIZED;
+ State new_state = UNINITIALIZED;
+ bool target_remains_ic_stub = false;
+ if (old_target->is_inline_cache_stub() && target->is_inline_cache_stub()) {
+ old_state = old_target->ic_state();
+ new_state = target->ic_state();
+ target_remains_ic_stub = true;
+ }
+
+ OnTypeFeedbackChanged(isolate, address, old_state, new_state,
+ target_remains_ic_stub);
+}
+
+
+void IC::RegisterWeakMapDependency(Handle<Code> stub) {
+ if (FLAG_collect_maps && FLAG_weak_embedded_maps_in_ic &&
+ stub->CanBeWeakStub()) {
+ DCHECK(!stub->is_weak_stub());
+ MapHandleList maps;
+ stub->FindAllMaps(&maps);
+ if (maps.length() == 1 && stub->IsWeakObjectInIC(*maps.at(0))) {
+ Map::AddDependentIC(maps.at(0), stub);
+ stub->mark_as_weak_stub();
+ if (FLAG_enable_ool_constant_pool) {
+ stub->constant_pool()->set_weak_object_state(
+ ConstantPoolArray::WEAK_OBJECTS_IN_IC);
+ }
+ }
+ }
+}
+
+
+void IC::InvalidateMaps(Code* stub) {
+ DCHECK(stub->is_weak_stub());
+ stub->mark_as_invalidated_weak_stub();
+ Isolate* isolate = stub->GetIsolate();
+ Heap* heap = isolate->heap();
+ Object* undefined = heap->undefined_value();
+ int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
+ for (RelocIterator it(stub, mode_mask); !it.done(); it.next()) {
+ RelocInfo::Mode mode = it.rinfo()->rmode();
+ if (mode == RelocInfo::EMBEDDED_OBJECT &&
+ it.rinfo()->target_object()->IsMap()) {
+ it.rinfo()->set_target_object(undefined, SKIP_WRITE_BARRIER);
+ }
+ }
+ CpuFeatures::FlushICache(stub->instruction_start(), stub->instruction_size());
+}
+
+
+void IC::Clear(Isolate* isolate, Address address,
+ ConstantPoolArray* constant_pool) {
+ Code* target = GetTargetAtAddress(address, constant_pool);
+
+  // Don't clear a debug break inline cache, as doing so would remove the
+  // break point.
+ if (target->is_debug_stub()) return;
+
+ switch (target->kind()) {
+ case Code::LOAD_IC:
+ return LoadIC::Clear(isolate, address, target, constant_pool);
+ case Code::KEYED_LOAD_IC:
+ return KeyedLoadIC::Clear(isolate, address, target, constant_pool);
+ case Code::STORE_IC:
+ return StoreIC::Clear(isolate, address, target, constant_pool);
+ case Code::KEYED_STORE_IC:
+ return KeyedStoreIC::Clear(isolate, address, target, constant_pool);
+ case Code::CALL_IC:
+ return CallIC::Clear(isolate, address, target, constant_pool);
+ case Code::COMPARE_IC:
+ return CompareIC::Clear(isolate, address, target, constant_pool);
+ case Code::COMPARE_NIL_IC:
+ return CompareNilIC::Clear(address, target, constant_pool);
+ case Code::BINARY_OP_IC:
+ case Code::TO_BOOLEAN_IC:
+ // Clearing these is tricky and does not
+ // make any performance difference.
+ return;
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void KeyedLoadIC::Clear(Isolate* isolate, Address address, Code* target,
+ ConstantPoolArray* constant_pool) {
+ if (IsCleared(target)) return;
+ // Make sure to also clear the map used in inline fast cases. If we
+ // do not clear these maps, cached code can keep objects alive
+ // through the embedded maps.
+ SetTargetAtAddress(address, *pre_monomorphic_stub(isolate), constant_pool);
+}
+
+
+void CallIC::Clear(Isolate* isolate, Address address, Code* target,
+ ConstantPoolArray* constant_pool) {
+ // Currently, CallIC doesn't have state changes.
+}
+
+
+void LoadIC::Clear(Isolate* isolate, Address address, Code* target,
+ ConstantPoolArray* constant_pool) {
+ if (IsCleared(target)) return;
+ Code* code = PropertyICCompiler::FindPreMonomorphic(isolate, Code::LOAD_IC,
+ target->extra_ic_state());
+ SetTargetAtAddress(address, code, constant_pool);
+}
+
+
+void StoreIC::Clear(Isolate* isolate, Address address, Code* target,
+ ConstantPoolArray* constant_pool) {
+ if (IsCleared(target)) return;
+ Code* code = PropertyICCompiler::FindPreMonomorphic(isolate, Code::STORE_IC,
+ target->extra_ic_state());
+ SetTargetAtAddress(address, code, constant_pool);
+}
+
+
+void KeyedStoreIC::Clear(Isolate* isolate, Address address, Code* target,
+ ConstantPoolArray* constant_pool) {
+ if (IsCleared(target)) return;
+ SetTargetAtAddress(
+ address, *pre_monomorphic_stub(
+ isolate, StoreIC::GetStrictMode(target->extra_ic_state())),
+ constant_pool);
+}
+
+
+void CompareIC::Clear(Isolate* isolate, Address address, Code* target,
+ ConstantPoolArray* constant_pool) {
+ DCHECK(CodeStub::GetMajorKey(target) == CodeStub::CompareIC);
+ CompareIC::State handler_state;
+ Token::Value op;
+ ICCompareStub::DecodeKey(target->stub_key(), NULL, NULL, &handler_state, &op);
+ // Only clear CompareICs that can retain objects.
+ if (handler_state != KNOWN_OBJECT) return;
+ SetTargetAtAddress(address, GetRawUninitialized(isolate, op), constant_pool);
+ PatchInlinedSmiCode(address, DISABLE_INLINED_SMI_CHECK);
+}
+
+
+// static
+Handle<Code> KeyedLoadIC::generic_stub(Isolate* isolate) {
+ if (FLAG_compiled_keyed_generic_loads) {
+ return KeyedLoadGenericStub(isolate).GetCode();
+ } else {
+ return isolate->builtins()->KeyedLoadIC_Generic();
+ }
+}
+
+
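+// If the object is a JSObject whose map is deprecated, migrate the instance
+// to the map's replacement. Returns whether a migration was performed.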
+static bool MigrateDeprecated(Handle<Object> object) {
+  if (!object->IsJSObject()) return false;
+  Handle<JSObject> receiver = Handle<JSObject>::cast(object);
+  if (!receiver->map()->is_deprecated()) return false;
+  JSObject::MigrateInstance(receiver);
+  return true;
+}
+
+
+MaybeHandle<Object> LoadIC::Load(Handle<Object> object, Handle<Name> name) {
+ // If the object is undefined or null it's illegal to try to get any
+ // of its properties; throw a TypeError in that case.
+ if (object->IsUndefined() || object->IsNull()) {
+ return TypeError("non_object_property_load", object, name);
+ }
+
+ // Check if the name is trivially convertible to an index and get
+ // the element or char if so.
+ uint32_t index;
+ if (kind() == Code::KEYED_LOAD_IC && name->AsArrayIndex(&index)) {
+ // Rewrite to the generic keyed load stub.
+ if (FLAG_use_ic) {
+ set_target(*KeyedLoadIC::generic_stub(isolate()));
+ TRACE_IC("LoadIC", name);
+ TRACE_GENERIC_IC(isolate(), "LoadIC", "name as array index");
+ }
+ Handle<Object> result;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate(), result,
+ Runtime::GetElementOrCharAt(isolate(), object, index), Object);
+ return result;
+ }
+
+ bool use_ic = MigrateDeprecated(object) ? false : FLAG_use_ic;
+
+ // Named lookup in the object.
+ LookupIterator it(object, name);
+ LookupForRead(&it);
+
+ if (it.IsFound() || !IsUndeclaredGlobal(object)) {
+ // Update inline cache and stub cache.
+ if (use_ic) UpdateCaches(&it);
+
+ // Get the property.
+ Handle<Object> result;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate(), result, Object::GetProperty(&it),
+ Object);
+ if (it.IsFound()) {
+ return result;
+ } else if (!IsUndeclaredGlobal(object)) {
+ LOG(isolate(), SuspectReadEvent(*name, *object));
+ return result;
+ }
+ }
+ return ReferenceError("not_defined", name);
+}
+
+
+static bool AddOneReceiverMapIfMissing(MapHandleList* receiver_maps,
+ Handle<Map> new_receiver_map) {
+ DCHECK(!new_receiver_map.is_null());
+ for (int current = 0; current < receiver_maps->length(); ++current) {
+ if (!receiver_maps->at(current).is_null() &&
+ receiver_maps->at(current).is_identical_to(new_receiver_map)) {
+ return false;
+ }
+ }
+ receiver_maps->Add(new_receiver_map);
+ return true;
+}
+
+
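+// Tries to fold the new handler into the current MONOMORPHIC or POLYMORPHIC
+// target, reusing the slot of a deprecated or superseded type where
+// possible. Returns false when the call site should go megamorphic instead
+// (e.g. keyed stubs, too many types, or no handlers to merge with).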
+bool IC::UpdatePolymorphicIC(Handle<Name> name, Handle<Code> code) {
+ if (!code->is_handler()) return false;
+ if (target()->is_keyed_stub() && state() != PROTOTYPE_FAILURE) return false;
+ Handle<HeapType> type = receiver_type();
+ TypeHandleList types;
+ CodeHandleList handlers;
+
+ TargetTypes(&types);
+ int number_of_types = types.length();
+ int deprecated_types = 0;
+ int handler_to_overwrite = -1;
+
+ for (int i = 0; i < number_of_types; i++) {
+ Handle<HeapType> current_type = types.at(i);
+ if (current_type->IsClass() &&
+ current_type->AsClass()->Map()->is_deprecated()) {
+ // Filter out deprecated maps to ensure their instances get migrated.
+ ++deprecated_types;
+ } else if (type->NowIs(current_type)) {
+      // If the receiver type is already in the polymorphic IC, this indicates
+      // there was a prototype chain failure. In that case, just overwrite the
+      // handler.
+ handler_to_overwrite = i;
+ } else if (handler_to_overwrite == -1 && current_type->IsClass() &&
+ type->IsClass() &&
+ IsTransitionOfMonomorphicTarget(*current_type->AsClass()->Map(),
+ *type->AsClass()->Map())) {
+ handler_to_overwrite = i;
+ }
+ }
+
+ int number_of_valid_types =
+ number_of_types - deprecated_types - (handler_to_overwrite != -1);
+
+ if (number_of_valid_types >= 4) return false;
+ if (number_of_types == 0) return false;
+ if (!target()->FindHandlers(&handlers, types.length())) return false;
+
+ number_of_valid_types++;
+ if (number_of_valid_types > 1 && target()->is_keyed_stub()) return false;
+ Handle<Code> ic;
+ if (number_of_valid_types == 1) {
+ ic = PropertyICCompiler::ComputeMonomorphic(kind(), name, type, code,
+ extra_ic_state());
+ } else {
+ if (handler_to_overwrite >= 0) {
+ handlers.Set(handler_to_overwrite, code);
+ if (!type->NowIs(types.at(handler_to_overwrite))) {
+ types.Set(handler_to_overwrite, type);
+ }
+ } else {
+ types.Add(type);
+ handlers.Add(code);
+ }
+ ic = PropertyICCompiler::ComputePolymorphic(kind(), &types, &handlers,
+ number_of_valid_types, name,
+ extra_ic_state());
+ }
+ set_target(*ic);
+ return true;
+}
+
+
+Handle<HeapType> IC::CurrentTypeOf(Handle<Object> object, Isolate* isolate) {
+ return object->IsJSGlobalObject()
+ ? HeapType::Constant(Handle<JSGlobalObject>::cast(object), isolate)
+ : HeapType::NowOf(object, isolate);
+}
+
+
+Handle<Map> IC::TypeToMap(HeapType* type, Isolate* isolate) {
+ if (type->Is(HeapType::Number()))
+ return isolate->factory()->heap_number_map();
+ if (type->Is(HeapType::Boolean())) return isolate->factory()->boolean_map();
+ if (type->IsConstant()) {
+ return handle(
+ Handle<JSGlobalObject>::cast(type->AsConstant()->Value())->map());
+ }
+ DCHECK(type->IsClass());
+ return type->AsClass()->Map();
+}
+
+
+template <class T>
+typename T::TypeHandle IC::MapToType(Handle<Map> map,
+ typename T::Region* region) {
+ if (map->instance_type() == HEAP_NUMBER_TYPE) {
+ return T::Number(region);
+ } else if (map->instance_type() == ODDBALL_TYPE) {
+ // The only oddballs that can be recorded in ICs are booleans.
+ return T::Boolean(region);
+ } else {
+ return T::Class(map, region);
+ }
+}
+
+
+template Type* IC::MapToType<Type>(Handle<Map> map, Zone* zone);
+
+
+template Handle<HeapType> IC::MapToType<HeapType>(Handle<Map> map,
+ Isolate* region);
+
+
+void IC::UpdateMonomorphicIC(Handle<Code> handler, Handle<Name> name) {
+ DCHECK(handler->is_handler());
+ Handle<Code> ic = PropertyICCompiler::ComputeMonomorphic(
+ kind(), name, receiver_type(), handler, extra_ic_state());
+ set_target(*ic);
+}
+
+
+void IC::CopyICToMegamorphicCache(Handle<Name> name) {
+ TypeHandleList types;
+ CodeHandleList handlers;
+ TargetTypes(&types);
+ if (!target()->FindHandlers(&handlers, types.length())) return;
+ for (int i = 0; i < types.length(); i++) {
+ UpdateMegamorphicCache(*types.at(i), *name, *handlers.at(i));
+ }
+}
+
+
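+// Returns true if target_map is the map that source_map transitions to for a
+// more general elements kind, i.e. patching from source to target stays
+// within one transition tree.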
+bool IC::IsTransitionOfMonomorphicTarget(Map* source_map, Map* target_map) {
+ if (source_map == NULL) return true;
+ if (target_map == NULL) return false;
+ ElementsKind target_elements_kind = target_map->elements_kind();
+ bool more_general_transition = IsMoreGeneralElementsKindTransition(
+ source_map->elements_kind(), target_elements_kind);
+ Map* transitioned_map =
+ more_general_transition
+ ? source_map->LookupElementsTransitionMap(target_elements_kind)
+ : NULL;
+
+ return transitioned_map == target_map;
+}
+
+
+void IC::PatchCache(Handle<Name> name, Handle<Code> code) {
+ switch (state()) {
+ case UNINITIALIZED:
+ case PREMONOMORPHIC:
+ UpdateMonomorphicIC(code, name);
+ break;
+ case PROTOTYPE_FAILURE:
+ case MONOMORPHIC:
+ case POLYMORPHIC:
+ if (!target()->is_keyed_stub() || state() == PROTOTYPE_FAILURE) {
+ if (UpdatePolymorphicIC(name, code)) break;
+ CopyICToMegamorphicCache(name);
+ }
+ set_target(*megamorphic_stub());
+ // Fall through.
+ case MEGAMORPHIC:
+ UpdateMegamorphicCache(*receiver_type(), *name, *code);
+ break;
+ case DEBUG_STUB:
+ break;
+ case DEFAULT:
+ case GENERIC:
+ UNREACHABLE();
+ break;
+ }
+}
+
+
+Handle<Code> LoadIC::initialize_stub(Isolate* isolate,
+ ExtraICState extra_state) {
+ return PropertyICCompiler::ComputeLoad(isolate, UNINITIALIZED, extra_state);
+}
+
+
+Handle<Code> LoadIC::megamorphic_stub() {
+ if (kind() == Code::LOAD_IC) {
+ return PropertyICCompiler::ComputeLoad(isolate(), MEGAMORPHIC,
+ extra_ic_state());
+ } else {
+ DCHECK_EQ(Code::KEYED_LOAD_IC, kind());
+ return KeyedLoadIC::generic_stub(isolate());
+ }
+}
+
+
+Handle<Code> LoadIC::pre_monomorphic_stub(Isolate* isolate,
+ ExtraICState extra_state) {
+ return PropertyICCompiler::ComputeLoad(isolate, PREMONOMORPHIC, extra_state);
+}
+
+
+Handle<Code> KeyedLoadIC::pre_monomorphic_stub(Isolate* isolate) {
+ return isolate->builtins()->KeyedLoadIC_PreMonomorphic();
+}
+
+
+Handle<Code> LoadIC::pre_monomorphic_stub() const {
+ if (kind() == Code::LOAD_IC) {
+ return LoadIC::pre_monomorphic_stub(isolate(), extra_ic_state());
+ } else {
+ DCHECK_EQ(Code::KEYED_LOAD_IC, kind());
+ return KeyedLoadIC::pre_monomorphic_stub(isolate());
+ }
+}
+
+
+Handle<Code> LoadIC::SimpleFieldLoad(FieldIndex index) {
+ LoadFieldStub stub(isolate(), index);
+ return stub.GetCode();
+}
+
+
+void LoadIC::UpdateCaches(LookupIterator* lookup) {
+ if (state() == UNINITIALIZED) {
+ // This is the first time we execute this inline cache. Set the target to
+ // the pre monomorphic stub to delay setting the monomorphic state.
+ set_target(*pre_monomorphic_stub());
+ TRACE_IC("LoadIC", lookup->name());
+ return;
+ }
+
+ Handle<Code> code;
+ if (lookup->state() == LookupIterator::JSPROXY ||
+ lookup->state() == LookupIterator::ACCESS_CHECK) {
+ code = slow_stub();
+ } else if (!lookup->IsFound()) {
+ if (kind() == Code::LOAD_IC) {
+ code = NamedLoadHandlerCompiler::ComputeLoadNonexistent(lookup->name(),
+ receiver_type());
+ // TODO(jkummerow/verwaest): Introduce a builtin that handles this case.
+ if (code.is_null()) code = slow_stub();
+ } else {
+ code = slow_stub();
+ }
+ } else {
+ code = ComputeHandler(lookup);
+ }
+
+ PatchCache(lookup->name(), code);
+ TRACE_IC("LoadIC", lookup->name());
+}
+
+
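+// Keyed ICs track receiver maps in the stub itself, so only non-keyed kinds
+// are entered into the megamorphic stub cache.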
+void IC::UpdateMegamorphicCache(HeapType* type, Name* name, Code* code) {
+ if (kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC) return;
+ Map* map = *TypeToMap(type, isolate());
+ isolate()->stub_cache()->Set(name, map, code);
+}
+
+
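+// Looks for a previously compiled handler in the stub holder map's code
+// cache and reuses it unless it is the very handler that just missed;
+// otherwise compiles a fresh handler and, unless it is a NORMAL stub, caches
+// it there.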
+Handle<Code> IC::ComputeHandler(LookupIterator* lookup, Handle<Object> value) {
+ bool receiver_is_holder =
+ lookup->GetReceiver().is_identical_to(lookup->GetHolder<JSObject>());
+ CacheHolderFlag flag;
+ Handle<Map> stub_holder_map = IC::GetHandlerCacheHolder(
+ *receiver_type(), receiver_is_holder, isolate(), &flag);
+
+ Handle<Code> code = PropertyHandlerCompiler::Find(
+ lookup->name(), stub_holder_map, kind(), flag,
+ lookup->holder_map()->is_dictionary_map() ? Code::NORMAL : Code::FAST);
+ // Use the cached value if it exists, and if it is different from the
+ // handler that just missed.
+ if (!code.is_null()) {
+ if (!maybe_handler_.is_null() &&
+ !maybe_handler_.ToHandleChecked().is_identical_to(code)) {
+ return code;
+ }
+ if (maybe_handler_.is_null()) {
+ // maybe_handler_ is only populated for MONOMORPHIC and POLYMORPHIC ICs.
+ // In MEGAMORPHIC case, check if the handler in the megamorphic stub
+ // cache (which just missed) is different from the cached handler.
+ if (state() == MEGAMORPHIC && lookup->GetReceiver()->IsHeapObject()) {
+ Map* map = Handle<HeapObject>::cast(lookup->GetReceiver())->map();
+ Code* megamorphic_cached_code =
+ isolate()->stub_cache()->Get(*lookup->name(), map, code->flags());
+ if (megamorphic_cached_code != *code) return code;
+ } else {
+ return code;
+ }
+ }
+ }
+
+ code = CompileHandler(lookup, value, flag);
+ DCHECK(code->is_handler());
+
+ if (code->type() != Code::NORMAL) {
+ Map::UpdateCodeCache(stub_holder_map, lookup->name(), code);
+ }
+
+ return code;
+}
+
+
+Handle<Code> LoadIC::CompileHandler(LookupIterator* lookup,
+ Handle<Object> unused,
+ CacheHolderFlag cache_holder) {
+ Handle<Object> receiver = lookup->GetReceiver();
+ if (receiver->IsString() &&
+ Name::Equals(isolate()->factory()->length_string(), lookup->name())) {
+ FieldIndex index = FieldIndex::ForInObjectOffset(String::kLengthOffset);
+ return SimpleFieldLoad(index);
+ }
+
+ if (receiver->IsStringWrapper() &&
+ Name::Equals(isolate()->factory()->length_string(), lookup->name())) {
+ StringLengthStub string_length_stub(isolate());
+ return string_length_stub.GetCode();
+ }
+
+ // Use specialized code for getting prototype of functions.
+ if (receiver->IsJSFunction() &&
+ Name::Equals(isolate()->factory()->prototype_string(), lookup->name()) &&
+ Handle<JSFunction>::cast(receiver)->should_have_prototype() &&
+ !Handle<JSFunction>::cast(receiver)
+ ->map()
+ ->has_non_instance_prototype()) {
+ Handle<Code> stub;
+ FunctionPrototypeStub function_prototype_stub(isolate());
+ return function_prototype_stub.GetCode();
+ }
+
+ Handle<HeapType> type = receiver_type();
+ Handle<JSObject> holder = lookup->GetHolder<JSObject>();
+ bool receiver_is_holder = receiver.is_identical_to(holder);
+ // -------------- Interceptors --------------
+ if (lookup->state() == LookupIterator::INTERCEPTOR) {
+ DCHECK(!holder->GetNamedInterceptor()->getter()->IsUndefined());
+ NamedLoadHandlerCompiler compiler(isolate(), receiver_type(), holder,
+ cache_holder);
+ // Perform a lookup behind the interceptor. Copy the LookupIterator since
+ // the original iterator will be used to fetch the value.
+ LookupIterator it(lookup);
+ it.Next();
+ LookupForRead(&it);
+ return compiler.CompileLoadInterceptor(&it);
+ }
+
+ // -------------- Accessors --------------
+ DCHECK(lookup->state() == LookupIterator::PROPERTY);
+ if (lookup->property_kind() == LookupIterator::ACCESSOR) {
+ // Use simple field loads for some well-known callback properties.
+ if (receiver_is_holder) {
+ DCHECK(receiver->IsJSObject());
+ Handle<JSObject> js_receiver = Handle<JSObject>::cast(receiver);
+ int object_offset;
+ if (Accessors::IsJSObjectFieldAccessor<HeapType>(type, lookup->name(),
+ &object_offset)) {
+ FieldIndex index =
+ FieldIndex::ForInObjectOffset(object_offset, js_receiver->map());
+ return SimpleFieldLoad(index);
+ }
+ }
+
+ Handle<Object> accessors = lookup->GetAccessors();
+ if (accessors->IsExecutableAccessorInfo()) {
+ Handle<ExecutableAccessorInfo> info =
+ Handle<ExecutableAccessorInfo>::cast(accessors);
+ if (v8::ToCData<Address>(info->getter()) == 0) return slow_stub();
+ if (!ExecutableAccessorInfo::IsCompatibleReceiverType(isolate(), info,
+ type)) {
+ return slow_stub();
+ }
+ if (!holder->HasFastProperties()) return slow_stub();
+ NamedLoadHandlerCompiler compiler(isolate(), receiver_type(), holder,
+ cache_holder);
+ return compiler.CompileLoadCallback(lookup->name(), info);
+ }
+ if (accessors->IsAccessorPair()) {
+ Handle<Object> getter(Handle<AccessorPair>::cast(accessors)->getter(),
+ isolate());
+ if (!getter->IsJSFunction()) return slow_stub();
+ if (!holder->HasFastProperties()) return slow_stub();
+ Handle<JSFunction> function = Handle<JSFunction>::cast(getter);
+ if (!receiver->IsJSObject() && !function->IsBuiltin() &&
+ function->shared()->strict_mode() == SLOPPY) {
+ // Calling sloppy non-builtins with a value as the receiver
+ // requires boxing.
+ return slow_stub();
+ }
+ CallOptimization call_optimization(function);
+ NamedLoadHandlerCompiler compiler(isolate(), receiver_type(), holder,
+ cache_holder);
+ if (call_optimization.is_simple_api_call() &&
+ call_optimization.IsCompatibleReceiver(receiver, holder)) {
+ return compiler.CompileLoadCallback(lookup->name(), call_optimization);
+ }
+ return compiler.CompileLoadViaGetter(lookup->name(), function);
+ }
+ // TODO(dcarney): Handle correctly.
+ DCHECK(accessors->IsDeclaredAccessorInfo());
+ return slow_stub();
+ }
+
+ // -------------- Dictionary properties --------------
+ DCHECK(lookup->property_kind() == LookupIterator::DATA);
+ if (lookup->property_encoding() == LookupIterator::DICTIONARY) {
+ if (kind() != Code::LOAD_IC) return slow_stub();
+ if (holder->IsGlobalObject()) {
+ NamedLoadHandlerCompiler compiler(isolate(), receiver_type(), holder,
+ cache_holder);
+ Handle<PropertyCell> cell = lookup->GetPropertyCell();
+ Handle<Code> code = compiler.CompileLoadGlobal(cell, lookup->name(),
+ lookup->IsConfigurable());
+ // TODO(verwaest): Move caching of these NORMAL stubs outside as well.
+ CacheHolderFlag flag;
+ Handle<Map> stub_holder_map =
+ GetHandlerCacheHolder(*type, receiver_is_holder, isolate(), &flag);
+ Map::UpdateCodeCache(stub_holder_map, lookup->name(), code);
+ return code;
+ }
+ // There is only one shared stub for loading normalized
+ // properties. It does not traverse the prototype chain, so the
+ // property must be found in the object for the stub to be
+ // applicable.
+ if (!receiver_is_holder) return slow_stub();
+ return isolate()->builtins()->LoadIC_Normal();
+ }
+
+ // -------------- Fields --------------
+ DCHECK(lookup->property_encoding() == LookupIterator::DESCRIPTOR);
+ if (lookup->property_details().type() == FIELD) {
+ FieldIndex field = lookup->GetFieldIndex();
+ if (receiver_is_holder) {
+ return SimpleFieldLoad(field);
+ }
+ NamedLoadHandlerCompiler compiler(isolate(), receiver_type(), holder,
+ cache_holder);
+ return compiler.CompileLoadField(lookup->name(), field);
+ }
+
+ // -------------- Constant properties --------------
+ DCHECK(lookup->property_details().type() == CONSTANT);
+ if (receiver_is_holder) {
+ LoadConstantStub stub(isolate(), lookup->GetConstantIndex());
+ return stub.GetCode();
+ }
+ NamedLoadHandlerCompiler compiler(isolate(), receiver_type(), holder,
+ cache_holder);
+ return compiler.CompileLoadConstant(lookup->name(),
+ lookup->GetConstantIndex());
+}
+
+
+static Handle<Object> TryConvertKey(Handle<Object> key, Isolate* isolate) {
+ // This helper implements a few common fast cases for converting
+ // non-smi keys of keyed loads/stores to a smi or a string.
+ if (key->IsHeapNumber()) {
+ double value = Handle<HeapNumber>::cast(key)->value();
+ if (std::isnan(value)) {
+ key = isolate->factory()->nan_string();
+ } else {
+ int int_value = FastD2I(value);
+ if (value == int_value && Smi::IsValid(int_value)) {
+ key = Handle<Smi>(Smi::FromInt(int_value), isolate);
+ }
+ }
+ } else if (key->IsUndefined()) {
+ key = isolate->factory()->undefined_string();
+ }
+ return key;
+}
+
+
+Handle<Code> KeyedLoadIC::LoadElementStub(Handle<JSObject> receiver) {
+ // Don't handle megamorphic property accesses for INTERCEPTORS or CALLBACKS
+ // via megamorphic stubs, since they don't have a map in their relocation info
+ // and so the stubs can't be harvested for the object needed for a map check.
+ if (target()->type() != Code::NORMAL) {
+ TRACE_GENERIC_IC(isolate(), "KeyedIC", "non-NORMAL target type");
+ return generic_stub();
+ }
+
+ Handle<Map> receiver_map(receiver->map(), isolate());
+ MapHandleList target_receiver_maps;
+ if (target().is_identical_to(string_stub())) {
+ target_receiver_maps.Add(isolate()->factory()->string_map());
+ } else {
+ TargetMaps(&target_receiver_maps);
+ }
+ if (target_receiver_maps.length() == 0) {
+ return PropertyICCompiler::ComputeKeyedLoadMonomorphic(receiver_map);
+ }
+
+ // The first time a receiver is seen that is a transitioned version of the
+ // previous monomorphic receiver type, assume the new ElementsKind is the
+ // monomorphic type. This benefits global arrays that only transition
+ // once, and all call sites accessing them are faster if they remain
+ // monomorphic. If this optimistic assumption is not true, the IC will
+ // miss again and it will become polymorphic and support both the
+ // untransitioned and transitioned maps.
+ if (state() == MONOMORPHIC && IsMoreGeneralElementsKindTransition(
+ target_receiver_maps.at(0)->elements_kind(),
+ receiver->GetElementsKind())) {
+ return PropertyICCompiler::ComputeKeyedLoadMonomorphic(receiver_map);
+ }
+
+ DCHECK(state() != GENERIC);
+
+ // Determine the list of receiver maps that this call site has seen,
+ // adding the map that was just encountered.
+ if (!AddOneReceiverMapIfMissing(&target_receiver_maps, receiver_map)) {
+ // If the miss wasn't due to an unseen map, a polymorphic stub
+ // won't help, use the generic stub.
+ TRACE_GENERIC_IC(isolate(), "KeyedIC", "same map added twice");
+ return generic_stub();
+ }
+
+ // If the maximum number of receiver maps has been exceeded, use the generic
+ // version of the IC.
+ if (target_receiver_maps.length() > kMaxKeyedPolymorphism) {
+ TRACE_GENERIC_IC(isolate(), "KeyedIC", "max polymorph exceeded");
+ return generic_stub();
+ }
+
+ return PropertyICCompiler::ComputeKeyedLoadPolymorphic(&target_receiver_maps);
+}
+
+
+MaybeHandle<Object> KeyedLoadIC::Load(Handle<Object> object,
+ Handle<Object> key) {
+ if (MigrateDeprecated(object)) {
+ Handle<Object> result;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate(), result, Runtime::GetObjectProperty(isolate(), object, key),
+ Object);
+ return result;
+ }
+
+ Handle<Object> load_handle;
+ Handle<Code> stub = generic_stub();
+
+ // Check for non-string values that can be converted into an
+ // internalized string directly or is representable as a smi.
+ key = TryConvertKey(key, isolate());
+
+ if (key->IsInternalizedString() || key->IsSymbol()) {
+ ASSIGN_RETURN_ON_EXCEPTION(isolate(), load_handle,
+ LoadIC::Load(object, Handle<Name>::cast(key)),
+ Object);
+ } else if (FLAG_use_ic && !object->IsAccessCheckNeeded()) {
+ if (object->IsString() && key->IsNumber()) {
+ if (state() == UNINITIALIZED) stub = string_stub();
+ } else if (object->IsJSObject()) {
+ Handle<JSObject> receiver = Handle<JSObject>::cast(object);
+ if (receiver->elements()->map() ==
+ isolate()->heap()->sloppy_arguments_elements_map()) {
+ stub = sloppy_arguments_stub();
+ } else if (receiver->HasIndexedInterceptor()) {
+ stub = indexed_interceptor_stub();
+ } else if (!Object::ToSmi(isolate(), key).is_null() &&
+ (!target().is_identical_to(sloppy_arguments_stub()))) {
+ stub = LoadElementStub(receiver);
+ }
+ }
+ }
+
+ if (!is_target_set()) {
+ Code* generic = *generic_stub();
+ if (*stub == generic) {
+ TRACE_GENERIC_IC(isolate(), "KeyedLoadIC", "set generic");
+ }
+ set_target(*stub);
+ TRACE_IC("LoadIC", key);
+ }
+
+ if (!load_handle.is_null()) return load_handle;
+ Handle<Object> result;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate(), result,
+ Runtime::GetObjectProperty(isolate(), object, key),
+ Object);
+ return result;
+}
+
+
+bool StoreIC::LookupForWrite(LookupIterator* it, Handle<Object> value,
+ JSReceiver::StoreFromKeyed store_mode) {
+ // Disable ICs for non-JSObjects for now.
+ Handle<Object> receiver = it->GetReceiver();
+ if (!receiver->IsJSObject()) return false;
+ DCHECK(!Handle<JSObject>::cast(receiver)->map()->is_deprecated());
+
+ for (; it->IsFound(); it->Next()) {
+ switch (it->state()) {
+ case LookupIterator::NOT_FOUND:
+ case LookupIterator::TRANSITION:
+ UNREACHABLE();
+ case LookupIterator::JSPROXY:
+ return false;
+ case LookupIterator::INTERCEPTOR: {
+ Handle<JSObject> holder = it->GetHolder<JSObject>();
+ InterceptorInfo* info = holder->GetNamedInterceptor();
+ if (it->HolderIsReceiverOrHiddenPrototype()) {
+ if (!info->setter()->IsUndefined()) return true;
+ } else if (!info->getter()->IsUndefined() ||
+ !info->query()->IsUndefined()) {
+ return false;
+ }
+ break;
+ }
+ case LookupIterator::ACCESS_CHECK:
+ if (it->GetHolder<JSObject>()->IsAccessCheckNeeded()) return false;
+ break;
+ case LookupIterator::PROPERTY:
+ if (!it->HasProperty()) break;
+ if (it->IsReadOnly()) return false;
+ if (it->property_kind() == LookupIterator::ACCESSOR) return true;
+ if (it->GetHolder<Object>().is_identical_to(receiver)) {
+ it->PrepareForDataProperty(value);
+ // The previous receiver map might just have been deprecated,
+ // so reload it.
+ update_receiver_type(receiver);
+ return true;
+ }
+
+ // Receiver != holder.
+ if (receiver->IsJSGlobalProxy()) {
+ PrototypeIterator iter(it->isolate(), receiver);
+ return it->GetHolder<Object>().is_identical_to(
+ PrototypeIterator::GetCurrent(iter));
+ }
+
+ it->PrepareTransitionToDataProperty(value, NONE, store_mode);
+ return it->IsCacheableTransition();
+ }
+ }
+
+ it->PrepareTransitionToDataProperty(value, NONE, store_mode);
+ return it->IsCacheableTransition();
+}
+
+
+MaybeHandle<Object> StoreIC::Store(Handle<Object> object, Handle<Name> name,
+ Handle<Object> value,
+ JSReceiver::StoreFromKeyed store_mode) {
+ // TODO(verwaest): Let SetProperty do the migration, since storing a property
+ // might deprecate the current map again, if value does not fit.
+ if (MigrateDeprecated(object) || object->IsJSProxy()) {
+ Handle<Object> result;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate(), result,
+ Object::SetProperty(object, name, value, strict_mode()), Object);
+ return result;
+ }
+
+ // If the object is undefined or null it's illegal to try to set any
+ // properties on it; throw a TypeError in that case.
+ if (object->IsUndefined() || object->IsNull()) {
+ return TypeError("non_object_property_store", object, name);
+ }
+
+ // Check if the given name is an array index.
+ uint32_t index;
+ if (name->AsArrayIndex(&index)) {
+ // Ignore other stores where the receiver is not a JSObject.
+ // TODO(1475): Must check prototype chains of object wrappers.
+ if (!object->IsJSObject()) return value;
+ Handle<JSObject> receiver = Handle<JSObject>::cast(object);
+
+ Handle<Object> result;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate(), result,
+ JSObject::SetElement(receiver, index, value, NONE, strict_mode()),
+ Object);
+ return value;
+ }
+
+ // Observed objects are always modified through the runtime.
+ if (object->IsHeapObject() &&
+ Handle<HeapObject>::cast(object)->map()->is_observed()) {
+ Handle<Object> result;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate(), result,
+ Object::SetProperty(object, name, value, strict_mode(), store_mode),
+ Object);
+ return result;
+ }
+
+ LookupIterator it(object, name);
+ if (FLAG_use_ic) UpdateCaches(&it, value, store_mode);
+
+ // Set the property.
+ Handle<Object> result;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate(), result,
+ Object::SetProperty(&it, value, strict_mode(), store_mode), Object);
+ return result;
+}
+
+
+OStream& operator<<(OStream& os, const CallIC::State& s) {
+  return os << "(args(" << s.arg_count() << "), "
+            << (s.call_type() == CallIC::METHOD ? "METHOD" : "FUNCTION")
+            << ")";
+}
+
+
+Handle<Code> CallIC::initialize_stub(Isolate* isolate, int argc,
+ CallType call_type) {
+ CallICStub stub(isolate, State(argc, call_type));
+ Handle<Code> code = stub.GetCode();
+ return code;
+}
+
+
+Handle<Code> StoreIC::initialize_stub(Isolate* isolate,
+ StrictMode strict_mode) {
+ ExtraICState extra_state = ComputeExtraICState(strict_mode);
+ Handle<Code> ic =
+ PropertyICCompiler::ComputeStore(isolate, UNINITIALIZED, extra_state);
+ return ic;
+}
+
+
+Handle<Code> StoreIC::megamorphic_stub() {
+ return PropertyICCompiler::ComputeStore(isolate(), MEGAMORPHIC,
+ extra_ic_state());
+}
+
+
+Handle<Code> StoreIC::generic_stub() const {
+ return PropertyICCompiler::ComputeStore(isolate(), GENERIC, extra_ic_state());
+}
+
+
+Handle<Code> StoreIC::pre_monomorphic_stub(Isolate* isolate,
+ StrictMode strict_mode) {
+ ExtraICState state = ComputeExtraICState(strict_mode);
+ return PropertyICCompiler::ComputeStore(isolate, PREMONOMORPHIC, state);
+}
+
+
+void StoreIC::UpdateCaches(LookupIterator* lookup, Handle<Object> value,
+ JSReceiver::StoreFromKeyed store_mode) {
+ if (state() == UNINITIALIZED) {
+ // This is the first time we execute this inline cache. Set the target to
+ // the pre monomorphic stub to delay setting the monomorphic state.
+ set_target(*pre_monomorphic_stub());
+ TRACE_IC("StoreIC", lookup->name());
+ return;
+ }
+
+ Handle<Code> code = LookupForWrite(lookup, value, store_mode)
+ ? ComputeHandler(lookup, value)
+ : slow_stub();
+
+ PatchCache(lookup->name(), code);
+ TRACE_IC("StoreIC", lookup->name());
+}
+
+
+Handle<Code> StoreIC::CompileHandler(LookupIterator* lookup,
+ Handle<Object> value,
+ CacheHolderFlag cache_holder) {
+ DCHECK_NE(LookupIterator::JSPROXY, lookup->state());
+
+ // This is currently guaranteed by checks in StoreIC::Store.
+ Handle<JSObject> receiver = Handle<JSObject>::cast(lookup->GetReceiver());
+ Handle<JSObject> holder = lookup->GetHolder<JSObject>();
+ DCHECK(!receiver->IsAccessCheckNeeded());
+
+ // -------------- Transition --------------
+ if (lookup->state() == LookupIterator::TRANSITION) {
+ Handle<Map> transition = lookup->transition_map();
+ // Currently not handled by CompileStoreTransition.
+ if (!holder->HasFastProperties()) return slow_stub();
+
+ DCHECK(lookup->IsCacheableTransition());
+ NamedStoreHandlerCompiler compiler(isolate(), receiver_type(), holder);
+ return compiler.CompileStoreTransition(transition, lookup->name());
+ }
+
+ // -------------- Interceptors --------------
+ if (lookup->state() == LookupIterator::INTERCEPTOR) {
+ DCHECK(!holder->GetNamedInterceptor()->setter()->IsUndefined());
+ NamedStoreHandlerCompiler compiler(isolate(), receiver_type(), holder);
+ return compiler.CompileStoreInterceptor(lookup->name());
+ }
+
+ // -------------- Accessors --------------
+ DCHECK(lookup->state() == LookupIterator::PROPERTY);
+ if (lookup->property_kind() == LookupIterator::ACCESSOR) {
+ if (!holder->HasFastProperties()) return slow_stub();
+ Handle<Object> accessors = lookup->GetAccessors();
+ if (accessors->IsExecutableAccessorInfo()) {
+ Handle<ExecutableAccessorInfo> info =
+ Handle<ExecutableAccessorInfo>::cast(accessors);
+ if (v8::ToCData<Address>(info->setter()) == 0) return slow_stub();
+ if (!ExecutableAccessorInfo::IsCompatibleReceiverType(isolate(), info,
+ receiver_type())) {
+ return slow_stub();
+ }
+ NamedStoreHandlerCompiler compiler(isolate(), receiver_type(), holder);
+ return compiler.CompileStoreCallback(receiver, lookup->name(), info);
+ } else if (accessors->IsAccessorPair()) {
+ Handle<Object> setter(Handle<AccessorPair>::cast(accessors)->setter(),
+ isolate());
+ if (!setter->IsJSFunction()) return slow_stub();
+ Handle<JSFunction> function = Handle<JSFunction>::cast(setter);
+ CallOptimization call_optimization(function);
+ NamedStoreHandlerCompiler compiler(isolate(), receiver_type(), holder);
+ if (call_optimization.is_simple_api_call() &&
+ call_optimization.IsCompatibleReceiver(receiver, holder)) {
+ return compiler.CompileStoreCallback(receiver, lookup->name(),
+ call_optimization);
+ }
+ return compiler.CompileStoreViaSetter(receiver, lookup->name(),
+ Handle<JSFunction>::cast(setter));
+ }
+ // TODO(dcarney): Handle correctly.
+ DCHECK(accessors->IsDeclaredAccessorInfo());
+ return slow_stub();
+ }
+
+ // -------------- Dictionary properties --------------
+ DCHECK(lookup->property_kind() == LookupIterator::DATA);
+ if (lookup->property_encoding() == LookupIterator::DICTIONARY) {
+ if (holder->IsGlobalObject()) {
+ Handle<PropertyCell> cell = lookup->GetPropertyCell();
+ Handle<HeapType> union_type = PropertyCell::UpdatedType(cell, value);
+ StoreGlobalStub stub(isolate(), union_type->IsConstant(),
+ receiver->IsJSGlobalProxy());
+ Handle<Code> code = stub.GetCodeCopyFromTemplate(
+ Handle<GlobalObject>::cast(holder), cell);
+ // TODO(verwaest): Move caching of these NORMAL stubs outside as well.
+ HeapObject::UpdateMapCodeCache(receiver, lookup->name(), code);
+ return code;
+ }
+ DCHECK(holder.is_identical_to(receiver));
+ return isolate()->builtins()->StoreIC_Normal();
+ }
+
+ // -------------- Fields --------------
+ DCHECK(lookup->property_encoding() == LookupIterator::DESCRIPTOR);
+ if (lookup->property_details().type() == FIELD) {
+ bool use_stub = true;
+ if (lookup->representation().IsHeapObject()) {
+ // Only use a generic stub if no types need to be tracked.
+ Handle<HeapType> field_type = lookup->GetFieldType();
+ HeapType::Iterator<Map> it = field_type->Classes();
+ use_stub = it.Done();
+ }
+ if (use_stub) {
+ StoreFieldStub stub(isolate(), lookup->GetFieldIndex(),
+ lookup->representation());
+ return stub.GetCode();
+ }
+ NamedStoreHandlerCompiler compiler(isolate(), receiver_type(), holder);
+ return compiler.CompileStoreField(lookup);
+ }
+
+ // -------------- Constant properties --------------
+ DCHECK(lookup->property_details().type() == CONSTANT);
+ return slow_stub();
+}
+
+
+Handle<Code> KeyedStoreIC::StoreElementStub(Handle<JSObject> receiver,
+ KeyedAccessStoreMode store_mode) {
+ // Don't handle megamorphic property accesses for INTERCEPTORS or CALLBACKS
+ // via megamorphic stubs, since they don't have a map in their relocation info
+ // and so the stubs can't be harvested for the object needed for a map check.
+ if (target()->type() != Code::NORMAL) {
+ TRACE_GENERIC_IC(isolate(), "KeyedIC", "non-NORMAL target type");
+ return generic_stub();
+ }
+
+ Handle<Map> receiver_map(receiver->map(), isolate());
+ MapHandleList target_receiver_maps;
+ TargetMaps(&target_receiver_maps);
+ if (target_receiver_maps.length() == 0) {
+ Handle<Map> monomorphic_map =
+ ComputeTransitionedMap(receiver_map, store_mode);
+ store_mode = GetNonTransitioningStoreMode(store_mode);
+ return PropertyICCompiler::ComputeKeyedStoreMonomorphic(
+ monomorphic_map, strict_mode(), store_mode);
+ }
+
+  // There are several special cases where a MONOMORPHIC IC can still
+  // transition to a different IC that handles a superset of the original
+  // IC's cases. Handle those here if the receiver map hasn't changed or it
+  // has transitioned to a more general kind.
+ KeyedAccessStoreMode old_store_mode =
+ KeyedStoreIC::GetKeyedAccessStoreMode(target()->extra_ic_state());
+ Handle<Map> previous_receiver_map = target_receiver_maps.at(0);
+ if (state() == MONOMORPHIC) {
+ Handle<Map> transitioned_receiver_map = receiver_map;
+ if (IsTransitionStoreMode(store_mode)) {
+ transitioned_receiver_map =
+ ComputeTransitionedMap(receiver_map, store_mode);
+ }
+ if ((receiver_map.is_identical_to(previous_receiver_map) &&
+ IsTransitionStoreMode(store_mode)) ||
+ IsTransitionOfMonomorphicTarget(*previous_receiver_map,
+ *transitioned_receiver_map)) {
+ // If the "old" and "new" maps are in the same elements map family, or
+ // if they at least come from the same origin for a transitioning store,
+ // stay MONOMORPHIC and use the map for the most generic ElementsKind.
+ store_mode = GetNonTransitioningStoreMode(store_mode);
+ return PropertyICCompiler::ComputeKeyedStoreMonomorphic(
+ transitioned_receiver_map, strict_mode(), store_mode);
+ } else if (*previous_receiver_map == receiver->map() &&
+ old_store_mode == STANDARD_STORE &&
+ (store_mode == STORE_AND_GROW_NO_TRANSITION ||
+ store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS ||
+ store_mode == STORE_NO_TRANSITION_HANDLE_COW)) {
+ // A "normal" IC that handles stores can switch to a version that can
+ // grow at the end of the array, handle OOB accesses or copy COW arrays
+ // and still stay MONOMORPHIC.
+ return PropertyICCompiler::ComputeKeyedStoreMonomorphic(
+ receiver_map, strict_mode(), store_mode);
+ }
+ }
+
+ DCHECK(state() != GENERIC);
+
+ bool map_added =
+ AddOneReceiverMapIfMissing(&target_receiver_maps, receiver_map);
+
+ if (IsTransitionStoreMode(store_mode)) {
+ Handle<Map> transitioned_receiver_map =
+ ComputeTransitionedMap(receiver_map, store_mode);
+ map_added |= AddOneReceiverMapIfMissing(&target_receiver_maps,
+ transitioned_receiver_map);
+ }
+
+ if (!map_added) {
+ // If the miss wasn't due to an unseen map, a polymorphic stub
+ // won't help, use the generic stub.
+ TRACE_GENERIC_IC(isolate(), "KeyedIC", "same map added twice");
+ return generic_stub();
+ }
+
+ // If the maximum number of receiver maps has been exceeded, use the generic
+ // version of the IC.
+ if (target_receiver_maps.length() > kMaxKeyedPolymorphism) {
+ TRACE_GENERIC_IC(isolate(), "KeyedIC", "max polymorph exceeded");
+ return generic_stub();
+ }
+
+ // Make sure all polymorphic handlers have the same store mode, otherwise the
+ // generic stub must be used.
+ store_mode = GetNonTransitioningStoreMode(store_mode);
+ if (old_store_mode != STANDARD_STORE) {
+ if (store_mode == STANDARD_STORE) {
+ store_mode = old_store_mode;
+ } else if (store_mode != old_store_mode) {
+ TRACE_GENERIC_IC(isolate(), "KeyedIC", "store mode mismatch");
+ return generic_stub();
+ }
+ }
+
+ // If the store mode isn't the standard mode, make sure that all polymorphic
+ // receivers are either external arrays, or all "normal" arrays. Otherwise,
+ // use the generic stub.
+ if (store_mode != STANDARD_STORE) {
+ int external_arrays = 0;
+ for (int i = 0; i < target_receiver_maps.length(); ++i) {
+ if (target_receiver_maps[i]->has_external_array_elements() ||
+ target_receiver_maps[i]->has_fixed_typed_array_elements()) {
+ external_arrays++;
+ }
+ }
+ if (external_arrays != 0 &&
+ external_arrays != target_receiver_maps.length()) {
+ TRACE_GENERIC_IC(isolate(), "KeyedIC",
+ "unsupported combination of external and normal arrays");
+ return generic_stub();
+ }
+ }
+
+ return PropertyICCompiler::ComputeKeyedStorePolymorphic(
+ &target_receiver_maps, store_mode, strict_mode());
+}
+
+
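+// Maps |map| to the elements kind demanded by |store_mode|: transitioning
+// modes return the map with FAST_ELEMENTS, FAST_DOUBLE_ELEMENTS or their
+// holey variants, while non-transitioning modes return |map| unchanged.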
+Handle<Map> KeyedStoreIC::ComputeTransitionedMap(
+ Handle<Map> map, KeyedAccessStoreMode store_mode) {
+ switch (store_mode) {
+ case STORE_TRANSITION_SMI_TO_OBJECT:
+ case STORE_TRANSITION_DOUBLE_TO_OBJECT:
+ case STORE_AND_GROW_TRANSITION_SMI_TO_OBJECT:
+ case STORE_AND_GROW_TRANSITION_DOUBLE_TO_OBJECT:
+ return Map::TransitionElementsTo(map, FAST_ELEMENTS);
+ case STORE_TRANSITION_SMI_TO_DOUBLE:
+ case STORE_AND_GROW_TRANSITION_SMI_TO_DOUBLE:
+ return Map::TransitionElementsTo(map, FAST_DOUBLE_ELEMENTS);
+ case STORE_TRANSITION_HOLEY_SMI_TO_OBJECT:
+ case STORE_TRANSITION_HOLEY_DOUBLE_TO_OBJECT:
+ case STORE_AND_GROW_TRANSITION_HOLEY_SMI_TO_OBJECT:
+ case STORE_AND_GROW_TRANSITION_HOLEY_DOUBLE_TO_OBJECT:
+ return Map::TransitionElementsTo(map, FAST_HOLEY_ELEMENTS);
+ case STORE_TRANSITION_HOLEY_SMI_TO_DOUBLE:
+ case STORE_AND_GROW_TRANSITION_HOLEY_SMI_TO_DOUBLE:
+ return Map::TransitionElementsTo(map, FAST_HOLEY_DOUBLE_ELEMENTS);
+ case STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS:
+ DCHECK(map->has_external_array_elements());
+ // Fall through
+ case STORE_NO_TRANSITION_HANDLE_COW:
+ case STANDARD_STORE:
+ case STORE_AND_GROW_NO_TRANSITION:
+ return map;
+ }
+ UNREACHABLE();
+ return MaybeHandle<Map>().ToHandleChecked();
+}
+
+
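+// Returns true when |index| is past the receiver's addressable elements:
+// beyond the JSArray length for arrays, or beyond the backing-store length
+// for other receivers.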
+bool IsOutOfBoundsAccess(Handle<JSObject> receiver, int index) {
+ if (receiver->IsJSArray()) {
+ return JSArray::cast(*receiver)->length()->IsSmi() &&
+ index >= Smi::cast(JSArray::cast(*receiver)->length())->value();
+ }
+ return index >= receiver->elements()->length();
+}
+
+
+KeyedAccessStoreMode KeyedStoreIC::GetStoreMode(Handle<JSObject> receiver,
+ Handle<Object> key,
+ Handle<Object> value) {
+ Handle<Smi> smi_key = Object::ToSmi(isolate(), key).ToHandleChecked();
+ int index = smi_key->value();
+ bool oob_access = IsOutOfBoundsAccess(receiver, index);
+ // Don't consider this a growing store if the store would send the receiver to
+ // dictionary mode.
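+  // Illustrative: for a fast JSArray a, a[a.length] = v is a growing store,
+  // while a[0] = v on a non-empty a is an in-bounds store.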
+ bool allow_growth = receiver->IsJSArray() && oob_access &&
+ !receiver->WouldConvertToSlowElements(key);
+ if (allow_growth) {
+ // Handle growing array in stub if necessary.
+ if (receiver->HasFastSmiElements()) {
+ if (value->IsHeapNumber()) {
+ if (receiver->HasFastHoleyElements()) {
+ return STORE_AND_GROW_TRANSITION_HOLEY_SMI_TO_DOUBLE;
+ } else {
+ return STORE_AND_GROW_TRANSITION_SMI_TO_DOUBLE;
+ }
+ }
+ if (value->IsHeapObject()) {
+ if (receiver->HasFastHoleyElements()) {
+ return STORE_AND_GROW_TRANSITION_HOLEY_SMI_TO_OBJECT;
+ } else {
+ return STORE_AND_GROW_TRANSITION_SMI_TO_OBJECT;
+ }
+ }
+ } else if (receiver->HasFastDoubleElements()) {
+ if (!value->IsSmi() && !value->IsHeapNumber()) {
+ if (receiver->HasFastHoleyElements()) {
+ return STORE_AND_GROW_TRANSITION_HOLEY_DOUBLE_TO_OBJECT;
+ } else {
+ return STORE_AND_GROW_TRANSITION_DOUBLE_TO_OBJECT;
+ }
+ }
+ }
+ return STORE_AND_GROW_NO_TRANSITION;
+ } else {
+ // Handle only in-bounds elements accesses.
+ if (receiver->HasFastSmiElements()) {
+ if (value->IsHeapNumber()) {
+ if (receiver->HasFastHoleyElements()) {
+ return STORE_TRANSITION_HOLEY_SMI_TO_DOUBLE;
+ } else {
+ return STORE_TRANSITION_SMI_TO_DOUBLE;
+ }
+ } else if (value->IsHeapObject()) {
+ if (receiver->HasFastHoleyElements()) {
+ return STORE_TRANSITION_HOLEY_SMI_TO_OBJECT;
+ } else {
+ return STORE_TRANSITION_SMI_TO_OBJECT;
+ }
+ }
+ } else if (receiver->HasFastDoubleElements()) {
+ if (!value->IsSmi() && !value->IsHeapNumber()) {
+ if (receiver->HasFastHoleyElements()) {
+ return STORE_TRANSITION_HOLEY_DOUBLE_TO_OBJECT;
+ } else {
+ return STORE_TRANSITION_DOUBLE_TO_OBJECT;
+ }
+ }
+ }
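+    // OOB stores on external arrays are only handled in the stub when
+    // --trace-external-array-abuse is off, presumably so tracing still sees
+    // each access in the runtime.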
+ if (!FLAG_trace_external_array_abuse &&
+ receiver->map()->has_external_array_elements() && oob_access) {
+ return STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS;
+ }
+ Heap* heap = receiver->GetHeap();
+ if (receiver->elements()->map() == heap->fixed_cow_array_map()) {
+ return STORE_NO_TRANSITION_HANDLE_COW;
+ } else {
+ return STANDARD_STORE;
+ }
+ }
+}
+
+
+MaybeHandle<Object> KeyedStoreIC::Store(Handle<Object> object,
+ Handle<Object> key,
+ Handle<Object> value) {
+ // TODO(verwaest): Let SetProperty do the migration, since storing a property
+ // might deprecate the current map again, if value does not fit.
+ if (MigrateDeprecated(object)) {
+ Handle<Object> result;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate(), result, Runtime::SetObjectProperty(isolate(), object, key,
+ value, strict_mode()),
+ Object);
+ return result;
+ }
+
+  // Check for non-string values that can be converted into an
+  // internalized string directly or are representable as a smi.
+ key = TryConvertKey(key, isolate());
+
+ Handle<Object> store_handle;
+ Handle<Code> stub = generic_stub();
+
+ if (key->IsInternalizedString()) {
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate(), store_handle,
+ StoreIC::Store(object, Handle<String>::cast(key), value,
+ JSReceiver::MAY_BE_STORE_FROM_KEYED),
+ Object);
+ TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "set generic");
+ set_target(*stub);
+ return store_handle;
+ }
+
+ bool use_ic =
+ FLAG_use_ic && !object->IsStringWrapper() &&
+ !object->IsAccessCheckNeeded() && !object->IsJSGlobalProxy() &&
+ !(object->IsJSObject() && JSObject::cast(*object)->map()->is_observed());
+ if (use_ic && !object->IsSmi()) {
+ // Don't use ICs for maps of the objects in Array's prototype chain. We
+ // expect to be able to trap element sets to objects with those maps in
+ // the runtime to enable optimization of element hole access.
+ Handle<HeapObject> heap_object = Handle<HeapObject>::cast(object);
+ if (heap_object->map()->IsMapInArrayPrototypeChain()) use_ic = false;
+ }
+
+ if (use_ic) {
+ DCHECK(!object->IsAccessCheckNeeded());
+
+ if (object->IsJSObject()) {
+ Handle<JSObject> receiver = Handle<JSObject>::cast(object);
+ bool key_is_smi_like = !Object::ToSmi(isolate(), key).is_null();
+ if (receiver->elements()->map() ==
+ isolate()->heap()->sloppy_arguments_elements_map()) {
+ if (strict_mode() == SLOPPY) {
+ stub = sloppy_arguments_stub();
+ }
+ } else if (key_is_smi_like &&
+ !(target().is_identical_to(sloppy_arguments_stub()))) {
+        // We should go generic if the receiver isn't a dictionary, but our
+        // prototype chain does have dictionary elements. This ensures that
+        // other non-dictionary receivers in the polymorphic case benefit
+        // from fast-path keyed stores.
+ if (!(receiver->map()->DictionaryElementsInPrototypeChainOnly())) {
+ KeyedAccessStoreMode store_mode = GetStoreMode(receiver, key, value);
+ stub = StoreElementStub(receiver, store_mode);
+ }
+ }
+ }
+ }
+
+ if (store_handle.is_null()) {
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate(), store_handle,
+ Runtime::SetObjectProperty(isolate(), object, key, value,
+ strict_mode()),
+ Object);
+ }
+
+ DCHECK(!is_target_set());
+ Code* generic = *generic_stub();
+ if (*stub == generic) {
+ TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "set generic");
+ }
+ DCHECK(!stub.is_null());
+ set_target(*stub);
+ TRACE_IC("StoreIC", key);
+
+ return store_handle;
+}
+
+
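+// CallIC packs the argument count and the call type into disjoint bit fields
+// of one ExtraICState word; the constructor and GetExtraICState() below are
+// exact inverses.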
+CallIC::State::State(ExtraICState extra_ic_state)
+ : argc_(ArgcBits::decode(extra_ic_state)),
+ call_type_(CallTypeBits::decode(extra_ic_state)) {}
+
+
+ExtraICState CallIC::State::GetExtraICState() const {
+ ExtraICState extra_ic_state =
+ ArgcBits::encode(argc_) | CallTypeBits::encode(call_type_);
+ return extra_ic_state;
+}
+
+
+bool CallIC::DoCustomHandler(Handle<Object> receiver, Handle<Object> function,
+ Handle<FixedArray> vector, Handle<Smi> slot,
+ const State& state) {
+ DCHECK(FLAG_use_ic && function->IsJSFunction());
+
+ // Are we the array function?
+ Handle<JSFunction> array_function =
+ Handle<JSFunction>(isolate()->native_context()->array_function());
+ if (array_function.is_identical_to(Handle<JSFunction>::cast(function))) {
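+    // Calls to the Array function benefit from AllocationSite feedback, so
+    // store a site in the vector slot rather than the function itself.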
+ // Alter the slot.
+ IC::State old_state = FeedbackToState(vector, slot);
+ Object* feedback = vector->get(slot->value());
+ if (!feedback->IsAllocationSite()) {
+ Handle<AllocationSite> new_site =
+ isolate()->factory()->NewAllocationSite();
+ vector->set(slot->value(), *new_site);
+ }
+
+ CallIC_ArrayStub stub(isolate(), state);
+ set_target(*stub.GetCode());
+ Handle<String> name;
+ if (array_function->shared()->name()->IsString()) {
+ name = Handle<String>(String::cast(array_function->shared()->name()),
+ isolate());
+ }
+
+ IC::State new_state = FeedbackToState(vector, slot);
+ OnTypeFeedbackChanged(isolate(), address(), old_state, new_state, true);
+ TRACE_VECTOR_IC("CallIC (custom handler)", name, old_state, new_state);
+ return true;
+ }
+ return false;
+}
+
+
+void CallIC::PatchMegamorphic(Handle<Object> function,
+ Handle<FixedArray> vector, Handle<Smi> slot) {
+ State state(target()->extra_ic_state());
+ IC::State old_state = FeedbackToState(vector, slot);
+
+ // We are going generic.
+ vector->set(slot->value(), *TypeFeedbackInfo::MegamorphicSentinel(isolate()),
+ SKIP_WRITE_BARRIER);
+
+ CallICStub stub(isolate(), state);
+ Handle<Code> code = stub.GetCode();
+ set_target(*code);
+
+ Handle<Object> name = isolate()->factory()->empty_string();
+ if (function->IsJSFunction()) {
+ Handle<JSFunction> js_function = Handle<JSFunction>::cast(function);
+ name = handle(js_function->shared()->name(), isolate());
+ }
+
+ IC::State new_state = FeedbackToState(vector, slot);
+ OnTypeFeedbackChanged(isolate(), address(), old_state, new_state, true);
+ TRACE_VECTOR_IC("CallIC", name, old_state, new_state);
+}
+
+
+void CallIC::HandleMiss(Handle<Object> receiver, Handle<Object> function,
+ Handle<FixedArray> vector, Handle<Smi> slot) {
+ State state(target()->extra_ic_state());
+ IC::State old_state = FeedbackToState(vector, slot);
+ Handle<Object> name = isolate()->factory()->empty_string();
+ Object* feedback = vector->get(slot->value());
+
+ // Hand-coded MISS handling is easier if CallIC slots don't contain smis.
+ DCHECK(!feedback->IsSmi());
+
+ if (feedback->IsJSFunction() || !function->IsJSFunction()) {
+ // We are going generic.
+ vector->set(slot->value(),
+ *TypeFeedbackInfo::MegamorphicSentinel(isolate()),
+ SKIP_WRITE_BARRIER);
+ } else {
+ // The feedback is either uninitialized or an allocation site.
+ // It might be an allocation site because if we re-compile the full code
+ // to add deoptimization support, we call with the default call-ic, and
+ // merely need to patch the target to match the feedback.
+ // TODO(mvstanton): the better approach is to dispense with patching
+ // altogether, which is in progress.
+ DCHECK(feedback == *TypeFeedbackInfo::UninitializedSentinel(isolate()) ||
+ feedback->IsAllocationSite());
+
+ // Do we want to install a custom handler?
+ if (FLAG_use_ic &&
+ DoCustomHandler(receiver, function, vector, slot, state)) {
+ return;
+ }
+
+ vector->set(slot->value(), *function);
+ }
+
+ if (function->IsJSFunction()) {
+ Handle<JSFunction> js_function = Handle<JSFunction>::cast(function);
+ name = handle(js_function->shared()->name(), isolate());
+ }
+
+ IC::State new_state = FeedbackToState(vector, slot);
+ OnTypeFeedbackChanged(isolate(), address(), old_state, new_state, true);
+ TRACE_VECTOR_IC("CallIC", name, old_state, new_state);
+}
+
+
+#undef TRACE_IC
+
+
+// ----------------------------------------------------------------------------
+// Static IC stub generators.
+//
+
+// Used from ic-<arch>.cc.
+RUNTIME_FUNCTION(CallIC_Miss) {
+ TimerEventScope<TimerEventIcMiss> timer(isolate);
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 4);
+ CallIC ic(isolate);
+ Handle<Object> receiver = args.at<Object>(0);
+ Handle<Object> function = args.at<Object>(1);
+ Handle<FixedArray> vector = args.at<FixedArray>(2);
+ Handle<Smi> slot = args.at<Smi>(3);
+ ic.HandleMiss(receiver, function, vector, slot);
+ return *function;
+}
+
+
+RUNTIME_FUNCTION(CallIC_Customization_Miss) {
+ TimerEventScope<TimerEventIcMiss> timer(isolate);
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 4);
+  // A miss on a custom call IC always results in going megamorphic.
+ CallIC ic(isolate);
+ Handle<Object> function = args.at<Object>(1);
+ Handle<FixedArray> vector = args.at<FixedArray>(2);
+ Handle<Smi> slot = args.at<Smi>(3);
+ ic.PatchMegamorphic(function, vector, slot);
+ return *function;
+}
+
+
+// Used from ic-<arch>.cc.
+RUNTIME_FUNCTION(LoadIC_Miss) {
+ TimerEventScope<TimerEventIcMiss> timer(isolate);
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 2);
+ LoadIC ic(IC::NO_EXTRA_FRAME, isolate);
+ Handle<Object> receiver = args.at<Object>(0);
+ Handle<Name> key = args.at<Name>(1);
+ ic.UpdateState(receiver, key);
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, ic.Load(receiver, key));
+ return *result;
+}
+
+
+// Used from ic-<arch>.cc
+RUNTIME_FUNCTION(KeyedLoadIC_Miss) {
+ TimerEventScope<TimerEventIcMiss> timer(isolate);
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 2);
+ KeyedLoadIC ic(IC::NO_EXTRA_FRAME, isolate);
+ Handle<Object> receiver = args.at<Object>(0);
+ Handle<Object> key = args.at<Object>(1);
+ ic.UpdateState(receiver, key);
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, ic.Load(receiver, key));
+ return *result;
+}
+
+
+RUNTIME_FUNCTION(KeyedLoadIC_MissFromStubFailure) {
+ TimerEventScope<TimerEventIcMiss> timer(isolate);
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 2);
+ KeyedLoadIC ic(IC::EXTRA_CALL_FRAME, isolate);
+ Handle<Object> receiver = args.at<Object>(0);
+ Handle<Object> key = args.at<Object>(1);
+ ic.UpdateState(receiver, key);
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, ic.Load(receiver, key));
+ return *result;
+}
+
+
+// Used from ic-<arch>.cc.
+RUNTIME_FUNCTION(StoreIC_Miss) {
+ TimerEventScope<TimerEventIcMiss> timer(isolate);
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 3);
+ StoreIC ic(IC::NO_EXTRA_FRAME, isolate);
+ Handle<Object> receiver = args.at<Object>(0);
+ Handle<String> key = args.at<String>(1);
+ ic.UpdateState(receiver, key);
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result, ic.Store(receiver, key, args.at<Object>(2)));
+ return *result;
+}
+
+
+RUNTIME_FUNCTION(StoreIC_MissFromStubFailure) {
+ TimerEventScope<TimerEventIcMiss> timer(isolate);
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 3);
+ StoreIC ic(IC::EXTRA_CALL_FRAME, isolate);
+ Handle<Object> receiver = args.at<Object>(0);
+ Handle<String> key = args.at<String>(1);
+ ic.UpdateState(receiver, key);
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result, ic.Store(receiver, key, args.at<Object>(2)));
+ return *result;
+}
+
+
+// Extend storage is called in a store inline cache when
+// it is necessary to extend the properties array of a
+// JSObject.
+RUNTIME_FUNCTION(SharedStoreIC_ExtendStorage) {
+ TimerEventScope<TimerEventIcMiss> timer(isolate);
+ HandleScope shs(isolate);
+ DCHECK(args.length() == 3);
+
+ // Convert the parameters
+ Handle<JSObject> object = args.at<JSObject>(0);
+ Handle<Map> transition = args.at<Map>(1);
+ Handle<Object> value = args.at<Object>(2);
+
+  // Check that the object has run out of property space.
+ DCHECK(object->HasFastProperties());
+ DCHECK(object->map()->unused_property_fields() == 0);
+
+ JSObject::MigrateToNewProperty(object, transition, value);
+
+ // Return the stored value.
+ return *value;
+}
+
+
+// Used from ic-<arch>.cc.
+RUNTIME_FUNCTION(KeyedStoreIC_Miss) {
+ TimerEventScope<TimerEventIcMiss> timer(isolate);
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 3);
+ KeyedStoreIC ic(IC::NO_EXTRA_FRAME, isolate);
+ Handle<Object> receiver = args.at<Object>(0);
+ Handle<Object> key = args.at<Object>(1);
+ ic.UpdateState(receiver, key);
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result, ic.Store(receiver, key, args.at<Object>(2)));
+ return *result;
+}
+
+
+RUNTIME_FUNCTION(KeyedStoreIC_MissFromStubFailure) {
+ TimerEventScope<TimerEventIcMiss> timer(isolate);
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 3);
+ KeyedStoreIC ic(IC::EXTRA_CALL_FRAME, isolate);
+ Handle<Object> receiver = args.at<Object>(0);
+ Handle<Object> key = args.at<Object>(1);
+ ic.UpdateState(receiver, key);
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result, ic.Store(receiver, key, args.at<Object>(2)));
+ return *result;
+}
+
+
+RUNTIME_FUNCTION(StoreIC_Slow) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 3);
+ StoreIC ic(IC::NO_EXTRA_FRAME, isolate);
+ Handle<Object> object = args.at<Object>(0);
+ Handle<Object> key = args.at<Object>(1);
+ Handle<Object> value = args.at<Object>(2);
+ StrictMode strict_mode = ic.strict_mode();
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result,
+ Runtime::SetObjectProperty(isolate, object, key, value, strict_mode));
+ return *result;
+}
+
+
+RUNTIME_FUNCTION(KeyedStoreIC_Slow) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 3);
+ KeyedStoreIC ic(IC::NO_EXTRA_FRAME, isolate);
+ Handle<Object> object = args.at<Object>(0);
+ Handle<Object> key = args.at<Object>(1);
+ Handle<Object> value = args.at<Object>(2);
+ StrictMode strict_mode = ic.strict_mode();
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result,
+ Runtime::SetObjectProperty(isolate, object, key, value, strict_mode));
+ return *result;
+}
+
+
+RUNTIME_FUNCTION(ElementsTransitionAndStoreIC_Miss) {
+ TimerEventScope<TimerEventIcMiss> timer(isolate);
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 4);
+ KeyedStoreIC ic(IC::EXTRA_CALL_FRAME, isolate);
+ Handle<Object> value = args.at<Object>(0);
+ Handle<Map> map = args.at<Map>(1);
+ Handle<Object> key = args.at<Object>(2);
+ Handle<Object> object = args.at<Object>(3);
+ StrictMode strict_mode = ic.strict_mode();
+ if (object->IsJSObject()) {
+ JSObject::TransitionElementsKind(Handle<JSObject>::cast(object),
+ map->elements_kind());
+ }
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result,
+ Runtime::SetObjectProperty(isolate, object, key, value, strict_mode));
+ return *result;
+}
+
+
+BinaryOpIC::State::State(Isolate* isolate, ExtraICState extra_ic_state)
+ : isolate_(isolate) {
+ op_ =
+ static_cast<Token::Value>(FIRST_TOKEN + OpField::decode(extra_ic_state));
+ mode_ = OverwriteModeField::decode(extra_ic_state);
+ fixed_right_arg_ =
+ Maybe<int>(HasFixedRightArgField::decode(extra_ic_state),
+ 1 << FixedRightArgValueField::decode(extra_ic_state));
+ left_kind_ = LeftKindField::decode(extra_ic_state);
+ if (fixed_right_arg_.has_value) {
+ right_kind_ = Smi::IsValid(fixed_right_arg_.value) ? SMI : INT32;
+ } else {
+ right_kind_ = RightKindField::decode(extra_ic_state);
+ }
+ result_kind_ = ResultKindField::decode(extra_ic_state);
+ DCHECK_LE(FIRST_TOKEN, op_);
+ DCHECK_LE(op_, LAST_TOKEN);
+}
+
+
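+// Note the asymmetric encoding: when a fixed right argument exists, its log2
+// is stored in FixedRightArgValueField (the constructor above rebuilds the
+// value as 1 << log2); otherwise the same bits hold the right operand kind.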
+ExtraICState BinaryOpIC::State::GetExtraICState() const {
+ ExtraICState extra_ic_state =
+ OpField::encode(op_ - FIRST_TOKEN) | OverwriteModeField::encode(mode_) |
+ LeftKindField::encode(left_kind_) |
+ ResultKindField::encode(result_kind_) |
+ HasFixedRightArgField::encode(fixed_right_arg_.has_value);
+ if (fixed_right_arg_.has_value) {
+ extra_ic_state = FixedRightArgValueField::update(
+ extra_ic_state, WhichPowerOf2(fixed_right_arg_.value));
+ } else {
+ extra_ic_state = RightKindField::update(extra_ic_state, right_kind_);
+ }
+ return extra_ic_state;
+}
+
+
+// static
+void BinaryOpIC::State::GenerateAheadOfTime(Isolate* isolate,
+ void (*Generate)(Isolate*,
+ const State&)) {
+// TODO(olivf) We should investigate why adding stubs to the snapshot is so
+// expensive at runtime. When solved we should be able to add most binops to
+// the snapshot instead of hand-picking them.
+// Generated list of commonly used stubs
+#define GENERATE(op, left_kind, right_kind, result_kind, mode) \
+ do { \
+ State state(isolate, op, mode); \
+ state.left_kind_ = left_kind; \
+ state.fixed_right_arg_.has_value = false; \
+ state.right_kind_ = right_kind; \
+ state.result_kind_ = result_kind; \
+ Generate(isolate, state); \
+ } while (false)
+ GENERATE(Token::ADD, INT32, INT32, INT32, NO_OVERWRITE);
+ GENERATE(Token::ADD, INT32, INT32, INT32, OVERWRITE_LEFT);
+ GENERATE(Token::ADD, INT32, INT32, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::ADD, INT32, INT32, NUMBER, OVERWRITE_LEFT);
+ GENERATE(Token::ADD, INT32, NUMBER, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::ADD, INT32, NUMBER, NUMBER, OVERWRITE_LEFT);
+ GENERATE(Token::ADD, INT32, NUMBER, NUMBER, OVERWRITE_RIGHT);
+ GENERATE(Token::ADD, INT32, SMI, INT32, NO_OVERWRITE);
+ GENERATE(Token::ADD, INT32, SMI, INT32, OVERWRITE_LEFT);
+ GENERATE(Token::ADD, INT32, SMI, INT32, OVERWRITE_RIGHT);
+ GENERATE(Token::ADD, NUMBER, INT32, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::ADD, NUMBER, INT32, NUMBER, OVERWRITE_LEFT);
+ GENERATE(Token::ADD, NUMBER, INT32, NUMBER, OVERWRITE_RIGHT);
+ GENERATE(Token::ADD, NUMBER, NUMBER, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::ADD, NUMBER, NUMBER, NUMBER, OVERWRITE_LEFT);
+ GENERATE(Token::ADD, NUMBER, NUMBER, NUMBER, OVERWRITE_RIGHT);
+ GENERATE(Token::ADD, NUMBER, SMI, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::ADD, NUMBER, SMI, NUMBER, OVERWRITE_LEFT);
+ GENERATE(Token::ADD, NUMBER, SMI, NUMBER, OVERWRITE_RIGHT);
+ GENERATE(Token::ADD, SMI, INT32, INT32, NO_OVERWRITE);
+ GENERATE(Token::ADD, SMI, INT32, INT32, OVERWRITE_LEFT);
+ GENERATE(Token::ADD, SMI, INT32, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::ADD, SMI, NUMBER, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::ADD, SMI, NUMBER, NUMBER, OVERWRITE_LEFT);
+ GENERATE(Token::ADD, SMI, NUMBER, NUMBER, OVERWRITE_RIGHT);
+ GENERATE(Token::ADD, SMI, SMI, INT32, OVERWRITE_LEFT);
+ GENERATE(Token::ADD, SMI, SMI, SMI, OVERWRITE_RIGHT);
+ GENERATE(Token::BIT_AND, INT32, INT32, INT32, NO_OVERWRITE);
+ GENERATE(Token::BIT_AND, INT32, INT32, INT32, OVERWRITE_LEFT);
+ GENERATE(Token::BIT_AND, INT32, INT32, INT32, OVERWRITE_RIGHT);
+ GENERATE(Token::BIT_AND, INT32, INT32, SMI, NO_OVERWRITE);
+ GENERATE(Token::BIT_AND, INT32, INT32, SMI, OVERWRITE_RIGHT);
+ GENERATE(Token::BIT_AND, INT32, SMI, INT32, NO_OVERWRITE);
+ GENERATE(Token::BIT_AND, INT32, SMI, INT32, OVERWRITE_RIGHT);
+ GENERATE(Token::BIT_AND, INT32, SMI, SMI, NO_OVERWRITE);
+ GENERATE(Token::BIT_AND, INT32, SMI, SMI, OVERWRITE_LEFT);
+ GENERATE(Token::BIT_AND, INT32, SMI, SMI, OVERWRITE_RIGHT);
+ GENERATE(Token::BIT_AND, NUMBER, INT32, INT32, OVERWRITE_RIGHT);
+ GENERATE(Token::BIT_AND, NUMBER, SMI, SMI, NO_OVERWRITE);
+ GENERATE(Token::BIT_AND, NUMBER, SMI, SMI, OVERWRITE_RIGHT);
+ GENERATE(Token::BIT_AND, SMI, INT32, INT32, NO_OVERWRITE);
+ GENERATE(Token::BIT_AND, SMI, INT32, SMI, OVERWRITE_RIGHT);
+ GENERATE(Token::BIT_AND, SMI, NUMBER, SMI, OVERWRITE_RIGHT);
+ GENERATE(Token::BIT_AND, SMI, SMI, SMI, NO_OVERWRITE);
+ GENERATE(Token::BIT_AND, SMI, SMI, SMI, OVERWRITE_LEFT);
+ GENERATE(Token::BIT_AND, SMI, SMI, SMI, OVERWRITE_RIGHT);
+ GENERATE(Token::BIT_OR, INT32, INT32, INT32, OVERWRITE_LEFT);
+ GENERATE(Token::BIT_OR, INT32, INT32, INT32, OVERWRITE_RIGHT);
+ GENERATE(Token::BIT_OR, INT32, INT32, SMI, OVERWRITE_LEFT);
+ GENERATE(Token::BIT_OR, INT32, SMI, INT32, NO_OVERWRITE);
+ GENERATE(Token::BIT_OR, INT32, SMI, INT32, OVERWRITE_LEFT);
+ GENERATE(Token::BIT_OR, INT32, SMI, INT32, OVERWRITE_RIGHT);
+ GENERATE(Token::BIT_OR, INT32, SMI, SMI, NO_OVERWRITE);
+ GENERATE(Token::BIT_OR, INT32, SMI, SMI, OVERWRITE_RIGHT);
+ GENERATE(Token::BIT_OR, NUMBER, SMI, INT32, NO_OVERWRITE);
+ GENERATE(Token::BIT_OR, NUMBER, SMI, INT32, OVERWRITE_LEFT);
+ GENERATE(Token::BIT_OR, NUMBER, SMI, INT32, OVERWRITE_RIGHT);
+ GENERATE(Token::BIT_OR, NUMBER, SMI, SMI, NO_OVERWRITE);
+ GENERATE(Token::BIT_OR, NUMBER, SMI, SMI, OVERWRITE_LEFT);
+ GENERATE(Token::BIT_OR, SMI, INT32, INT32, OVERWRITE_LEFT);
+ GENERATE(Token::BIT_OR, SMI, INT32, INT32, OVERWRITE_RIGHT);
+ GENERATE(Token::BIT_OR, SMI, INT32, SMI, OVERWRITE_RIGHT);
+ GENERATE(Token::BIT_OR, SMI, SMI, SMI, OVERWRITE_LEFT);
+ GENERATE(Token::BIT_OR, SMI, SMI, SMI, OVERWRITE_RIGHT);
+ GENERATE(Token::BIT_XOR, INT32, INT32, INT32, NO_OVERWRITE);
+ GENERATE(Token::BIT_XOR, INT32, INT32, INT32, OVERWRITE_LEFT);
+ GENERATE(Token::BIT_XOR, INT32, INT32, INT32, OVERWRITE_RIGHT);
+ GENERATE(Token::BIT_XOR, INT32, INT32, SMI, NO_OVERWRITE);
+ GENERATE(Token::BIT_XOR, INT32, INT32, SMI, OVERWRITE_LEFT);
+ GENERATE(Token::BIT_XOR, INT32, NUMBER, SMI, NO_OVERWRITE);
+ GENERATE(Token::BIT_XOR, INT32, SMI, INT32, NO_OVERWRITE);
+ GENERATE(Token::BIT_XOR, INT32, SMI, INT32, OVERWRITE_LEFT);
+ GENERATE(Token::BIT_XOR, INT32, SMI, INT32, OVERWRITE_RIGHT);
+ GENERATE(Token::BIT_XOR, NUMBER, INT32, INT32, NO_OVERWRITE);
+ GENERATE(Token::BIT_XOR, NUMBER, SMI, INT32, NO_OVERWRITE);
+ GENERATE(Token::BIT_XOR, NUMBER, SMI, SMI, NO_OVERWRITE);
+ GENERATE(Token::BIT_XOR, SMI, INT32, INT32, NO_OVERWRITE);
+ GENERATE(Token::BIT_XOR, SMI, INT32, INT32, OVERWRITE_LEFT);
+ GENERATE(Token::BIT_XOR, SMI, INT32, SMI, OVERWRITE_LEFT);
+ GENERATE(Token::BIT_XOR, SMI, SMI, SMI, NO_OVERWRITE);
+ GENERATE(Token::BIT_XOR, SMI, SMI, SMI, OVERWRITE_LEFT);
+ GENERATE(Token::BIT_XOR, SMI, SMI, SMI, OVERWRITE_RIGHT);
+ GENERATE(Token::DIV, INT32, INT32, INT32, NO_OVERWRITE);
+ GENERATE(Token::DIV, INT32, INT32, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::DIV, INT32, NUMBER, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::DIV, INT32, NUMBER, NUMBER, OVERWRITE_LEFT);
+ GENERATE(Token::DIV, INT32, SMI, INT32, NO_OVERWRITE);
+ GENERATE(Token::DIV, INT32, SMI, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::DIV, NUMBER, INT32, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::DIV, NUMBER, INT32, NUMBER, OVERWRITE_LEFT);
+ GENERATE(Token::DIV, NUMBER, NUMBER, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::DIV, NUMBER, NUMBER, NUMBER, OVERWRITE_LEFT);
+ GENERATE(Token::DIV, NUMBER, NUMBER, NUMBER, OVERWRITE_RIGHT);
+ GENERATE(Token::DIV, NUMBER, SMI, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::DIV, NUMBER, SMI, NUMBER, OVERWRITE_LEFT);
+ GENERATE(Token::DIV, SMI, INT32, INT32, NO_OVERWRITE);
+ GENERATE(Token::DIV, SMI, INT32, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::DIV, SMI, INT32, NUMBER, OVERWRITE_LEFT);
+ GENERATE(Token::DIV, SMI, NUMBER, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::DIV, SMI, NUMBER, NUMBER, OVERWRITE_LEFT);
+ GENERATE(Token::DIV, SMI, NUMBER, NUMBER, OVERWRITE_RIGHT);
+ GENERATE(Token::DIV, SMI, SMI, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::DIV, SMI, SMI, NUMBER, OVERWRITE_LEFT);
+ GENERATE(Token::DIV, SMI, SMI, NUMBER, OVERWRITE_RIGHT);
+ GENERATE(Token::DIV, SMI, SMI, SMI, NO_OVERWRITE);
+ GENERATE(Token::DIV, SMI, SMI, SMI, OVERWRITE_LEFT);
+ GENERATE(Token::DIV, SMI, SMI, SMI, OVERWRITE_RIGHT);
+ GENERATE(Token::MOD, NUMBER, SMI, NUMBER, OVERWRITE_LEFT);
+ GENERATE(Token::MOD, SMI, SMI, SMI, NO_OVERWRITE);
+ GENERATE(Token::MOD, SMI, SMI, SMI, OVERWRITE_LEFT);
+ GENERATE(Token::MUL, INT32, INT32, INT32, NO_OVERWRITE);
+ GENERATE(Token::MUL, INT32, INT32, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::MUL, INT32, NUMBER, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::MUL, INT32, NUMBER, NUMBER, OVERWRITE_LEFT);
+ GENERATE(Token::MUL, INT32, SMI, INT32, NO_OVERWRITE);
+ GENERATE(Token::MUL, INT32, SMI, INT32, OVERWRITE_LEFT);
+ GENERATE(Token::MUL, INT32, SMI, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::MUL, NUMBER, INT32, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::MUL, NUMBER, INT32, NUMBER, OVERWRITE_LEFT);
+ GENERATE(Token::MUL, NUMBER, INT32, NUMBER, OVERWRITE_RIGHT);
+ GENERATE(Token::MUL, NUMBER, NUMBER, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::MUL, NUMBER, NUMBER, NUMBER, OVERWRITE_LEFT);
+ GENERATE(Token::MUL, NUMBER, SMI, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::MUL, NUMBER, SMI, NUMBER, OVERWRITE_LEFT);
+ GENERATE(Token::MUL, NUMBER, SMI, NUMBER, OVERWRITE_RIGHT);
+ GENERATE(Token::MUL, SMI, INT32, INT32, NO_OVERWRITE);
+ GENERATE(Token::MUL, SMI, INT32, INT32, OVERWRITE_LEFT);
+ GENERATE(Token::MUL, SMI, INT32, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::MUL, SMI, NUMBER, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::MUL, SMI, NUMBER, NUMBER, OVERWRITE_LEFT);
+ GENERATE(Token::MUL, SMI, NUMBER, NUMBER, OVERWRITE_RIGHT);
+ GENERATE(Token::MUL, SMI, SMI, INT32, NO_OVERWRITE);
+ GENERATE(Token::MUL, SMI, SMI, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::MUL, SMI, SMI, NUMBER, OVERWRITE_LEFT);
+ GENERATE(Token::MUL, SMI, SMI, SMI, NO_OVERWRITE);
+ GENERATE(Token::MUL, SMI, SMI, SMI, OVERWRITE_LEFT);
+ GENERATE(Token::MUL, SMI, SMI, SMI, OVERWRITE_RIGHT);
+ GENERATE(Token::SAR, INT32, SMI, INT32, OVERWRITE_RIGHT);
+ GENERATE(Token::SAR, INT32, SMI, SMI, NO_OVERWRITE);
+ GENERATE(Token::SAR, INT32, SMI, SMI, OVERWRITE_RIGHT);
+ GENERATE(Token::SAR, NUMBER, SMI, SMI, NO_OVERWRITE);
+ GENERATE(Token::SAR, NUMBER, SMI, SMI, OVERWRITE_RIGHT);
+ GENERATE(Token::SAR, SMI, SMI, SMI, OVERWRITE_LEFT);
+ GENERATE(Token::SAR, SMI, SMI, SMI, OVERWRITE_RIGHT);
+ GENERATE(Token::SHL, INT32, SMI, INT32, NO_OVERWRITE);
+ GENERATE(Token::SHL, INT32, SMI, INT32, OVERWRITE_RIGHT);
+ GENERATE(Token::SHL, INT32, SMI, SMI, NO_OVERWRITE);
+ GENERATE(Token::SHL, INT32, SMI, SMI, OVERWRITE_RIGHT);
+ GENERATE(Token::SHL, NUMBER, SMI, SMI, OVERWRITE_RIGHT);
+ GENERATE(Token::SHL, SMI, SMI, INT32, NO_OVERWRITE);
+ GENERATE(Token::SHL, SMI, SMI, INT32, OVERWRITE_LEFT);
+ GENERATE(Token::SHL, SMI, SMI, INT32, OVERWRITE_RIGHT);
+ GENERATE(Token::SHL, SMI, SMI, SMI, NO_OVERWRITE);
+ GENERATE(Token::SHL, SMI, SMI, SMI, OVERWRITE_LEFT);
+ GENERATE(Token::SHL, SMI, SMI, SMI, OVERWRITE_RIGHT);
+ GENERATE(Token::SHR, INT32, SMI, SMI, NO_OVERWRITE);
+ GENERATE(Token::SHR, INT32, SMI, SMI, OVERWRITE_LEFT);
+ GENERATE(Token::SHR, INT32, SMI, SMI, OVERWRITE_RIGHT);
+ GENERATE(Token::SHR, NUMBER, SMI, SMI, NO_OVERWRITE);
+ GENERATE(Token::SHR, NUMBER, SMI, SMI, OVERWRITE_LEFT);
+ GENERATE(Token::SHR, NUMBER, SMI, INT32, OVERWRITE_RIGHT);
+ GENERATE(Token::SHR, SMI, SMI, SMI, NO_OVERWRITE);
+ GENERATE(Token::SHR, SMI, SMI, SMI, OVERWRITE_LEFT);
+ GENERATE(Token::SHR, SMI, SMI, SMI, OVERWRITE_RIGHT);
+ GENERATE(Token::SUB, INT32, INT32, INT32, NO_OVERWRITE);
+ GENERATE(Token::SUB, INT32, INT32, INT32, OVERWRITE_LEFT);
+ GENERATE(Token::SUB, INT32, NUMBER, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::SUB, INT32, NUMBER, NUMBER, OVERWRITE_RIGHT);
+ GENERATE(Token::SUB, INT32, SMI, INT32, OVERWRITE_LEFT);
+ GENERATE(Token::SUB, INT32, SMI, INT32, OVERWRITE_RIGHT);
+ GENERATE(Token::SUB, NUMBER, INT32, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::SUB, NUMBER, INT32, NUMBER, OVERWRITE_LEFT);
+ GENERATE(Token::SUB, NUMBER, NUMBER, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::SUB, NUMBER, NUMBER, NUMBER, OVERWRITE_LEFT);
+ GENERATE(Token::SUB, NUMBER, NUMBER, NUMBER, OVERWRITE_RIGHT);
+ GENERATE(Token::SUB, NUMBER, SMI, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::SUB, NUMBER, SMI, NUMBER, OVERWRITE_LEFT);
+ GENERATE(Token::SUB, NUMBER, SMI, NUMBER, OVERWRITE_RIGHT);
+ GENERATE(Token::SUB, SMI, INT32, INT32, NO_OVERWRITE);
+ GENERATE(Token::SUB, SMI, NUMBER, NUMBER, NO_OVERWRITE);
+ GENERATE(Token::SUB, SMI, NUMBER, NUMBER, OVERWRITE_LEFT);
+ GENERATE(Token::SUB, SMI, NUMBER, NUMBER, OVERWRITE_RIGHT);
+ GENERATE(Token::SUB, SMI, SMI, SMI, NO_OVERWRITE);
+ GENERATE(Token::SUB, SMI, SMI, SMI, OVERWRITE_LEFT);
+ GENERATE(Token::SUB, SMI, SMI, SMI, OVERWRITE_RIGHT);
+#undef GENERATE
+#define GENERATE(op, left_kind, fixed_right_arg_value, result_kind, mode) \
+ do { \
+ State state(isolate, op, mode); \
+ state.left_kind_ = left_kind; \
+ state.fixed_right_arg_.has_value = true; \
+ state.fixed_right_arg_.value = fixed_right_arg_value; \
+ state.right_kind_ = SMI; \
+ state.result_kind_ = result_kind; \
+ Generate(isolate, state); \
+ } while (false)
+ GENERATE(Token::MOD, SMI, 2, SMI, NO_OVERWRITE);
+ GENERATE(Token::MOD, SMI, 4, SMI, NO_OVERWRITE);
+ GENERATE(Token::MOD, SMI, 4, SMI, OVERWRITE_LEFT);
+ GENERATE(Token::MOD, SMI, 8, SMI, NO_OVERWRITE);
+ GENERATE(Token::MOD, SMI, 16, SMI, OVERWRITE_LEFT);
+ GENERATE(Token::MOD, SMI, 32, SMI, NO_OVERWRITE);
+ GENERATE(Token::MOD, SMI, 2048, SMI, NO_OVERWRITE);
+#undef GENERATE
+}
+
+
+Type* BinaryOpIC::State::GetResultType(Zone* zone) const {
+ Kind result_kind = result_kind_;
+ if (HasSideEffects()) {
+ result_kind = NONE;
+ } else if (result_kind == GENERIC && op_ == Token::ADD) {
+ return Type::Union(Type::Number(zone), Type::String(zone), zone);
+ } else if (result_kind == NUMBER && op_ == Token::SHR) {
+ return Type::Unsigned32(zone);
+ }
+ DCHECK_NE(GENERIC, result_kind);
+ return KindToType(result_kind, zone);
+}
+
+
+OStream& operator<<(OStream& os, const BinaryOpIC::State& s) {
+ os << "(" << Token::Name(s.op_);
+ if (s.mode_ == OVERWRITE_LEFT)
+ os << "_ReuseLeft";
+ else if (s.mode_ == OVERWRITE_RIGHT)
+ os << "_ReuseRight";
+ if (s.CouldCreateAllocationMementos()) os << "_CreateAllocationMementos";
+ os << ":" << BinaryOpIC::State::KindToString(s.left_kind_) << "*";
+ if (s.fixed_right_arg_.has_value) {
+ os << s.fixed_right_arg_.value;
+ } else {
+ os << BinaryOpIC::State::KindToString(s.right_kind_);
+ }
+ return os << "->" << BinaryOpIC::State::KindToString(s.result_kind_) << ")";
+}
+
+
+void BinaryOpIC::State::Update(Handle<Object> left, Handle<Object> right,
+ Handle<Object> result) {
+ ExtraICState old_extra_ic_state = GetExtraICState();
+
+ left_kind_ = UpdateKind(left, left_kind_);
+ right_kind_ = UpdateKind(right, right_kind_);
+
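+  // A fixed right argument is only tracked for Token::MOD with a SMI or INT32
+  // left operand and a positive power-of-two right operand that fits the bit
+  // field, e.g. x % 4.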
+ int32_t fixed_right_arg_value = 0;
+ bool has_fixed_right_arg =
+ op_ == Token::MOD && right->ToInt32(&fixed_right_arg_value) &&
+ fixed_right_arg_value > 0 && IsPowerOf2(fixed_right_arg_value) &&
+ FixedRightArgValueField::is_valid(WhichPowerOf2(fixed_right_arg_value)) &&
+ (left_kind_ == SMI || left_kind_ == INT32) &&
+ (result_kind_ == NONE || !fixed_right_arg_.has_value);
+ fixed_right_arg_ = Maybe<int32_t>(has_fixed_right_arg, fixed_right_arg_value);
+
+ result_kind_ = UpdateKind(result, result_kind_);
+
+ if (!Token::IsTruncatingBinaryOp(op_)) {
+ Kind input_kind = Max(left_kind_, right_kind_);
+ if (result_kind_ < input_kind && input_kind <= NUMBER) {
+ result_kind_ = input_kind;
+ }
+ }
+
+ // We don't want to distinguish INT32 and NUMBER for string add (because
+ // NumberToString can't make use of this anyway).
+ if (left_kind_ == STRING && right_kind_ == INT32) {
+ DCHECK_EQ(STRING, result_kind_);
+ DCHECK_EQ(Token::ADD, op_);
+ right_kind_ = NUMBER;
+ } else if (right_kind_ == STRING && left_kind_ == INT32) {
+ DCHECK_EQ(STRING, result_kind_);
+ DCHECK_EQ(Token::ADD, op_);
+ left_kind_ = NUMBER;
+ }
+
+ // Reset overwrite mode unless we can actually make use of it, or may be able
+ // to make use of it at some point in the future.
+ if ((mode_ == OVERWRITE_LEFT && left_kind_ > NUMBER) ||
+ (mode_ == OVERWRITE_RIGHT && right_kind_ > NUMBER) ||
+ result_kind_ > NUMBER) {
+ mode_ = NO_OVERWRITE;
+ }
+
+ if (old_extra_ic_state == GetExtraICState()) {
+    // Tagged operations can lead to non-truncating HChanges.
+ if (left->IsUndefined() || left->IsBoolean()) {
+ left_kind_ = GENERIC;
+ } else {
+ DCHECK(right->IsUndefined() || right->IsBoolean());
+ right_kind_ = GENERIC;
+ }
+ }
+}
+
+
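+// Kinds are ordered NONE < SMI < INT32 < NUMBER < STRING < GENERIC, and the
+// result is the max of the old and new kind; mixing a numeric kind
+// (<= NUMBER) with a non-numeric one forces GENERIC.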
+BinaryOpIC::State::Kind BinaryOpIC::State::UpdateKind(Handle<Object> object,
+ Kind kind) const {
+ Kind new_kind = GENERIC;
+ bool is_truncating = Token::IsTruncatingBinaryOp(op());
+ if (object->IsBoolean() && is_truncating) {
+ // Booleans will be automatically truncated by HChange.
+ new_kind = INT32;
+ } else if (object->IsUndefined()) {
+ // Undefined will be automatically truncated by HChange.
+ new_kind = is_truncating ? INT32 : NUMBER;
+ } else if (object->IsSmi()) {
+ new_kind = SMI;
+ } else if (object->IsHeapNumber()) {
+ double value = Handle<HeapNumber>::cast(object)->value();
+ new_kind = IsInt32Double(value) ? INT32 : NUMBER;
+ } else if (object->IsString() && op() == Token::ADD) {
+ new_kind = STRING;
+ }
+ if (new_kind == INT32 && SmiValuesAre32Bits()) {
+ new_kind = NUMBER;
+ }
+ if (kind != NONE && ((new_kind <= NUMBER && kind > NUMBER) ||
+ (new_kind > NUMBER && kind <= NUMBER))) {
+ new_kind = GENERIC;
+ }
+ return Max(kind, new_kind);
+}
+
+
+// static
+const char* BinaryOpIC::State::KindToString(Kind kind) {
+ switch (kind) {
+ case NONE:
+ return "None";
+ case SMI:
+ return "Smi";
+ case INT32:
+ return "Int32";
+ case NUMBER:
+ return "Number";
+ case STRING:
+ return "String";
+ case GENERIC:
+ return "Generic";
+ }
+ UNREACHABLE();
+ return NULL;
+}
+
+
+// static
+Type* BinaryOpIC::State::KindToType(Kind kind, Zone* zone) {
+ switch (kind) {
+ case NONE:
+ return Type::None(zone);
+ case SMI:
+ return Type::SignedSmall(zone);
+ case INT32:
+ return Type::Signed32(zone);
+ case NUMBER:
+ return Type::Number(zone);
+ case STRING:
+ return Type::String(zone);
+ case GENERIC:
+ return Type::Any(zone);
+ }
+ UNREACHABLE();
+ return NULL;
+}
+
+
+MaybeHandle<Object> BinaryOpIC::Transition(
+ Handle<AllocationSite> allocation_site, Handle<Object> left,
+ Handle<Object> right) {
+ State state(isolate(), target()->extra_ic_state());
+
+ // Compute the actual result using the builtin for the binary operation.
+ Object* builtin = isolate()->js_builtins_object()->javascript_builtin(
+ TokenToJSBuiltin(state.op()));
+ Handle<JSFunction> function = handle(JSFunction::cast(builtin), isolate());
+ Handle<Object> result;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate(), result, Execution::Call(isolate(), function, left, 1, &right),
+ Object);
+
+ // Execution::Call can execute arbitrary JavaScript, hence potentially
+ // update the state of this very IC, so we must update the stored state.
+ UpdateTarget();
+ // Compute the new state.
+ State old_state(isolate(), target()->extra_ic_state());
+ state.Update(left, right, result);
+
+ // Check if we have a string operation here.
+ Handle<Code> target;
+ if (!allocation_site.is_null() || state.ShouldCreateAllocationMementos()) {
+ // Setup the allocation site on-demand.
+ if (allocation_site.is_null()) {
+ allocation_site = isolate()->factory()->NewAllocationSite();
+ }
+
+ // Install the stub with an allocation site.
+ BinaryOpICWithAllocationSiteStub stub(isolate(), state);
+ target = stub.GetCodeCopyFromTemplate(allocation_site);
+
+ // Sanity check the trampoline stub.
+ DCHECK_EQ(*allocation_site, target->FindFirstAllocationSite());
+ } else {
+ // Install the generic stub.
+ BinaryOpICStub stub(isolate(), state);
+ target = stub.GetCode();
+
+ // Sanity check the generic stub.
+ DCHECK_EQ(NULL, target->FindFirstAllocationSite());
+ }
+ set_target(*target);
+
+ if (FLAG_trace_ic) {
+ OFStream os(stdout);
+ os << "[BinaryOpIC" << old_state << " => " << state << " @ "
+ << static_cast<void*>(*target) << " <- ";
+ JavaScriptFrame::PrintTop(isolate(), stdout, false, true);
+ if (!allocation_site.is_null()) {
+ os << " using allocation site " << static_cast<void*>(*allocation_site);
+ }
+ os << "]" << endl;
+ }
+
+ // Patch the inlined smi code as necessary.
+ if (!old_state.UseInlinedSmiCode() && state.UseInlinedSmiCode()) {
+ PatchInlinedSmiCode(address(), ENABLE_INLINED_SMI_CHECK);
+ } else if (old_state.UseInlinedSmiCode() && !state.UseInlinedSmiCode()) {
+ PatchInlinedSmiCode(address(), DISABLE_INLINED_SMI_CHECK);
+ }
+
+ return result;
+}
+
+
+RUNTIME_FUNCTION(BinaryOpIC_Miss) {
+ TimerEventScope<TimerEventIcMiss> timer(isolate);
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ Handle<Object> left = args.at<Object>(BinaryOpICStub::kLeft);
+ Handle<Object> right = args.at<Object>(BinaryOpICStub::kRight);
+ BinaryOpIC ic(isolate);
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result,
+ ic.Transition(Handle<AllocationSite>::null(), left, right));
+ return *result;
+}
+
+
+RUNTIME_FUNCTION(BinaryOpIC_MissWithAllocationSite) {
+ TimerEventScope<TimerEventIcMiss> timer(isolate);
+ HandleScope scope(isolate);
+ DCHECK_EQ(3, args.length());
+ Handle<AllocationSite> allocation_site =
+ args.at<AllocationSite>(BinaryOpWithAllocationSiteStub::kAllocationSite);
+ Handle<Object> left = args.at<Object>(BinaryOpWithAllocationSiteStub::kLeft);
+ Handle<Object> right =
+ args.at<Object>(BinaryOpWithAllocationSiteStub::kRight);
+ BinaryOpIC ic(isolate);
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result, ic.Transition(allocation_site, left, right));
+ return *result;
+}
+
+
+Code* CompareIC::GetRawUninitialized(Isolate* isolate, Token::Value op) {
+ ICCompareStub stub(isolate, op, UNINITIALIZED, UNINITIALIZED, UNINITIALIZED);
+ Code* code = NULL;
+ CHECK(stub.FindCodeInCache(&code));
+ return code;
+}
+
+
+Handle<Code> CompareIC::GetUninitialized(Isolate* isolate, Token::Value op) {
+ ICCompareStub stub(isolate, op, UNINITIALIZED, UNINITIALIZED, UNINITIALIZED);
+ return stub.GetCode();
+}
+
+
+const char* CompareIC::GetStateName(State state) {
+ switch (state) {
+ case UNINITIALIZED:
+ return "UNINITIALIZED";
+ case SMI:
+ return "SMI";
+ case NUMBER:
+ return "NUMBER";
+ case INTERNALIZED_STRING:
+ return "INTERNALIZED_STRING";
+ case STRING:
+ return "STRING";
+ case UNIQUE_NAME:
+ return "UNIQUE_NAME";
+ case OBJECT:
+ return "OBJECT";
+ case KNOWN_OBJECT:
+ return "KNOWN_OBJECT";
+ case GENERIC:
+ return "GENERIC";
+ }
+ UNREACHABLE();
+ return NULL;
+}
+
+
+Type* CompareIC::StateToType(Zone* zone, CompareIC::State state,
+ Handle<Map> map) {
+ switch (state) {
+ case CompareIC::UNINITIALIZED:
+ return Type::None(zone);
+ case CompareIC::SMI:
+ return Type::SignedSmall(zone);
+ case CompareIC::NUMBER:
+ return Type::Number(zone);
+ case CompareIC::STRING:
+ return Type::String(zone);
+ case CompareIC::INTERNALIZED_STRING:
+ return Type::InternalizedString(zone);
+ case CompareIC::UNIQUE_NAME:
+ return Type::UniqueName(zone);
+ case CompareIC::OBJECT:
+ return Type::Receiver(zone);
+ case CompareIC::KNOWN_OBJECT:
+ return map.is_null() ? Type::Receiver(zone) : Type::Class(map, zone);
+ case CompareIC::GENERIC:
+ return Type::Any(zone);
+ }
+ UNREACHABLE();
+ return NULL;
+}
+
+
+void CompareIC::StubInfoToType(uint32_t stub_key, Type** left_type,
+ Type** right_type, Type** overall_type,
+ Handle<Map> map, Zone* zone) {
+ State left_state, right_state, handler_state;
+ ICCompareStub::DecodeKey(stub_key, &left_state, &right_state, &handler_state,
+ NULL);
+ *left_type = StateToType(zone, left_state);
+ *right_type = StateToType(zone, right_state);
+ *overall_type = StateToType(zone, handler_state, map);
+}
+
+
+CompareIC::State CompareIC::NewInputState(State old_state,
+ Handle<Object> value) {
+ switch (old_state) {
+ case UNINITIALIZED:
+ if (value->IsSmi()) return SMI;
+ if (value->IsHeapNumber()) return NUMBER;
+ if (value->IsInternalizedString()) return INTERNALIZED_STRING;
+ if (value->IsString()) return STRING;
+ if (value->IsSymbol()) return UNIQUE_NAME;
+ if (value->IsJSObject()) return OBJECT;
+ break;
+ case SMI:
+ if (value->IsSmi()) return SMI;
+ if (value->IsHeapNumber()) return NUMBER;
+ break;
+ case NUMBER:
+ if (value->IsNumber()) return NUMBER;
+ break;
+ case INTERNALIZED_STRING:
+ if (value->IsInternalizedString()) return INTERNALIZED_STRING;
+ if (value->IsString()) return STRING;
+ if (value->IsSymbol()) return UNIQUE_NAME;
+ break;
+ case STRING:
+ if (value->IsString()) return STRING;
+ break;
+ case UNIQUE_NAME:
+ if (value->IsUniqueName()) return UNIQUE_NAME;
+ break;
+ case OBJECT:
+ if (value->IsJSObject()) return OBJECT;
+ break;
+ case GENERIC:
+ break;
+ case KNOWN_OBJECT:
+ UNREACHABLE();
+ break;
+ }
+ return GENERIC;
+}
+
+
+CompareIC::State CompareIC::TargetState(State old_state, State old_left,
+ State old_right,
+ bool has_inlined_smi_code,
+ Handle<Object> x, Handle<Object> y) {
+ switch (old_state) {
+ case UNINITIALIZED:
+ if (x->IsSmi() && y->IsSmi()) return SMI;
+ if (x->IsNumber() && y->IsNumber()) return NUMBER;
+ if (Token::IsOrderedRelationalCompareOp(op_)) {
+ // Ordered comparisons treat undefined as NaN, so the
+ // NUMBER stub will do the right thing.
+ if ((x->IsNumber() && y->IsUndefined()) ||
+ (y->IsNumber() && x->IsUndefined())) {
+ return NUMBER;
+ }
+ }
+ if (x->IsInternalizedString() && y->IsInternalizedString()) {
+ // We compare internalized strings as plain ones if we need to determine
+ // the order in a non-equality compare.
+ return Token::IsEqualityOp(op_) ? INTERNALIZED_STRING : STRING;
+ }
+ if (x->IsString() && y->IsString()) return STRING;
+ if (!Token::IsEqualityOp(op_)) return GENERIC;
+ if (x->IsUniqueName() && y->IsUniqueName()) return UNIQUE_NAME;
+ if (x->IsJSObject() && y->IsJSObject()) {
+ if (Handle<JSObject>::cast(x)->map() ==
+ Handle<JSObject>::cast(y)->map()) {
+ return KNOWN_OBJECT;
+ } else {
+ return OBJECT;
+ }
+ }
+ return GENERIC;
+ case SMI:
+ return x->IsNumber() && y->IsNumber() ? NUMBER : GENERIC;
+ case INTERNALIZED_STRING:
+ DCHECK(Token::IsEqualityOp(op_));
+ if (x->IsString() && y->IsString()) return STRING;
+ if (x->IsUniqueName() && y->IsUniqueName()) return UNIQUE_NAME;
+ return GENERIC;
+ case NUMBER:
+ // If the failure was due to one side changing from smi to heap number,
+ // then keep the state (if other changed at the same time, we will get
+ // a second miss and then go to generic).
+ if (old_left == SMI && x->IsHeapNumber()) return NUMBER;
+ if (old_right == SMI && y->IsHeapNumber()) return NUMBER;
+ return GENERIC;
+ case KNOWN_OBJECT:
+ DCHECK(Token::IsEqualityOp(op_));
+ if (x->IsJSObject() && y->IsJSObject()) return OBJECT;
+ return GENERIC;
+ case STRING:
+ case UNIQUE_NAME:
+ case OBJECT:
+ case GENERIC:
+ return GENERIC;
+ }
+ UNREACHABLE();
+ return GENERIC; // Make the compiler happy.
+}
+
+
+Code* CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
+ HandleScope scope(isolate());
+ State previous_left, previous_right, previous_state;
+ ICCompareStub::DecodeKey(target()->stub_key(), &previous_left,
+ &previous_right, &previous_state, NULL);
+ State new_left = NewInputState(previous_left, x);
+ State new_right = NewInputState(previous_right, y);
+ State state = TargetState(previous_state, previous_left, previous_right,
+ HasInlinedSmiCode(address()), x, y);
+ ICCompareStub stub(isolate(), op_, new_left, new_right, state);
+ if (state == KNOWN_OBJECT) {
+ stub.set_known_map(
+ Handle<Map>(Handle<JSObject>::cast(x)->map(), isolate()));
+ }
+ Handle<Code> new_target = stub.GetCode();
+ set_target(*new_target);
+
+ if (FLAG_trace_ic) {
+ PrintF("[CompareIC in ");
+ JavaScriptFrame::PrintTop(isolate(), stdout, false, true);
+ PrintF(" ((%s+%s=%s)->(%s+%s=%s))#%s @ %p]\n", GetStateName(previous_left),
+ GetStateName(previous_right), GetStateName(previous_state),
+ GetStateName(new_left), GetStateName(new_right), GetStateName(state),
+ Token::Name(op_), static_cast<void*>(*stub.GetCode()));
+ }
+
+ // Activate inlined smi code.
+ if (previous_state == UNINITIALIZED) {
+ PatchInlinedSmiCode(address(), ENABLE_INLINED_SMI_CHECK);
+ }
+
+ return *new_target;
+}
+
+
+// Used from ICCompareStub::GenerateMiss in code-stubs-<arch>.cc.
+RUNTIME_FUNCTION(CompareIC_Miss) {
+ TimerEventScope<TimerEventIcMiss> timer(isolate);
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 3);
+ CompareIC ic(isolate, static_cast<Token::Value>(args.smi_at(2)));
+ return ic.UpdateCaches(args.at<Object>(0), args.at<Object>(1));
+}
+
+
+void CompareNilIC::Clear(Address address, Code* target,
+ ConstantPoolArray* constant_pool) {
+ if (IsCleared(target)) return;
+ ExtraICState state = target->extra_ic_state();
+
+ CompareNilICStub stub(target->GetIsolate(), state,
+ HydrogenCodeStub::UNINITIALIZED);
+ stub.ClearState();
+
+ Code* code = NULL;
+ CHECK(stub.FindCodeInCache(&code));
+
+ SetTargetAtAddress(address, code, constant_pool);
+}
+
+
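+// Slow-path semantics: null and undefined compare equal to nil, as do
+// undetectable objects (e.g. document.all).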
+Handle<Object> CompareNilIC::DoCompareNilSlow(Isolate* isolate, NilValue nil,
+ Handle<Object> object) {
+ if (object->IsNull() || object->IsUndefined()) {
+ return handle(Smi::FromInt(true), isolate);
+ }
+ return handle(Smi::FromInt(object->IsUndetectableObject()), isolate);
+}
+
+
+Handle<Object> CompareNilIC::CompareNil(Handle<Object> object) {
+ ExtraICState extra_ic_state = target()->extra_ic_state();
+
+ CompareNilICStub stub(isolate(), extra_ic_state);
+
+ // Extract the current supported types from the patched IC and calculate what
+ // types must be supported as a result of the miss.
+ bool already_monomorphic = stub.IsMonomorphic();
+
+ stub.UpdateStatus(object);
+
+ NilValue nil = stub.GetNilValue();
+
+ // Find or create the specialized stub to support the new set of types.
+ Handle<Code> code;
+ if (stub.IsMonomorphic()) {
+ Handle<Map> monomorphic_map(already_monomorphic && FirstTargetMap() != NULL
+ ? FirstTargetMap()
+ : HeapObject::cast(*object)->map());
+ code = PropertyICCompiler::ComputeCompareNil(monomorphic_map, &stub);
+ } else {
+ code = stub.GetCode();
+ }
+ set_target(*code);
+ return DoCompareNilSlow(isolate(), nil, object);
+}
+
+
+RUNTIME_FUNCTION(CompareNilIC_Miss) {
+ TimerEventScope<TimerEventIcMiss> timer(isolate);
+ HandleScope scope(isolate);
+ Handle<Object> object = args.at<Object>(0);
+ CompareNilIC ic(isolate);
+ return *ic.CompareNil(object);
+}
+
+
+RUNTIME_FUNCTION(Unreachable) {
+ UNREACHABLE();
+ CHECK(false);
+ return isolate->heap()->undefined_value();
+}
+
+
+Builtins::JavaScript BinaryOpIC::TokenToJSBuiltin(Token::Value op) {
+  switch (op) {
+    default:
+      UNREACHABLE();
+    case Token::ADD:
+      return Builtins::ADD;
+    case Token::SUB:
+      return Builtins::SUB;
+    case Token::MUL:
+      return Builtins::MUL;
+    case Token::DIV:
+      return Builtins::DIV;
+    case Token::MOD:
+      return Builtins::MOD;
+    case Token::BIT_OR:
+      return Builtins::BIT_OR;
+    case Token::BIT_AND:
+      return Builtins::BIT_AND;
+    case Token::BIT_XOR:
+      return Builtins::BIT_XOR;
+    case Token::SAR:
+      return Builtins::SAR;
+    case Token::SHR:
+      return Builtins::SHR;
+    case Token::SHL:
+      return Builtins::SHL;
+  }
+}
+
+
+Handle<Object> ToBooleanIC::ToBoolean(Handle<Object> object) {
+ ToBooleanStub stub(isolate(), target()->extra_ic_state());
+ bool to_boolean_value = stub.UpdateStatus(object);
+ Handle<Code> code = stub.GetCode();
+ set_target(*code);
+ return handle(Smi::FromInt(to_boolean_value ? 1 : 0), isolate());
+}
+
+
+RUNTIME_FUNCTION(ToBooleanIC_Miss) {
+ TimerEventScope<TimerEventIcMiss> timer(isolate);
+ DCHECK(args.length() == 1);
+ HandleScope scope(isolate);
+ Handle<Object> object = args.at<Object>(0);
+ ToBooleanIC ic(isolate);
+ return *ic.ToBoolean(object);
+}
+
+
+RUNTIME_FUNCTION(StoreCallbackProperty) {
+ Handle<JSObject> receiver = args.at<JSObject>(0);
+ Handle<JSObject> holder = args.at<JSObject>(1);
+ Handle<ExecutableAccessorInfo> callback = args.at<ExecutableAccessorInfo>(2);
+ Handle<Name> name = args.at<Name>(3);
+ Handle<Object> value = args.at<Object>(4);
+ HandleScope scope(isolate);
+
+ DCHECK(callback->IsCompatibleReceiver(*receiver));
+
+ Address setter_address = v8::ToCData<Address>(callback->setter());
+ v8::AccessorNameSetterCallback fun =
+ FUNCTION_CAST<v8::AccessorNameSetterCallback>(setter_address);
+ DCHECK(fun != NULL);
+
+ LOG(isolate, ApiNamedPropertyAccess("store", *receiver, *name));
+ PropertyCallbackArguments custom_args(isolate, callback->data(), *receiver,
+ *holder);
+ custom_args.Call(fun, v8::Utils::ToLocal(name), v8::Utils::ToLocal(value));
+ RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
+ return *value;
+}
+
+
+/**
+ * Attempts to load a property with an interceptor (which must be present),
+ * but doesn't search the prototype chain.
+ *
+ * Returns |Heap::no_interceptor_result_sentinel()| if the interceptor doesn't
+ * provide any value for the given name.
+ */
+RUNTIME_FUNCTION(LoadPropertyWithInterceptorOnly) {
+ DCHECK(args.length() == NamedLoadHandlerCompiler::kInterceptorArgsLength);
+ Handle<Name> name_handle =
+ args.at<Name>(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex);
+ Handle<InterceptorInfo> interceptor_info = args.at<InterceptorInfo>(
+ NamedLoadHandlerCompiler::kInterceptorArgsInfoIndex);
+
+ // TODO(rossberg): Support symbols in the API.
+ if (name_handle->IsSymbol())
+ return isolate->heap()->no_interceptor_result_sentinel();
+ Handle<String> name = Handle<String>::cast(name_handle);
+
+ Address getter_address = v8::ToCData<Address>(interceptor_info->getter());
+ v8::NamedPropertyGetterCallback getter =
+ FUNCTION_CAST<v8::NamedPropertyGetterCallback>(getter_address);
+ DCHECK(getter != NULL);
+
+ Handle<JSObject> receiver =
+ args.at<JSObject>(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex);
+ Handle<JSObject> holder =
+ args.at<JSObject>(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex);
+ PropertyCallbackArguments callback_args(isolate, interceptor_info->data(),
+ *receiver, *holder);
+ {
+ // Use the interceptor getter.
+ HandleScope scope(isolate);
+ v8::Handle<v8::Value> r =
+ callback_args.Call(getter, v8::Utils::ToLocal(name));
+ RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
+ if (!r.IsEmpty()) {
+ Handle<Object> result = v8::Utils::OpenHandle(*r);
+ result->VerifyApiCallResultType();
+ return *v8::Utils::OpenHandle(*r);
+ }
+ }
+
+ return isolate->heap()->no_interceptor_result_sentinel();
+}
+
+
+static Object* ThrowReferenceError(Isolate* isolate, Name* name) {
+ // If the load is non-contextual, just return the undefined result.
+ // Note that both keyed and non-keyed loads may end up here.
+ HandleScope scope(isolate);
+ LoadIC ic(IC::NO_EXTRA_FRAME, isolate);
+ if (ic.contextual_mode() != CONTEXTUAL) {
+ return isolate->heap()->undefined_value();
+ }
+
+ // Throw a reference error.
+ Handle<Name> name_handle(name);
+ Handle<Object> error = isolate->factory()->NewReferenceError(
+ "not_defined", HandleVector(&name_handle, 1));
+ return isolate->Throw(*error);
+}
+
+
+/**
+ * Loads a property with an interceptor, performing a post-interceptor
+ * lookup if the interceptor failed.
+ */
+RUNTIME_FUNCTION(LoadPropertyWithInterceptor) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == NamedLoadHandlerCompiler::kInterceptorArgsLength);
+ Handle<Name> name =
+ args.at<Name>(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex);
+ Handle<JSObject> receiver =
+ args.at<JSObject>(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex);
+ Handle<JSObject> holder =
+ args.at<JSObject>(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex);
+
+ Handle<Object> result;
+ LookupIterator it(receiver, name, holder);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
+ JSObject::GetProperty(&it));
+
+ if (it.IsFound()) return *result;
+
+ return ThrowReferenceError(isolate, Name::cast(args[0]));
+}
+
+
+RUNTIME_FUNCTION(StorePropertyWithInterceptor) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 3);
+ StoreIC ic(IC::NO_EXTRA_FRAME, isolate);
+ Handle<JSObject> receiver = args.at<JSObject>(0);
+ Handle<Name> name = args.at<Name>(1);
+ Handle<Object> value = args.at<Object>(2);
+#ifdef DEBUG
+ PrototypeIterator iter(isolate, receiver,
+ PrototypeIterator::START_AT_RECEIVER);
+ bool found = false;
+ while (!iter.IsAtEnd(PrototypeIterator::END_AT_NON_HIDDEN)) {
+ Handle<Object> current = PrototypeIterator::GetCurrent(iter);
+ if (current->IsJSObject() &&
+ Handle<JSObject>::cast(current)->HasNamedInterceptor()) {
+ found = true;
+ break;
+ }
+ }
+ DCHECK(found);
+#endif
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result,
+ JSObject::SetProperty(receiver, name, value, ic.strict_mode()));
+ return *result;
+}
+
+
+RUNTIME_FUNCTION(LoadElementWithInterceptor) {
+ HandleScope scope(isolate);
+ Handle<JSObject> receiver = args.at<JSObject>(0);
+ DCHECK(args.smi_at(1) >= 0);
+ uint32_t index = args.smi_at(1);
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result,
+ JSObject::GetElementWithInterceptor(receiver, receiver, index));
+ return *result;
+}
+
+
+static const Address IC_utilities[] = {
+#define ADDR(name) FUNCTION_ADDR(name),
+ IC_UTIL_LIST(ADDR) NULL
+#undef ADDR
+};
+
+
+Address IC::AddressFromUtilityId(IC::UtilityId id) { return IC_utilities[id]; }
+}
+}  // namespace v8::internal
--- /dev/null
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_IC_H_
+#define V8_IC_H_
+
+#include "src/macro-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+
+const int kMaxKeyedPolymorphism = 4;
+
+
+// IC_UTIL_LIST defines all utility functions called from generated
+// inline caching code. The argument for the macro, ICU, is the function name.
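+// For example, IC_UTIL_LIST(CONST_NAME) with #define CONST_NAME(name) k##name,
+// expands to the enumerators kLoadIC_Miss, kKeyedLoadIC_Miss, ... used in
+// UtilityId below.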
+#define IC_UTIL_LIST(ICU) \
+ ICU(LoadIC_Miss) \
+ ICU(KeyedLoadIC_Miss) \
+ ICU(CallIC_Miss) \
+ ICU(CallIC_Customization_Miss) \
+ ICU(StoreIC_Miss) \
+ ICU(StoreIC_Slow) \
+ ICU(SharedStoreIC_ExtendStorage) \
+ ICU(KeyedStoreIC_Miss) \
+ ICU(KeyedStoreIC_Slow) \
+ /* Utilities for IC stubs. */ \
+ ICU(StoreCallbackProperty) \
+ ICU(LoadPropertyWithInterceptorOnly) \
+ ICU(LoadPropertyWithInterceptor) \
+ ICU(LoadElementWithInterceptor) \
+ ICU(StorePropertyWithInterceptor) \
+ ICU(CompareIC_Miss) \
+ ICU(BinaryOpIC_Miss) \
+ ICU(CompareNilIC_Miss) \
+ ICU(Unreachable) \
+ ICU(ToBooleanIC_Miss)
+//
+// IC is the base class for LoadIC, StoreIC, KeyedLoadIC, and KeyedStoreIC.
+//
+class IC {
+ public:
+  // The ids for the utility functions called from the generated code.
+ enum UtilityId {
+#define CONST_NAME(name) k##name,
+ IC_UTIL_LIST(CONST_NAME)
+#undef CONST_NAME
+ kUtilityCount
+ };
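+  // (The expansion yields one constant per IC_UTIL_LIST entry, e.g.
+  // kLoadIC_Miss, kKeyedLoadIC_Miss, ..., with kUtilityCount last.)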
+
+ // Looks up the address of the named utility.
+ static Address AddressFromUtilityId(UtilityId id);
+
+ // Alias the inline cache state type to make the IC code more readable.
+ typedef InlineCacheState State;
+
+ // The IC code is either invoked with no extra frames on the stack
+ // or with a single extra frame for supporting calls.
+ enum FrameDepth { NO_EXTRA_FRAME = 0, EXTRA_CALL_FRAME = 1 };
+
+ // Construct the IC structure with the given number of extra
+ // JavaScript frames on the stack.
+ IC(FrameDepth depth, Isolate* isolate);
+ virtual ~IC() {}
+
+ State state() const { return state_; }
+ inline Address address() const;
+
+ // Compute the current IC state based on the target stub, receiver and name.
+ void UpdateState(Handle<Object> receiver, Handle<Object> name);
+
+ bool IsNameCompatibleWithPrototypeFailure(Handle<Object> name);
+ void MarkPrototypeFailure(Handle<Object> name) {
+ DCHECK(IsNameCompatibleWithPrototypeFailure(name));
+ state_ = PROTOTYPE_FAILURE;
+ }
+
+ // If the stub contains weak maps then this function adds the stub to
+ // the dependent code array of each weak map.
+ static void RegisterWeakMapDependency(Handle<Code> stub);
+
+  // This function is called when a weak map in the stub is dying; it
+  // invalidates the stub by setting the maps in it to undefined.
+ static void InvalidateMaps(Code* stub);
+
+ // Clear the inline cache to initial state.
+ static void Clear(Isolate* isolate, Address address,
+ ConstantPoolArray* constant_pool);
+
+#ifdef DEBUG
+ bool IsLoadStub() const {
+ return target()->is_load_stub() || target()->is_keyed_load_stub();
+ }
+
+ bool IsStoreStub() const {
+ return target()->is_store_stub() || target()->is_keyed_store_stub();
+ }
+
+ bool IsCallStub() const { return target()->is_call_stub(); }
+#endif
+
+ template <class TypeClass>
+ static JSFunction* GetRootConstructor(TypeClass* type,
+ Context* native_context);
+ static inline Handle<Map> GetHandlerCacheHolder(HeapType* type,
+ bool receiver_is_holder,
+ Isolate* isolate,
+ CacheHolderFlag* flag);
+ static inline Handle<Map> GetICCacheHolder(HeapType* type, Isolate* isolate,
+ CacheHolderFlag* flag);
+
+ static bool IsCleared(Code* code) {
+ InlineCacheState state = code->ic_state();
+ return state == UNINITIALIZED || state == PREMONOMORPHIC;
+ }
+
+ // Utility functions to convert maps to types and back. There are two special
+ // cases:
+ // - The heap_number_map is used as a marker which includes heap numbers as
+ // well as smis.
+ // - The oddball map is only used for booleans.
+ static Handle<Map> TypeToMap(HeapType* type, Isolate* isolate);
+ template <class T>
+ static typename T::TypeHandle MapToType(Handle<Map> map,
+ typename T::Region* region);
+
+ static Handle<HeapType> CurrentTypeOf(Handle<Object> object,
+ Isolate* isolate);
+
+ protected:
+ // Get the call-site target; used for determining the state.
+ Handle<Code> target() const { return target_; }
+
+ Address fp() const { return fp_; }
+ Address pc() const { return *pc_address_; }
+ Isolate* isolate() const { return isolate_; }
+
+ // Get the shared function info of the caller.
+ SharedFunctionInfo* GetSharedFunctionInfo() const;
+ // Get the code object of the caller.
+ Code* GetCode() const;
+ // Get the original (non-breakpointed) code object of the caller.
+ Code* GetOriginalCode() const;
+
+ // Set the call-site target.
+ inline void set_target(Code* code);
+ bool is_target_set() { return target_set_; }
+
+ char TransitionMarkFromState(IC::State state);
+ void TraceIC(const char* type, Handle<Object> name);
+ void TraceIC(const char* type, Handle<Object> name, State old_state,
+ State new_state);
+
+ MaybeHandle<Object> TypeError(const char* type, Handle<Object> object,
+ Handle<Object> key);
+ MaybeHandle<Object> ReferenceError(const char* type, Handle<Name> name);
+
+ // Access the target code for the given IC address.
+ static inline Code* GetTargetAtAddress(Address address,
+ ConstantPoolArray* constant_pool);
+ static inline void SetTargetAtAddress(Address address, Code* target,
+ ConstantPoolArray* constant_pool);
+ static void OnTypeFeedbackChanged(Isolate* isolate, Address address,
+ State old_state, State new_state,
+ bool target_remains_ic_stub);
+ static void PostPatching(Address address, Code* target, Code* old_target);
+
+ // Compute the handler either by compiling or by retrieving a cached version.
+ Handle<Code> ComputeHandler(LookupIterator* lookup,
+                              Handle<Object> value = Handle<Object>::null());
+ virtual Handle<Code> CompileHandler(LookupIterator* lookup,
+ Handle<Object> value,
+ CacheHolderFlag cache_holder) {
+ UNREACHABLE();
+ return Handle<Code>::null();
+ }
+
+ void UpdateMonomorphicIC(Handle<Code> handler, Handle<Name> name);
+ bool UpdatePolymorphicIC(Handle<Name> name, Handle<Code> code);
+ void UpdateMegamorphicCache(HeapType* type, Name* name, Code* code);
+
+ void CopyICToMegamorphicCache(Handle<Name> name);
+ bool IsTransitionOfMonomorphicTarget(Map* source_map, Map* target_map);
+ void PatchCache(Handle<Name> name, Handle<Code> code);
+ Code::Kind kind() const { return kind_; }
+ Code::Kind handler_kind() const {
+ if (kind_ == Code::KEYED_LOAD_IC) return Code::LOAD_IC;
+ DCHECK(kind_ == Code::LOAD_IC || kind_ == Code::STORE_IC ||
+ kind_ == Code::KEYED_STORE_IC);
+ return kind_;
+ }
+ virtual Handle<Code> megamorphic_stub() {
+ UNREACHABLE();
+ return Handle<Code>::null();
+ }
+
+ bool TryRemoveInvalidPrototypeDependentStub(Handle<Object> receiver,
+ Handle<String> name);
+
+ ExtraICState extra_ic_state() const { return extra_ic_state_; }
+ void set_extra_ic_state(ExtraICState state) { extra_ic_state_ = state; }
+
+ Handle<HeapType> receiver_type() { return receiver_type_; }
+ void update_receiver_type(Handle<Object> receiver) {
+ receiver_type_ = CurrentTypeOf(receiver, isolate_);
+ }
+
+ void TargetMaps(MapHandleList* list) {
+ FindTargetMaps();
+ for (int i = 0; i < target_maps_.length(); i++) {
+ list->Add(target_maps_.at(i));
+ }
+ }
+
+ void TargetTypes(TypeHandleList* list) {
+ FindTargetMaps();
+ for (int i = 0; i < target_maps_.length(); i++) {
+ list->Add(IC::MapToType<HeapType>(target_maps_.at(i), isolate_));
+ }
+ }
+
+ Map* FirstTargetMap() {
+ FindTargetMaps();
+ return target_maps_.length() > 0 ? *target_maps_.at(0) : NULL;
+ }
+
+ protected:
+ inline void UpdateTarget();
+
+ private:
+ inline Code* raw_target() const;
+ inline ConstantPoolArray* constant_pool() const;
+ inline ConstantPoolArray* raw_constant_pool() const;
+
+ void FindTargetMaps() {
+ if (target_maps_set_) return;
+ target_maps_set_ = true;
+ if (state_ == MONOMORPHIC) {
+ Map* map = target_->FindFirstMap();
+ if (map != NULL) target_maps_.Add(handle(map));
+ } else if (state_ != UNINITIALIZED && state_ != PREMONOMORPHIC) {
+ target_->FindAllMaps(&target_maps_);
+ }
+ }
+
+ // Frame pointer for the frame that uses (calls) the IC.
+ Address fp_;
+
+ // All access to the program counter of an IC structure is indirect
+ // to make the code GC safe. This feature is crucial since
+ // GetProperty and SetProperty are called and they in turn might
+ // invoke the garbage collector.
+ Address* pc_address_;
+
+ Isolate* isolate_;
+
+ // The constant pool of the code which originally called the IC (which might
+ // be for the breakpointed copy of the original code).
+ Handle<ConstantPoolArray> raw_constant_pool_;
+
+ // The original code target that missed.
+ Handle<Code> target_;
+ bool target_set_;
+ State state_;
+ Code::Kind kind_;
+ Handle<HeapType> receiver_type_;
+ MaybeHandle<Code> maybe_handler_;
+
+ ExtraICState extra_ic_state_;
+ MapHandleList target_maps_;
+ bool target_maps_set_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(IC);
+};
+
+
+// An IC_Utility encapsulates IC::UtilityId. It exists mainly because you
+// cannot forward-declare an enum.
+class IC_Utility {
+ public:
+ explicit IC_Utility(IC::UtilityId id)
+ : address_(IC::AddressFromUtilityId(id)), id_(id) {}
+
+ Address address() const { return address_; }
+
+ IC::UtilityId id() const { return id_; }
+
+ private:
+ Address address_;
+ IC::UtilityId id_;
+};
+
+
+class CallIC : public IC {
+ public:
+ enum CallType { METHOD, FUNCTION };
+
+ class State V8_FINAL BASE_EMBEDDED {
+ public:
+ explicit State(ExtraICState extra_ic_state);
+
+ State(int argc, CallType call_type) : argc_(argc), call_type_(call_type) {}
+
+ ExtraICState GetExtraICState() const;
+
+ static void GenerateAheadOfTime(Isolate*,
+ void (*Generate)(Isolate*, const State&));
+
+ int arg_count() const { return argc_; }
+ CallType call_type() const { return call_type_; }
+
+ bool CallAsMethod() const { return call_type_ == METHOD; }
+
+ private:
+ class ArgcBits : public BitField<int, 0, Code::kArgumentsBits> {};
+ class CallTypeBits : public BitField<CallType, Code::kArgumentsBits, 1> {};
+
+ const int argc_;
+ const CallType call_type_;
+ };
+
+ explicit CallIC(Isolate* isolate) : IC(EXTRA_CALL_FRAME, isolate) {}
+
+ void PatchMegamorphic(Handle<Object> function, Handle<FixedArray> vector,
+ Handle<Smi> slot);
+
+ void HandleMiss(Handle<Object> receiver, Handle<Object> function,
+ Handle<FixedArray> vector, Handle<Smi> slot);
+
+ // Returns true if a custom handler was installed.
+ bool DoCustomHandler(Handle<Object> receiver, Handle<Object> function,
+ Handle<FixedArray> vector, Handle<Smi> slot,
+ const State& state);
+
+ // Code generator routines.
+ static Handle<Code> initialize_stub(Isolate* isolate, int argc,
+ CallType call_type);
+
+ static void Clear(Isolate* isolate, Address address, Code* target,
+ ConstantPoolArray* constant_pool);
+
+ private:
+ inline IC::State FeedbackToState(Handle<FixedArray> vector,
+ Handle<Smi> slot) const;
+};
+
+
+OStream& operator<<(OStream& os, const CallIC::State& s);
+
+
+class LoadIC : public IC {
+ public:
+ enum ParameterIndices { kReceiverIndex, kNameIndex, kParameterCount };
+ static const Register ReceiverRegister();
+ static const Register NameRegister();
+
+  // With the vector-ics flag there is an additional argument (the slot), and
+  // calls from Crankshaft pass yet another (the vector).
+ static const Register SlotRegister();
+ static const Register VectorRegister();
+
+ class State V8_FINAL BASE_EMBEDDED {
+ public:
+ explicit State(ExtraICState extra_ic_state) : state_(extra_ic_state) {}
+
+ explicit State(ContextualMode mode)
+ : state_(ContextualModeBits::encode(mode)) {}
+
+ ExtraICState GetExtraICState() const { return state_; }
+
+ ContextualMode contextual_mode() const {
+ return ContextualModeBits::decode(state_);
+ }
+
+ private:
+ class ContextualModeBits : public BitField<ContextualMode, 0, 1> {};
+ STATIC_ASSERT(static_cast<int>(NOT_CONTEXTUAL) == 0);
+
+ const ExtraICState state_;
+ };
+
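+  // Encodes the contextual mode in bit 0 of the extra IC state; since
+  // NOT_CONTEXTUAL == 0, the default extra state is simply 0.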
+ static ExtraICState ComputeExtraICState(ContextualMode contextual_mode) {
+ return State(contextual_mode).GetExtraICState();
+ }
+
+ static ContextualMode GetContextualMode(ExtraICState state) {
+ return State(state).contextual_mode();
+ }
+
+ ContextualMode contextual_mode() const {
+ return GetContextualMode(extra_ic_state());
+ }
+
+ explicit LoadIC(FrameDepth depth, Isolate* isolate) : IC(depth, isolate) {
+ DCHECK(IsLoadStub());
+ }
+
+  // Returns whether this IC is for contextual (no explicit receiver)
+  // access to properties.
+ bool IsUndeclaredGlobal(Handle<Object> receiver) {
+ if (receiver->IsGlobalObject()) {
+ return contextual_mode() == CONTEXTUAL;
+ } else {
+ DCHECK(contextual_mode() != CONTEXTUAL);
+ return false;
+ }
+ }
+
+ // Code generator routines.
+ static void GenerateInitialize(MacroAssembler* masm) { GenerateMiss(masm); }
+ static void GeneratePreMonomorphic(MacroAssembler* masm) {
+ GenerateMiss(masm);
+ }
+ static void GenerateMiss(MacroAssembler* masm);
+ static void GenerateMegamorphic(MacroAssembler* masm);
+ static void GenerateNormal(MacroAssembler* masm);
+ static void GenerateRuntimeGetProperty(MacroAssembler* masm);
+
+ static Handle<Code> initialize_stub(Isolate* isolate,
+ ExtraICState extra_state);
+
+ MUST_USE_RESULT MaybeHandle<Object> Load(Handle<Object> object,
+ Handle<Name> name);
+
+ protected:
+ inline void set_target(Code* code);
+
+ Handle<Code> slow_stub() const {
+ if (kind() == Code::LOAD_IC) {
+ return isolate()->builtins()->LoadIC_Slow();
+ } else {
+ DCHECK_EQ(Code::KEYED_LOAD_IC, kind());
+ return isolate()->builtins()->KeyedLoadIC_Slow();
+ }
+ }
+
+ virtual Handle<Code> megamorphic_stub();
+
+ // Update the inline cache and the global stub cache based on the
+ // lookup result.
+ void UpdateCaches(LookupIterator* lookup);
+
+ virtual Handle<Code> CompileHandler(LookupIterator* lookup,
+ Handle<Object> unused,
+ CacheHolderFlag cache_holder);
+
+ private:
+ virtual Handle<Code> pre_monomorphic_stub() const;
+ static Handle<Code> pre_monomorphic_stub(Isolate* isolate,
+ ExtraICState extra_state);
+
+ Handle<Code> SimpleFieldLoad(FieldIndex index);
+
+ static void Clear(Isolate* isolate, Address address, Code* target,
+ ConstantPoolArray* constant_pool);
+
+ friend class IC;
+};
+
+
+class KeyedLoadIC : public LoadIC {
+ public:
+ explicit KeyedLoadIC(FrameDepth depth, Isolate* isolate)
+ : LoadIC(depth, isolate) {
+ DCHECK(target()->is_keyed_load_stub());
+ }
+
+ MUST_USE_RESULT MaybeHandle<Object> Load(Handle<Object> object,
+ Handle<Object> key);
+
+ // Code generator routines.
+ static void GenerateMiss(MacroAssembler* masm);
+ static void GenerateRuntimeGetProperty(MacroAssembler* masm);
+ static void GenerateInitialize(MacroAssembler* masm) { GenerateMiss(masm); }
+ static void GeneratePreMonomorphic(MacroAssembler* masm) {
+ GenerateMiss(masm);
+ }
+ static void GenerateGeneric(MacroAssembler* masm);
+ static void GenerateString(MacroAssembler* masm);
+ static void GenerateIndexedInterceptor(MacroAssembler* masm);
+ static void GenerateSloppyArguments(MacroAssembler* masm);
+
+  // Bit mask to be tested against the bit field for the cases in which the
+  // generic stub should go into the slow case.
+  // The access check must be tested explicitly, since the generic stub does
+  // not perform map checks.
+ static const int kSlowCaseBitFieldMask =
+ (1 << Map::kIsAccessCheckNeeded) | (1 << Map::kHasIndexedInterceptor);
+
+ static Handle<Code> generic_stub(Isolate* isolate);
+ static Handle<Code> pre_monomorphic_stub(Isolate* isolate);
+
+ protected:
+ Handle<Code> LoadElementStub(Handle<JSObject> receiver);
+ virtual Handle<Code> pre_monomorphic_stub() const {
+ return pre_monomorphic_stub(isolate());
+ }
+
+ private:
+ Handle<Code> generic_stub() const { return generic_stub(isolate()); }
+ Handle<Code> indexed_interceptor_stub() {
+ return isolate()->builtins()->KeyedLoadIC_IndexedInterceptor();
+ }
+ Handle<Code> sloppy_arguments_stub() {
+ return isolate()->builtins()->KeyedLoadIC_SloppyArguments();
+ }
+ Handle<Code> string_stub() {
+ return isolate()->builtins()->KeyedLoadIC_String();
+ }
+
+ static void Clear(Isolate* isolate, Address address, Code* target,
+ ConstantPoolArray* constant_pool);
+
+ friend class IC;
+};
+
+
+class StoreIC : public IC {
+ public:
+ class StrictModeState : public BitField<StrictMode, 1, 1> {};
+ static ExtraICState ComputeExtraICState(StrictMode flag) {
+ return StrictModeState::encode(flag);
+ }
+ static StrictMode GetStrictMode(ExtraICState state) {
+ return StrictModeState::decode(state);
+ }
+
+ // For convenience, a statically declared encoding of strict mode extra
+ // IC state.
+ static const ExtraICState kStrictModeState = 1 << StrictModeState::kShift;
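+  // (Since StrictModeState occupies bit 1, this equals 1 << 1.)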
+
+ enum ParameterIndices {
+ kReceiverIndex,
+ kNameIndex,
+ kValueIndex,
+ kParameterCount
+ };
+ static const Register ReceiverRegister();
+ static const Register NameRegister();
+ static const Register ValueRegister();
+
+ StoreIC(FrameDepth depth, Isolate* isolate) : IC(depth, isolate) {
+ DCHECK(IsStoreStub());
+ }
+
+ StrictMode strict_mode() const {
+ return StrictModeState::decode(extra_ic_state());
+ }
+
+ // Code generators for stub routines. Only called once at startup.
+ static void GenerateSlow(MacroAssembler* masm);
+ static void GenerateInitialize(MacroAssembler* masm) { GenerateMiss(masm); }
+ static void GeneratePreMonomorphic(MacroAssembler* masm) {
+ GenerateMiss(masm);
+ }
+ static void GenerateMiss(MacroAssembler* masm);
+ static void GenerateMegamorphic(MacroAssembler* masm);
+ static void GenerateNormal(MacroAssembler* masm);
+ static void GenerateRuntimeSetProperty(MacroAssembler* masm,
+ StrictMode strict_mode);
+
+ static Handle<Code> initialize_stub(Isolate* isolate, StrictMode strict_mode);
+
+ MUST_USE_RESULT MaybeHandle<Object> Store(
+ Handle<Object> object, Handle<Name> name, Handle<Object> value,
+ JSReceiver::StoreFromKeyed store_mode =
+ JSReceiver::CERTAINLY_NOT_STORE_FROM_KEYED);
+
+ bool LookupForWrite(LookupIterator* it, Handle<Object> value,
+ JSReceiver::StoreFromKeyed store_mode);
+
+ protected:
+ virtual Handle<Code> megamorphic_stub();
+
+ // Stub accessors.
+ virtual Handle<Code> generic_stub() const;
+
+ virtual Handle<Code> slow_stub() const {
+ return isolate()->builtins()->StoreIC_Slow();
+ }
+
+ virtual Handle<Code> pre_monomorphic_stub() const {
+ return pre_monomorphic_stub(isolate(), strict_mode());
+ }
+
+ static Handle<Code> pre_monomorphic_stub(Isolate* isolate,
+ StrictMode strict_mode);
+
+ // Update the inline cache and the global stub cache based on the
+ // lookup result.
+ void UpdateCaches(LookupIterator* lookup, Handle<Object> value,
+ JSReceiver::StoreFromKeyed store_mode);
+ virtual Handle<Code> CompileHandler(LookupIterator* lookup,
+ Handle<Object> value,
+ CacheHolderFlag cache_holder);
+
+ private:
+ inline void set_target(Code* code);
+
+ static void Clear(Isolate* isolate, Address address, Code* target,
+ ConstantPoolArray* constant_pool);
+
+ friend class IC;
+};
+
+
+enum KeyedStoreCheckMap { kDontCheckMap, kCheckMap };
+
+
+enum KeyedStoreIncrementLength { kDontIncrementLength, kIncrementLength };
+
+
+class KeyedStoreIC : public StoreIC {
+ public:
+  // ExtraICState bits (building on IC)
+ class ExtraICStateKeyedAccessStoreMode
+ : public BitField<KeyedAccessStoreMode, 2, 4> {}; // NOLINT
+
+ static ExtraICState ComputeExtraICState(StrictMode flag,
+ KeyedAccessStoreMode mode) {
+ return StrictModeState::encode(flag) |
+ ExtraICStateKeyedAccessStoreMode::encode(mode);
+ }
+
+ static KeyedAccessStoreMode GetKeyedAccessStoreMode(
+ ExtraICState extra_state) {
+ return ExtraICStateKeyedAccessStoreMode::decode(extra_state);
+ }
+
+  // The map register isn't part of the normal call specification, but
+  // ElementsTransitionAndStoreStub, used in polymorphic keyed store
+  // stub implementations, requires it to be initialized.
+ static const Register MapRegister();
+
+ KeyedStoreIC(FrameDepth depth, Isolate* isolate) : StoreIC(depth, isolate) {
+ DCHECK(target()->is_keyed_store_stub());
+ }
+
+ MUST_USE_RESULT MaybeHandle<Object> Store(Handle<Object> object,
+ Handle<Object> name,
+ Handle<Object> value);
+
+ // Code generators for stub routines. Only called once at startup.
+ static void GenerateInitialize(MacroAssembler* masm) { GenerateMiss(masm); }
+ static void GeneratePreMonomorphic(MacroAssembler* masm) {
+ GenerateMiss(masm);
+ }
+ static void GenerateMiss(MacroAssembler* masm);
+ static void GenerateSlow(MacroAssembler* masm);
+ static void GenerateRuntimeSetProperty(MacroAssembler* masm,
+ StrictMode strict_mode);
+ static void GenerateGeneric(MacroAssembler* masm, StrictMode strict_mode);
+ static void GenerateSloppyArguments(MacroAssembler* masm);
+
+ protected:
+ virtual Handle<Code> pre_monomorphic_stub() const {
+ return pre_monomorphic_stub(isolate(), strict_mode());
+ }
+ static Handle<Code> pre_monomorphic_stub(Isolate* isolate,
+ StrictMode strict_mode) {
+ if (strict_mode == STRICT) {
+ return isolate->builtins()->KeyedStoreIC_PreMonomorphic_Strict();
+ } else {
+ return isolate->builtins()->KeyedStoreIC_PreMonomorphic();
+ }
+ }
+ virtual Handle<Code> slow_stub() const {
+ return isolate()->builtins()->KeyedStoreIC_Slow();
+ }
+ virtual Handle<Code> megamorphic_stub() {
+ if (strict_mode() == STRICT) {
+ return isolate()->builtins()->KeyedStoreIC_Generic_Strict();
+ } else {
+ return isolate()->builtins()->KeyedStoreIC_Generic();
+ }
+ }
+
+ Handle<Code> StoreElementStub(Handle<JSObject> receiver,
+ KeyedAccessStoreMode store_mode);
+
+ private:
+ inline void set_target(Code* code);
+
+ // Stub accessors.
+ virtual Handle<Code> generic_stub() const {
+ if (strict_mode() == STRICT) {
+ return isolate()->builtins()->KeyedStoreIC_Generic_Strict();
+ } else {
+ return isolate()->builtins()->KeyedStoreIC_Generic();
+ }
+ }
+
+ Handle<Code> sloppy_arguments_stub() {
+ return isolate()->builtins()->KeyedStoreIC_SloppyArguments();
+ }
+
+ static void Clear(Isolate* isolate, Address address, Code* target,
+ ConstantPoolArray* constant_pool);
+
+ KeyedAccessStoreMode GetStoreMode(Handle<JSObject> receiver,
+ Handle<Object> key, Handle<Object> value);
+
+ Handle<Map> ComputeTransitionedMap(Handle<Map> map,
+ KeyedAccessStoreMode store_mode);
+
+ friend class IC;
+};
+
+
+// Mode to overwrite BinaryExpression values.
+enum OverwriteMode { NO_OVERWRITE, OVERWRITE_LEFT, OVERWRITE_RIGHT };
+
+// Type-recording BinaryOpIC, which records the types of inputs and outputs.
+class BinaryOpIC : public IC {
+ public:
+ class State V8_FINAL BASE_EMBEDDED {
+ public:
+ State(Isolate* isolate, ExtraICState extra_ic_state);
+
+ State(Isolate* isolate, Token::Value op, OverwriteMode mode)
+ : op_(op),
+ mode_(mode),
+ left_kind_(NONE),
+ right_kind_(NONE),
+ result_kind_(NONE),
+ isolate_(isolate) {
+ DCHECK_LE(FIRST_TOKEN, op);
+ DCHECK_LE(op, LAST_TOKEN);
+ }
+
+ InlineCacheState GetICState() const {
+ if (Max(left_kind_, right_kind_) == NONE) {
+ return ::v8::internal::UNINITIALIZED;
+ }
+ if (Max(left_kind_, right_kind_) == GENERIC) {
+ return ::v8::internal::MEGAMORPHIC;
+ }
+ if (Min(left_kind_, right_kind_) == GENERIC) {
+ return ::v8::internal::GENERIC;
+ }
+ return ::v8::internal::MONOMORPHIC;
+ }
+
+ ExtraICState GetExtraICState() const;
+
+ static void GenerateAheadOfTime(Isolate*,
+ void (*Generate)(Isolate*, const State&));
+
+ bool CanReuseDoubleBox() const {
+ return (result_kind_ > SMI && result_kind_ <= NUMBER) &&
+ ((mode_ == OVERWRITE_LEFT && left_kind_ > SMI &&
+ left_kind_ <= NUMBER) ||
+ (mode_ == OVERWRITE_RIGHT && right_kind_ > SMI &&
+ right_kind_ <= NUMBER));
+ }
+
+ // Returns true if the IC _could_ create allocation mementos.
+ bool CouldCreateAllocationMementos() const {
+ if (left_kind_ == STRING || right_kind_ == STRING) {
+ DCHECK_EQ(Token::ADD, op_);
+ return true;
+ }
+ return false;
+ }
+
+ // Returns true if the IC _should_ create allocation mementos.
+ bool ShouldCreateAllocationMementos() const {
+ return FLAG_allocation_site_pretenuring &&
+ CouldCreateAllocationMementos();
+ }
+
+ bool HasSideEffects() const {
+ return Max(left_kind_, right_kind_) == GENERIC;
+ }
+
+ // Returns true if the IC should enable the inline smi code (i.e. if either
+ // parameter may be a smi).
+ bool UseInlinedSmiCode() const {
+ return KindMaybeSmi(left_kind_) || KindMaybeSmi(right_kind_);
+ }
+
+ static const int FIRST_TOKEN = Token::BIT_OR;
+ static const int LAST_TOKEN = Token::MOD;
+
+ Token::Value op() const { return op_; }
+ OverwriteMode mode() const { return mode_; }
+ Maybe<int> fixed_right_arg() const { return fixed_right_arg_; }
+
+ Type* GetLeftType(Zone* zone) const { return KindToType(left_kind_, zone); }
+ Type* GetRightType(Zone* zone) const {
+ return KindToType(right_kind_, zone);
+ }
+ Type* GetResultType(Zone* zone) const;
+
+ void Update(Handle<Object> left, Handle<Object> right,
+ Handle<Object> result);
+
+ Isolate* isolate() const { return isolate_; }
+
+ private:
+ friend OStream& operator<<(OStream& os, const BinaryOpIC::State& s);
+
+ enum Kind { NONE, SMI, INT32, NUMBER, STRING, GENERIC };
+
+ Kind UpdateKind(Handle<Object> object, Kind kind) const;
+
+ static const char* KindToString(Kind kind);
+ static Type* KindToType(Kind kind, Zone* zone);
+ static bool KindMaybeSmi(Kind kind) {
+ return (kind >= SMI && kind <= NUMBER) || kind == GENERIC;
+ }
+
+ // We truncate the last bit of the token.
+ STATIC_ASSERT(LAST_TOKEN - FIRST_TOKEN < (1 << 4));
+ class OpField : public BitField<int, 0, 4> {};
+ class OverwriteModeField : public BitField<OverwriteMode, 4, 2> {};
+ class ResultKindField : public BitField<Kind, 6, 3> {};
+ class LeftKindField : public BitField<Kind, 9, 3> {};
+ // When fixed right arg is set, we don't need to store the right kind.
+ // Thus the two fields can overlap.
+ class HasFixedRightArgField : public BitField<bool, 12, 1> {};
+ class FixedRightArgValueField : public BitField<int, 13, 4> {};
+ class RightKindField : public BitField<Kind, 13, 3> {};
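+    // Resulting layout: bits 0-3 op, 4-5 mode, 6-8 result kind, 9-11 left
+    // kind, bit 12 has-fixed-right-arg, and bits 13 and up hold either the
+    // fixed right arg value (4 bits) or the right kind (3 bits).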
+
+ Token::Value op_;
+ OverwriteMode mode_;
+ Kind left_kind_;
+ Kind right_kind_;
+ Kind result_kind_;
+ Maybe<int> fixed_right_arg_;
+ Isolate* isolate_;
+ };
+
+ explicit BinaryOpIC(Isolate* isolate) : IC(EXTRA_CALL_FRAME, isolate) {}
+
+ static Builtins::JavaScript TokenToJSBuiltin(Token::Value op);
+
+ MaybeHandle<Object> Transition(Handle<AllocationSite> allocation_site,
+ Handle<Object> left,
+ Handle<Object> right) V8_WARN_UNUSED_RESULT;
+};
+
+
+OStream& operator<<(OStream& os, const BinaryOpIC::State& s);
+
+
+class CompareIC : public IC {
+ public:
+ // The type/state lattice is defined by the following inequations:
+ // UNINITIALIZED < ...
+ // ... < GENERIC
+ // SMI < NUMBER
+ // INTERNALIZED_STRING < STRING
+ // KNOWN_OBJECT < OBJECT
+ enum State {
+ UNINITIALIZED,
+ SMI,
+ NUMBER,
+ STRING,
+ INTERNALIZED_STRING,
+ UNIQUE_NAME, // Symbol or InternalizedString
+ OBJECT, // JSObject
+ KNOWN_OBJECT, // JSObject with specific map (faster check)
+ GENERIC
+ };
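+  // States only generalize along this lattice (e.g. SMI to NUMBER or
+  // INTERNALIZED_STRING to STRING), with GENERIC as the final catch-all.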
+
+ static State NewInputState(State old_state, Handle<Object> value);
+
+ static Type* StateToType(Zone* zone, State state,
+ Handle<Map> map = Handle<Map>());
+
+ static void StubInfoToType(uint32_t stub_key, Type** left_type,
+ Type** right_type, Type** overall_type,
+ Handle<Map> map, Zone* zone);
+
+ CompareIC(Isolate* isolate, Token::Value op)
+ : IC(EXTRA_CALL_FRAME, isolate), op_(op) {}
+
+ // Update the inline cache for the given operands.
+ Code* UpdateCaches(Handle<Object> x, Handle<Object> y);
+
+ // Factory method for getting an uninitialized compare stub.
+ static Handle<Code> GetUninitialized(Isolate* isolate, Token::Value op);
+
+ // Helper function for computing the condition for a compare operation.
+ static Condition ComputeCondition(Token::Value op);
+
+ static const char* GetStateName(State state);
+
+ private:
+ static bool HasInlinedSmiCode(Address address);
+
+ State TargetState(State old_state, State old_left, State old_right,
+ bool has_inlined_smi_code, Handle<Object> x,
+ Handle<Object> y);
+
+ bool strict() const { return op_ == Token::EQ_STRICT; }
+ Condition GetCondition() const { return ComputeCondition(op_); }
+
+ static Code* GetRawUninitialized(Isolate* isolate, Token::Value op);
+
+ static void Clear(Isolate* isolate, Address address, Code* target,
+ ConstantPoolArray* constant_pool);
+
+ Token::Value op_;
+
+ friend class IC;
+};
+
+
+class CompareNilIC : public IC {
+ public:
+ explicit CompareNilIC(Isolate* isolate) : IC(EXTRA_CALL_FRAME, isolate) {}
+
+ Handle<Object> CompareNil(Handle<Object> object);
+
+ static Handle<Code> GetUninitialized();
+
+ static void Clear(Address address, Code* target,
+ ConstantPoolArray* constant_pool);
+
+ static Handle<Object> DoCompareNilSlow(Isolate* isolate, NilValue nil,
+ Handle<Object> object);
+};
+
+
+class ToBooleanIC : public IC {
+ public:
+ explicit ToBooleanIC(Isolate* isolate) : IC(EXTRA_CALL_FRAME, isolate) {}
+
+ Handle<Object> ToBoolean(Handle<Object> object);
+};
+
+
+// Helper for BinaryOpIC and CompareIC.
+enum InlinedSmiCheck { ENABLE_INLINED_SMI_CHECK, DISABLE_INLINED_SMI_CHECK };
+void PatchInlinedSmiCode(Address address, InlinedSmiCheck check);
+
+DECLARE_RUNTIME_FUNCTION(KeyedLoadIC_MissFromStubFailure);
+DECLARE_RUNTIME_FUNCTION(KeyedStoreIC_MissFromStubFailure);
+DECLARE_RUNTIME_FUNCTION(UnaryOpIC_Miss);
+DECLARE_RUNTIME_FUNCTION(StoreIC_MissFromStubFailure);
+DECLARE_RUNTIME_FUNCTION(ElementsTransitionAndStoreIC_Miss);
+DECLARE_RUNTIME_FUNCTION(BinaryOpIC_Miss);
+DECLARE_RUNTIME_FUNCTION(BinaryOpIC_MissWithAllocationSite);
+DECLARE_RUNTIME_FUNCTION(CompareNilIC_Miss);
+DECLARE_RUNTIME_FUNCTION(ToBooleanIC_Miss);
+
+// Support functions for callback handlers.
+DECLARE_RUNTIME_FUNCTION(StoreCallbackProperty);
+
+// Support functions for interceptor handlers.
+DECLARE_RUNTIME_FUNCTION(LoadPropertyWithInterceptorOnly);
+DECLARE_RUNTIME_FUNCTION(LoadPropertyWithInterceptor);
+DECLARE_RUNTIME_FUNCTION(LoadElementWithInterceptor);
+DECLARE_RUNTIME_FUNCTION(StorePropertyWithInterceptor);
+}
+} // namespace v8::internal
+
+#endif // V8_IC_H_
--- /dev/null
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/ic/stub-cache.h"
+#include "src/type-info.h"
+
+namespace v8 {
+namespace internal {
+
+
+StubCache::StubCache(Isolate* isolate) : isolate_(isolate) {}
+
+
+void StubCache::Initialize() {
+ DCHECK(IsPowerOf2(kPrimaryTableSize));
+ DCHECK(IsPowerOf2(kSecondaryTableSize));
+ Clear();
+}
+
+
+static Code::Flags CommonStubCacheChecks(Name* name, Map* map,
+ Code::Flags flags) {
+ flags = Code::RemoveTypeAndHolderFromFlags(flags);
+
+ // Validate that the name does not move on scavenge, and that we
+ // can use identity checks instead of structural equality checks.
+ DCHECK(!name->GetHeap()->InNewSpace(name));
+ DCHECK(name->IsUniqueName());
+
+ // The state bits are not important to the hash function because the stub
+ // cache only contains handlers. Make sure that the bits are the least
+ // significant so they will be the ones masked out.
+ DCHECK_EQ(Code::HANDLER, Code::ExtractKindFromFlags(flags));
+ STATIC_ASSERT((Code::ICStateField::kMask & 1) == 1);
+
+ // Make sure that the code type and cache holder are not included in the hash.
+ DCHECK(Code::ExtractTypeFromFlags(flags) == 0);
+ DCHECK(Code::ExtractCacheHolderFromFlags(flags) == 0);
+
+ return flags;
+}
+
+
+Code* StubCache::Set(Name* name, Map* map, Code* code) {
+ Code::Flags flags = CommonStubCacheChecks(name, map, code->flags());
+
+ // Compute the primary entry.
+ int primary_offset = PrimaryOffset(name, flags, map);
+ Entry* primary = entry(primary_, primary_offset);
+ Code* old_code = primary->value;
+
+ // If the primary entry has useful data in it, we retire it to the
+ // secondary cache before overwriting it.
+ if (old_code != isolate_->builtins()->builtin(Builtins::kIllegal)) {
+ Map* old_map = primary->map;
+ Code::Flags old_flags =
+ Code::RemoveTypeAndHolderFromFlags(old_code->flags());
+ int seed = PrimaryOffset(primary->key, old_flags, old_map);
+ int secondary_offset = SecondaryOffset(primary->key, old_flags, seed);
+ Entry* secondary = entry(secondary_, secondary_offset);
+ *secondary = *primary;
+ }
+
+ // Update primary cache.
+ primary->key = name;
+ primary->value = code;
+ primary->map = map;
+ isolate()->counters()->megamorphic_stub_cache_updates()->Increment();
+ return code;
+}
+
+
+Code* StubCache::Get(Name* name, Map* map, Code::Flags flags) {
+ flags = CommonStubCacheChecks(name, map, flags);
+ int primary_offset = PrimaryOffset(name, flags, map);
+ Entry* primary = entry(primary_, primary_offset);
+ if (primary->key == name && primary->map == map) {
+ return primary->value;
+ }
+ int secondary_offset = SecondaryOffset(name, flags, primary_offset);
+ Entry* secondary = entry(secondary_, secondary_offset);
+ if (secondary->key == name && secondary->map == map) {
+ return secondary->value;
+ }
+ return NULL;
+}
+
+
+void StubCache::Clear() {
+ Code* empty = isolate_->builtins()->builtin(Builtins::kIllegal);
+ for (int i = 0; i < kPrimaryTableSize; i++) {
+ primary_[i].key = isolate()->heap()->empty_string();
+ primary_[i].map = NULL;
+ primary_[i].value = empty;
+ }
+ for (int j = 0; j < kSecondaryTableSize; j++) {
+ secondary_[j].key = isolate()->heap()->empty_string();
+ secondary_[j].map = NULL;
+ secondary_[j].value = empty;
+ }
+}
+
+
+void StubCache::CollectMatchingMaps(SmallMapList* types, Handle<Name> name,
+ Code::Flags flags,
+ Handle<Context> native_context,
+ Zone* zone) {
+ for (int i = 0; i < kPrimaryTableSize; i++) {
+ if (primary_[i].key == *name) {
+ Map* map = primary_[i].map;
+      // The map can be NULL if the stub is a constant-function call
+      // with a primitive receiver.
+ if (map == NULL) continue;
+
+ int offset = PrimaryOffset(*name, flags, map);
+ if (entry(primary_, offset) == &primary_[i] &&
+ !TypeFeedbackOracle::CanRetainOtherContext(map, *native_context)) {
+ types->AddMapIfMissing(Handle<Map>(map), zone);
+ }
+ }
+ }
+
+ for (int i = 0; i < kSecondaryTableSize; i++) {
+ if (secondary_[i].key == *name) {
+ Map* map = secondary_[i].map;
+      // The map can be NULL if the stub is a constant-function call
+      // with a primitive receiver.
+ if (map == NULL) continue;
+
+ // Lookup in primary table and skip duplicates.
+ int primary_offset = PrimaryOffset(*name, flags, map);
+
+ // Lookup in secondary table and add matches.
+ int offset = SecondaryOffset(*name, flags, primary_offset);
+ if (entry(secondary_, offset) == &secondary_[i] &&
+ !TypeFeedbackOracle::CanRetainOtherContext(map, *native_context)) {
+ types->AddMapIfMissing(Handle<Map>(map), zone);
+ }
+ }
+ }
+}
+}
+} // namespace v8::internal
--- /dev/null
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_STUB_CACHE_H_
+#define V8_STUB_CACHE_H_
+
+#include "src/macro-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+
+// The stub cache is used for megamorphic property accesses.
+// It maps (map, name, type) to property access handlers. The cache does not
+// need explicit invalidation when a prototype chain is modified, since the
+// handlers verify the chain.
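+// A lookup probes the primary table and then the secondary table; if both
+// miss, the probe falls through to the caller's miss handling (see Get and
+// GenerateProbe below).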
+
+
+class SCTableReference {
+ public:
+ Address address() const { return address_; }
+
+ private:
+ explicit SCTableReference(Address address) : address_(address) {}
+
+ Address address_;
+
+ friend class StubCache;
+};
+
+
+class StubCache {
+ public:
+ struct Entry {
+ Name* key;
+ Code* value;
+ Map* map;
+ };
+
+ void Initialize();
+ // Access cache for entry hash(name, map).
+ Code* Set(Name* name, Map* map, Code* code);
+ Code* Get(Name* name, Map* map, Code::Flags flags);
+  // Clear the lookup table (at mark-compact collection).
+ void Clear();
+ // Collect all maps that match the name and flags.
+ void CollectMatchingMaps(SmallMapList* types, Handle<Name> name,
+ Code::Flags flags, Handle<Context> native_context,
+ Zone* zone);
+ // Generate code for probing the stub cache table.
+ // Arguments extra, extra2 and extra3 may be used to pass additional scratch
+ // registers. Set to no_reg if not needed.
+ void GenerateProbe(MacroAssembler* masm, Code::Flags flags, Register receiver,
+ Register name, Register scratch, Register extra,
+ Register extra2 = no_reg, Register extra3 = no_reg);
+
+ enum Table { kPrimary, kSecondary };
+
+ SCTableReference key_reference(StubCache::Table table) {
+ return SCTableReference(
+ reinterpret_cast<Address>(&first_entry(table)->key));
+ }
+
+ SCTableReference map_reference(StubCache::Table table) {
+ return SCTableReference(
+ reinterpret_cast<Address>(&first_entry(table)->map));
+ }
+
+ SCTableReference value_reference(StubCache::Table table) {
+ return SCTableReference(
+ reinterpret_cast<Address>(&first_entry(table)->value));
+ }
+
+ StubCache::Entry* first_entry(StubCache::Table table) {
+ switch (table) {
+ case StubCache::kPrimary:
+ return StubCache::primary_;
+ case StubCache::kSecondary:
+ return StubCache::secondary_;
+ }
+ UNREACHABLE();
+ return NULL;
+ }
+
+ Isolate* isolate() { return isolate_; }
+
+ // Setting the entry size such that the index is shifted by Name::kHashShift
+ // is convenient; shifting down the length field (to extract the hash code)
+ // automatically discards the hash bit field.
+ static const int kCacheIndexShift = Name::kHashShift;
+
+ private:
+ explicit StubCache(Isolate* isolate);
+
+ // The stub cache has a primary and secondary level. The two levels have
+ // different hashing algorithms in order to avoid simultaneous collisions
+ // in both caches. Unlike a probing strategy (quadratic or otherwise) the
+ // update strategy on updates is fairly clear and simple: Any existing entry
+ // in the primary cache is moved to the secondary cache, and secondary cache
+ // entries are overwritten.
+
+ // Hash algorithm for the primary table. This algorithm is replicated in
+ // assembler for every architecture. Returns an index into the table that
+ // is scaled by 1 << kCacheIndexShift.
+ static int PrimaryOffset(Name* name, Code::Flags flags, Map* map) {
+ STATIC_ASSERT(kCacheIndexShift == Name::kHashShift);
+ // Compute the hash of the name (use entire hash field).
+ DCHECK(name->HasHashCode());
+ uint32_t field = name->hash_field();
+ // Using only the low bits in 64-bit mode is unlikely to increase the
+ // risk of collision even if the heap is spread over an area larger than
+ // 4Gb (and not at all if it isn't).
+ uint32_t map_low32bits =
+ static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map));
+    // We always set the in_loop bit to zero when generating the lookup code,
+    // so do it here too so that the hash codes match.
+ uint32_t iflags =
+ (static_cast<uint32_t>(flags) & ~Code::kFlagsNotUsedInLookup);
+ // Base the offset on a simple combination of name, flags, and map.
+ uint32_t key = (map_low32bits + field) ^ iflags;
+ return key & ((kPrimaryTableSize - 1) << kCacheIndexShift);
+ }
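+  // Note that the returned offset is an entry index pre-scaled by
+  // 1 << kCacheIndexShift, which is the form entry() below expects.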
+
+ // Hash algorithm for the secondary table. This algorithm is replicated in
+ // assembler for every architecture. Returns an index into the table that
+ // is scaled by 1 << kCacheIndexShift.
+ static int SecondaryOffset(Name* name, Code::Flags flags, int seed) {
+ // Use the seed from the primary cache in the secondary cache.
+ uint32_t name_low32bits =
+ static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name));
+    // We always set the in_loop bit to zero when generating the lookup code,
+    // so do it here too so that the hash codes match.
+ uint32_t iflags =
+ (static_cast<uint32_t>(flags) & ~Code::kFlagsNotUsedInLookup);
+ uint32_t key = (seed - name_low32bits) + iflags;
+ return key & ((kSecondaryTableSize - 1) << kCacheIndexShift);
+ }
+
+ // Compute the entry for a given offset in exactly the same way as
+  // we do in generated code. We generate a hash code that already
+ // ends in Name::kHashShift 0s. Then we multiply it so it is a multiple
+ // of sizeof(Entry). This makes it easier to avoid making mistakes
+ // in the hashed offset computations.
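+  // For example, with 8-byte pointers sizeof(Entry) == 24; assuming
+  // Name::kHashShift == 2, the multiplier is 24 >> 2 == 6, so an offset of
+  // index << 2 maps to a byte offset of index * 24 == index * sizeof(Entry).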
+ static Entry* entry(Entry* table, int offset) {
+ const int multiplier = sizeof(*table) >> Name::kHashShift;
+ return reinterpret_cast<Entry*>(reinterpret_cast<Address>(table) +
+ offset * multiplier);
+ }
+
+ static const int kPrimaryTableBits = 11;
+ static const int kPrimaryTableSize = (1 << kPrimaryTableBits);
+ static const int kSecondaryTableBits = 9;
+ static const int kSecondaryTableSize = (1 << kSecondaryTableBits);
+
+ Entry primary_[kPrimaryTableSize];
+ Entry secondary_[kSecondaryTableSize];
+ Isolate* isolate_;
+
+ friend class Isolate;
+ friend class SCTableReference;
+
+ DISALLOW_COPY_AND_ASSIGN(StubCache);
+};
+}
+} // namespace v8::internal
+
+#endif // V8_STUB_CACHE_H_
--- /dev/null
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_X64
+
+#include "src/ic/ic-compiler.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+
+void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
+ MacroAssembler* masm, Label* miss_label, Register receiver,
+ Handle<Name> name, Register scratch0, Register scratch1) {
+ DCHECK(name->IsUniqueName());
+ DCHECK(!receiver.is(scratch0));
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->negative_lookups(), 1);
+ __ IncrementCounter(counters->negative_lookups_miss(), 1);
+
+ __ movp(scratch0, FieldOperand(receiver, HeapObject::kMapOffset));
+
+ const int kInterceptorOrAccessCheckNeededMask =
+ (1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded);
+
+ // Bail out if the receiver has a named interceptor or requires access checks.
+ __ testb(FieldOperand(scratch0, Map::kBitFieldOffset),
+ Immediate(kInterceptorOrAccessCheckNeededMask));
+ __ j(not_zero, miss_label);
+
+ // Check that receiver is a JSObject.
+ __ CmpInstanceType(scratch0, FIRST_SPEC_OBJECT_TYPE);
+ __ j(below, miss_label);
+
+ // Load properties array.
+ Register properties = scratch0;
+ __ movp(properties, FieldOperand(receiver, JSObject::kPropertiesOffset));
+
+ // Check that the properties array is a dictionary.
+ __ CompareRoot(FieldOperand(properties, HeapObject::kMapOffset),
+ Heap::kHashTableMapRootIndex);
+ __ j(not_equal, miss_label);
+
+ Label done;
+ NameDictionaryLookupStub::GenerateNegativeLookup(masm, miss_label, &done,
+ properties, name, scratch1);
+ __ bind(&done);
+ __ DecrementCounter(counters->negative_lookups_miss(), 1);
+}
+
+
+void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype(
+ MacroAssembler* masm, int index, Register prototype, Label* miss) {
+ Isolate* isolate = masm->isolate();
+ // Get the global function with the given index.
+ Handle<JSFunction> function(
+ JSFunction::cast(isolate->native_context()->get(index)));
+
+ // Check we're still in the same context.
+ Register scratch = prototype;
+ const int offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX);
+ __ movp(scratch, Operand(rsi, offset));
+ __ movp(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));
+ __ Cmp(Operand(scratch, Context::SlotOffset(index)), function);
+ __ j(not_equal, miss);
+
+ // Load its initial map. The global functions all have initial maps.
+ __ Move(prototype, Handle<Map>(function->initial_map()));
+ // Load the prototype from the initial map.
+ __ movp(prototype, FieldOperand(prototype, Map::kPrototypeOffset));
+}
+
+
+void NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(
+ MacroAssembler* masm, Register receiver, Register result, Register scratch,
+ Label* miss_label) {
+ __ TryGetFunctionPrototype(receiver, result, miss_label);
+ if (!result.is(rax)) __ movp(rax, result);
+ __ ret(0);
+}
+
+
+static void PushInterceptorArguments(MacroAssembler* masm, Register receiver,
+ Register holder, Register name,
+ Handle<JSObject> holder_obj) {
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0);
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsInfoIndex == 1);
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 2);
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 3);
+ STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsLength == 4);
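+  // The pushes below must match the argument order asserted above:
+  // name, interceptor info, receiver (this), holder.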
+ __ Push(name);
+ Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
+ DCHECK(!masm->isolate()->heap()->InNewSpace(*interceptor));
+ __ Move(kScratchRegister, interceptor);
+ __ Push(kScratchRegister);
+ __ Push(receiver);
+ __ Push(holder);
+}
+
+
+static void CompileCallLoadPropertyWithInterceptor(
+ MacroAssembler* masm, Register receiver, Register holder, Register name,
+ Handle<JSObject> holder_obj, IC::UtilityId id) {
+ PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
+ __ CallExternalReference(ExternalReference(IC_Utility(id), masm->isolate()),
+ NamedLoadHandlerCompiler::kInterceptorArgsLength);
+}
+
+
+// Generate a call to the API function.
+void PropertyHandlerCompiler::GenerateFastApiCall(
+ MacroAssembler* masm, const CallOptimization& optimization,
+ Handle<Map> receiver_map, Register receiver, Register scratch_in,
+ bool is_store, int argc, Register* values) {
+ DCHECK(optimization.is_simple_api_call());
+
+ __ PopReturnAddressTo(scratch_in);
+ // receiver
+ __ Push(receiver);
+ // Write the arguments to stack frame.
+ for (int i = 0; i < argc; i++) {
+ Register arg = values[argc - 1 - i];
+ DCHECK(!receiver.is(arg));
+ DCHECK(!scratch_in.is(arg));
+ __ Push(arg);
+ }
+ __ PushReturnAddressFrom(scratch_in);
+  // The stack now matches the JSFunction ABI.
+
+  // ABI for CallApiFunctionStub.
+ Register callee = rax;
+ Register call_data = rbx;
+ Register holder = rcx;
+ Register api_function_address = rdx;
+ Register scratch = rdi; // scratch_in is no longer valid.
+
+ // Put holder in place.
+ CallOptimization::HolderLookup holder_lookup;
+ Handle<JSObject> api_holder =
+ optimization.LookupHolderOfExpectedType(receiver_map, &holder_lookup);
+ switch (holder_lookup) {
+ case CallOptimization::kHolderIsReceiver:
+ __ Move(holder, receiver);
+ break;
+ case CallOptimization::kHolderFound:
+ __ Move(holder, api_holder);
+ break;
+ case CallOptimization::kHolderNotFound:
+ UNREACHABLE();
+ break;
+ }
+
+ Isolate* isolate = masm->isolate();
+ Handle<JSFunction> function = optimization.constant_function();
+ Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
+ Handle<Object> call_data_obj(api_call_info->data(), isolate);
+
+ // Put callee in place.
+ __ Move(callee, function);
+
+ bool call_data_undefined = false;
+ // Put call_data in place.
+ if (isolate->heap()->InNewSpace(*call_data_obj)) {
+ __ Move(scratch, api_call_info);
+ __ movp(call_data, FieldOperand(scratch, CallHandlerInfo::kDataOffset));
+ } else if (call_data_obj->IsUndefined()) {
+ call_data_undefined = true;
+ __ LoadRoot(call_data, Heap::kUndefinedValueRootIndex);
+ } else {
+ __ Move(call_data, call_data_obj);
+ }
+
+ // Put api_function_address in place.
+ Address function_address = v8::ToCData<Address>(api_call_info->callback());
+ __ Move(api_function_address, function_address,
+ RelocInfo::EXTERNAL_REFERENCE);
+
+ // Jump to stub.
+ CallApiFunctionStub stub(isolate, is_store, call_data_undefined, argc);
+ __ TailCallStub(&stub);
+}
+
+
+void PropertyHandlerCompiler::GenerateCheckPropertyCell(
+ MacroAssembler* masm, Handle<JSGlobalObject> global, Handle<Name> name,
+ Register scratch, Label* miss) {
+ Handle<PropertyCell> cell = JSGlobalObject::EnsurePropertyCell(global, name);
+ DCHECK(cell->value()->IsTheHole());
+ __ Move(scratch, cell);
+ __ Cmp(FieldOperand(scratch, Cell::kValueOffset),
+ masm->isolate()->factory()->the_hole_value());
+ __ j(not_equal, miss);
+}
+
+
+void PropertyAccessCompiler::GenerateTailCall(MacroAssembler* masm,
+ Handle<Code> code) {
+ __ jmp(code, RelocInfo::CODE_TARGET);
+}
+
+
+#undef __
+#define __ ACCESS_MASM((masm()))
+
+
+void NamedStoreHandlerCompiler::GenerateRestoreName(Label* label,
+ Handle<Name> name) {
+ if (!label->is_unused()) {
+ __ bind(label);
+ __ Move(this->name(), name);
+ }
+}
+
+
+// The receiver_reg is preserved on jumps to miss_label, but may be destroyed
+// if the store is successful.
+void NamedStoreHandlerCompiler::GenerateStoreTransition(
+ Handle<Map> transition, Handle<Name> name, Register receiver_reg,
+ Register storage_reg, Register value_reg, Register scratch1,
+ Register scratch2, Register unused, Label* miss_label, Label* slow) {
+ int descriptor = transition->LastAdded();
+ DescriptorArray* descriptors = transition->instance_descriptors();
+ PropertyDetails details = descriptors->GetDetails(descriptor);
+ Representation representation = details.representation();
+ DCHECK(!representation.IsNone());
+
+ if (details.type() == CONSTANT) {
+ Handle<Object> constant(descriptors->GetValue(descriptor), isolate());
+ __ Cmp(value_reg, constant);
+ __ j(not_equal, miss_label);
+ } else if (representation.IsSmi()) {
+ __ JumpIfNotSmi(value_reg, miss_label);
+ } else if (representation.IsHeapObject()) {
+ __ JumpIfSmi(value_reg, miss_label);
+ HeapType* field_type = descriptors->GetFieldType(descriptor);
+ HeapType::Iterator<Map> it = field_type->Classes();
+ if (!it.Done()) {
+ Label do_store;
+ while (true) {
+ __ CompareMap(value_reg, it.Current());
+ it.Advance();
+ if (it.Done()) {
+ __ j(not_equal, miss_label);
+ break;
+ }
+ __ j(equal, &do_store, Label::kNear);
+ }
+ __ bind(&do_store);
+ }
+ } else if (representation.IsDouble()) {
+ Label do_store, heap_number;
+ __ AllocateHeapNumber(storage_reg, scratch1, slow, MUTABLE);
+
+ __ JumpIfNotSmi(value_reg, &heap_number);
+ __ SmiToInteger32(scratch1, value_reg);
+ __ Cvtlsi2sd(xmm0, scratch1);
+ __ jmp(&do_store);
+
+ __ bind(&heap_number);
+ __ CheckMap(value_reg, isolate()->factory()->heap_number_map(), miss_label,
+ DONT_DO_SMI_CHECK);
+ __ movsd(xmm0, FieldOperand(value_reg, HeapNumber::kValueOffset));
+
+ __ bind(&do_store);
+ __ movsd(FieldOperand(storage_reg, HeapNumber::kValueOffset), xmm0);
+ }
+
+ // Stub never generated for objects that require access checks.
+ DCHECK(!transition->is_access_check_needed());
+
+ // Perform map transition for the receiver if necessary.
+ if (details.type() == FIELD &&
+ Map::cast(transition->GetBackPointer())->unused_property_fields() == 0) {
+ // The properties must be extended before we can store the value.
+ // We jump to a runtime call that extends the properties array.
+ __ PopReturnAddressTo(scratch1);
+ __ Push(receiver_reg);
+ __ Push(transition);
+ __ Push(value_reg);
+ __ PushReturnAddressFrom(scratch1);
+ __ TailCallExternalReference(
+ ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage),
+ isolate()),
+ 3, 1);
+ return;
+ }
+
+ // Update the map of the object.
+ __ Move(scratch1, transition);
+ __ movp(FieldOperand(receiver_reg, HeapObject::kMapOffset), scratch1);
+
+ // Update the write barrier for the map field.
+ __ RecordWriteField(receiver_reg, HeapObject::kMapOffset, scratch1, scratch2,
+ kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+
+ if (details.type() == CONSTANT) {
+ DCHECK(value_reg.is(rax));
+ __ ret(0);
+ return;
+ }
+
+ int index = transition->instance_descriptors()->GetFieldIndex(
+ transition->LastAdded());
+
+ // Adjust for the number of properties stored in the object. Even in the
+ // face of a transition we can use the old map here because the size of the
+  // object and the number of in-object properties are not going to change.
+ index -= transition->inobject_properties();
+
+ // TODO(verwaest): Share this code as a code stub.
+ SmiCheck smi_check =
+ representation.IsTagged() ? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
+ if (index < 0) {
+ // Set the property straight into the object.
+ int offset = transition->instance_size() + (index * kPointerSize);
+ if (representation.IsDouble()) {
+ __ movp(FieldOperand(receiver_reg, offset), storage_reg);
+ } else {
+ __ movp(FieldOperand(receiver_reg, offset), value_reg);
+ }
+
+ if (!representation.IsSmi()) {
+ // Update the write barrier for the array address.
+ if (!representation.IsDouble()) {
+ __ movp(storage_reg, value_reg);
+ }
+ __ RecordWriteField(receiver_reg, offset, storage_reg, scratch1,
+ kDontSaveFPRegs, EMIT_REMEMBERED_SET, smi_check);
+ }
+ } else {
+ // Write to the properties array.
+ int offset = index * kPointerSize + FixedArray::kHeaderSize;
+ // Get the properties array (optimistically).
+ __ movp(scratch1, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
+ if (representation.IsDouble()) {
+ __ movp(FieldOperand(scratch1, offset), storage_reg);
+ } else {
+ __ movp(FieldOperand(scratch1, offset), value_reg);
+ }
+
+ if (!representation.IsSmi()) {
+ // Update the write barrier for the array address.
+ if (!representation.IsDouble()) {
+ __ movp(storage_reg, value_reg);
+ }
+ __ RecordWriteField(scratch1, offset, storage_reg, receiver_reg,
+ kDontSaveFPRegs, EMIT_REMEMBERED_SET, smi_check);
+ }
+ }
+
+ // Return the value (register rax).
+ DCHECK(value_reg.is(rax));
+ __ ret(0);
+}
+
+
+void NamedStoreHandlerCompiler::GenerateStoreField(LookupIterator* lookup,
+ Register value_reg,
+ Label* miss_label) {
+ DCHECK(lookup->representation().IsHeapObject());
+ __ JumpIfSmi(value_reg, miss_label);
+ HeapType::Iterator<Map> it = lookup->GetFieldType()->Classes();
+ Label do_store;
+ while (true) {
+ __ CompareMap(value_reg, it.Current());
+ it.Advance();
+ if (it.Done()) {
+ __ j(not_equal, miss_label);
+ break;
+ }
+ __ j(equal, &do_store, Label::kNear);
+ }
+ __ bind(&do_store);
+
+ StoreFieldStub stub(isolate(), lookup->GetFieldIndex(),
+ lookup->representation());
+ GenerateTailCall(masm(), stub.GetCode());
+}
+
+
+Register PropertyHandlerCompiler::CheckPrototypes(
+ Register object_reg, Register holder_reg, Register scratch1,
+ Register scratch2, Handle<Name> name, Label* miss,
+ PrototypeCheckType check) {
+ Handle<Map> receiver_map(IC::TypeToMap(*type(), isolate()));
+
+ // Make sure there's no overlap between holder and object registers.
+ DCHECK(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
+ DCHECK(!scratch2.is(object_reg) && !scratch2.is(holder_reg) &&
+ !scratch2.is(scratch1));
+
+ // Keep track of the current object in register reg. On the first
+  // iteration, reg is an alias for object_reg; on later iterations,
+ // it is an alias for holder_reg.
+ Register reg = object_reg;
+ int depth = 0;
+
+ Handle<JSObject> current = Handle<JSObject>::null();
+ if (type()->IsConstant()) {
+ current = Handle<JSObject>::cast(type()->AsConstant()->Value());
+ }
+ Handle<JSObject> prototype = Handle<JSObject>::null();
+ Handle<Map> current_map = receiver_map;
+ Handle<Map> holder_map(holder()->map());
+ // Traverse the prototype chain and check the maps in the prototype chain for
+ // fast and global objects or do negative lookup for normal objects.
+ while (!current_map.is_identical_to(holder_map)) {
+ ++depth;
+
+ // Only global objects and objects that do not require access
+ // checks are allowed in stubs.
+ DCHECK(current_map->IsJSGlobalProxyMap() ||
+ !current_map->is_access_check_needed());
+
+ prototype = handle(JSObject::cast(current_map->prototype()));
+ if (current_map->is_dictionary_map() &&
+ !current_map->IsJSGlobalObjectMap()) {
+ DCHECK(!current_map->IsJSGlobalProxyMap()); // Proxy maps are fast.
+ if (!name->IsUniqueName()) {
+ DCHECK(name->IsString());
+ name = factory()->InternalizeString(Handle<String>::cast(name));
+ }
+ DCHECK(current.is_null() ||
+ current->property_dictionary()->FindEntry(name) ==
+ NameDictionary::kNotFound);
+
+ GenerateDictionaryNegativeLookup(masm(), miss, reg, name, scratch1,
+ scratch2);
+
+ __ movp(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
+ reg = holder_reg; // From now on the object will be in holder_reg.
+ __ movp(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
+ } else {
+ bool in_new_space = heap()->InNewSpace(*prototype);
+ // Two possible reasons for loading the prototype from the map:
+ // (1) Can't store references to new space in code.
+ // (2) Handler is shared for all receivers with the same prototype
+ // map (but not necessarily the same prototype instance).
+ bool load_prototype_from_map = in_new_space || depth == 1;
+ if (load_prototype_from_map) {
+ // Save the map in scratch1 for later.
+ __ movp(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
+ }
+ if (depth != 1 || check == CHECK_ALL_MAPS) {
+ __ CheckMap(reg, current_map, miss, DONT_DO_SMI_CHECK);
+ }
+
+ // Check access rights to the global object. This has to happen after
+ // the map check so that we know that the object is actually a global
+ // object.
+ // This allows us to install generated handlers for accesses to the
+ // global proxy (as opposed to using slow ICs). See corresponding code
+ // in LookupForRead().
+ if (current_map->IsJSGlobalProxyMap()) {
+ __ CheckAccessGlobalProxy(reg, scratch2, miss);
+ } else if (current_map->IsJSGlobalObjectMap()) {
+ GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
+ name, scratch2, miss);
+ }
+ reg = holder_reg; // From now on the object will be in holder_reg.
+
+ if (load_prototype_from_map) {
+ __ movp(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
+ } else {
+ __ Move(reg, prototype);
+ }
+ }
+
+ // Go to the next object in the prototype chain.
+ current = prototype;
+ current_map = handle(current->map());
+ }
+
+ // Log the check depth.
+ LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
+
+ if (depth != 0 || check == CHECK_ALL_MAPS) {
+ // Check the holder map.
+ __ CheckMap(reg, current_map, miss, DONT_DO_SMI_CHECK);
+ }
+
+ // Perform security check for access to the global object.
+ DCHECK(current_map->IsJSGlobalProxyMap() ||
+ !current_map->is_access_check_needed());
+ if (current_map->IsJSGlobalProxyMap()) {
+ __ CheckAccessGlobalProxy(reg, scratch1, miss);
+ }
+
+ // Return the register containing the holder.
+ return reg;
+}
+
+
+void NamedLoadHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
+ if (!miss->is_unused()) {
+ Label success;
+ __ jmp(&success);
+ __ bind(miss);
+ TailCallBuiltin(masm(), MissBuiltin(kind()));
+ __ bind(&success);
+ }
+}
+
+
+void NamedStoreHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
+ if (!miss->is_unused()) {
+ Label success;
+ __ jmp(&success);
+ GenerateRestoreName(miss, name);
+ TailCallBuiltin(masm(), MissBuiltin(kind()));
+ __ bind(&success);
+ }
+}
+
+
+void NamedLoadHandlerCompiler::GenerateLoadCallback(
+ Register reg, Handle<ExecutableAccessorInfo> callback) {
+  // Insert additional parameters into the stack frame above the return
+  // address.
+ DCHECK(!scratch4().is(reg));
+ __ PopReturnAddressTo(scratch4());
+
+ STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 0);
+ STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 1);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 2);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 3);
+ STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 4);
+ STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 5);
+ STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 6);
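+  // The pushes below build the PropertyCallbackArguments array in place,
+  // from kThisIndex down to kHolderIndex, so that on the downward-growing
+  // stack the array ends up in index order, holder first.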
+ __ Push(receiver()); // receiver
+ if (heap()->InNewSpace(callback->data())) {
+ DCHECK(!scratch2().is(reg));
+ __ Move(scratch2(), callback);
+ __ Push(FieldOperand(scratch2(),
+ ExecutableAccessorInfo::kDataOffset)); // data
+ } else {
+ __ Push(Handle<Object>(callback->data(), isolate()));
+ }
+ DCHECK(!kScratchRegister.is(reg));
+ __ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
+ __ Push(kScratchRegister); // return value
+ __ Push(kScratchRegister); // return value default
+ __ PushAddress(ExternalReference::isolate_address(isolate()));
+ __ Push(reg); // holder
+ __ Push(name()); // name
+ // Save a pointer to where we pushed the arguments pointer. This will be
+ // passed as the const PropertyAccessorInfo& to the C++ callback.
+
+ __ PushReturnAddressFrom(scratch4());
+
+  // ABI for CallApiGetter.
+ Register api_function_address = r8;
+ Address getter_address = v8::ToCData<Address>(callback->getter());
+ __ Move(api_function_address, getter_address, RelocInfo::EXTERNAL_REFERENCE);
+
+ CallApiGetterStub stub(isolate());
+ __ TailCallStub(&stub);
+}
+
+
+void NamedLoadHandlerCompiler::GenerateLoadConstant(Handle<Object> value) {
+ // Return the constant value.
+ __ Move(rax, value);
+ __ ret(0);
+}
+
+
+void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
+ LookupIterator* it, Register holder_reg) {
+ DCHECK(holder()->HasNamedInterceptor());
+ DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined());
+
+ // Compile the interceptor call, followed by inline code to load the
+ // property from further up the prototype chain if the call fails.
+ // Check that the maps haven't changed.
+ DCHECK(holder_reg.is(receiver()) || holder_reg.is(scratch1()));
+
+ // Preserve the receiver register explicitly whenever it is different from the
+ // holder and it is needed should the interceptor return without any result.
+  // The ACCESSOR case needs the receiver to be passed into C++ code; the FIELD
+  // case might cause a miss during the prototype check.
+ bool must_perform_prototype_check =
+ !holder().is_identical_to(it->GetHolder<JSObject>());
+ bool must_preserve_receiver_reg =
+ !receiver().is(holder_reg) &&
+ (it->property_kind() == LookupIterator::ACCESSOR ||
+ must_perform_prototype_check);
+
+ // Save necessary data before invoking an interceptor.
+ // Requires a frame to make GC aware of pushed pointers.
+ {
+ FrameScope frame_scope(masm(), StackFrame::INTERNAL);
+
+ if (must_preserve_receiver_reg) {
+ __ Push(receiver());
+ }
+ __ Push(holder_reg);
+ __ Push(this->name());
+
+    // Invoke the interceptor. Note: the map checks from the receiver to the
+    // interceptor's holder have been compiled before (see the caller of this
+    // method).
+ CompileCallLoadPropertyWithInterceptor(
+ masm(), receiver(), holder_reg, this->name(), holder(),
+ IC::kLoadPropertyWithInterceptorOnly);
+
+    // Check if the interceptor provided a value for the property. If so,
+    // return immediately.
+ Label interceptor_failed;
+ __ CompareRoot(rax, Heap::kNoInterceptorResultSentinelRootIndex);
+ __ j(equal, &interceptor_failed);
+ frame_scope.GenerateLeaveFrame();
+ __ ret(0);
+
+ __ bind(&interceptor_failed);
+ __ Pop(this->name());
+ __ Pop(holder_reg);
+ if (must_preserve_receiver_reg) {
+ __ Pop(receiver());
+ }
+
+ // Leave the internal frame.
+ }
+
+ GenerateLoadPostInterceptor(it, holder_reg);
+}
+
+
+void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
+ // Call the runtime system to load the interceptor.
+ DCHECK(holder()->HasNamedInterceptor());
+ DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined());
+ __ PopReturnAddressTo(scratch2());
+ PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(),
+ holder());
+ __ PushReturnAddressFrom(scratch2());
+
+ ExternalReference ref = ExternalReference(
+ IC_Utility(IC::kLoadPropertyWithInterceptor), isolate());
+ __ TailCallExternalReference(
+ ref, NamedLoadHandlerCompiler::kInterceptorArgsLength, 1);
+}
+
+
+Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
+ Handle<JSObject> object, Handle<Name> name,
+ Handle<ExecutableAccessorInfo> callback) {
+ Register holder_reg = Frontend(receiver(), name);
+
+ __ PopReturnAddressTo(scratch1());
+ __ Push(receiver());
+ __ Push(holder_reg);
+ __ Push(callback); // callback info
+ __ Push(name);
+ __ Push(value());
+ __ PushReturnAddressFrom(scratch1());
+
+ // Do tail-call to the runtime system.
+ ExternalReference store_callback_property =
+ ExternalReference(IC_Utility(IC::kStoreCallbackProperty), isolate());
+ __ TailCallExternalReference(store_callback_property, 5, 1);
+
+ // Return the generated code.
+ return GetCode(kind(), Code::FAST, name);
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm)
+
+
+void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
+ MacroAssembler* masm, Handle<HeapType> type, Register receiver,
+ Handle<JSFunction> setter) {
+ // ----------- S t a t e -------------
+ // -- rsp[0] : return address
+ // -----------------------------------
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Save value register, so we can restore it later.
+ __ Push(value());
+
+ if (!setter.is_null()) {
+ // Call the JavaScript setter with receiver and value on the stack.
+ if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
+ // Swap in the global receiver.
+ __ movp(receiver,
+ FieldOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
+ }
+ __ Push(receiver);
+ __ Push(value());
+ ParameterCount actual(1);
+ ParameterCount expected(setter);
+ __ InvokeFunction(setter, expected, actual, CALL_FUNCTION,
+ NullCallWrapper());
+ } else {
+ // If we generate a global code snippet for deoptimization only, remember
+ // the place to continue after deoptimization.
+ masm->isolate()->heap()->SetSetterStubDeoptPCOffset(masm->pc_offset());
+ }
+
+ // We have to return the passed value, not the return value of the setter.
+ __ Pop(rax);
+
+ // Restore context register.
+ __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ }
+ __ ret(0);
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm())
+
+
+Handle<Code> NamedStoreHandlerCompiler::CompileStoreInterceptor(
+ Handle<Name> name) {
+ __ PopReturnAddressTo(scratch1());
+ __ Push(receiver());
+ __ Push(this->name());
+ __ Push(value());
+ __ PushReturnAddressFrom(scratch1());
+
+ // Do tail-call to the runtime system.
+ ExternalReference store_ic_property = ExternalReference(
+ IC_Utility(IC::kStorePropertyWithInterceptor), isolate());
+ __ TailCallExternalReference(store_ic_property, 3, 1);
+
+ // Return the generated code.
+ return GetCode(kind(), Code::FAST, name);
+}
+
+
+Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
+ MapHandleList* receiver_maps, CodeHandleList* handler_stubs,
+ MapHandleList* transitioned_maps) {
+ Label miss;
+ __ JumpIfSmi(receiver(), &miss, Label::kNear);
+
+ __ movp(scratch1(), FieldOperand(receiver(), HeapObject::kMapOffset));
+ int receiver_count = receiver_maps->length();
+ for (int i = 0; i < receiver_count; ++i) {
+    // Check the map and tail call if there's a match.
+ __ Cmp(scratch1(), receiver_maps->at(i));
+ if (transitioned_maps->at(i).is_null()) {
+ __ j(equal, handler_stubs->at(i), RelocInfo::CODE_TARGET);
+ } else {
+ Label next_map;
+ __ j(not_equal, &next_map, Label::kNear);
+ __ Move(transition_map(), transitioned_maps->at(i),
+ RelocInfo::EMBEDDED_OBJECT);
+ __ jmp(handler_stubs->at(i), RelocInfo::CODE_TARGET);
+ __ bind(&next_map);
+ }
+ }
+
+ __ bind(&miss);
+
+ TailCallBuiltin(masm(), MissBuiltin(kind()));
+
+ // Return the generated code.
+ return GetCode(kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC);
+}
+
+
+Register* PropertyAccessCompiler::load_calling_convention() {
+ // receiver, name, scratch1, scratch2, scratch3, scratch4.
+ Register receiver = LoadIC::ReceiverRegister();
+ Register name = LoadIC::NameRegister();
+ static Register registers[] = {receiver, name, rax, rbx, rdi, r8};
+ return registers;
+}
+
+
+Register* PropertyAccessCompiler::store_calling_convention() {
+ // receiver, name, scratch1, scratch2, scratch3.
+ Register receiver = KeyedStoreIC::ReceiverRegister();
+ Register name = KeyedStoreIC::NameRegister();
+ DCHECK(rbx.is(KeyedStoreIC::MapRegister()));
+ static Register registers[] = {receiver, name, rbx, rdi, r8};
+ return registers;
+}
+
+
+Register NamedStoreHandlerCompiler::value() { return StoreIC::ValueRegister(); }
+
+
+#undef __
+#define __ ACCESS_MASM(masm)
+
+
+void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
+ MacroAssembler* masm, Handle<HeapType> type, Register receiver,
+ Handle<JSFunction> getter) {
+ // ----------- S t a t e -------------
+ // -- rax : receiver
+ // -- rcx : name
+ // -- rsp[0] : return address
+ // -----------------------------------
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ if (!getter.is_null()) {
+ // Call the JavaScript getter with the receiver on the stack.
+ if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
+ // Swap in the global receiver.
+ __ movp(receiver,
+ FieldOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
+ }
+ __ Push(receiver);
+ ParameterCount actual(0);
+ ParameterCount expected(getter);
+ __ InvokeFunction(getter, expected, actual, CALL_FUNCTION,
+ NullCallWrapper());
+ } else {
+ // If we generate a global code snippet for deoptimization only, remember
+ // the place to continue after deoptimization.
+ masm->isolate()->heap()->SetGetterStubDeoptPCOffset(masm->pc_offset());
+ }
+
+ // Restore context register.
+ __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ }
+ __ ret(0);
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm())
+
+
+Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
+ Handle<PropertyCell> cell, Handle<Name> name, bool is_configurable) {
+ Label miss;
+ FrontendHeader(receiver(), name, &miss);
+
+ // Get the value from the cell.
+ Register result = StoreIC::ValueRegister();
+ __ Move(result, cell);
+ __ movp(result, FieldOperand(result, PropertyCell::kValueOffset));
+
+ // Check for deleted property if property can actually be deleted.
+ if (is_configurable) {
+ __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
+ __ j(equal, &miss);
+ } else if (FLAG_debug_code) {
+ __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
+ __ Check(not_equal, kDontDeleteCellsCannotContainTheHole);
+ }
+
+ Counters* counters = isolate()->counters();
+ __ IncrementCounter(counters->named_load_global_stub(), 1);
+ __ ret(0);
+
+ FrontendFooter(name, &miss);
+
+ // Return the generated code.
+ return GetCode(kind(), Code::NORMAL, name);
+}
+
+
+Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types,
+ CodeHandleList* handlers,
+ Handle<Name> name,
+ Code::StubType type,
+ IcCheckType check) {
+ Label miss;
+
+ if (check == PROPERTY &&
+ (kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) {
+    // If we are compiling an IC for dictionary loads or stores, just
+    // check whether the name is unique.
+ if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) {
+ __ JumpIfNotUniqueName(this->name(), &miss);
+ } else {
+ __ Cmp(this->name(), name);
+ __ j(not_equal, &miss);
+ }
+ }
+
+ Label number_case;
+ Label* smi_target = IncludesNumberType(types) ? &number_case : &miss;
+ __ JumpIfSmi(receiver(), smi_target);
+
+  // Polymorphic keyed stores may use the map register.
+ Register map_reg = scratch1();
+ DCHECK(kind() != Code::KEYED_STORE_IC ||
+ map_reg.is(KeyedStoreIC::MapRegister()));
+ __ movp(map_reg, FieldOperand(receiver(), HeapObject::kMapOffset));
+ int receiver_count = types->length();
+ int number_of_handled_maps = 0;
+ for (int current = 0; current < receiver_count; ++current) {
+ Handle<HeapType> type = types->at(current);
+ Handle<Map> map = IC::TypeToMap(*type, isolate());
+ if (!map->is_deprecated()) {
+ number_of_handled_maps++;
+      // Check the map and tail call if there's a match.
+ __ Cmp(map_reg, map);
+ if (type->Is(HeapType::Number())) {
+ DCHECK(!number_case.is_unused());
+ __ bind(&number_case);
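+        // A smi receiver arrives here directly from the JumpIfSmi above with
+        // the zero flag set, so the j(equal) below dispatches it to the
+        // Number handler without loading its map.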
+ }
+ __ j(equal, handlers->at(current), RelocInfo::CODE_TARGET);
+ }
+ }
+ DCHECK(number_of_handled_maps > 0);
+
+ __ bind(&miss);
+ TailCallBuiltin(masm(), MissBuiltin(kind()));
+
+ // Return the generated code.
+ InlineCacheState state =
+ number_of_handled_maps > 1 ? POLYMORPHIC : MONOMORPHIC;
+ return GetCode(kind(), type, name, state);
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm)
+
+
+void ElementHandlerCompiler::GenerateLoadDictionaryElement(
+ MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rcx : key
+ // -- rdx : receiver
+ // -- rsp[0] : return address
+ // -----------------------------------
+ DCHECK(rdx.is(LoadIC::ReceiverRegister()));
+ DCHECK(rcx.is(LoadIC::NameRegister()));
+ Label slow, miss;
+
+ // This stub is meant to be tail-jumped to, the receiver must already
+ // have been verified by the caller to not be a smi.
+
+ __ JumpIfNotSmi(rcx, &miss);
+ __ SmiToInteger32(rbx, rcx);
+ __ movp(rax, FieldOperand(rdx, JSObject::kElementsOffset));
+
+  // Check whether the elements array is a number dictionary.
+ // rdx: receiver
+ // rcx: key
+ // rbx: key as untagged int32
+ // rax: elements
+ __ LoadFromNumberDictionary(&slow, rax, rcx, rbx, r9, rdi, rax);
+ __ ret(0);
+
+ __ bind(&slow);
+ // ----------- S t a t e -------------
+ // -- rcx : key
+ // -- rdx : receiver
+ // -- rsp[0] : return address
+ // -----------------------------------
+ TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Slow);
+
+ __ bind(&miss);
+ // ----------- S t a t e -------------
+ // -- rcx : key
+ // -- rdx : receiver
+ // -- rsp[0] : return address
+ // -----------------------------------
+ TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Miss);
+}
+
+
+#undef __
+}
+} // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_X64
--- /dev/null
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_X64
+
+#include "src/codegen.h"
+#include "src/ic/ic.h"
+#include "src/ic/stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+// ----------------------------------------------------------------------------
+// Static IC stub generators.
+//
+
+#define __ ACCESS_MASM(masm)
+
+
+static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type,
+ Label* global_object) {
+ // Register usage:
+ // type: holds the receiver instance type on entry.
+ __ cmpb(type, Immediate(JS_GLOBAL_OBJECT_TYPE));
+ __ j(equal, global_object);
+ __ cmpb(type, Immediate(JS_BUILTINS_OBJECT_TYPE));
+ __ j(equal, global_object);
+ __ cmpb(type, Immediate(JS_GLOBAL_PROXY_TYPE));
+ __ j(equal, global_object);
+}
+
+
+// Helper function used to load a property from a dictionary backing storage.
+// This function may return false negatives, so the code at miss_label
+// must always perform a complete backup property load.
+// This function is safe to call if name is not an internalized string,
+// and will jump to the miss_label in that case.
+// The generated code assumes that the receiver has slow properties,
+// is not a global object and does not have interceptors.
+static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss_label,
+ Register elements, Register name,
+ Register r0, Register r1, Register result) {
+ // Register use:
+ //
+ // elements - holds the property dictionary on entry and is unchanged.
+ //
+ // name - holds the name of the property on entry and is unchanged.
+ //
+ // r0 - used to hold the capacity of the property dictionary.
+ //
+ // r1 - used to hold the index into the property dictionary.
+ //
+ // result - holds the result on exit if the load succeeded.
+
+ Label done;
+
+ // Probe the dictionary.
+ NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss_label, &done,
+ elements, name, r0, r1);
+
+ // If probing finds an entry in the dictionary, r1 contains the
+ // index into the dictionary. Check that the value is a normal
+ // property.
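+  // Entries are stored as (key, value, details) triples after the header,
+  // so the details word lives two pointers past the entry start. A zero
+  // TypeField in the details smi means the property is NORMAL.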
+ __ bind(&done);
+ const int kElementsStartOffset =
+ NameDictionary::kHeaderSize +
+ NameDictionary::kElementsStartIndex * kPointerSize;
+ const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
+ __ Test(Operand(elements, r1, times_pointer_size,
+ kDetailsOffset - kHeapObjectTag),
+ Smi::FromInt(PropertyDetails::TypeField::kMask));
+ __ j(not_zero, miss_label);
+
+ // Get the value at the masked, scaled index.
+ const int kValueOffset = kElementsStartOffset + kPointerSize;
+ __ movp(result, Operand(elements, r1, times_pointer_size,
+ kValueOffset - kHeapObjectTag));
+}
+
+
+// Helper function used to store a property to a dictionary backing
+// storage. This function may fail to store a property even though it
+// is in the dictionary, so code at miss_label must always call a
+// backup property store that is complete. This function is safe to
+// call if name is not an internalized string, and will jump to the miss_label
+// in that case. The generated code assumes that the receiver has slow
+// properties, is not a global object and does not have interceptors.
+static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss_label,
+ Register elements, Register name,
+ Register value, Register scratch0,
+ Register scratch1) {
+ // Register use:
+ //
+ // elements - holds the property dictionary on entry and is clobbered.
+ //
+ // name - holds the name of the property on entry and is unchanged.
+ //
+ // value - holds the value to store and is unchanged.
+ //
+ // scratch0 - used during the positive dictionary lookup and is clobbered.
+ //
+ // scratch1 - used for index into the property dictionary and is clobbered.
+ Label done;
+
+ // Probe the dictionary.
+ NameDictionaryLookupStub::GeneratePositiveLookup(
+ masm, miss_label, &done, elements, name, scratch0, scratch1);
+
+ // If probing finds an entry in the dictionary, scratch0 contains the
+ // index into the dictionary. Check that the value is a normal
+ // property that is not read only.
+ __ bind(&done);
+ const int kElementsStartOffset =
+ NameDictionary::kHeaderSize +
+ NameDictionary::kElementsStartIndex * kPointerSize;
+ const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
+ const int kTypeAndReadOnlyMask =
+ (PropertyDetails::TypeField::kMask |
+ PropertyDetails::AttributesField::encode(READ_ONLY))
+ << kSmiTagSize;
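+  // Only writable NORMAL properties pass this test: any set type bits or a
+  // set READ_ONLY attribute bit in the details smi jumps to miss_label.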
+ __ Test(Operand(elements, scratch1, times_pointer_size,
+ kDetailsOffset - kHeapObjectTag),
+ Smi::FromInt(kTypeAndReadOnlyMask));
+ __ j(not_zero, miss_label);
+
+ // Store the value at the masked, scaled index.
+ const int kValueOffset = kElementsStartOffset + kPointerSize;
+ __ leap(scratch1, Operand(elements, scratch1, times_pointer_size,
+ kValueOffset - kHeapObjectTag));
+ __ movp(Operand(scratch1, 0), value);
+
+ // Update write barrier. Make sure not to clobber the value.
+ __ movp(scratch0, value);
+ __ RecordWrite(elements, scratch1, scratch0, kDontSaveFPRegs);
+}
+
+
+// Checks the receiver for special cases (value type, slow case bits).
+// Falls through for regular JS object.
+static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
+ Register receiver, Register map,
+ int interceptor_bit, Label* slow) {
+ // Register use:
+ // receiver - holds the receiver and is unchanged.
+ // Scratch registers:
+ // map - used to hold the map of the receiver.
+
+ // Check that the object isn't a smi.
+ __ JumpIfSmi(receiver, slow);
+
+ // Check that the object is some kind of JS object EXCEPT JS Value type.
+ // In the case that the object is a value-wrapper object,
+ // we enter the runtime system to make sure that indexing
+  // into string objects works as intended.
+ DCHECK(JS_OBJECT_TYPE > JS_VALUE_TYPE);
+ __ CmpObjectType(receiver, JS_OBJECT_TYPE, map);
+ __ j(below, slow);
+
+  // Check the bit field: go to the slow case if the receiver requires access
+  // checks or has the relevant interceptor bit set.
+ __ testb(
+ FieldOperand(map, Map::kBitFieldOffset),
+ Immediate((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)));
+ __ j(not_zero, slow);
+}
+
+
+// Loads an indexed element from a fast case array.
+// If not_fast_array is NULL, the elements map check is skipped.
+static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
+ Register key, Register elements,
+ Register scratch, Register result,
+ Label* not_fast_array, Label* out_of_range) {
+ // Register use:
+ //
+ // receiver - holds the receiver on entry.
+ // Unchanged unless 'result' is the same register.
+ //
+ // key - holds the smi key on entry.
+ // Unchanged unless 'result' is the same register.
+ //
+ // elements - holds the elements of the receiver on exit.
+ //
+ // result - holds the result on exit if the load succeeded.
+  //            Allowed to be the same as 'receiver' or 'key'.
+ // Unchanged on bailout so 'receiver' and 'key' can be safely
+ // used by further computation.
+ //
+ // Scratch registers:
+ //
+ // scratch - used to hold elements of the receiver and the loaded value.
+
+ __ movp(elements, FieldOperand(receiver, JSObject::kElementsOffset));
+ if (not_fast_array != NULL) {
+ // Check that the object is in fast mode and writable.
+ __ CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
+ Heap::kFixedArrayMapRootIndex);
+ __ j(not_equal, not_fast_array);
+ } else {
+ __ AssertFastElements(elements);
+ }
+ // Check that the key (index) is within bounds.
+ __ SmiCompare(key, FieldOperand(elements, FixedArray::kLengthOffset));
+ // Unsigned comparison rejects negative indices.
+ __ j(above_equal, out_of_range);
+ // Fast case: Do the load.
+ SmiIndex index = masm->SmiToIndex(scratch, key, kPointerSizeLog2);
+ __ movp(scratch, FieldOperand(elements, index.reg, index.scale,
+ FixedArray::kHeaderSize));
+ __ CompareRoot(scratch, Heap::kTheHoleValueRootIndex);
+  // In case the loaded value is the_hole, we have to consult GetProperty
+ // to ensure the prototype chain is searched.
+ __ j(equal, out_of_range);
+ if (!result.is(scratch)) {
+ __ movp(result, scratch);
+ }
+}
+
+
+// Checks whether a key is an array index string or a unique name.
+// Falls through if the key is a unique name.
+static void GenerateKeyNameCheck(MacroAssembler* masm, Register key,
+ Register map, Register hash,
+ Label* index_string, Label* not_unique) {
+ // Register use:
+ // key - holds the key and is unchanged. Assumed to be non-smi.
+ // Scratch registers:
+ // map - used to hold the map of the key.
+ // hash - used to hold the hash of the key.
+ Label unique;
+ __ CmpObjectType(key, LAST_UNIQUE_NAME_TYPE, map);
+ __ j(above, not_unique);
+ STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
+ __ j(equal, &unique);
+
+ // Is the string an array index, with cached numeric value?
+ __ movl(hash, FieldOperand(key, Name::kHashFieldOffset));
+ __ testl(hash, Immediate(Name::kContainsCachedArrayIndexMask));
+ __ j(zero, index_string); // The value in hash is used at jump target.
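+  // (IndexFromHash at the jump target extracts the cached array index from
+  // the hash register.)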
+
+ // Is the string internalized? We already know it's a string so a single
+ // bit test is enough.
+ STATIC_ASSERT(kNotInternalizedTag != 0);
+ __ testb(FieldOperand(map, Map::kInstanceTypeOffset),
+ Immediate(kIsNotInternalizedMask));
+ __ j(not_zero, not_unique);
+
+ __ bind(&unique);
+}
+
+
+void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
+ // The return address is on the stack.
+ Label slow, check_name, index_smi, index_name, property_array_property;
+ Label probe_dictionary, check_number_dictionary;
+
+ Register receiver = ReceiverRegister();
+ Register key = NameRegister();
+ DCHECK(receiver.is(rdx));
+ DCHECK(key.is(rcx));
+
+ // Check that the key is a smi.
+ __ JumpIfNotSmi(key, &check_name);
+ __ bind(&index_smi);
+ // Now the key is known to be a smi. This place is also jumped to from below
+ // where a numeric string is converted to a smi.
+
+ GenerateKeyedLoadReceiverCheck(masm, receiver, rax,
+ Map::kHasIndexedInterceptor, &slow);
+
+ // Check the receiver's map to see if it has fast elements.
+ __ CheckFastElements(rax, &check_number_dictionary);
+
+ GenerateFastArrayLoad(masm, receiver, key, rax, rbx, rax, NULL, &slow);
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->keyed_load_generic_smi(), 1);
+ __ ret(0);
+
+ __ bind(&check_number_dictionary);
+ __ SmiToInteger32(rbx, key);
+ __ movp(rax, FieldOperand(receiver, JSObject::kElementsOffset));
+
+  // Check whether the elements array is a number dictionary.
+ // rbx: key as untagged int32
+ // rax: elements
+ __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
+ Heap::kHashTableMapRootIndex);
+ __ j(not_equal, &slow);
+ __ LoadFromNumberDictionary(&slow, rax, key, rbx, r9, rdi, rax);
+ __ ret(0);
+
+ __ bind(&slow);
+ // Slow case: Jump to runtime.
+ __ IncrementCounter(counters->keyed_load_generic_slow(), 1);
+ GenerateRuntimeGetProperty(masm);
+
+ __ bind(&check_name);
+ GenerateKeyNameCheck(masm, key, rax, rbx, &index_name, &slow);
+
+ GenerateKeyedLoadReceiverCheck(masm, receiver, rax, Map::kHasNamedInterceptor,
+ &slow);
+
+  // If the receiver is a fast-case object, check the keyed lookup
+  // cache. Otherwise probe the property dictionary.
+ __ movp(rbx, FieldOperand(receiver, JSObject::kPropertiesOffset));
+ __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
+ Heap::kHashTableMapRootIndex);
+ __ j(equal, &probe_dictionary);
+
+ // Load the map of the receiver, compute the keyed lookup cache hash
+ // based on 32 bits of the map pointer and the string hash.
+ __ movp(rbx, FieldOperand(receiver, HeapObject::kMapOffset));
+ __ movl(rax, rbx);
+ __ shrl(rax, Immediate(KeyedLookupCache::kMapHashShift));
+ __ movl(rdi, FieldOperand(key, String::kHashFieldOffset));
+ __ shrl(rdi, Immediate(String::kHashShift));
+ __ xorp(rax, rdi);
+ int mask = (KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask);
+ __ andp(rax, Immediate(mask));
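+  // The bucket index computed above is
+  //   ((map_low32 >> kMapHashShift) ^ (hash_field >> String::kHashShift))
+  //       & kCapacityMask & kHashMask.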
+
+ // Load the key (consisting of map and internalized string) from the cache and
+  // check for a match.
+ Label load_in_object_property;
+ static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
+ Label hit_on_nth_entry[kEntriesPerBucket];
+ ExternalReference cache_keys =
+ ExternalReference::keyed_lookup_cache_keys(masm->isolate());
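+
+  // cache_keys stores (map, internalized string) pairs, two pointers per
+  // entry; rdi below holds the byte offset of the bucket's first entry
+  // (index * 2 * kPointerSize).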
+
+ for (int i = 0; i < kEntriesPerBucket - 1; i++) {
+ Label try_next_entry;
+ __ movp(rdi, rax);
+ __ shlp(rdi, Immediate(kPointerSizeLog2 + 1));
+ __ LoadAddress(kScratchRegister, cache_keys);
+ int off = kPointerSize * i * 2;
+ __ cmpp(rbx, Operand(kScratchRegister, rdi, times_1, off));
+ __ j(not_equal, &try_next_entry);
+ __ cmpp(key, Operand(kScratchRegister, rdi, times_1, off + kPointerSize));
+ __ j(equal, &hit_on_nth_entry[i]);
+ __ bind(&try_next_entry);
+ }
+
+ int off = kPointerSize * (kEntriesPerBucket - 1) * 2;
+ __ cmpp(rbx, Operand(kScratchRegister, rdi, times_1, off));
+ __ j(not_equal, &slow);
+ __ cmpp(key, Operand(kScratchRegister, rdi, times_1, off + kPointerSize));
+ __ j(not_equal, &slow);
+
+ // Get field offset, which is a 32-bit integer.
+ ExternalReference cache_field_offsets =
+ ExternalReference::keyed_lookup_cache_field_offsets(masm->isolate());
+
+ // Hit on nth entry.
+ for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
+ __ bind(&hit_on_nth_entry[i]);
+ if (i != 0) {
+ __ addl(rax, Immediate(i));
+ }
+ __ LoadAddress(kScratchRegister, cache_field_offsets);
+ __ movl(rdi, Operand(kScratchRegister, rax, times_4, 0));
+ __ movzxbp(rax, FieldOperand(rbx, Map::kInObjectPropertiesOffset));
+ __ subp(rdi, rax);
+ __ j(above_equal, &property_array_property);
+ if (i != 0) {
+ __ jmp(&load_in_object_property);
+ }
+ }
+
+ // Load in-object property.
+ __ bind(&load_in_object_property);
+ __ movzxbp(rax, FieldOperand(rbx, Map::kInstanceSizeOffset));
+ __ addp(rax, rdi);
+ __ movp(rax, FieldOperand(receiver, rax, times_pointer_size, 0));
+ __ IncrementCounter(counters->keyed_load_generic_lookup_cache(), 1);
+ __ ret(0);
+
+ // Load property array property.
+ __ bind(&property_array_property);
+ __ movp(rax, FieldOperand(receiver, JSObject::kPropertiesOffset));
+ __ movp(rax,
+ FieldOperand(rax, rdi, times_pointer_size, FixedArray::kHeaderSize));
+ __ IncrementCounter(counters->keyed_load_generic_lookup_cache(), 1);
+ __ ret(0);
+
+ // Do a quick inline probe of the receiver's dictionary, if it
+ // exists.
+ __ bind(&probe_dictionary);
+  // rbx: property dictionary
+
+ __ movp(rax, FieldOperand(receiver, JSObject::kMapOffset));
+ __ movb(rax, FieldOperand(rax, Map::kInstanceTypeOffset));
+ GenerateGlobalInstanceTypeCheck(masm, rax, &slow);
+
+ GenerateDictionaryLoad(masm, &slow, rbx, key, rax, rdi, rax);
+ __ IncrementCounter(counters->keyed_load_generic_symbol(), 1);
+ __ ret(0);
+
+ __ bind(&index_name);
+ __ IndexFromHash(rbx, key);
+ __ jmp(&index_smi);
+}
+
+
+void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
+ // Return address is on the stack.
+ Label miss;
+
+ Register receiver = ReceiverRegister();
+ Register index = NameRegister();
+ Register scratch = rbx;
+ Register result = rax;
+ DCHECK(!scratch.is(receiver) && !scratch.is(index));
+
+ StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
+ &miss, // When not a string.
+ &miss, // When not a number.
+ &miss, // When index out of range.
+ STRING_INDEX_IS_ARRAY_INDEX);
+ char_at_generator.GenerateFast(masm);
+ __ ret(0);
+
+ StubRuntimeCallHelper call_helper;
+ char_at_generator.GenerateSlow(masm, call_helper);
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+}
+
+
+void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
+ // Return address is on the stack.
+ Label slow;
+
+ Register receiver = ReceiverRegister();
+ Register key = NameRegister();
+ Register scratch = rax;
+ DCHECK(!scratch.is(receiver) && !scratch.is(key));
+
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(receiver, &slow);
+
+ // Check that the key is an array index, that is Uint32.
+ STATIC_ASSERT(kSmiValueSize <= 32);
+ __ JumpUnlessNonNegativeSmi(key, &slow);
+
+ // Get the map of the receiver.
+ __ movp(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
+
+ // Check that it has indexed interceptor and access checks
+ // are not enabled for this object.
+ __ movb(scratch, FieldOperand(scratch, Map::kBitFieldOffset));
+ __ andb(scratch, Immediate(kSlowCaseBitFieldMask));
+ __ cmpb(scratch, Immediate(1 << Map::kHasIndexedInterceptor));
+ __ j(not_zero, &slow);
+
+ // Everything is fine, call runtime.
+ __ PopReturnAddressTo(scratch);
+ __ Push(receiver); // receiver
+ __ Push(key); // key
+ __ PushReturnAddressFrom(scratch);
+
+ // Perform tail call to the entry.
+ __ TailCallExternalReference(
+ ExternalReference(IC_Utility(kLoadElementWithInterceptor),
+ masm->isolate()),
+ 2, 1);
+
+ __ bind(&slow);
+ GenerateMiss(masm);
+}
+
+
+static void KeyedStoreGenerateGenericHelper(
+ MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
+ KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length) {
+ Label transition_smi_elements;
+ Label finish_object_store, non_double_value, transition_double_elements;
+ Label fast_double_without_map_check;
+ Register receiver = KeyedStoreIC::ReceiverRegister();
+ Register key = KeyedStoreIC::NameRegister();
+ Register value = KeyedStoreIC::ValueRegister();
+ DCHECK(receiver.is(rdx));
+ DCHECK(key.is(rcx));
+ DCHECK(value.is(rax));
+ // Fast case: Do the store, could be either Object or double.
+ __ bind(fast_object);
+ // rbx: receiver's elements array (a FixedArray)
+ // receiver is a JSArray.
+ // r9: map of receiver
+ if (check_map == kCheckMap) {
+ __ movp(rdi, FieldOperand(rbx, HeapObject::kMapOffset));
+ __ CompareRoot(rdi, Heap::kFixedArrayMapRootIndex);
+ __ j(not_equal, fast_double);
+ }
+
+ // HOLECHECK: guards "A[i] = V"
+ // We have to go to the runtime if the current value is the hole because
+ // there may be a callback on the element
+ Label holecheck_passed1;
+ __ movp(kScratchRegister,
+ FieldOperand(rbx, key, times_pointer_size, FixedArray::kHeaderSize));
+ __ CompareRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
+ __ j(not_equal, &holecheck_passed1);
+ __ JumpIfDictionaryInPrototypeChain(receiver, rdi, kScratchRegister, slow);
+
+ __ bind(&holecheck_passed1);
+
+ // Smi stores don't require further checks.
+ Label non_smi_value;
+ __ JumpIfNotSmi(value, &non_smi_value);
+ if (increment_length == kIncrementLength) {
+ // Add 1 to receiver->length.
+ __ leal(rdi, Operand(key, 1));
+ __ Integer32ToSmiField(FieldOperand(receiver, JSArray::kLengthOffset), rdi);
+ }
+  // It's irrelevant whether the array is smi-only or not when writing a smi.
+ __ movp(FieldOperand(rbx, key, times_pointer_size, FixedArray::kHeaderSize),
+ value);
+ __ ret(0);
+
+ __ bind(&non_smi_value);
+ // Writing a non-smi, check whether array allows non-smi elements.
+ // r9: receiver's map
+ __ CheckFastObjectElements(r9, &transition_smi_elements);
+
+ __ bind(&finish_object_store);
+ if (increment_length == kIncrementLength) {
+ // Add 1 to receiver->length.
+ __ leal(rdi, Operand(key, 1));
+ __ Integer32ToSmiField(FieldOperand(receiver, JSArray::kLengthOffset), rdi);
+ }
+ __ movp(FieldOperand(rbx, key, times_pointer_size, FixedArray::kHeaderSize),
+ value);
+ __ movp(rdx, value); // Preserve the value which is returned.
+ __ RecordWriteArray(rbx, rdx, key, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ __ ret(0);
+
+ __ bind(fast_double);
+ if (check_map == kCheckMap) {
+ // Check for fast double array case. If this fails, call through to the
+ // runtime.
+ // rdi: elements array's map
+ __ CompareRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
+ __ j(not_equal, slow);
+ }
+
+ // HOLECHECK: guards "A[i] double hole?"
+  // We have to see if the double version of the hole is present. If so,
+  // go to the runtime.
+ uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
+ __ cmpl(FieldOperand(rbx, key, times_8, offset), Immediate(kHoleNanUpper32));
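+  // (The hole in a FixedDoubleArray is a NaN with a distinguished bit
+  // pattern; comparing the upper 32 bits against kHoleNanUpper32 above is
+  // sufficient to detect it.)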
+ __ j(not_equal, &fast_double_without_map_check);
+ __ JumpIfDictionaryInPrototypeChain(receiver, rdi, kScratchRegister, slow);
+
+ __ bind(&fast_double_without_map_check);
+ __ StoreNumberToDoubleElements(value, rbx, key, xmm0,
+ &transition_double_elements);
+ if (increment_length == kIncrementLength) {
+ // Add 1 to receiver->length.
+ __ leal(rdi, Operand(key, 1));
+ __ Integer32ToSmiField(FieldOperand(receiver, JSArray::kLengthOffset), rdi);
+ }
+ __ ret(0);
+
+ __ bind(&transition_smi_elements);
+ __ movp(rbx, FieldOperand(receiver, HeapObject::kMapOffset));
+
+ // Transition the array appropriately depending on the value type.
+ __ movp(r9, FieldOperand(value, HeapObject::kMapOffset));
+ __ CompareRoot(r9, Heap::kHeapNumberMapRootIndex);
+ __ j(not_equal, &non_double_value);
+
+ // Value is a double. Transition FAST_SMI_ELEMENTS ->
+ // FAST_DOUBLE_ELEMENTS and complete the store.
+ __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
+ FAST_DOUBLE_ELEMENTS, rbx, rdi, slow);
+ AllocationSiteMode mode =
+ AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS);
+ ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value,
+ rbx, mode, slow);
+ __ movp(rbx, FieldOperand(receiver, JSObject::kElementsOffset));
+ __ jmp(&fast_double_without_map_check);
+
+ __ bind(&non_double_value);
+ // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
+ __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS, rbx,
+ rdi, slow);
+ mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
+ ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
+ masm, receiver, key, value, rbx, mode, slow);
+ __ movp(rbx, FieldOperand(receiver, JSObject::kElementsOffset));
+ __ jmp(&finish_object_store);
+
+ __ bind(&transition_double_elements);
+ // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
+ // HeapNumber. Make sure that the receiver is a Array with FAST_ELEMENTS and
+ // transition array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS
+ __ movp(rbx, FieldOperand(receiver, HeapObject::kMapOffset));
+ __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS,
+ rbx, rdi, slow);
+ mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
+ ElementsTransitionGenerator::GenerateDoubleToObject(masm, receiver, key,
+ value, rbx, mode, slow);
+ __ movp(rbx, FieldOperand(receiver, JSObject::kElementsOffset));
+ __ jmp(&finish_object_store);
+}
+
+
+void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
+ StrictMode strict_mode) {
+ // Return address is on the stack.
+ Label slow, slow_with_tagged_index, fast_object, fast_object_grow;
+ Label fast_double, fast_double_grow;
+ Label array, extra, check_if_double_array;
+ Register receiver = ReceiverRegister();
+ Register key = NameRegister();
+ DCHECK(receiver.is(rdx));
+ DCHECK(key.is(rcx));
+
+ // Check that the object isn't a smi.
+ __ JumpIfSmi(receiver, &slow_with_tagged_index);
+ // Get the map from the receiver.
+ __ movp(r9, FieldOperand(receiver, HeapObject::kMapOffset));
+ // Check that the receiver does not require access checks and is not observed.
+ // The generic stub does not perform map checks or handle observed objects.
+ __ testb(FieldOperand(r9, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsAccessCheckNeeded | 1 << Map::kIsObserved));
+ __ j(not_zero, &slow_with_tagged_index);
+ // Check that the key is a smi.
+ __ JumpIfNotSmi(key, &slow_with_tagged_index);
+ __ SmiToInteger32(key, key);
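+  // From here on the key is untagged; the &slow path re-tags it before
+  // falling through to &slow_with_tagged_index.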
+
+ __ CmpInstanceType(r9, JS_ARRAY_TYPE);
+ __ j(equal, &array);
+ // Check that the object is some kind of JSObject.
+ __ CmpInstanceType(r9, FIRST_JS_OBJECT_TYPE);
+ __ j(below, &slow);
+
+ // Object case: Check key against length in the elements array.
+ __ movp(rbx, FieldOperand(receiver, JSObject::kElementsOffset));
+ // Check array bounds.
+ __ SmiCompareInteger32(FieldOperand(rbx, FixedArray::kLengthOffset), key);
+ // rbx: FixedArray
+ __ j(above, &fast_object);
+
+ // Slow case: call runtime.
+ __ bind(&slow);
+ __ Integer32ToSmi(key, key);
+ __ bind(&slow_with_tagged_index);
+ GenerateRuntimeSetProperty(masm, strict_mode);
+  // Never returns here.
+
+ // Extra capacity case: Check if there is extra capacity to
+ // perform the store and update the length. Used for adding one
+ // element to the array by writing to array[array.length].
+ __ bind(&extra);
+ // receiver is a JSArray.
+ // rbx: receiver's elements array (a FixedArray)
+  // flags: smicompare (receiver.length(), key)
+ __ j(not_equal, &slow); // do not leave holes in the array
+ __ SmiCompareInteger32(FieldOperand(rbx, FixedArray::kLengthOffset), key);
+ __ j(below_equal, &slow);
+  // The fast-grow paths below will increment the length; first check the
+  // elements kind.
+ __ movp(rdi, FieldOperand(rbx, HeapObject::kMapOffset));
+ __ CompareRoot(rdi, Heap::kFixedArrayMapRootIndex);
+ __ j(not_equal, &check_if_double_array);
+ __ jmp(&fast_object_grow);
+
+ __ bind(&check_if_double_array);
+ // rdi: elements array's map
+ __ CompareRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
+ __ j(not_equal, &slow);
+ __ jmp(&fast_double_grow);
+
+ // Array case: Get the length and the elements array from the JS
+ // array. Check that the array is in fast mode (and writable); if it
+ // is the length is always a smi.
+ __ bind(&array);
+ // receiver is a JSArray.
+ __ movp(rbx, FieldOperand(receiver, JSObject::kElementsOffset));
+
+ // Check the key against the length in the array, compute the
+ // address to store into and fall through to fast case.
+ __ SmiCompareInteger32(FieldOperand(receiver, JSArray::kLengthOffset), key);
+ __ j(below_equal, &extra);
+
+ KeyedStoreGenerateGenericHelper(masm, &fast_object, &fast_double, &slow,
+ kCheckMap, kDontIncrementLength);
+ KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow,
+ &slow, kDontCheckMap, kIncrementLength);
+}
+
+
+static Operand GenerateMappedArgumentsLookup(
+ MacroAssembler* masm, Register object, Register key, Register scratch1,
+ Register scratch2, Register scratch3, Label* unmapped_case,
+ Label* slow_case) {
+ Heap* heap = masm->isolate()->heap();
+
+ // Check that the receiver is a JSObject. Because of the elements
+ // map check later, we do not need to check for interceptors or
+ // whether it requires access checks.
+ __ JumpIfSmi(object, slow_case);
+ // Check that the object is some kind of JSObject.
+ __ CmpObjectType(object, FIRST_JS_RECEIVER_TYPE, scratch1);
+ __ j(below, slow_case);
+
+  // Check that the key is a non-negative smi.
+ Condition check = masm->CheckNonNegativeSmi(key);
+ __ j(NegateCondition(check), slow_case);
+
+  // Load the elements (the parameter map) into scratch1 and check its map
+  // against the sloppy arguments elements map; bail to the slow case on a
+  // mismatch.
+ Handle<Map> arguments_map(heap->sloppy_arguments_elements_map());
+ __ movp(scratch1, FieldOperand(object, JSObject::kElementsOffset));
+ __ CheckMap(scratch1, arguments_map, slow_case, DONT_DO_SMI_CHECK);
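+  // A sloppy-arguments elements array is laid out as: [0] the context,
+  // [1] the unmapped arguments backing store, [2 + i] either the context
+  // slot index for mapped parameter i or the hole. Hence the "- 2" on the
+  // length and the 2 * kPointerSize header bias below.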
+
+ // Check if element is in the range of mapped arguments.
+ __ movp(scratch2, FieldOperand(scratch1, FixedArray::kLengthOffset));
+ __ SmiSubConstant(scratch2, scratch2, Smi::FromInt(2));
+ __ cmpp(key, scratch2);
+ __ j(greater_equal, unmapped_case);
+
+ // Load element index and check whether it is the hole.
+ const int kHeaderSize = FixedArray::kHeaderSize + 2 * kPointerSize;
+ __ SmiToInteger64(scratch3, key);
+ __ movp(scratch2,
+ FieldOperand(scratch1, scratch3, times_pointer_size, kHeaderSize));
+ __ CompareRoot(scratch2, Heap::kTheHoleValueRootIndex);
+ __ j(equal, unmapped_case);
+
+ // Load value from context and return it. We can reuse scratch1 because
+ // we do not jump to the unmapped lookup (which requires the parameter
+ // map in scratch1).
+ __ movp(scratch1, FieldOperand(scratch1, FixedArray::kHeaderSize));
+ __ SmiToInteger64(scratch3, scratch2);
+ return FieldOperand(scratch1, scratch3, times_pointer_size,
+ Context::kHeaderSize);
+}
+
+
+static Operand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
+ Register key,
+ Register parameter_map,
+ Register scratch,
+ Label* slow_case) {
+  // The element is in the arguments backing store, which is referenced by the
+ // second element of the parameter_map. The parameter_map register
+ // must be loaded with the parameter map of the arguments object and is
+ // overwritten.
+ const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
+ Register backing_store = parameter_map;
+ __ movp(backing_store, FieldOperand(parameter_map, kBackingStoreOffset));
+ Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
+ __ CheckMap(backing_store, fixed_array_map, slow_case, DONT_DO_SMI_CHECK);
+ __ movp(scratch, FieldOperand(backing_store, FixedArray::kLengthOffset));
+ __ cmpp(key, scratch);
+ __ j(greater_equal, slow_case);
+ __ SmiToInteger64(scratch, key);
+ return FieldOperand(backing_store, scratch, times_pointer_size,
+ FixedArray::kHeaderSize);
+}
+
+
+void KeyedLoadIC::GenerateSloppyArguments(MacroAssembler* masm) {
+ // The return address is on the stack.
+ Register receiver = ReceiverRegister();
+ Register key = NameRegister();
+ DCHECK(receiver.is(rdx));
+ DCHECK(key.is(rcx));
+
+ Label slow, notin;
+ Operand mapped_location = GenerateMappedArgumentsLookup(
+ masm, receiver, key, rbx, rax, rdi, ¬in, &slow);
+ __ movp(rax, mapped_location);
+ __ Ret();
+ __ bind(¬in);
+ // The unmapped lookup expects that the parameter map is in rbx.
+ Operand unmapped_location =
+ GenerateUnmappedArgumentsLookup(masm, key, rbx, rax, &slow);
+ __ CompareRoot(unmapped_location, Heap::kTheHoleValueRootIndex);
+ __ j(equal, &slow);
+ __ movp(rax, unmapped_location);
+ __ Ret();
+ __ bind(&slow);
+ GenerateMiss(masm);
+}
+
+
+void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
+ // The return address is on the stack.
+ Label slow, notin;
+ Register receiver = ReceiverRegister();
+ Register name = NameRegister();
+ Register value = ValueRegister();
+ DCHECK(receiver.is(rdx));
+ DCHECK(name.is(rcx));
+ DCHECK(value.is(rax));
+
+ Operand mapped_location = GenerateMappedArgumentsLookup(
+ masm, receiver, name, rbx, rdi, r8, ¬in, &slow);
+ __ movp(mapped_location, value);
+ __ leap(r9, mapped_location);
+ __ movp(r8, value);
+ __ RecordWrite(rbx, r9, r8, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+ INLINE_SMI_CHECK);
+ __ Ret();
+ __ bind(¬in);
+ // The unmapped lookup expects that the parameter map is in rbx.
+ Operand unmapped_location =
+ GenerateUnmappedArgumentsLookup(masm, name, rbx, rdi, &slow);
+ __ movp(unmapped_location, value);
+ __ leap(r9, unmapped_location);
+ __ movp(r8, value);
+ __ RecordWrite(rbx, r9, r8, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+ INLINE_SMI_CHECK);
+ __ Ret();
+ __ bind(&slow);
+ GenerateMiss(masm);
+}
+
+
+void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
+ // The return address is on the stack.
+ Register receiver = ReceiverRegister();
+ Register name = NameRegister();
+ DCHECK(receiver.is(rdx));
+ DCHECK(name.is(rcx));
+
+ // Probe the stub cache.
+ Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
+ Code::ComputeHandlerFlags(Code::LOAD_IC));
+ masm->isolate()->stub_cache()->GenerateProbe(masm, flags, receiver, name, rbx,
+ rax);
+
+ GenerateMiss(masm);
+}
+
+
+void LoadIC::GenerateNormal(MacroAssembler* masm) {
+ Register dictionary = rax;
+ DCHECK(!dictionary.is(ReceiverRegister()));
+ DCHECK(!dictionary.is(NameRegister()));
+
+ Label slow;
+
+ __ movp(dictionary,
+ FieldOperand(ReceiverRegister(), JSObject::kPropertiesOffset));
+ GenerateDictionaryLoad(masm, &slow, dictionary, NameRegister(), rbx, rdi,
+ rax);
+ __ ret(0);
+
+ // Dictionary load failed, go slow (but don't miss).
+ __ bind(&slow);
+ GenerateRuntimeGetProperty(masm);
+}
+
+
+// A register that isn't one of the parameters to the load IC.
+static const Register LoadIC_TempRegister() { return rbx; }
+
+
+static const Register KeyedLoadIC_TempRegister() { return rbx; }
+
+
+void LoadIC::GenerateMiss(MacroAssembler* masm) {
+ // The return address is on the stack.
+
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->load_miss(), 1);
+
+ __ PopReturnAddressTo(LoadIC_TempRegister());
+ __ Push(ReceiverRegister()); // receiver
+ __ Push(NameRegister()); // name
+ __ PushReturnAddressFrom(LoadIC_TempRegister());
+
+ // Perform tail call to the entry.
+ ExternalReference ref =
+ ExternalReference(IC_Utility(kLoadIC_Miss), masm->isolate());
+ __ TailCallExternalReference(ref, 2, 1);
+}
+
+
+void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
+ // The return address is on the stack.
+
+ __ PopReturnAddressTo(LoadIC_TempRegister());
+ __ Push(ReceiverRegister()); // receiver
+ __ Push(NameRegister()); // name
+ __ PushReturnAddressFrom(LoadIC_TempRegister());
+
+ // Perform tail call to the entry.
+ __ TailCallRuntime(Runtime::kGetProperty, 2, 1);
+}
+
+
+void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
+ // The return address is on the stack.
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->keyed_load_miss(), 1);
+
+ __ PopReturnAddressTo(KeyedLoadIC_TempRegister());
+ __ Push(ReceiverRegister()); // receiver
+ __ Push(NameRegister()); // name
+ __ PushReturnAddressFrom(KeyedLoadIC_TempRegister());
+
+ // Perform tail call to the entry.
+ ExternalReference ref =
+ ExternalReference(IC_Utility(kKeyedLoadIC_Miss), masm->isolate());
+ __ TailCallExternalReference(ref, 2, 1);
+}
+
+
+// IC register specifications
+const Register LoadIC::ReceiverRegister() { return rdx; }
+const Register LoadIC::NameRegister() { return rcx; }
+
+
+const Register LoadIC::SlotRegister() {
+ DCHECK(FLAG_vector_ics);
+ return rax;
+}
+
+
+const Register LoadIC::VectorRegister() {
+ DCHECK(FLAG_vector_ics);
+ return rbx;
+}
+
+
+const Register StoreIC::ReceiverRegister() { return rdx; }
+const Register StoreIC::NameRegister() { return rcx; }
+const Register StoreIC::ValueRegister() { return rax; }
+
+
+const Register KeyedStoreIC::MapRegister() { return rbx; }
+
+
+void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
+ // The return address is on the stack.
+
+ __ PopReturnAddressTo(KeyedLoadIC_TempRegister());
+ __ Push(ReceiverRegister()); // receiver
+ __ Push(NameRegister()); // name
+ __ PushReturnAddressFrom(KeyedLoadIC_TempRegister());
+
+ // Perform tail call to the entry.
+ __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
+}
+
+
+void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
+ // The return address is on the stack.
+
+  // Probe the stub cache; the receiver and name are already in registers.
+ Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
+ Code::ComputeHandlerFlags(Code::STORE_IC));
+ masm->isolate()->stub_cache()->GenerateProbe(masm, flags, ReceiverRegister(),
+ NameRegister(), rbx, no_reg);
+
+ // Cache miss: Jump to runtime.
+ GenerateMiss(masm);
+}
+
+
+static void StoreIC_PushArgs(MacroAssembler* masm) {
+ Register receiver = StoreIC::ReceiverRegister();
+ Register name = StoreIC::NameRegister();
+ Register value = StoreIC::ValueRegister();
+
+ DCHECK(!rbx.is(receiver) && !rbx.is(name) && !rbx.is(value));
+
+ __ PopReturnAddressTo(rbx);
+ __ Push(receiver);
+ __ Push(name);
+ __ Push(value);
+ __ PushReturnAddressFrom(rbx);
+}
+
+
+void StoreIC::GenerateMiss(MacroAssembler* masm) {
+ // Return address is on the stack.
+ StoreIC_PushArgs(masm);
+
+ // Perform tail call to the entry.
+ ExternalReference ref =
+ ExternalReference(IC_Utility(kStoreIC_Miss), masm->isolate());
+ __ TailCallExternalReference(ref, 3, 1);
+}
+
+
+void StoreIC::GenerateNormal(MacroAssembler* masm) {
+ Register receiver = ReceiverRegister();
+ Register name = NameRegister();
+ Register value = ValueRegister();
+ Register dictionary = rbx;
+
+ Label miss;
+
+ __ movp(dictionary, FieldOperand(receiver, JSObject::kPropertiesOffset));
+ GenerateDictionaryStore(masm, &miss, dictionary, name, value, r8, r9);
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->store_normal_hit(), 1);
+ __ ret(0);
+
+ __ bind(&miss);
+ __ IncrementCounter(counters->store_normal_miss(), 1);
+ GenerateMiss(masm);
+}
+
+
+void StoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
+ StrictMode strict_mode) {
+ // Return address is on the stack.
+ DCHECK(!rbx.is(ReceiverRegister()) && !rbx.is(NameRegister()) &&
+ !rbx.is(ValueRegister()));
+
+ __ PopReturnAddressTo(rbx);
+ __ Push(ReceiverRegister());
+ __ Push(NameRegister());
+ __ Push(ValueRegister());
+ __ Push(Smi::FromInt(strict_mode));
+ __ PushReturnAddressFrom(rbx);
+
+ // Do tail-call to runtime routine.
+ __ TailCallRuntime(Runtime::kSetProperty, 4, 1);
+}
+
+
+void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
+ StrictMode strict_mode) {
+ // Return address is on the stack.
+ DCHECK(!rbx.is(ReceiverRegister()) && !rbx.is(NameRegister()) &&
+ !rbx.is(ValueRegister()));
+
+ __ PopReturnAddressTo(rbx);
+ __ Push(ReceiverRegister());
+ __ Push(NameRegister());
+ __ Push(ValueRegister());
+ __ Push(Smi::FromInt(strict_mode)); // Strict mode.
+ __ PushReturnAddressFrom(rbx);
+
+ // Do tail-call to runtime routine.
+ __ TailCallRuntime(Runtime::kSetProperty, 4, 1);
+}
+
+
+void StoreIC::GenerateSlow(MacroAssembler* masm) {
+ // Return address is on the stack.
+ StoreIC_PushArgs(masm);
+
+ // Do tail-call to runtime routine.
+ ExternalReference ref(IC_Utility(kStoreIC_Slow), masm->isolate());
+ __ TailCallExternalReference(ref, 3, 1);
+}
+
+
+void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
+ // Return address is on the stack.
+ StoreIC_PushArgs(masm);
+
+ // Do tail-call to runtime routine.
+ ExternalReference ref(IC_Utility(kKeyedStoreIC_Slow), masm->isolate());
+ __ TailCallExternalReference(ref, 3, 1);
+}
+
+
+void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
+ // Return address is on the stack.
+ StoreIC_PushArgs(masm);
+
+ // Do tail-call to runtime routine.
+ ExternalReference ref =
+ ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
+ __ TailCallExternalReference(ref, 3, 1);
+}
+
+
+#undef __
+
+
+Condition CompareIC::ComputeCondition(Token::Value op) {
+ switch (op) {
+ case Token::EQ_STRICT:
+ case Token::EQ:
+ return equal;
+ case Token::LT:
+ return less;
+ case Token::GT:
+ return greater;
+ case Token::LTE:
+ return less_equal;
+ case Token::GTE:
+ return greater_equal;
+ default:
+ UNREACHABLE();
+ return no_condition;
+ }
+}
+
+
+bool CompareIC::HasInlinedSmiCode(Address address) {
+ // The address of the instruction following the call.
+ Address test_instruction_address =
+ address + Assembler::kCallTargetAddressOffset;
+
+ // If the instruction following the call is not a test al, nothing
+ // was inlined.
+ return *test_instruction_address == Assembler::kTestAlByte;
+}
+
+
+void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
+ // The address of the instruction following the call.
+ Address test_instruction_address =
+ address + Assembler::kCallTargetAddressOffset;
+
+ // If the instruction following the call is not a test al, nothing
+ // was inlined.
+ if (*test_instruction_address != Assembler::kTestAlByte) {
+ DCHECK(*test_instruction_address == Assembler::kNopByte);
+ return;
+ }
+
+ Address delta_address = test_instruction_address + 1;
+  // The delta is the distance from the test instruction back to the jump
+  // whose condition code is patched.
+ uint8_t delta = *reinterpret_cast<uint8_t*>(delta_address);
+ if (FLAG_trace_ic) {
+ PrintF("[ patching ic at %p, test=%p, delta=%d\n", address,
+ test_instruction_address, delta);
+ }
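+
+  // Layout of the patched code, as implied by the address arithmetic:
+  //   <short jcc>        <-- jmp_address == test_instruction_address - delta
+  //   ...
+  //   call <IC stub>
+  //   test al, <delta>   <-- address + Assembler::kCallTargetAddressOffset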
+
+ // Patch with a short conditional jump. Enabling means switching from a short
+ // jump-if-carry/not-carry to jump-if-zero/not-zero, whereas disabling is the
+ // reverse operation of that.
+ Address jmp_address = test_instruction_address - delta;
+ DCHECK((check == ENABLE_INLINED_SMI_CHECK)
+ ? (*jmp_address == Assembler::kJncShortOpcode ||
+ *jmp_address == Assembler::kJcShortOpcode)
+ : (*jmp_address == Assembler::kJnzShortOpcode ||
+ *jmp_address == Assembler::kJzShortOpcode));
+ Condition cc =
+ (check == ENABLE_INLINED_SMI_CHECK)
+ ? (*jmp_address == Assembler::kJncShortOpcode ? not_zero : zero)
+ : (*jmp_address == Assembler::kJnzShortOpcode ? not_carry : carry);
+ *jmp_address = static_cast<byte>(Assembler::kJccShortPrefix | cc);
+}
+}
+} // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_X64
--- /dev/null
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_X64
+
+#include "src/codegen.h"
+#include "src/ic/stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+
+static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
+ Code::Flags flags, StubCache::Table table,
+ Register receiver, Register name,
+ // The offset is scaled by 4, based on
+                       // kCacheIndexShift, which is two bits.
+ Register offset) {
+ // We need to scale up the pointer by 2 when the offset is scaled by less
+ // than the pointer size.
+ DCHECK(kPointerSize == kInt64Size
+ ? kPointerSizeLog2 == StubCache::kCacheIndexShift + 1
+ : kPointerSizeLog2 == StubCache::kCacheIndexShift);
+ ScaleFactor scale_factor = kPointerSize == kInt64Size ? times_2 : times_1;
+
+ DCHECK_EQ(3 * kPointerSize, sizeof(StubCache::Entry));
+ // The offset register holds the entry offset times four (due to masking
+ // and shifting optimizations).
+ ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
+ ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
+ Label miss;
+
+ // Multiply by 3 because there are 3 fields per entry (name, code, map).
+ __ leap(offset, Operand(offset, offset, times_2, 0));
+
+ __ LoadAddress(kScratchRegister, key_offset);
+
+ // Check that the key in the entry matches the name.
+  // The lea above tripled the scaled index held in the offset register;
+  // scale_factor supplies the remaining factor, so the operand addresses
+  // index * sizeof(Entry) bytes into the table.
+ __ cmpl(name, Operand(kScratchRegister, offset, scale_factor, 0));
+ __ j(not_equal, &miss);
+
+ // Get the map entry from the cache.
+ // Use key_offset + kPointerSize * 2, rather than loading map_offset.
+ __ movp(kScratchRegister,
+ Operand(kScratchRegister, offset, scale_factor, kPointerSize * 2));
+ __ cmpp(kScratchRegister, FieldOperand(receiver, HeapObject::kMapOffset));
+ __ j(not_equal, &miss);
+
+ // Get the code entry from the cache.
+ __ LoadAddress(kScratchRegister, value_offset);
+ __ movp(kScratchRegister, Operand(kScratchRegister, offset, scale_factor, 0));
+
+ // Check that the flags match what we're looking for.
+ __ movl(offset, FieldOperand(kScratchRegister, Code::kFlagsOffset));
+ __ andp(offset, Immediate(~Code::kFlagsNotUsedInLookup));
+ __ cmpl(offset, Immediate(flags));
+ __ j(not_equal, &miss);
+
+#ifdef DEBUG
+ if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
+ __ jmp(&miss);
+ } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
+ __ jmp(&miss);
+ }
+#endif
+
+ // Jump to the first instruction in the code stub.
+ __ addp(kScratchRegister, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ jmp(kScratchRegister);
+
+ __ bind(&miss);
+}
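
A worked version of the scaling arithmetic above (a sketch assuming x64: 8-byte pointers, kCacheIndexShift == 2, hence scale_factor == times_2). The hash leaves offset == index << 2 in the register; the lea triples it and the operand scale doubles it, landing exactly on 24-byte entries:

#include <cassert>
#include <cstdint>

int main() {
  const uintptr_t kPointerSize = 8;
  const uintptr_t kCacheIndexShift = 2;
  const uintptr_t kEntrySize = 3 * kPointerSize;   // {key, value, map}
  for (uintptr_t index = 0; index < 16; index++) {
    uintptr_t offset = index << kCacheIndexShift;  // as produced by the hash
    offset += offset * 2;                          // leap(offset, offset, times_2, 0)
    assert(offset * 2 == index * kEntrySize);      // operand scale_factor == times_2
  }
  return 0;
}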
+
+
+void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
+ Register receiver, Register name,
+ Register scratch, Register extra, Register extra2,
+ Register extra3) {
+ Isolate* isolate = masm->isolate();
+ Label miss;
+ USE(extra); // The register extra is not used on the X64 platform.
+ USE(extra2); // The register extra2 is not used on the X64 platform.
+  USE(extra3); // The register extra3 is not used on the X64 platform.
+ // Make sure that code is valid. The multiplying code relies on the
+ // entry size being 3 * kPointerSize.
+ DCHECK(sizeof(Entry) == 3 * kPointerSize);
+
+ // Make sure the flags do not name a specific type.
+ DCHECK(Code::ExtractTypeFromFlags(flags) == 0);
+
+ // Make sure that there are no register conflicts.
+ DCHECK(!scratch.is(receiver));
+ DCHECK(!scratch.is(name));
+
+  // Check scratch register is valid; extra2 and extra3 must be unused.
+ DCHECK(!scratch.is(no_reg));
+ DCHECK(extra2.is(no_reg));
+ DCHECK(extra3.is(no_reg));
+
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1);
+
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(receiver, &miss);
+
+ // Get the map of the receiver and compute the hash.
+ __ movl(scratch, FieldOperand(name, Name::kHashFieldOffset));
+ // Use only the low 32 bits of the map pointer.
+ __ addl(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
+ __ xorp(scratch, Immediate(flags));
+ // We mask out the last two bits because they are not part of the hash and
+ // they are always 01 for maps. Also in the two 'and' instructions below.
+ __ andp(scratch, Immediate((kPrimaryTableSize - 1) << kCacheIndexShift));
+
+ // Probe the primary table.
+ ProbeTable(isolate, masm, flags, kPrimary, receiver, name, scratch);
+
+ // Primary miss: Compute hash for secondary probe.
+ __ movl(scratch, FieldOperand(name, Name::kHashFieldOffset));
+ __ addl(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
+ __ xorp(scratch, Immediate(flags));
+ __ andp(scratch, Immediate((kPrimaryTableSize - 1) << kCacheIndexShift));
+ __ subl(scratch, name);
+ __ addl(scratch, Immediate(flags));
+ __ andp(scratch, Immediate((kSecondaryTableSize - 1) << kCacheIndexShift));
+
+ // Probe the secondary table.
+ ProbeTable(isolate, masm, flags, kSecondary, receiver, name, scratch);
+
+ // Cache miss: Fall-through and let caller handle the miss by
+ // entering the runtime system.
+ __ bind(&miss);
+ __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1);
+}
+
+
+#undef __
+}
+} // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_X64
#include "src/heap/sweeper-thread.h"
#include "src/heap-profiler.h"
#include "src/hydrogen.h"
+#include "src/ic/stub-cache.h"
#include "src/isolate-inl.h"
#include "src/lithium-allocator.h"
#include "src/log.h"
#include "src/scopeinfo.h"
#include "src/serialize.h"
#include "src/simulator.h"
-#include "src/stub-cache.h"
#include "src/version.h"
#include "src/vm-state-inl.h"
#include "src/scopeinfo.h"
#include "src/smart-pointers.h"
#include "src/string-search.h"
-#include "src/stub-cache.h"
#include "src/uri.h"
#include "src/utils.h"
#include "src/v8threads.h"
#include "src/deoptimizer.h"
#include "src/execution.h"
#include "src/global-handles.h"
-#include "src/ic-inl.h"
+#include "src/ic/ic.h"
+#include "src/ic/stub-cache.h"
#include "src/natives.h"
#include "src/objects.h"
#include "src/runtime.h"
#include "src/serialize.h"
#include "src/snapshot.h"
#include "src/snapshot-source-sink.h"
-#include "src/stub-cache.h"
#include "src/v8threads.h"
#include "src/version.h"
+++ /dev/null
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/v8.h"
-
-#include "src/api.h"
-#include "src/arguments.h"
-#include "src/ast.h"
-#include "src/code-stubs.h"
-#include "src/cpu-profiler.h"
-#include "src/gdb-jit.h"
-#include "src/ic-inl.h"
-#include "src/stub-cache.h"
-#include "src/type-info.h"
-#include "src/vm-state-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// -----------------------------------------------------------------------
-// StubCache implementation.
-
-
-StubCache::StubCache(Isolate* isolate)
- : isolate_(isolate) { }
-
-
-void StubCache::Initialize() {
- DCHECK(IsPowerOf2(kPrimaryTableSize));
- DCHECK(IsPowerOf2(kSecondaryTableSize));
- Clear();
-}
-
-
-static Code::Flags CommonStubCacheChecks(Name* name, Map* map,
- Code::Flags flags) {
- flags = Code::RemoveTypeAndHolderFromFlags(flags);
-
- // Validate that the name does not move on scavenge, and that we
- // can use identity checks instead of structural equality checks.
- DCHECK(!name->GetHeap()->InNewSpace(name));
- DCHECK(name->IsUniqueName());
-
- // The state bits are not important to the hash function because the stub
- // cache only contains handlers. Make sure that the bits are the least
- // significant so they will be the ones masked out.
- DCHECK_EQ(Code::HANDLER, Code::ExtractKindFromFlags(flags));
- STATIC_ASSERT((Code::ICStateField::kMask & 1) == 1);
-
- // Make sure that the code type and cache holder are not included in the hash.
- DCHECK(Code::ExtractTypeFromFlags(flags) == 0);
- DCHECK(Code::ExtractCacheHolderFromFlags(flags) == 0);
-
- return flags;
-}
-
-
-Code* StubCache::Set(Name* name, Map* map, Code* code) {
- Code::Flags flags = CommonStubCacheChecks(name, map, code->flags());
-
- // Compute the primary entry.
- int primary_offset = PrimaryOffset(name, flags, map);
- Entry* primary = entry(primary_, primary_offset);
- Code* old_code = primary->value;
-
- // If the primary entry has useful data in it, we retire it to the
- // secondary cache before overwriting it.
- if (old_code != isolate_->builtins()->builtin(Builtins::kIllegal)) {
- Map* old_map = primary->map;
- Code::Flags old_flags =
- Code::RemoveTypeAndHolderFromFlags(old_code->flags());
- int seed = PrimaryOffset(primary->key, old_flags, old_map);
- int secondary_offset = SecondaryOffset(primary->key, old_flags, seed);
- Entry* secondary = entry(secondary_, secondary_offset);
- *secondary = *primary;
- }
-
- // Update primary cache.
- primary->key = name;
- primary->value = code;
- primary->map = map;
- isolate()->counters()->megamorphic_stub_cache_updates()->Increment();
- return code;
-}
-
-
-Code* StubCache::Get(Name* name, Map* map, Code::Flags flags) {
- flags = CommonStubCacheChecks(name, map, flags);
- int primary_offset = PrimaryOffset(name, flags, map);
- Entry* primary = entry(primary_, primary_offset);
- if (primary->key == name && primary->map == map) {
- return primary->value;
- }
- int secondary_offset = SecondaryOffset(name, flags, primary_offset);
- Entry* secondary = entry(secondary_, secondary_offset);
- if (secondary->key == name && secondary->map == map) {
- return secondary->value;
- }
- return NULL;
-}
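
A minimal model (hypothetical types, not V8 code) of the update policy implemented by Set/Get above: a useful primary entry is retired to the secondary table before being overwritten, while secondary entries are simply clobbered. The real cache seeds the secondary hash with the primary offset; the model below uses an independent hash for brevity.

#include <array>
#include <functional>
#include <optional>
#include <string>

struct Entry { std::string key; int value = -1; };  // -1 marks an empty slot

struct TwoLevelCache {
  std::array<Entry, 8> primary;
  std::array<Entry, 4> secondary;

  static size_t Hash1(const std::string& k) { return std::hash<std::string>()(k) % 8; }
  static size_t Hash2(const std::string& k) { return (std::hash<std::string>()(k) / 8) % 4; }

  void Set(const std::string& key, int value) {
    Entry& p = primary[Hash1(key)];
    if (p.value != -1) secondary[Hash2(p.key)] = p;  // retire, don't discard
    p = {key, value};
  }

  std::optional<int> Get(const std::string& key) const {
    const Entry& p = primary[Hash1(key)];
    if (p.value != -1 && p.key == key) return p.value;
    const Entry& s = secondary[Hash2(key)];
    if (s.value != -1 && s.key == key) return s.value;
    return std::nullopt;
  }
};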
-
-
-Handle<Code> PropertyICCompiler::Find(Handle<Name> name,
- Handle<Map> stub_holder, Code::Kind kind,
- ExtraICState extra_state,
- CacheHolderFlag cache_holder) {
- Code::Flags flags = Code::ComputeMonomorphicFlags(
- kind, extra_state, cache_holder);
- Object* probe = stub_holder->FindInCodeCache(*name, flags);
- if (probe->IsCode()) return handle(Code::cast(probe));
- return Handle<Code>::null();
-}
-
-
-Handle<Code> PropertyHandlerCompiler::Find(Handle<Name> name,
- Handle<Map> stub_holder,
- Code::Kind kind,
- CacheHolderFlag cache_holder,
- Code::StubType type) {
- Code::Flags flags = Code::ComputeHandlerFlags(kind, type, cache_holder);
- Object* probe = stub_holder->FindInCodeCache(*name, flags);
- if (probe->IsCode()) return handle(Code::cast(probe));
- return Handle<Code>::null();
-}
-
-
-Handle<Code> PropertyICCompiler::ComputeMonomorphic(
- Code::Kind kind, Handle<Name> name, Handle<HeapType> type,
- Handle<Code> handler, ExtraICState extra_ic_state) {
- Isolate* isolate = name->GetIsolate();
- if (handler.is_identical_to(isolate->builtins()->LoadIC_Normal()) ||
- handler.is_identical_to(isolate->builtins()->StoreIC_Normal())) {
- name = isolate->factory()->normal_ic_symbol();
- }
-
- CacheHolderFlag flag;
- Handle<Map> stub_holder = IC::GetICCacheHolder(*type, isolate, &flag);
-
- Handle<Code> ic;
- // There are multiple string maps that all use the same prototype. That
- // prototype cannot hold multiple handlers, one for each of the string maps,
- // for a single name. Hence, turn off caching of the IC.
- bool can_be_cached = !type->Is(HeapType::String());
- if (can_be_cached) {
- ic = Find(name, stub_holder, kind, extra_ic_state, flag);
- if (!ic.is_null()) return ic;
- }
-
-#ifdef DEBUG
- if (kind == Code::KEYED_STORE_IC) {
- DCHECK(STANDARD_STORE ==
- KeyedStoreIC::GetKeyedAccessStoreMode(extra_ic_state));
- }
-#endif
-
- PropertyICCompiler ic_compiler(isolate, kind, extra_ic_state, flag);
- ic = ic_compiler.CompileMonomorphic(type, handler, name, PROPERTY);
-
- if (can_be_cached) Map::UpdateCodeCache(stub_holder, name, ic);
- return ic;
-}
-
-
-Handle<Code> NamedLoadHandlerCompiler::ComputeLoadNonexistent(
- Handle<Name> name, Handle<HeapType> type) {
- Isolate* isolate = name->GetIsolate();
- Handle<Map> receiver_map = IC::TypeToMap(*type, isolate);
- if (receiver_map->prototype()->IsNull()) {
- // TODO(jkummerow/verwaest): If there is no prototype and the property
- // is nonexistent, introduce a builtin to handle this (fast properties
- // -> return undefined, dictionary properties -> do negative lookup).
- return Handle<Code>();
- }
- CacheHolderFlag flag;
- Handle<Map> stub_holder_map =
- IC::GetHandlerCacheHolder(*type, false, isolate, &flag);
-
- // If no dictionary mode objects are present in the prototype chain, the load
- // nonexistent IC stub can be shared for all names for a given map and we use
- // the empty string for the map cache in that case. If there are dictionary
- // mode objects involved, we need to do negative lookups in the stub and
- // therefore the stub will be specific to the name.
- Handle<Name> cache_name =
- receiver_map->is_dictionary_map()
- ? name
- : Handle<Name>::cast(isolate->factory()->nonexistent_symbol());
- Handle<Map> current_map = stub_holder_map;
- Handle<JSObject> last(JSObject::cast(receiver_map->prototype()));
- while (true) {
- if (current_map->is_dictionary_map()) cache_name = name;
- if (current_map->prototype()->IsNull()) break;
- last = handle(JSObject::cast(current_map->prototype()));
- current_map = handle(last->map());
- }
- // Compile the stub that is either shared for all names or
- // name specific if there are global objects involved.
- Handle<Code> handler = PropertyHandlerCompiler::Find(
- cache_name, stub_holder_map, Code::LOAD_IC, flag, Code::FAST);
- if (!handler.is_null()) return handler;
-
- NamedLoadHandlerCompiler compiler(isolate, type, last, flag);
- handler = compiler.CompileLoadNonexistent(cache_name);
- Map::UpdateCodeCache(stub_holder_map, cache_name, handler);
- return handler;
-}
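
The name-selection rule above boils down to one chain walk, sketched here with hypothetical standalone types: the handler can be shared across all names (and cached under a sentinel symbol) unless some map on the prototype chain is dictionary-mode, in which case the compiled stub performs name-specific negative lookups and must be cached under the name itself.

struct MapLike {
  bool is_dictionary_map;
  const MapLike* prototype;  // nullptr terminates the chain
};

// True when the nonexistent-load handler (and its cache key) must be
// specific to the looked-up name rather than shared per map.
bool NeedsNameSpecificHandler(const MapLike* receiver_map) {
  for (const MapLike* m = receiver_map; m != nullptr; m = m->prototype) {
    if (m->is_dictionary_map) return true;
  }
  return false;
}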
-
-
-Handle<Code> PropertyICCompiler::ComputeKeyedLoadMonomorphic(
- Handle<Map> receiver_map) {
- Isolate* isolate = receiver_map->GetIsolate();
- Code::Flags flags = Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC);
- Handle<Name> name = isolate->factory()->KeyedLoadMonomorphic_string();
-
- Handle<Object> probe(receiver_map->FindInCodeCache(*name, flags), isolate);
- if (probe->IsCode()) return Handle<Code>::cast(probe);
-
- ElementsKind elements_kind = receiver_map->elements_kind();
- Handle<Code> stub;
- if (receiver_map->has_fast_elements() ||
- receiver_map->has_external_array_elements() ||
- receiver_map->has_fixed_typed_array_elements()) {
- stub = LoadFastElementStub(isolate,
- receiver_map->instance_type() == JS_ARRAY_TYPE,
- elements_kind).GetCode();
- } else {
- stub = FLAG_compiled_keyed_dictionary_loads
- ? LoadDictionaryElementStub(isolate).GetCode()
- : LoadDictionaryElementPlatformStub(isolate).GetCode();
- }
- PropertyICCompiler compiler(isolate, Code::KEYED_LOAD_IC);
- Handle<Code> code =
- compiler.CompileMonomorphic(HeapType::Class(receiver_map, isolate), stub,
- isolate->factory()->empty_string(), ELEMENT);
-
- Map::UpdateCodeCache(receiver_map, name, code);
- return code;
-}
-
-
-Handle<Code> PropertyICCompiler::ComputeKeyedStoreMonomorphic(
- Handle<Map> receiver_map, StrictMode strict_mode,
- KeyedAccessStoreMode store_mode) {
- Isolate* isolate = receiver_map->GetIsolate();
- ExtraICState extra_state =
- KeyedStoreIC::ComputeExtraICState(strict_mode, store_mode);
- Code::Flags flags =
- Code::ComputeMonomorphicFlags(Code::KEYED_STORE_IC, extra_state);
-
- DCHECK(store_mode == STANDARD_STORE ||
- store_mode == STORE_AND_GROW_NO_TRANSITION ||
- store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS ||
- store_mode == STORE_NO_TRANSITION_HANDLE_COW);
-
- Handle<String> name = isolate->factory()->KeyedStoreMonomorphic_string();
- Handle<Object> probe(receiver_map->FindInCodeCache(*name, flags), isolate);
- if (probe->IsCode()) return Handle<Code>::cast(probe);
-
- PropertyICCompiler compiler(isolate, Code::KEYED_STORE_IC, extra_state);
- Handle<Code> code =
- compiler.CompileKeyedStoreMonomorphic(receiver_map, store_mode);
-
- Map::UpdateCodeCache(receiver_map, name, code);
- DCHECK(KeyedStoreIC::GetKeyedAccessStoreMode(code->extra_ic_state())
- == store_mode);
- return code;
-}
-
-
-#define CALL_LOGGER_TAG(kind, type) (Logger::KEYED_##type)
-
-static void FillCache(Isolate* isolate, Handle<Code> code) {
- Handle<UnseededNumberDictionary> dictionary =
- UnseededNumberDictionary::Set(isolate->factory()->non_monomorphic_cache(),
- code->flags(),
- code);
- isolate->heap()->public_set_non_monomorphic_cache(*dictionary);
-}
-
-
-Code* PropertyICCompiler::FindPreMonomorphic(Isolate* isolate, Code::Kind kind,
- ExtraICState state) {
- Code::Flags flags = Code::ComputeFlags(kind, PREMONOMORPHIC, state);
- UnseededNumberDictionary* dictionary =
- isolate->heap()->non_monomorphic_cache();
- int entry = dictionary->FindEntry(isolate, flags);
- DCHECK(entry != -1);
- Object* code = dictionary->ValueAt(entry);
-  // This might be called during the marking phase of the collector,
- // hence the unchecked cast.
- return reinterpret_cast<Code*>(code);
-}
-
-
-Handle<Code> PropertyICCompiler::ComputeLoad(Isolate* isolate,
- InlineCacheState ic_state,
- ExtraICState extra_state) {
- Code::Flags flags = Code::ComputeFlags(Code::LOAD_IC, ic_state, extra_state);
- Handle<UnseededNumberDictionary> cache =
- isolate->factory()->non_monomorphic_cache();
- int entry = cache->FindEntry(isolate, flags);
- if (entry != -1) return Handle<Code>(Code::cast(cache->ValueAt(entry)));
-
- PropertyICCompiler compiler(isolate, Code::LOAD_IC);
- Handle<Code> code;
- if (ic_state == UNINITIALIZED) {
- code = compiler.CompileLoadInitialize(flags);
- } else if (ic_state == PREMONOMORPHIC) {
- code = compiler.CompileLoadPreMonomorphic(flags);
- } else if (ic_state == MEGAMORPHIC) {
- code = compiler.CompileLoadMegamorphic(flags);
- } else {
- UNREACHABLE();
- }
- FillCache(isolate, code);
- return code;
-}
-
-
-Handle<Code> PropertyICCompiler::ComputeStore(Isolate* isolate,
- InlineCacheState ic_state,
- ExtraICState extra_state) {
- Code::Flags flags = Code::ComputeFlags(Code::STORE_IC, ic_state, extra_state);
- Handle<UnseededNumberDictionary> cache =
- isolate->factory()->non_monomorphic_cache();
- int entry = cache->FindEntry(isolate, flags);
- if (entry != -1) return Handle<Code>(Code::cast(cache->ValueAt(entry)));
-
- PropertyICCompiler compiler(isolate, Code::STORE_IC);
- Handle<Code> code;
- if (ic_state == UNINITIALIZED) {
- code = compiler.CompileStoreInitialize(flags);
- } else if (ic_state == PREMONOMORPHIC) {
- code = compiler.CompileStorePreMonomorphic(flags);
- } else if (ic_state == GENERIC) {
- code = compiler.CompileStoreGeneric(flags);
- } else if (ic_state == MEGAMORPHIC) {
- code = compiler.CompileStoreMegamorphic(flags);
- } else {
- UNREACHABLE();
- }
-
- FillCache(isolate, code);
- return code;
-}
-
-
-Handle<Code> PropertyICCompiler::ComputeCompareNil(Handle<Map> receiver_map,
- CompareNilICStub* stub) {
- Isolate* isolate = receiver_map->GetIsolate();
- Handle<String> name(isolate->heap()->empty_string());
- if (!receiver_map->is_dictionary_map()) {
- Handle<Code> cached_ic =
- Find(name, receiver_map, Code::COMPARE_NIL_IC, stub->GetExtraICState());
- if (!cached_ic.is_null()) return cached_ic;
- }
-
- Code::FindAndReplacePattern pattern;
- pattern.Add(isolate->factory()->meta_map(), receiver_map);
- Handle<Code> ic = stub->GetCodeCopy(pattern);
-
- if (!receiver_map->is_dictionary_map()) {
- Map::UpdateCodeCache(receiver_map, name, ic);
- }
-
- return ic;
-}
-
-
-// TODO(verwaest): Change this method so it takes in a TypeHandleList.
-Handle<Code> PropertyICCompiler::ComputeKeyedLoadPolymorphic(
- MapHandleList* receiver_maps) {
- Isolate* isolate = receiver_maps->at(0)->GetIsolate();
- Code::Flags flags = Code::ComputeFlags(Code::KEYED_LOAD_IC, POLYMORPHIC);
- Handle<PolymorphicCodeCache> cache =
- isolate->factory()->polymorphic_code_cache();
- Handle<Object> probe = cache->Lookup(receiver_maps, flags);
- if (probe->IsCode()) return Handle<Code>::cast(probe);
-
- TypeHandleList types(receiver_maps->length());
- for (int i = 0; i < receiver_maps->length(); i++) {
- types.Add(HeapType::Class(receiver_maps->at(i), isolate));
- }
- CodeHandleList handlers(receiver_maps->length());
- ElementHandlerCompiler compiler(isolate);
- compiler.CompileElementHandlers(receiver_maps, &handlers);
- PropertyICCompiler ic_compiler(isolate, Code::KEYED_LOAD_IC);
- Handle<Code> code = ic_compiler.CompilePolymorphic(
- &types, &handlers, isolate->factory()->empty_string(), Code::NORMAL,
- ELEMENT);
-
- isolate->counters()->keyed_load_polymorphic_stubs()->Increment();
-
- PolymorphicCodeCache::Update(cache, receiver_maps, flags, code);
- return code;
-}
-
-
-Handle<Code> PropertyICCompiler::ComputePolymorphic(
- Code::Kind kind, TypeHandleList* types, CodeHandleList* handlers,
- int valid_types, Handle<Name> name, ExtraICState extra_ic_state) {
- Handle<Code> handler = handlers->at(0);
- Code::StubType type = valid_types == 1 ? handler->type() : Code::NORMAL;
- DCHECK(kind == Code::LOAD_IC || kind == Code::STORE_IC);
- PropertyICCompiler ic_compiler(name->GetIsolate(), kind, extra_ic_state);
- return ic_compiler.CompilePolymorphic(types, handlers, name, type, PROPERTY);
-}
-
-
-Handle<Code> PropertyICCompiler::ComputeKeyedStorePolymorphic(
- MapHandleList* receiver_maps, KeyedAccessStoreMode store_mode,
- StrictMode strict_mode) {
- Isolate* isolate = receiver_maps->at(0)->GetIsolate();
- DCHECK(store_mode == STANDARD_STORE ||
- store_mode == STORE_AND_GROW_NO_TRANSITION ||
- store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS ||
- store_mode == STORE_NO_TRANSITION_HANDLE_COW);
- Handle<PolymorphicCodeCache> cache =
- isolate->factory()->polymorphic_code_cache();
- ExtraICState extra_state = KeyedStoreIC::ComputeExtraICState(
- strict_mode, store_mode);
- Code::Flags flags =
- Code::ComputeFlags(Code::KEYED_STORE_IC, POLYMORPHIC, extra_state);
- Handle<Object> probe = cache->Lookup(receiver_maps, flags);
- if (probe->IsCode()) return Handle<Code>::cast(probe);
-
- PropertyICCompiler compiler(isolate, Code::KEYED_STORE_IC, extra_state);
- Handle<Code> code =
- compiler.CompileKeyedStorePolymorphic(receiver_maps, store_mode);
- PolymorphicCodeCache::Update(cache, receiver_maps, flags, code);
- return code;
-}
-
-
-void StubCache::Clear() {
- Code* empty = isolate_->builtins()->builtin(Builtins::kIllegal);
- for (int i = 0; i < kPrimaryTableSize; i++) {
- primary_[i].key = isolate()->heap()->empty_string();
- primary_[i].map = NULL;
- primary_[i].value = empty;
- }
- for (int j = 0; j < kSecondaryTableSize; j++) {
- secondary_[j].key = isolate()->heap()->empty_string();
- secondary_[j].map = NULL;
- secondary_[j].value = empty;
- }
-}
-
-
-void StubCache::CollectMatchingMaps(SmallMapList* types,
- Handle<Name> name,
- Code::Flags flags,
- Handle<Context> native_context,
- Zone* zone) {
- for (int i = 0; i < kPrimaryTableSize; i++) {
- if (primary_[i].key == *name) {
- Map* map = primary_[i].map;
-      // The map can be NULL if the stub is a constant-function call
-      // with a primitive receiver.
- if (map == NULL) continue;
-
- int offset = PrimaryOffset(*name, flags, map);
- if (entry(primary_, offset) == &primary_[i] &&
- !TypeFeedbackOracle::CanRetainOtherContext(map, *native_context)) {
- types->AddMapIfMissing(Handle<Map>(map), zone);
- }
- }
- }
-
- for (int i = 0; i < kSecondaryTableSize; i++) {
- if (secondary_[i].key == *name) {
- Map* map = secondary_[i].map;
-      // The map can be NULL if the stub is a constant-function call
-      // with a primitive receiver.
- if (map == NULL) continue;
-
- // Lookup in primary table and skip duplicates.
- int primary_offset = PrimaryOffset(*name, flags, map);
-
- // Lookup in secondary table and add matches.
- int offset = SecondaryOffset(*name, flags, primary_offset);
- if (entry(secondary_, offset) == &secondary_[i] &&
- !TypeFeedbackOracle::CanRetainOtherContext(map, *native_context)) {
- types->AddMapIfMissing(Handle<Map>(map), zone);
- }
- }
- }
-}
-
-
-// ------------------------------------------------------------------------
-// StubCompiler implementation.
-
-
-RUNTIME_FUNCTION(StoreCallbackProperty) {
- Handle<JSObject> receiver = args.at<JSObject>(0);
- Handle<JSObject> holder = args.at<JSObject>(1);
- Handle<ExecutableAccessorInfo> callback = args.at<ExecutableAccessorInfo>(2);
- Handle<Name> name = args.at<Name>(3);
- Handle<Object> value = args.at<Object>(4);
- HandleScope scope(isolate);
-
- DCHECK(callback->IsCompatibleReceiver(*receiver));
-
- Address setter_address = v8::ToCData<Address>(callback->setter());
- v8::AccessorNameSetterCallback fun =
- FUNCTION_CAST<v8::AccessorNameSetterCallback>(setter_address);
- DCHECK(fun != NULL);
-
- LOG(isolate, ApiNamedPropertyAccess("store", *receiver, *name));
- PropertyCallbackArguments custom_args(isolate, callback->data(), *receiver,
- *holder);
- custom_args.Call(fun, v8::Utils::ToLocal(name), v8::Utils::ToLocal(value));
- RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
- return *value;
-}
-
-
-/**
- * Attempts to load a property with an interceptor (which must be present),
- * but doesn't search the prototype chain.
- *
- * Returns |Heap::no_interceptor_result_sentinel()| if interceptor doesn't
- * provide any value for the given name.
- */
-RUNTIME_FUNCTION(LoadPropertyWithInterceptorOnly) {
- DCHECK(args.length() == NamedLoadHandlerCompiler::kInterceptorArgsLength);
- Handle<Name> name_handle =
- args.at<Name>(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex);
- Handle<InterceptorInfo> interceptor_info = args.at<InterceptorInfo>(
- NamedLoadHandlerCompiler::kInterceptorArgsInfoIndex);
-
- // TODO(rossberg): Support symbols in the API.
- if (name_handle->IsSymbol())
- return isolate->heap()->no_interceptor_result_sentinel();
- Handle<String> name = Handle<String>::cast(name_handle);
-
- Address getter_address = v8::ToCData<Address>(interceptor_info->getter());
- v8::NamedPropertyGetterCallback getter =
- FUNCTION_CAST<v8::NamedPropertyGetterCallback>(getter_address);
- DCHECK(getter != NULL);
-
- Handle<JSObject> receiver =
- args.at<JSObject>(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex);
- Handle<JSObject> holder =
- args.at<JSObject>(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex);
- PropertyCallbackArguments callback_args(
- isolate, interceptor_info->data(), *receiver, *holder);
- {
- // Use the interceptor getter.
- HandleScope scope(isolate);
- v8::Handle<v8::Value> r =
- callback_args.Call(getter, v8::Utils::ToLocal(name));
- RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
- if (!r.IsEmpty()) {
- Handle<Object> result = v8::Utils::OpenHandle(*r);
- result->VerifyApiCallResultType();
- return *v8::Utils::OpenHandle(*r);
- }
- }
-
- return isolate->heap()->no_interceptor_result_sentinel();
-}
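
A sketch (hypothetical types) of the sentinel protocol documented above: undefined is a legitimate property value, so "the interceptor declined" must be signalled out of band, via a dedicated object whose identity the caller tests before falling back to the regular lookup.

struct Object {};
static Object undefined_value;
static Object no_interceptor_result_sentinel;

// Stand-in for the runtime entry: returns the sentinel when the interceptor
// produces no value, and a real (possibly undefined) value otherwise.
Object* LoadWithInterceptorOnly(bool interceptor_has_value) {
  if (!interceptor_has_value) return &no_interceptor_result_sentinel;
  return &undefined_value;
}

bool InterceptorProvidedValue(Object* result) {
  return result != &no_interceptor_result_sentinel;
}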
-
-
-static Object* ThrowReferenceError(Isolate* isolate, Name* name) {
- // If the load is non-contextual, just return the undefined result.
- // Note that both keyed and non-keyed loads may end up here.
- HandleScope scope(isolate);
- LoadIC ic(IC::NO_EXTRA_FRAME, isolate);
- if (ic.contextual_mode() != CONTEXTUAL) {
- return isolate->heap()->undefined_value();
- }
-
- // Throw a reference error.
- Handle<Name> name_handle(name);
- Handle<Object> error =
- isolate->factory()->NewReferenceError("not_defined",
- HandleVector(&name_handle, 1));
- return isolate->Throw(*error);
-}
-
-
-/**
- * Loads a property with an interceptor performing post interceptor
- * lookup if interceptor failed.
- */
-RUNTIME_FUNCTION(LoadPropertyWithInterceptor) {
- HandleScope scope(isolate);
- DCHECK(args.length() == NamedLoadHandlerCompiler::kInterceptorArgsLength);
- Handle<Name> name =
- args.at<Name>(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex);
- Handle<JSObject> receiver =
- args.at<JSObject>(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex);
- Handle<JSObject> holder =
- args.at<JSObject>(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex);
-
- Handle<Object> result;
- LookupIterator it(receiver, name, holder);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result, JSObject::GetProperty(&it));
-
- if (it.IsFound()) return *result;
-
- return ThrowReferenceError(isolate, Name::cast(args[0]));
-}
-
-
-RUNTIME_FUNCTION(StorePropertyWithInterceptor) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 3);
- StoreIC ic(IC::NO_EXTRA_FRAME, isolate);
- Handle<JSObject> receiver = args.at<JSObject>(0);
- Handle<Name> name = args.at<Name>(1);
- Handle<Object> value = args.at<Object>(2);
-#ifdef DEBUG
- PrototypeIterator iter(isolate, receiver,
- PrototypeIterator::START_AT_RECEIVER);
- bool found = false;
- while (!iter.IsAtEnd(PrototypeIterator::END_AT_NON_HIDDEN)) {
- Handle<Object> current = PrototypeIterator::GetCurrent(iter);
- if (current->IsJSObject() &&
- Handle<JSObject>::cast(current)->HasNamedInterceptor()) {
- found = true;
- break;
- }
- }
- DCHECK(found);
-#endif
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result,
- JSObject::SetProperty(receiver, name, value, ic.strict_mode()));
- return *result;
-}
-
-
-RUNTIME_FUNCTION(LoadElementWithInterceptor) {
- HandleScope scope(isolate);
- Handle<JSObject> receiver = args.at<JSObject>(0);
- DCHECK(args.smi_at(1) >= 0);
- uint32_t index = args.smi_at(1);
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result,
- JSObject::GetElementWithInterceptor(receiver, receiver, index));
- return *result;
-}
-
-
-Handle<Code> PropertyICCompiler::CompileLoadInitialize(Code::Flags flags) {
- LoadIC::GenerateInitialize(masm());
- Handle<Code> code = GetCodeWithFlags(flags, "CompileLoadInitialize");
- PROFILE(isolate(),
- CodeCreateEvent(Logger::LOAD_INITIALIZE_TAG, *code, 0));
- return code;
-}
-
-
-Handle<Code> PropertyICCompiler::CompileLoadPreMonomorphic(Code::Flags flags) {
- LoadIC::GeneratePreMonomorphic(masm());
- Handle<Code> code = GetCodeWithFlags(flags, "CompileLoadPreMonomorphic");
- PROFILE(isolate(),
- CodeCreateEvent(Logger::LOAD_PREMONOMORPHIC_TAG, *code, 0));
- return code;
-}
-
-
-Handle<Code> PropertyICCompiler::CompileLoadMegamorphic(Code::Flags flags) {
- LoadIC::GenerateMegamorphic(masm());
- Handle<Code> code = GetCodeWithFlags(flags, "CompileLoadMegamorphic");
- PROFILE(isolate(),
- CodeCreateEvent(Logger::LOAD_MEGAMORPHIC_TAG, *code, 0));
- return code;
-}
-
-
-Handle<Code> PropertyICCompiler::CompileStoreInitialize(Code::Flags flags) {
- StoreIC::GenerateInitialize(masm());
- Handle<Code> code = GetCodeWithFlags(flags, "CompileStoreInitialize");
- PROFILE(isolate(),
- CodeCreateEvent(Logger::STORE_INITIALIZE_TAG, *code, 0));
- return code;
-}
-
-
-Handle<Code> PropertyICCompiler::CompileStorePreMonomorphic(Code::Flags flags) {
- StoreIC::GeneratePreMonomorphic(masm());
- Handle<Code> code = GetCodeWithFlags(flags, "CompileStorePreMonomorphic");
- PROFILE(isolate(),
- CodeCreateEvent(Logger::STORE_PREMONOMORPHIC_TAG, *code, 0));
- return code;
-}
-
-
-Handle<Code> PropertyICCompiler::CompileStoreGeneric(Code::Flags flags) {
- ExtraICState extra_state = Code::ExtractExtraICStateFromFlags(flags);
- StrictMode strict_mode = StoreIC::GetStrictMode(extra_state);
- StoreIC::GenerateRuntimeSetProperty(masm(), strict_mode);
- Handle<Code> code = GetCodeWithFlags(flags, "CompileStoreGeneric");
- PROFILE(isolate(),
- CodeCreateEvent(Logger::STORE_GENERIC_TAG, *code, 0));
- return code;
-}
-
-
-Handle<Code> PropertyICCompiler::CompileStoreMegamorphic(Code::Flags flags) {
- StoreIC::GenerateMegamorphic(masm());
- Handle<Code> code = GetCodeWithFlags(flags, "CompileStoreMegamorphic");
- PROFILE(isolate(),
- CodeCreateEvent(Logger::STORE_MEGAMORPHIC_TAG, *code, 0));
- return code;
-}
-
-
-#undef CALL_LOGGER_TAG
-
-
-Handle<Code> PropertyAccessCompiler::GetCodeWithFlags(Code::Flags flags,
- const char* name) {
- // Create code object in the heap.
- CodeDesc desc;
- masm()->GetCode(&desc);
- Handle<Code> code = factory()->NewCode(desc, flags, masm()->CodeObject());
- if (code->IsCodeStubOrIC()) code->set_stub_key(CodeStub::NoCacheKey());
-#ifdef ENABLE_DISASSEMBLER
- if (FLAG_print_code_stubs) {
- OFStream os(stdout);
- code->Disassemble(name, os);
- }
-#endif
- return code;
-}
-
-
-Handle<Code> PropertyAccessCompiler::GetCodeWithFlags(Code::Flags flags,
- Handle<Name> name) {
- return (FLAG_print_code_stubs && !name.is_null() && name->IsString())
- ? GetCodeWithFlags(flags, Handle<String>::cast(name)->ToCString().get())
- : GetCodeWithFlags(flags, NULL);
-}
-
-
-#define __ ACCESS_MASM(masm())
-
-
-Register NamedLoadHandlerCompiler::FrontendHeader(Register object_reg,
- Handle<Name> name,
- Label* miss) {
- PrototypeCheckType check_type = CHECK_ALL_MAPS;
- int function_index = -1;
- if (type()->Is(HeapType::String())) {
- function_index = Context::STRING_FUNCTION_INDEX;
- } else if (type()->Is(HeapType::Symbol())) {
- function_index = Context::SYMBOL_FUNCTION_INDEX;
- } else if (type()->Is(HeapType::Number())) {
- function_index = Context::NUMBER_FUNCTION_INDEX;
- } else if (type()->Is(HeapType::Boolean())) {
- function_index = Context::BOOLEAN_FUNCTION_INDEX;
- } else {
- check_type = SKIP_RECEIVER;
- }
-
- if (check_type == CHECK_ALL_MAPS) {
- GenerateDirectLoadGlobalFunctionPrototype(
- masm(), function_index, scratch1(), miss);
- Object* function = isolate()->native_context()->get(function_index);
- Object* prototype = JSFunction::cast(function)->instance_prototype();
- set_type_for_object(handle(prototype, isolate()));
- object_reg = scratch1();
- }
-
- // Check that the maps starting from the prototype haven't changed.
- return CheckPrototypes(object_reg, scratch1(), scratch2(), scratch3(), name,
- miss, check_type);
-}
-
-
-// Frontend for store uses the name register. It has to be restored before a
-// miss.
-Register NamedStoreHandlerCompiler::FrontendHeader(Register object_reg,
- Handle<Name> name,
- Label* miss) {
- return CheckPrototypes(object_reg, this->name(), scratch1(), scratch2(), name,
- miss, SKIP_RECEIVER);
-}
-
-
-bool PropertyICCompiler::IncludesNumberType(TypeHandleList* types) {
- for (int i = 0; i < types->length(); ++i) {
- if (types->at(i)->Is(HeapType::Number())) return true;
- }
- return false;
-}
-
-
-Register PropertyHandlerCompiler::Frontend(Register object_reg,
- Handle<Name> name) {
- Label miss;
- Register reg = FrontendHeader(object_reg, name, &miss);
- FrontendFooter(name, &miss);
- return reg;
-}
-
-
-void PropertyHandlerCompiler::NonexistentFrontendHeader(Handle<Name> name,
- Label* miss,
- Register scratch1,
- Register scratch2) {
- Register holder_reg;
- Handle<Map> last_map;
- if (holder().is_null()) {
- holder_reg = receiver();
- last_map = IC::TypeToMap(*type(), isolate());
- // If |type| has null as its prototype, |holder()| is
- // Handle<JSObject>::null().
- DCHECK(last_map->prototype() == isolate()->heap()->null_value());
- } else {
- holder_reg = FrontendHeader(receiver(), name, miss);
- last_map = handle(holder()->map());
- }
-
- if (last_map->is_dictionary_map()) {
- if (last_map->IsJSGlobalObjectMap()) {
- Handle<JSGlobalObject> global =
- holder().is_null()
- ? Handle<JSGlobalObject>::cast(type()->AsConstant()->Value())
- : Handle<JSGlobalObject>::cast(holder());
- GenerateCheckPropertyCell(masm(), global, name, scratch1, miss);
- } else {
- if (!name->IsUniqueName()) {
- DCHECK(name->IsString());
- name = factory()->InternalizeString(Handle<String>::cast(name));
- }
- DCHECK(holder().is_null() ||
- holder()->property_dictionary()->FindEntry(name) ==
- NameDictionary::kNotFound);
- GenerateDictionaryNegativeLookup(masm(), miss, holder_reg, name, scratch1,
- scratch2);
- }
- }
-}
-
-
-Handle<Code> NamedLoadHandlerCompiler::CompileLoadField(Handle<Name> name,
- FieldIndex field) {
- Register reg = Frontend(receiver(), name);
- __ Move(receiver(), reg);
- LoadFieldStub stub(isolate(), field);
- GenerateTailCall(masm(), stub.GetCode());
- return GetCode(kind(), Code::FAST, name);
-}
-
-
-Handle<Code> NamedLoadHandlerCompiler::CompileLoadConstant(Handle<Name> name,
- int constant_index) {
- Register reg = Frontend(receiver(), name);
- __ Move(receiver(), reg);
- LoadConstantStub stub(isolate(), constant_index);
- GenerateTailCall(masm(), stub.GetCode());
- return GetCode(kind(), Code::FAST, name);
-}
-
-
-Handle<Code> NamedLoadHandlerCompiler::CompileLoadNonexistent(
- Handle<Name> name) {
- Label miss;
- NonexistentFrontendHeader(name, &miss, scratch2(), scratch3());
- GenerateLoadConstant(isolate()->factory()->undefined_value());
- FrontendFooter(name, &miss);
- return GetCode(kind(), Code::FAST, name);
-}
-
-
-Handle<Code> NamedLoadHandlerCompiler::CompileLoadCallback(
- Handle<Name> name, Handle<ExecutableAccessorInfo> callback) {
- Register reg = Frontend(receiver(), name);
- GenerateLoadCallback(reg, callback);
- return GetCode(kind(), Code::FAST, name);
-}
-
-
-Handle<Code> NamedLoadHandlerCompiler::CompileLoadCallback(
- Handle<Name> name, const CallOptimization& call_optimization) {
- DCHECK(call_optimization.is_simple_api_call());
- Frontend(receiver(), name);
- Handle<Map> receiver_map = IC::TypeToMap(*type(), isolate());
- GenerateFastApiCall(
- masm(), call_optimization, receiver_map,
- receiver(), scratch1(), false, 0, NULL);
- return GetCode(kind(), Code::FAST, name);
-}
-
-
-Handle<Code> NamedLoadHandlerCompiler::CompileLoadInterceptor(
- LookupIterator* it) {
-  // So far the most popular follow-ups for interceptor loads are FIELD and
- // ExecutableAccessorInfo, so inline only them. Other cases may be added
- // later.
- bool inline_followup = it->state() == LookupIterator::PROPERTY;
- if (inline_followup) {
- switch (it->property_kind()) {
- case LookupIterator::DATA:
- inline_followup = it->property_details().type() == FIELD;
- break;
- case LookupIterator::ACCESSOR: {
- Handle<Object> accessors = it->GetAccessors();
- inline_followup = accessors->IsExecutableAccessorInfo();
- if (!inline_followup) break;
- Handle<ExecutableAccessorInfo> info =
- Handle<ExecutableAccessorInfo>::cast(accessors);
- inline_followup = info->getter() != NULL &&
- ExecutableAccessorInfo::IsCompatibleReceiverType(
- isolate(), info, type());
- }
- }
- }
-
- Register reg = Frontend(receiver(), it->name());
- if (inline_followup) {
- // TODO(368): Compile in the whole chain: all the interceptors in
- // prototypes and ultimate answer.
- GenerateLoadInterceptorWithFollowup(it, reg);
- } else {
- GenerateLoadInterceptor(reg);
- }
- return GetCode(kind(), Code::FAST, it->name());
-}
-
-
-void NamedLoadHandlerCompiler::GenerateLoadPostInterceptor(
- LookupIterator* it, Register interceptor_reg) {
- Handle<JSObject> real_named_property_holder(it->GetHolder<JSObject>());
-
- set_type_for_object(holder());
- set_holder(real_named_property_holder);
- Register reg = Frontend(interceptor_reg, it->name());
-
- switch (it->property_kind()) {
- case LookupIterator::DATA: {
- DCHECK_EQ(FIELD, it->property_details().type());
- __ Move(receiver(), reg);
- LoadFieldStub stub(isolate(), it->GetFieldIndex());
- GenerateTailCall(masm(), stub.GetCode());
- break;
- }
- case LookupIterator::ACCESSOR:
- Handle<ExecutableAccessorInfo> info =
- Handle<ExecutableAccessorInfo>::cast(it->GetAccessors());
- DCHECK_NE(NULL, info->getter());
- GenerateLoadCallback(reg, info);
- }
-}
-
-
-Handle<Code> PropertyICCompiler::CompileMonomorphic(Handle<HeapType> type,
- Handle<Code> handler,
- Handle<Name> name,
- IcCheckType check) {
- TypeHandleList types(1);
- CodeHandleList handlers(1);
- types.Add(type);
- handlers.Add(handler);
- Code::StubType stub_type = handler->type();
- return CompilePolymorphic(&types, &handlers, name, stub_type, check);
-}
-
-
-Handle<Code> NamedLoadHandlerCompiler::CompileLoadViaGetter(
- Handle<Name> name, Handle<JSFunction> getter) {
- Frontend(receiver(), name);
- GenerateLoadViaGetter(masm(), type(), receiver(), getter);
- return GetCode(kind(), Code::FAST, name);
-}
-
-
-// TODO(verwaest): Cleanup. holder() is actually the receiver.
-Handle<Code> NamedStoreHandlerCompiler::CompileStoreTransition(
- Handle<Map> transition, Handle<Name> name) {
- Label miss, slow;
-
- // Ensure no transitions to deprecated maps are followed.
- __ CheckMapDeprecated(transition, scratch1(), &miss);
-
- // Check that we are allowed to write this.
- bool is_nonexistent = holder()->map() == transition->GetBackPointer();
- if (is_nonexistent) {
- // Find the top object.
- Handle<JSObject> last;
- PrototypeIterator iter(isolate(), holder());
- while (!iter.IsAtEnd()) {
- last = Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter));
- iter.Advance();
- }
- if (!last.is_null()) set_holder(last);
- NonexistentFrontendHeader(name, &miss, scratch1(), scratch2());
- } else {
- FrontendHeader(receiver(), name, &miss);
- DCHECK(holder()->HasFastProperties());
- }
-
- GenerateStoreTransition(transition, name, receiver(), this->name(), value(),
- scratch1(), scratch2(), scratch3(), &miss, &slow);
-
- GenerateRestoreName(&miss, name);
- TailCallBuiltin(masm(), MissBuiltin(kind()));
-
- GenerateRestoreName(&slow, name);
- TailCallBuiltin(masm(), SlowBuiltin(kind()));
- return GetCode(kind(), Code::FAST, name);
-}
-
-
-Handle<Code> NamedStoreHandlerCompiler::CompileStoreField(LookupIterator* it) {
- Label miss;
- GenerateStoreField(it, value(), &miss);
- __ bind(&miss);
- TailCallBuiltin(masm(), MissBuiltin(kind()));
- return GetCode(kind(), Code::FAST, it->name());
-}
-
-
-Handle<Code> NamedStoreHandlerCompiler::CompileStoreViaSetter(
- Handle<JSObject> object, Handle<Name> name, Handle<JSFunction> setter) {
- Frontend(receiver(), name);
- GenerateStoreViaSetter(masm(), type(), receiver(), setter);
-
- return GetCode(kind(), Code::FAST, name);
-}
-
-
-Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
- Handle<JSObject> object, Handle<Name> name,
- const CallOptimization& call_optimization) {
- Frontend(receiver(), name);
- Register values[] = { value() };
- GenerateFastApiCall(
- masm(), call_optimization, handle(object->map()),
- receiver(), scratch1(), true, 1, values);
- return GetCode(kind(), Code::FAST, name);
-}
-
-
-Handle<Code> PropertyICCompiler::CompileKeyedStoreMonomorphic(
- Handle<Map> receiver_map, KeyedAccessStoreMode store_mode) {
- ElementsKind elements_kind = receiver_map->elements_kind();
- bool is_jsarray = receiver_map->instance_type() == JS_ARRAY_TYPE;
- Handle<Code> stub;
- if (receiver_map->has_fast_elements() ||
- receiver_map->has_external_array_elements() ||
- receiver_map->has_fixed_typed_array_elements()) {
- stub = StoreFastElementStub(isolate(), is_jsarray, elements_kind,
- store_mode).GetCode();
- } else {
- stub = StoreElementStub(isolate(), is_jsarray, elements_kind, store_mode)
- .GetCode();
- }
-
- __ DispatchMap(receiver(), scratch1(), receiver_map, stub, DO_SMI_CHECK);
-
- TailCallBuiltin(masm(), Builtins::kKeyedStoreIC_Miss);
-
- return GetCode(kind(), Code::NORMAL, factory()->empty_string());
-}
-
-
-#undef __
-
-
-void PropertyAccessCompiler::TailCallBuiltin(MacroAssembler* masm,
- Builtins::Name name) {
- Handle<Code> code(masm->isolate()->builtins()->builtin(name));
- GenerateTailCall(masm, code);
-}
-
-
-Register* PropertyAccessCompiler::GetCallingConvention(Code::Kind kind) {
- if (kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC) {
- return load_calling_convention();
- }
- DCHECK(kind == Code::STORE_IC || kind == Code::KEYED_STORE_IC);
- return store_calling_convention();
-}
-
-
-Handle<Code> PropertyICCompiler::GetCode(Code::Kind kind, Code::StubType type,
- Handle<Name> name,
- InlineCacheState state) {
- Code::Flags flags =
- Code::ComputeFlags(kind, state, extra_ic_state_, type, cache_holder());
- Handle<Code> code = GetCodeWithFlags(flags, name);
- IC::RegisterWeakMapDependency(code);
- PROFILE(isolate(), CodeCreateEvent(log_kind(code), *code, *name));
- return code;
-}
-
-
-Handle<Code> PropertyHandlerCompiler::GetCode(Code::Kind kind,
- Code::StubType type,
- Handle<Name> name) {
- Code::Flags flags = Code::ComputeHandlerFlags(kind, type, cache_holder());
- Handle<Code> code = GetCodeWithFlags(flags, name);
- PROFILE(isolate(), CodeCreateEvent(Logger::STUB_TAG, *code, *name));
- return code;
-}
-
-
-void ElementHandlerCompiler::CompileElementHandlers(
- MapHandleList* receiver_maps, CodeHandleList* handlers) {
- for (int i = 0; i < receiver_maps->length(); ++i) {
- Handle<Map> receiver_map = receiver_maps->at(i);
- Handle<Code> cached_stub;
-
- if ((receiver_map->instance_type() & kNotStringTag) == 0) {
- cached_stub = isolate()->builtins()->KeyedLoadIC_String();
- } else if (receiver_map->instance_type() < FIRST_JS_RECEIVER_TYPE) {
- cached_stub = isolate()->builtins()->KeyedLoadIC_Slow();
- } else {
- bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
- ElementsKind elements_kind = receiver_map->elements_kind();
-
- if (IsFastElementsKind(elements_kind) ||
- IsExternalArrayElementsKind(elements_kind) ||
- IsFixedTypedArrayElementsKind(elements_kind)) {
- cached_stub = LoadFastElementStub(isolate(), is_js_array, elements_kind)
- .GetCode();
- } else if (elements_kind == SLOPPY_ARGUMENTS_ELEMENTS) {
- cached_stub = isolate()->builtins()->KeyedLoadIC_SloppyArguments();
- } else {
- DCHECK(elements_kind == DICTIONARY_ELEMENTS);
- cached_stub = LoadDictionaryElementStub(isolate()).GetCode();
- }
- }
-
- handlers->Add(cached_stub);
- }
-}
-
-
-Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
- MapHandleList* receiver_maps, KeyedAccessStoreMode store_mode) {
- // Collect MONOMORPHIC stubs for all |receiver_maps|.
- CodeHandleList handlers(receiver_maps->length());
- MapHandleList transitioned_maps(receiver_maps->length());
- for (int i = 0; i < receiver_maps->length(); ++i) {
- Handle<Map> receiver_map(receiver_maps->at(i));
- Handle<Code> cached_stub;
- Handle<Map> transitioned_map =
- receiver_map->FindTransitionedMap(receiver_maps);
-
- // TODO(mvstanton): The code below is doing pessimistic elements
- // transitions. I would like to stop doing that and rely on Allocation Site
- // Tracking to do a better job of ensuring the data types are what they need
-    // to be. Not all the pieces are in place yet, so pessimistic elements
-    // transitions are still important for performance.
- bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
- ElementsKind elements_kind = receiver_map->elements_kind();
- if (!transitioned_map.is_null()) {
- cached_stub =
- ElementsTransitionAndStoreStub(isolate(), elements_kind,
- transitioned_map->elements_kind(),
- is_js_array, store_mode).GetCode();
- } else if (receiver_map->instance_type() < FIRST_JS_RECEIVER_TYPE) {
- cached_stub = isolate()->builtins()->KeyedStoreIC_Slow();
- } else {
- if (receiver_map->has_fast_elements() ||
- receiver_map->has_external_array_elements() ||
- receiver_map->has_fixed_typed_array_elements()) {
- cached_stub = StoreFastElementStub(isolate(), is_js_array,
- elements_kind, store_mode).GetCode();
- } else {
- cached_stub = StoreElementStub(isolate(), is_js_array, elements_kind,
- store_mode).GetCode();
- }
- }
- DCHECK(!cached_stub.is_null());
- handlers.Add(cached_stub);
- transitioned_maps.Add(transitioned_map);
- }
-
- Handle<Code> code = CompileKeyedStorePolymorphic(receiver_maps, &handlers,
- &transitioned_maps);
- isolate()->counters()->keyed_store_polymorphic_stubs()->Increment();
- PROFILE(isolate(), CodeCreateEvent(log_kind(code), *code, 0));
- return code;
-}
-
-
-void ElementHandlerCompiler::GenerateStoreDictionaryElement(
- MacroAssembler* masm) {
- KeyedStoreIC::GenerateSlow(masm);
-}
-
-
-CallOptimization::CallOptimization(Handle<JSFunction> function) {
- Initialize(function);
-}
-
-
-Handle<JSObject> CallOptimization::LookupHolderOfExpectedType(
- Handle<Map> object_map,
- HolderLookup* holder_lookup) const {
- DCHECK(is_simple_api_call());
- if (!object_map->IsJSObjectMap()) {
- *holder_lookup = kHolderNotFound;
- return Handle<JSObject>::null();
- }
- if (expected_receiver_type_.is_null() ||
- expected_receiver_type_->IsTemplateFor(*object_map)) {
- *holder_lookup = kHolderIsReceiver;
- return Handle<JSObject>::null();
- }
- while (true) {
- if (!object_map->prototype()->IsJSObject()) break;
- Handle<JSObject> prototype(JSObject::cast(object_map->prototype()));
- if (!prototype->map()->is_hidden_prototype()) break;
- object_map = handle(prototype->map());
- if (expected_receiver_type_->IsTemplateFor(*object_map)) {
- *holder_lookup = kHolderFound;
- return prototype;
- }
- }
- *holder_lookup = kHolderNotFound;
- return Handle<JSObject>::null();
-}
-
-
-bool CallOptimization::IsCompatibleReceiver(Handle<Object> receiver,
- Handle<JSObject> holder) const {
- DCHECK(is_simple_api_call());
- if (!receiver->IsJSObject()) return false;
- Handle<Map> map(JSObject::cast(*receiver)->map());
- HolderLookup holder_lookup;
- Handle<JSObject> api_holder =
- LookupHolderOfExpectedType(map, &holder_lookup);
- switch (holder_lookup) {
- case kHolderNotFound:
- return false;
- case kHolderIsReceiver:
- return true;
- case kHolderFound:
- if (api_holder.is_identical_to(holder)) return true;
- // Check if holder is in prototype chain of api_holder.
- {
- JSObject* object = *api_holder;
- while (true) {
- Object* prototype = object->map()->prototype();
- if (!prototype->IsJSObject()) return false;
- if (prototype == *holder) return true;
- object = JSObject::cast(prototype);
- }
- }
- break;
- }
- UNREACHABLE();
- return false;
-}
-
-
-void CallOptimization::Initialize(Handle<JSFunction> function) {
- constant_function_ = Handle<JSFunction>::null();
- is_simple_api_call_ = false;
- expected_receiver_type_ = Handle<FunctionTemplateInfo>::null();
- api_call_info_ = Handle<CallHandlerInfo>::null();
-
- if (function.is_null() || !function->is_compiled()) return;
-
- constant_function_ = function;
- AnalyzePossibleApiFunction(function);
-}
-
-
-void CallOptimization::AnalyzePossibleApiFunction(Handle<JSFunction> function) {
- if (!function->shared()->IsApiFunction()) return;
- Handle<FunctionTemplateInfo> info(function->shared()->get_api_func_data());
-
- // Require a C++ callback.
- if (info->call_code()->IsUndefined()) return;
- api_call_info_ =
- Handle<CallHandlerInfo>(CallHandlerInfo::cast(info->call_code()));
-
- // Accept signatures that either have no restrictions at all or
- // only have restrictions on the receiver.
- if (!info->signature()->IsUndefined()) {
- Handle<SignatureInfo> signature =
- Handle<SignatureInfo>(SignatureInfo::cast(info->signature()));
- if (!signature->args()->IsUndefined()) return;
- if (!signature->receiver()->IsUndefined()) {
- expected_receiver_type_ =
- Handle<FunctionTemplateInfo>(
- FunctionTemplateInfo::cast(signature->receiver()));
- }
- }
-
- is_simple_api_call_ = true;
-}
-
-
-} } // namespace v8::internal
+++ /dev/null
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_STUB_CACHE_H_
-#define V8_STUB_CACHE_H_
-
-#include "src/allocation.h"
-#include "src/arguments.h"
-#include "src/code-stubs.h"
-#include "src/ic-inl.h"
-#include "src/macro-assembler.h"
-#include "src/objects.h"
-#include "src/zone-inl.h"
-
-namespace v8 {
-namespace internal {
-
-
-// The stub cache is used for megamorphic property accesses.
-// It maps (map, name, type) to property access handlers. The cache does not
-// need explicit invalidation when a prototype chain is modified, since the
-// handlers verify the chain.
-
-
-class CallOptimization;
-class SmallMapList;
-class StubCache;
-
-
-class SCTableReference {
- public:
- Address address() const { return address_; }
-
- private:
- explicit SCTableReference(Address address) : address_(address) {}
-
- Address address_;
-
- friend class StubCache;
-};
-
-
-class StubCache {
- public:
- struct Entry {
- Name* key;
- Code* value;
- Map* map;
- };
-
- void Initialize();
- // Access cache for entry hash(name, map).
- Code* Set(Name* name, Map* map, Code* code);
- Code* Get(Name* name, Map* map, Code::Flags flags);
- // Clear the lookup table (@ mark compact collection).
- void Clear();
- // Collect all maps that match the name and flags.
- void CollectMatchingMaps(SmallMapList* types,
- Handle<Name> name,
- Code::Flags flags,
- Handle<Context> native_context,
- Zone* zone);
- // Generate code for probing the stub cache table.
- // Arguments extra, extra2 and extra3 may be used to pass additional scratch
- // registers. Set to no_reg if not needed.
- void GenerateProbe(MacroAssembler* masm,
- Code::Flags flags,
- Register receiver,
- Register name,
- Register scratch,
- Register extra,
- Register extra2 = no_reg,
- Register extra3 = no_reg);
-
- enum Table {
- kPrimary,
- kSecondary
- };
-
- SCTableReference key_reference(StubCache::Table table) {
- return SCTableReference(
- reinterpret_cast<Address>(&first_entry(table)->key));
- }
-
- SCTableReference map_reference(StubCache::Table table) {
- return SCTableReference(
- reinterpret_cast<Address>(&first_entry(table)->map));
- }
-
- SCTableReference value_reference(StubCache::Table table) {
- return SCTableReference(
- reinterpret_cast<Address>(&first_entry(table)->value));
- }
-
- StubCache::Entry* first_entry(StubCache::Table table) {
- switch (table) {
- case StubCache::kPrimary: return StubCache::primary_;
- case StubCache::kSecondary: return StubCache::secondary_;
- }
- UNREACHABLE();
- return NULL;
- }
-
- Isolate* isolate() { return isolate_; }
-
- // Setting the entry size such that the index is shifted by Name::kHashShift
- // is convenient; shifting down the length field (to extract the hash code)
- // automatically discards the hash bit field.
- static const int kCacheIndexShift = Name::kHashShift;
-
- private:
- explicit StubCache(Isolate* isolate);
-
- // The stub cache has a primary and secondary level. The two levels have
- // different hashing algorithms in order to avoid simultaneous collisions
-  // in both caches. Unlike a probing strategy (quadratic or otherwise), the
-  // update strategy is fairly clear and simple: any existing entry
- // in the primary cache is moved to the secondary cache, and secondary cache
- // entries are overwritten.
-
- // Hash algorithm for the primary table. This algorithm is replicated in
- // assembler for every architecture. Returns an index into the table that
- // is scaled by 1 << kCacheIndexShift.
- static int PrimaryOffset(Name* name, Code::Flags flags, Map* map) {
- STATIC_ASSERT(kCacheIndexShift == Name::kHashShift);
- // Compute the hash of the name (use entire hash field).
- DCHECK(name->HasHashCode());
- uint32_t field = name->hash_field();
- // Using only the low bits in 64-bit mode is unlikely to increase the
- // risk of collision even if the heap is spread over an area larger than
- // 4Gb (and not at all if it isn't).
- uint32_t map_low32bits =
- static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map));
-    // We always set the in_loop bit to zero when generating the lookup code,
-    // so do it here too to keep the hash codes in sync.
- uint32_t iflags =
- (static_cast<uint32_t>(flags) & ~Code::kFlagsNotUsedInLookup);
- // Base the offset on a simple combination of name, flags, and map.
- uint32_t key = (map_low32bits + field) ^ iflags;
- return key & ((kPrimaryTableSize - 1) << kCacheIndexShift);
- }
-
- // Hash algorithm for the secondary table. This algorithm is replicated in
- // assembler for every architecture. Returns an index into the table that
- // is scaled by 1 << kCacheIndexShift.
- static int SecondaryOffset(Name* name, Code::Flags flags, int seed) {
- // Use the seed from the primary cache in the secondary cache.
- uint32_t name_low32bits =
- static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name));
-    // We always set the in_loop bit to zero when generating the lookup code,
-    // so do it here too to keep the hash codes in sync.
- uint32_t iflags =
- (static_cast<uint32_t>(flags) & ~Code::kFlagsNotUsedInLookup);
- uint32_t key = (seed - name_low32bits) + iflags;
- return key & ((kSecondaryTableSize - 1) << kCacheIndexShift);
- }
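
Both hash functions above can be exercised standalone; a sketch, assuming Name::kHashShift == 2 and flags already masked with ~Code::kFlagsNotUsedInLookup, with table sizes matching this header:

#include <cstdint>

const uint32_t kCacheIndexShift = 2;         // Name::kHashShift
const uint32_t kPrimaryTableSize = 1 << 11;
const uint32_t kSecondaryTableSize = 1 << 9;

uint32_t PrimaryOffset(uint32_t hash_field, uint32_t iflags, uint32_t map_low32) {
  uint32_t key = (map_low32 + hash_field) ^ iflags;
  return key & ((kPrimaryTableSize - 1) << kCacheIndexShift);
}

uint32_t SecondaryOffset(uint32_t name_low32, uint32_t iflags, uint32_t seed) {
  uint32_t key = (seed - name_low32) + iflags;
  return key & ((kSecondaryTableSize - 1) << kCacheIndexShift);
}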
-
- // Compute the entry for a given offset in exactly the same way as
-  // we do in generated code. We generate a hash code that already
- // ends in Name::kHashShift 0s. Then we multiply it so it is a multiple
- // of sizeof(Entry). This makes it easier to avoid making mistakes
- // in the hashed offset computations.
- static Entry* entry(Entry* table, int offset) {
- const int multiplier = sizeof(*table) >> Name::kHashShift;
- return reinterpret_cast<Entry*>(
- reinterpret_cast<Address>(table) + offset * multiplier);
- }
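
A worked check of the multiplier trick above, assuming 64-bit pointers and Name::kHashShift == 2: sizeof(Entry) is 24, the multiplier is 24 >> 2 == 6, and an offset of index << 2 therefore advances the table base by exactly index entries.

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t kHashShift = 2;                        // Name::kHashShift
  const size_t kEntrySize = 3 * sizeof(void*);          // 24 on a 64-bit build
  const size_t multiplier = kEntrySize >> kHashShift;   // 6
  for (uint32_t index = 0; index < 8; index++) {
    uint32_t offset = index << kHashShift;              // hash output
    assert(offset * multiplier == index * kEntrySize);  // entry(table, offset)
  }
  return 0;
}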
-
- static const int kPrimaryTableBits = 11;
- static const int kPrimaryTableSize = (1 << kPrimaryTableBits);
- static const int kSecondaryTableBits = 9;
- static const int kSecondaryTableSize = (1 << kSecondaryTableBits);
-
- Entry primary_[kPrimaryTableSize];
- Entry secondary_[kSecondaryTableSize];
- Isolate* isolate_;
-
- friend class Isolate;
- friend class SCTableReference;
-
- DISALLOW_COPY_AND_ASSIGN(StubCache);
-};
-
-
-// ------------------------------------------------------------------------
-
-
-// Support functions for IC stubs for callbacks.
-DECLARE_RUNTIME_FUNCTION(StoreCallbackProperty);
-
-
-// Support functions for IC stubs for interceptors.
-DECLARE_RUNTIME_FUNCTION(LoadPropertyWithInterceptorOnly);
-DECLARE_RUNTIME_FUNCTION(LoadPropertyWithInterceptor);
-DECLARE_RUNTIME_FUNCTION(LoadElementWithInterceptor);
-DECLARE_RUNTIME_FUNCTION(StorePropertyWithInterceptor);
-
-
-enum PrototypeCheckType { CHECK_ALL_MAPS, SKIP_RECEIVER };
-enum IcCheckType { ELEMENT, PROPERTY };
-
-
-class PropertyAccessCompiler BASE_EMBEDDED {
- public:
- static Builtins::Name MissBuiltin(Code::Kind kind) {
- switch (kind) {
- case Code::LOAD_IC:
- return Builtins::kLoadIC_Miss;
- case Code::STORE_IC:
- return Builtins::kStoreIC_Miss;
- case Code::KEYED_LOAD_IC:
- return Builtins::kKeyedLoadIC_Miss;
- case Code::KEYED_STORE_IC:
- return Builtins::kKeyedStoreIC_Miss;
- default:
- UNREACHABLE();
- }
- return Builtins::kLoadIC_Miss;
- }
-
- static void TailCallBuiltin(MacroAssembler* masm, Builtins::Name name);
-
- protected:
- PropertyAccessCompiler(Isolate* isolate, Code::Kind kind,
- CacheHolderFlag cache_holder)
- : registers_(GetCallingConvention(kind)),
- kind_(kind),
- cache_holder_(cache_holder),
- isolate_(isolate),
- masm_(isolate, NULL, 256) {}
-
- Code::Kind kind() const { return kind_; }
- CacheHolderFlag cache_holder() const { return cache_holder_; }
- MacroAssembler* masm() { return &masm_; }
- Isolate* isolate() const { return isolate_; }
- Heap* heap() const { return isolate()->heap(); }
- Factory* factory() const { return isolate()->factory(); }
-
- Register receiver() const { return registers_[0]; }
- Register name() const { return registers_[1]; }
- Register scratch1() const { return registers_[2]; }
- Register scratch2() const { return registers_[3]; }
- Register scratch3() const { return registers_[4]; }
-
- // Calling convention between indexed store IC and handler.
- Register transition_map() const { return scratch1(); }
-
- static Register* GetCallingConvention(Code::Kind);
- static Register* load_calling_convention();
- static Register* store_calling_convention();
- static Register* keyed_store_calling_convention();
-
- Register* registers_;
-
- static void GenerateTailCall(MacroAssembler* masm, Handle<Code> code);
-
- Handle<Code> GetCodeWithFlags(Code::Flags flags, const char* name);
- Handle<Code> GetCodeWithFlags(Code::Flags flags, Handle<Name> name);
-
- private:
- Code::Kind kind_;
- CacheHolderFlag cache_holder_;
-
- Isolate* isolate_;
- MacroAssembler masm_;
-};
-
-
-class PropertyICCompiler : public PropertyAccessCompiler {
- public:
- // Finds the Code object stored in the Heap::non_monomorphic_cache().
- static Code* FindPreMonomorphic(Isolate* isolate, Code::Kind kind,
- ExtraICState extra_ic_state);
-
- // Named
- static Handle<Code> ComputeLoad(Isolate* isolate, InlineCacheState ic_state,
- ExtraICState extra_state);
- static Handle<Code> ComputeStore(Isolate* isolate, InlineCacheState ic_state,
- ExtraICState extra_state);
-
- static Handle<Code> ComputeMonomorphic(Code::Kind kind, Handle<Name> name,
- Handle<HeapType> type,
- Handle<Code> handler,
- ExtraICState extra_ic_state);
- static Handle<Code> ComputePolymorphic(Code::Kind kind, TypeHandleList* types,
- CodeHandleList* handlers,
- int number_of_valid_maps,
- Handle<Name> name,
- ExtraICState extra_ic_state);
-
- // Keyed
- static Handle<Code> ComputeKeyedLoadMonomorphic(Handle<Map> receiver_map);
-
- static Handle<Code> ComputeKeyedStoreMonomorphic(
- Handle<Map> receiver_map, StrictMode strict_mode,
- KeyedAccessStoreMode store_mode);
- static Handle<Code> ComputeKeyedLoadPolymorphic(MapHandleList* receiver_maps);
- static Handle<Code> ComputeKeyedStorePolymorphic(
- MapHandleList* receiver_maps, KeyedAccessStoreMode store_mode,
- StrictMode strict_mode);
-
- // Compare nil
- static Handle<Code> ComputeCompareNil(Handle<Map> receiver_map,
- CompareNilICStub* stub);
-
-
- private:
- PropertyICCompiler(Isolate* isolate, Code::Kind kind,
- ExtraICState extra_ic_state = kNoExtraICState,
- CacheHolderFlag cache_holder = kCacheOnReceiver)
- : PropertyAccessCompiler(isolate, kind, cache_holder),
- extra_ic_state_(extra_ic_state) {}
-
- static Handle<Code> Find(Handle<Name> name, Handle<Map> stub_holder_map,
- Code::Kind kind,
- ExtraICState extra_ic_state = kNoExtraICState,
- CacheHolderFlag cache_holder = kCacheOnReceiver);
-
- Handle<Code> CompileLoadInitialize(Code::Flags flags);
- Handle<Code> CompileLoadPreMonomorphic(Code::Flags flags);
- Handle<Code> CompileLoadMegamorphic(Code::Flags flags);
- Handle<Code> CompileStoreInitialize(Code::Flags flags);
- Handle<Code> CompileStorePreMonomorphic(Code::Flags flags);
- Handle<Code> CompileStoreGeneric(Code::Flags flags);
- Handle<Code> CompileStoreMegamorphic(Code::Flags flags);
-
- Handle<Code> CompileMonomorphic(Handle<HeapType> type, Handle<Code> handler,
- Handle<Name> name, IcCheckType check);
- Handle<Code> CompilePolymorphic(TypeHandleList* types,
- CodeHandleList* handlers, Handle<Name> name,
- Code::StubType type, IcCheckType check);
-
- Handle<Code> CompileKeyedStoreMonomorphic(Handle<Map> receiver_map,
- KeyedAccessStoreMode store_mode);
- Handle<Code> CompileKeyedStorePolymorphic(MapHandleList* receiver_maps,
- KeyedAccessStoreMode store_mode);
- Handle<Code> CompileKeyedStorePolymorphic(MapHandleList* receiver_maps,
- CodeHandleList* handler_stubs,
- MapHandleList* transitioned_maps);
-
- bool IncludesNumberType(TypeHandleList* types);
-
- Handle<Code> GetCode(Code::Kind kind, Code::StubType type, Handle<Name> name,
- InlineCacheState state = MONOMORPHIC);
-
- Logger::LogEventsAndTags log_kind(Handle<Code> code) {
- if (kind() == Code::LOAD_IC) {
- return code->ic_state() == MONOMORPHIC ? Logger::LOAD_IC_TAG
- : Logger::LOAD_POLYMORPHIC_IC_TAG;
- } else if (kind() == Code::KEYED_LOAD_IC) {
- return code->ic_state() == MONOMORPHIC
- ? Logger::KEYED_LOAD_IC_TAG
- : Logger::KEYED_LOAD_POLYMORPHIC_IC_TAG;
- } else if (kind() == Code::STORE_IC) {
- return code->ic_state() == MONOMORPHIC ? Logger::STORE_IC_TAG
- : Logger::STORE_POLYMORPHIC_IC_TAG;
- } else {
- DCHECK_EQ(Code::KEYED_STORE_IC, kind());
- return code->ic_state() == MONOMORPHIC
- ? Logger::KEYED_STORE_IC_TAG
- : Logger::KEYED_STORE_POLYMORPHIC_IC_TAG;
- }
- }
-
- const ExtraICState extra_ic_state_;
-};
-
-
-class PropertyHandlerCompiler : public PropertyAccessCompiler {
- public:
- static Handle<Code> Find(Handle<Name> name, Handle<Map> map, Code::Kind kind,
- CacheHolderFlag cache_holder, Code::StubType type);
-
- protected:
- PropertyHandlerCompiler(Isolate* isolate, Code::Kind kind,
- Handle<HeapType> type, Handle<JSObject> holder,
- CacheHolderFlag cache_holder)
- : PropertyAccessCompiler(isolate, kind, cache_holder),
- type_(type),
- holder_(holder) {}
-
- virtual ~PropertyHandlerCompiler() {}
-
- virtual Register FrontendHeader(Register object_reg, Handle<Name> name,
- Label* miss) {
- UNREACHABLE();
- return receiver();
- }
-
- virtual void FrontendFooter(Handle<Name> name, Label* miss) { UNREACHABLE(); }
-
- Register Frontend(Register object_reg, Handle<Name> name);
- void NonexistentFrontendHeader(Handle<Name> name, Label* miss,
- Register scratch1, Register scratch2);
-
- // TODO(verwaest): Make non-static.
- static void GenerateFastApiCall(MacroAssembler* masm,
- const CallOptimization& optimization,
- Handle<Map> receiver_map, Register receiver,
- Register scratch, bool is_store, int argc,
- Register* values);
-
- // Helper function used to check that the dictionary doesn't contain
- // the property. This function may return false negatives, so miss_label
- // must always call a backup property check that is complete.
- // This function is safe to call if the receiver has fast properties.
- // Name must be unique and receiver must be a heap object.
- static void GenerateDictionaryNegativeLookup(MacroAssembler* masm,
- Label* miss_label,
- Register receiver,
- Handle<Name> name,
- Register r0,
- Register r1);
-
- // Generate code to check that a global property cell is empty. Create
- // the property cell at compilation time if no cell exists for the
- // property.
- static void GenerateCheckPropertyCell(MacroAssembler* masm,
- Handle<JSGlobalObject> global,
- Handle<Name> name,
- Register scratch,
- Label* miss);
-
- // Generates code that verifies that the property holder has not changed
- // (checking maps of objects in the prototype chain for fast and global
- // objects, doing negative lookups for slow objects, and ensuring that the
- // property cells for global objects are still empty) and checks that the map
- // of the holder has not changed. If necessary the function also generates
- // code for security check in case of global object holders. Helps to make
- // sure that the current IC is still valid.
- //
- // The scratch and holder registers are always clobbered, but the object
- // register is only clobbered if it is the same as the holder register. The
- // function returns a register containing the holder - either object_reg or
- // holder_reg.
- Register CheckPrototypes(Register object_reg, Register holder_reg,
- Register scratch1, Register scratch2,
- Handle<Name> name, Label* miss,
- PrototypeCheckType check = CHECK_ALL_MAPS);
-
- Handle<Code> GetCode(Code::Kind kind, Code::StubType type, Handle<Name> name);
- void set_type_for_object(Handle<Object> object) {
- type_ = IC::CurrentTypeOf(object, isolate());
- }
- void set_holder(Handle<JSObject> holder) { holder_ = holder; }
- Handle<HeapType> type() const { return type_; }
- Handle<JSObject> holder() const { return holder_; }
-
- private:
- Handle<HeapType> type_;
- Handle<JSObject> holder_;
-};
-
-
-class NamedLoadHandlerCompiler : public PropertyHandlerCompiler {
- public:
- NamedLoadHandlerCompiler(Isolate* isolate, Handle<HeapType> type,
- Handle<JSObject> holder,
- CacheHolderFlag cache_holder)
- : PropertyHandlerCompiler(isolate, Code::LOAD_IC, type, holder,
- cache_holder) {}
-
- virtual ~NamedLoadHandlerCompiler() {}
-
- Handle<Code> CompileLoadField(Handle<Name> name, FieldIndex index);
-
- Handle<Code> CompileLoadCallback(Handle<Name> name,
- Handle<ExecutableAccessorInfo> callback);
-
- Handle<Code> CompileLoadCallback(Handle<Name> name,
- const CallOptimization& call_optimization);
-
- Handle<Code> CompileLoadConstant(Handle<Name> name, int constant_index);
-
- // The LookupIterator is used to perform a lookup behind the interceptor. If
- // the iterator points to a LookupIterator::PROPERTY, its access will be
- // inlined.
- Handle<Code> CompileLoadInterceptor(LookupIterator* it);
-
- Handle<Code> CompileLoadViaGetter(Handle<Name> name,
- Handle<JSFunction> getter);
-
- Handle<Code> CompileLoadGlobal(Handle<PropertyCell> cell, Handle<Name> name,
- bool is_configurable);
-
- // Static interface
- static Handle<Code> ComputeLoadNonexistent(Handle<Name> name,
- Handle<HeapType> type);
-
- static void GenerateLoadViaGetter(MacroAssembler* masm, Handle<HeapType> type,
- Register receiver,
- Handle<JSFunction> getter);
-
- static void GenerateLoadViaGetterForDeopt(MacroAssembler* masm) {
- GenerateLoadViaGetter(masm, Handle<HeapType>::null(), no_reg,
- Handle<JSFunction>());
- }
-
- static void GenerateLoadFunctionPrototype(MacroAssembler* masm,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Label* miss_label);
-
- // These constants describe the structure of the interceptor arguments on the
- // stack. The arguments are pushed by the (platform-specific)
- // PushInterceptorArguments and read by LoadPropertyWithInterceptorOnly and
- // LoadWithInterceptor.
- static const int kInterceptorArgsNameIndex = 0;
- static const int kInterceptorArgsInfoIndex = 1;
- static const int kInterceptorArgsThisIndex = 2;
- static const int kInterceptorArgsHolderIndex = 3;
- static const int kInterceptorArgsLength = 4;
-
- protected:
- virtual Register FrontendHeader(Register object_reg, Handle<Name> name,
- Label* miss);
-
- virtual void FrontendFooter(Handle<Name> name, Label* miss);
-
- private:
- Handle<Code> CompileLoadNonexistent(Handle<Name> name);
- void GenerateLoadConstant(Handle<Object> value);
- void GenerateLoadCallback(Register reg,
- Handle<ExecutableAccessorInfo> callback);
- void GenerateLoadCallback(const CallOptimization& call_optimization,
- Handle<Map> receiver_map);
- void GenerateLoadInterceptor(Register holder_reg);
- void GenerateLoadInterceptorWithFollowup(LookupIterator* it,
- Register holder_reg);
- void GenerateLoadPostInterceptor(LookupIterator* it, Register reg);
-
- // Generates prototype loading code that uses the objects from the
- // context we were in when this function was called. If the context
- // has changed, a jump to miss is performed. This ties the generated
- // code to a particular context and so must not be used in cases
- // where the generated code is not allowed to have references to
- // objects from a context.
- static void GenerateDirectLoadGlobalFunctionPrototype(MacroAssembler* masm,
- int index,
- Register prototype,
- Label* miss);
-
-
- Register scratch4() { return registers_[5]; }
-};
-
-
-class NamedStoreHandlerCompiler : public PropertyHandlerCompiler {
- public:
- explicit NamedStoreHandlerCompiler(Isolate* isolate, Handle<HeapType> type,
- Handle<JSObject> holder)
- : PropertyHandlerCompiler(isolate, Code::STORE_IC, type, holder,
- kCacheOnReceiver) {}
-
- virtual ~NamedStoreHandlerCompiler() {}
-
- Handle<Code> CompileStoreTransition(Handle<Map> transition,
- Handle<Name> name);
- Handle<Code> CompileStoreField(LookupIterator* it);
- Handle<Code> CompileStoreCallback(Handle<JSObject> object, Handle<Name> name,
- Handle<ExecutableAccessorInfo> callback);
- Handle<Code> CompileStoreCallback(Handle<JSObject> object, Handle<Name> name,
- const CallOptimization& call_optimization);
- Handle<Code> CompileStoreViaSetter(Handle<JSObject> object, Handle<Name> name,
- Handle<JSFunction> setter);
- Handle<Code> CompileStoreInterceptor(Handle<Name> name);
-
- static void GenerateStoreViaSetter(MacroAssembler* masm,
- Handle<HeapType> type, Register receiver,
- Handle<JSFunction> setter);
-
- static void GenerateStoreViaSetterForDeopt(MacroAssembler* masm) {
- GenerateStoreViaSetter(masm, Handle<HeapType>::null(), no_reg,
- Handle<JSFunction>());
- }
-
- protected:
- virtual Register FrontendHeader(Register object_reg, Handle<Name> name,
- Label* miss);
-
- virtual void FrontendFooter(Handle<Name> name, Label* miss);
- void GenerateRestoreName(Label* label, Handle<Name> name);
-
- private:
- void GenerateStoreTransition(Handle<Map> transition, Handle<Name> name,
- Register receiver_reg, Register name_reg,
- Register value_reg, Register scratch1,
- Register scratch2, Register scratch3,
- Label* miss_label, Label* slow);
-
- void GenerateStoreField(LookupIterator* lookup, Register value_reg,
- Label* miss_label);
-
- static Builtins::Name SlowBuiltin(Code::Kind kind) {
- switch (kind) {
- case Code::STORE_IC: return Builtins::kStoreIC_Slow;
- case Code::KEYED_STORE_IC: return Builtins::kKeyedStoreIC_Slow;
- default: UNREACHABLE();
- }
- return Builtins::kStoreIC_Slow;
- }
-
- static Register value();
-};
-
-
-class ElementHandlerCompiler : public PropertyHandlerCompiler {
- public:
- explicit ElementHandlerCompiler(Isolate* isolate)
- : PropertyHandlerCompiler(isolate, Code::KEYED_LOAD_IC,
- Handle<HeapType>::null(),
- Handle<JSObject>::null(), kCacheOnReceiver) {}
-
- virtual ~ElementHandlerCompiler() {}
-
- void CompileElementHandlers(MapHandleList* receiver_maps,
- CodeHandleList* handlers);
-
- static void GenerateLoadDictionaryElement(MacroAssembler* masm);
- static void GenerateStoreDictionaryElement(MacroAssembler* masm);
-};
-
-
-// Holds information about possible function call optimizations.
-class CallOptimization BASE_EMBEDDED {
- public:
- explicit CallOptimization(Handle<JSFunction> function);
-
- bool is_constant_call() const {
- return !constant_function_.is_null();
- }
-
- Handle<JSFunction> constant_function() const {
- DCHECK(is_constant_call());
- return constant_function_;
- }
-
- bool is_simple_api_call() const {
- return is_simple_api_call_;
- }
-
- Handle<FunctionTemplateInfo> expected_receiver_type() const {
- DCHECK(is_simple_api_call());
- return expected_receiver_type_;
- }
-
- Handle<CallHandlerInfo> api_call_info() const {
- DCHECK(is_simple_api_call());
- return api_call_info_;
- }
-
- enum HolderLookup {
- kHolderNotFound,
- kHolderIsReceiver,
- kHolderFound
- };
- Handle<JSObject> LookupHolderOfExpectedType(
- Handle<Map> receiver_map,
- HolderLookup* holder_lookup) const;
-
- // Check if the api holder is between the receiver and the holder.
- bool IsCompatibleReceiver(Handle<Object> receiver,
- Handle<JSObject> holder) const;
-
- private:
- void Initialize(Handle<JSFunction> function);
-
- // Determines whether the given function can be called using the
- // fast api call builtin.
- void AnalyzePossibleApiFunction(Handle<JSFunction> function);
-
- Handle<JSFunction> constant_function_;
- bool is_simple_api_call_;
- Handle<FunctionTemplateInfo> expected_receiver_type_;
- Handle<CallHandlerInfo> api_call_info_;
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_STUB_CACHE_H_
#include "src/ast.h"
#include "src/code-stubs.h"
#include "src/compiler.h"
-#include "src/ic.h"
+#include "src/ic/stub-cache.h"
#include "src/macro-assembler.h"
-#include "src/stub-cache.h"
#include "src/type-info.h"
-#include "src/ic-inl.h"
#include "src/objects-inl.h"
namespace v8 {
#include "src/codegen.h"
#include "src/deoptimizer.h"
#include "src/full-codegen.h"
-#include "src/stub-cache.h"
namespace v8 {
namespace internal {
#include "src/bootstrapper.h"
#include "src/code-stubs.h"
+#include "src/codegen.h"
+#include "src/ic/ic-compiler.h"
+#include "src/isolate.h"
+#include "src/jsregexp.h"
#include "src/regexp-macro-assembler.h"
#include "src/runtime.h"
-#include "src/stub-cache.h"
namespace v8 {
namespace internal {
#ifndef V8_X64_CODE_STUBS_X64_H_
#define V8_X64_CODE_STUBS_X64_H_
-#include "src/ic-inl.h"
+#include "src/code-stubs.h"
namespace v8 {
namespace internal {
#define V8_X64_CODEGEN_X64_H_
#include "src/ast.h"
-#include "src/ic-inl.h"
+#include "src/macro-assembler.h"
namespace v8 {
namespace internal {
#include "src/isolate-inl.h"
#include "src/parser.h"
#include "src/scopes.h"
-#include "src/stub-cache.h"
namespace v8 {
namespace internal {
+++ /dev/null
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/v8.h"
-
-#if V8_TARGET_ARCH_X64
-
-#include "src/codegen.h"
-#include "src/ic-inl.h"
-#include "src/runtime.h"
-#include "src/stub-cache.h"
-
-namespace v8 {
-namespace internal {
-
-// ----------------------------------------------------------------------------
-// Static IC stub generators.
-//
-
-#define __ ACCESS_MASM(masm)
-
-
-static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm,
- Register type,
- Label* global_object) {
- // Register usage:
- // type: holds the receiver instance type on entry.
- __ cmpb(type, Immediate(JS_GLOBAL_OBJECT_TYPE));
- __ j(equal, global_object);
- __ cmpb(type, Immediate(JS_BUILTINS_OBJECT_TYPE));
- __ j(equal, global_object);
- __ cmpb(type, Immediate(JS_GLOBAL_PROXY_TYPE));
- __ j(equal, global_object);
-}
-
-
-// Helper function used to load a property from a dictionary backing storage.
-// This function may return false negatives, so miss_label
-// must always call a backup property load that is complete.
-// This function is safe to call if name is not an internalized string,
-// and will jump to the miss_label in that case.
-// The generated code assumes that the receiver has slow properties,
-// is not a global object and does not have interceptors.
-static void GenerateDictionaryLoad(MacroAssembler* masm,
- Label* miss_label,
- Register elements,
- Register name,
- Register r0,
- Register r1,
- Register result) {
- // Register use:
- //
- // elements - holds the property dictionary on entry and is unchanged.
- //
- // name - holds the name of the property on entry and is unchanged.
- //
- // r0 - used to hold the capacity of the property dictionary.
- //
- // r1 - used to hold the index into the property dictionary.
- //
- // result - holds the result on exit if the load succeeded.
-
- Label done;
-
- // Probe the dictionary.
- NameDictionaryLookupStub::GeneratePositiveLookup(masm,
- miss_label,
- &done,
- elements,
- name,
- r0,
- r1);
-
- // If probing finds an entry in the dictionary, r1 contains the
- // index into the dictionary. Check that the value is a normal
- // property.
- __ bind(&done);
- const int kElementsStartOffset =
- NameDictionary::kHeaderSize +
- NameDictionary::kElementsStartIndex * kPointerSize;
- const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
- __ Test(Operand(elements, r1, times_pointer_size,
- kDetailsOffset - kHeapObjectTag),
- Smi::FromInt(PropertyDetails::TypeField::kMask));
- __ j(not_zero, miss_label);
-
- // Get the value at the masked, scaled index.
- const int kValueOffset = kElementsStartOffset + kPointerSize;
- __ movp(result,
- Operand(elements, r1, times_pointer_size,
- kValueOffset - kHeapObjectTag));
-}
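
Stripped of registers, the load above is: probe for the entry, reject anything but a normal property via the details word, then read the value one slot over. A rough, runnable analogue using a hypothetical flattened entry struct (the real dictionary packs key/value/details triples after a header, as the offset constants encode):

    #include <cstdint>

    // Hypothetical flat view of one dictionary entry.
    struct DictEntry { intptr_t key; intptr_t value; uint32_t details; };

    // 'found' is the entry the positive lookup located; only a normal
    // property (type bits clear) may be loaded here, otherwise miss.
    bool DictionaryLoad(const DictEntry& found, uint32_t type_mask,
                        intptr_t* out) {
      if ((found.details & type_mask) != 0) return false;  // take miss path
      *out = found.value;
      return true;
    }
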
-
-
-// Helper function used to store a property to a dictionary backing
-// storage. This function may fail to store a property even though it
-// is in the dictionary, so code at miss_label must always call a
-// backup property store that is complete. This function is safe to
-// call if name is not an internalized string, and will jump to the miss_label
-// in that case. The generated code assumes that the receiver has slow
-// properties, is not a global object and does not have interceptors.
-static void GenerateDictionaryStore(MacroAssembler* masm,
- Label* miss_label,
- Register elements,
- Register name,
- Register value,
- Register scratch0,
- Register scratch1) {
- // Register use:
- //
- // elements - holds the property dictionary on entry and is clobbered.
- //
- // name - holds the name of the property on entry and is unchanged.
- //
- // value - holds the value to store and is unchanged.
- //
- // scratch0 - used during the positive dictionary lookup and is clobbered.
- //
- // scratch1 - used for index into the property dictionary and is clobbered.
- Label done;
-
- // Probe the dictionary.
- NameDictionaryLookupStub::GeneratePositiveLookup(masm,
- miss_label,
- &done,
- elements,
- name,
- scratch0,
- scratch1);
-
- // If probing finds an entry in the dictionary, scratch1 contains the
- // index into the dictionary. Check that the value is a normal
- // property that is not read only.
- __ bind(&done);
- const int kElementsStartOffset =
- NameDictionary::kHeaderSize +
- NameDictionary::kElementsStartIndex * kPointerSize;
- const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
- const int kTypeAndReadOnlyMask =
- (PropertyDetails::TypeField::kMask |
- PropertyDetails::AttributesField::encode(READ_ONLY)) << kSmiTagSize;
- __ Test(Operand(elements,
- scratch1,
- times_pointer_size,
- kDetailsOffset - kHeapObjectTag),
- Smi::FromInt(kTypeAndReadOnlyMask));
- __ j(not_zero, miss_label);
-
- // Store the value at the masked, scaled index.
- const int kValueOffset = kElementsStartOffset + kPointerSize;
- __ leap(scratch1, Operand(elements,
- scratch1,
- times_pointer_size,
- kValueOffset - kHeapObjectTag));
- __ movp(Operand(scratch1, 0), value);
-
- // Update write barrier. Make sure not to clobber the value.
- __ movp(scratch0, value);
- __ RecordWrite(elements, scratch1, scratch0, kDontSaveFPRegs);
-}
-
-
-// Checks the receiver for special cases (value type, slow case bits).
-// Falls through for regular JS object.
-static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
- Register receiver,
- Register map,
- int interceptor_bit,
- Label* slow) {
- // Register use:
- // receiver - holds the receiver and is unchanged.
- // Scratch registers:
- // map - used to hold the map of the receiver.
-
- // Check that the object isn't a smi.
- __ JumpIfSmi(receiver, slow);
-
- // Check that the object is some kind of JS object EXCEPT JS Value type.
- // In the case that the object is a value-wrapper object,
- // we enter the runtime system to make sure that indexing
- // into string objects works as intended.
- DCHECK(JS_OBJECT_TYPE > JS_VALUE_TYPE);
- __ CmpObjectType(receiver, JS_OBJECT_TYPE, map);
- __ j(below, slow);
-
- // Check bit field.
- __ testb(FieldOperand(map, Map::kBitFieldOffset),
- Immediate((1 << Map::kIsAccessCheckNeeded) |
- (1 << interceptor_bit)));
- __ j(not_zero, slow);
-}
-
-
-// Loads an indexed element from a fast case array.
-// If not_fast_array is NULL, doesn't perform the elements map check.
-static void GenerateFastArrayLoad(MacroAssembler* masm,
- Register receiver,
- Register key,
- Register elements,
- Register scratch,
- Register result,
- Label* not_fast_array,
- Label* out_of_range) {
- // Register use:
- //
- // receiver - holds the receiver on entry.
- // Unchanged unless 'result' is the same register.
- //
- // key - holds the smi key on entry.
- // Unchanged unless 'result' is the same register.
- //
- // elements - holds the elements of the receiver on exit.
- //
- // result - holds the result on exit if the load succeeded.
- // Allowed to be the same as 'receiver' or 'key'.
- // Unchanged on bailout so 'receiver' and 'key' can be safely
- // used by further computation.
- //
- // Scratch registers:
- //
- // scratch - used to hold elements of the receiver and the loaded value.
-
- __ movp(elements, FieldOperand(receiver, JSObject::kElementsOffset));
- if (not_fast_array != NULL) {
- // Check that the object is in fast mode and writable.
- __ CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
- Heap::kFixedArrayMapRootIndex);
- __ j(not_equal, not_fast_array);
- } else {
- __ AssertFastElements(elements);
- }
- // Check that the key (index) is within bounds.
- __ SmiCompare(key, FieldOperand(elements, FixedArray::kLengthOffset));
- // Unsigned comparison rejects negative indices.
- __ j(above_equal, out_of_range);
- // Fast case: Do the load.
- SmiIndex index = masm->SmiToIndex(scratch, key, kPointerSizeLog2);
- __ movp(scratch, FieldOperand(elements,
- index.reg,
- index.scale,
- FixedArray::kHeaderSize));
- __ CompareRoot(scratch, Heap::kTheHoleValueRootIndex);
- // In case the loaded value is the_hole we have to consult GetProperty
- // to ensure the prototype chain is searched.
- __ j(equal, out_of_range);
- if (!result.is(scratch)) {
- __ movp(result, scratch);
- }
-}
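
The fast path above is a map check, an unsigned bounds check, and a hole check. A runnable analogue, with a vector standing in for the FixedArray and an assumed hole sentinel:

    #include <cstdint>
    #include <vector>

    static const intptr_t kTheHole = -1;  // assumed sentinel value

    struct FastLoadResult { bool bail_out; intptr_t value; };

    FastLoadResult FastArrayLoad(const std::vector<intptr_t>& elements,
                                 int64_t key) {
      // The unsigned comparison rejects negative indices, as in the
      // SmiCompare/j(above_equal) pair above.
      if (static_cast<uint64_t>(key) >= elements.size()) return {true, 0};
      intptr_t value = elements[static_cast<size_t>(key)];
      // A hole means the prototype chain must be consulted: bail out.
      if (value == kTheHole) return {true, 0};
      return {false, value};
    }
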
-
-
-// Checks whether a key is an array index string or a unique name.
-// Falls through if the key is a unique name.
-static void GenerateKeyNameCheck(MacroAssembler* masm,
- Register key,
- Register map,
- Register hash,
- Label* index_string,
- Label* not_unique) {
- // Register use:
- // key - holds the key and is unchanged. Assumed to be non-smi.
- // Scratch registers:
- // map - used to hold the map of the key.
- // hash - used to hold the hash of the key.
- Label unique;
- __ CmpObjectType(key, LAST_UNIQUE_NAME_TYPE, map);
- __ j(above, not_unique);
- STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
- __ j(equal, &unique);
-
- // Is the string an array index, with cached numeric value?
- __ movl(hash, FieldOperand(key, Name::kHashFieldOffset));
- __ testl(hash, Immediate(Name::kContainsCachedArrayIndexMask));
- __ j(zero, index_string); // The value in hash is used at jump target.
-
- // Is the string internalized? We already know it's a string so a single
- // bit test is enough.
- STATIC_ASSERT(kNotInternalizedTag != 0);
- __ testb(FieldOperand(map, Map::kInstanceTypeOffset),
- Immediate(kIsNotInternalizedMask));
- __ j(not_zero, not_unique);
-
- __ bind(&unique);
-}
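
The classification above has three outcomes; as a sketch (flags for the instance-type checks are passed in, since only the hash-field test is arithmetic):

    #include <cstdint>

    enum KeyKind { kIndexString, kUniqueName, kNotUnique };

    // 'cached_index_mask' stands in for Name::kContainsCachedArrayIndexMask;
    // clear bits mean the string caches a numeric array index.
    KeyKind ClassifyKey(bool is_symbol, bool is_string, bool is_internalized,
                        uint32_t hash_field, uint32_t cached_index_mask) {
      if (is_symbol) return kUniqueName;  // symbols are always unique
      if (!is_string) return kNotUnique;
      if ((hash_field & cached_index_mask) == 0) return kIndexString;
      return is_internalized ? kUniqueName : kNotUnique;
    }
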
-
-
-
-void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
- // The return address is on the stack.
- Label slow, check_name, index_smi, index_name, property_array_property;
- Label probe_dictionary, check_number_dictionary;
-
- Register receiver = ReceiverRegister();
- Register key = NameRegister();
- DCHECK(receiver.is(rdx));
- DCHECK(key.is(rcx));
-
- // Check that the key is a smi.
- __ JumpIfNotSmi(key, &check_name);
- __ bind(&index_smi);
- // Now the key is known to be a smi. This place is also jumped to from below
- // where a numeric string is converted to a smi.
-
- GenerateKeyedLoadReceiverCheck(
- masm, receiver, rax, Map::kHasIndexedInterceptor, &slow);
-
- // Check the receiver's map to see if it has fast elements.
- __ CheckFastElements(rax, &check_number_dictionary);
-
- GenerateFastArrayLoad(masm,
- receiver,
- key,
- rax,
- rbx,
- rax,
- NULL,
- &slow);
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->keyed_load_generic_smi(), 1);
- __ ret(0);
-
- __ bind(&check_number_dictionary);
- __ SmiToInteger32(rbx, key);
- __ movp(rax, FieldOperand(receiver, JSObject::kElementsOffset));
-
- // Check whether the elements object is a number dictionary.
- // rbx: key as untagged int32
- // rax: elements
- __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
- Heap::kHashTableMapRootIndex);
- __ j(not_equal, &slow);
- __ LoadFromNumberDictionary(&slow, rax, key, rbx, r9, rdi, rax);
- __ ret(0);
-
- __ bind(&slow);
- // Slow case: Jump to runtime.
- __ IncrementCounter(counters->keyed_load_generic_slow(), 1);
- GenerateRuntimeGetProperty(masm);
-
- __ bind(&check_name);
- GenerateKeyNameCheck(masm, key, rax, rbx, &index_name, &slow);
-
- GenerateKeyedLoadReceiverCheck(
- masm, receiver, rax, Map::kHasNamedInterceptor, &slow);
-
- // If the receiver is a fast-case object, check the keyed lookup
- // cache. Otherwise probe the dictionary, leaving the result in rax.
- __ movp(rbx, FieldOperand(receiver, JSObject::kPropertiesOffset));
- __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
- Heap::kHashTableMapRootIndex);
- __ j(equal, &probe_dictionary);
-
- // Load the map of the receiver, compute the keyed lookup cache hash
- // based on 32 bits of the map pointer and the string hash.
- __ movp(rbx, FieldOperand(receiver, HeapObject::kMapOffset));
- __ movl(rax, rbx);
- __ shrl(rax, Immediate(KeyedLookupCache::kMapHashShift));
- __ movl(rdi, FieldOperand(key, String::kHashFieldOffset));
- __ shrl(rdi, Immediate(String::kHashShift));
- __ xorp(rax, rdi);
- int mask = (KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask);
- __ andp(rax, Immediate(mask));
-
- // Load the key (consisting of map and internalized string) from the cache and
- // check for match.
- Label load_in_object_property;
- static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
- Label hit_on_nth_entry[kEntriesPerBucket];
- ExternalReference cache_keys
- = ExternalReference::keyed_lookup_cache_keys(masm->isolate());
-
- for (int i = 0; i < kEntriesPerBucket - 1; i++) {
- Label try_next_entry;
- __ movp(rdi, rax);
- __ shlp(rdi, Immediate(kPointerSizeLog2 + 1));
- __ LoadAddress(kScratchRegister, cache_keys);
- int off = kPointerSize * i * 2;
- __ cmpp(rbx, Operand(kScratchRegister, rdi, times_1, off));
- __ j(not_equal, &try_next_entry);
- __ cmpp(key, Operand(kScratchRegister, rdi, times_1, off + kPointerSize));
- __ j(equal, &hit_on_nth_entry[i]);
- __ bind(&try_next_entry);
- }
-
- int off = kPointerSize * (kEntriesPerBucket - 1) * 2;
- __ cmpp(rbx, Operand(kScratchRegister, rdi, times_1, off));
- __ j(not_equal, &slow);
- __ cmpp(key, Operand(kScratchRegister, rdi, times_1, off + kPointerSize));
- __ j(not_equal, &slow);
-
- // Get field offset, which is a 32-bit integer.
- ExternalReference cache_field_offsets
- = ExternalReference::keyed_lookup_cache_field_offsets(masm->isolate());
-
- // Hit on nth entry.
- for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
- __ bind(&hit_on_nth_entry[i]);
- if (i != 0) {
- __ addl(rax, Immediate(i));
- }
- __ LoadAddress(kScratchRegister, cache_field_offsets);
- __ movl(rdi, Operand(kScratchRegister, rax, times_4, 0));
- __ movzxbp(rax, FieldOperand(rbx, Map::kInObjectPropertiesOffset));
- __ subp(rdi, rax);
- __ j(above_equal, &property_array_property);
- if (i != 0) {
- __ jmp(&load_in_object_property);
- }
- }
-
- // Load in-object property.
- __ bind(&load_in_object_property);
- __ movzxbp(rax, FieldOperand(rbx, Map::kInstanceSizeOffset));
- __ addp(rax, rdi);
- __ movp(rax, FieldOperand(receiver, rax, times_pointer_size, 0));
- __ IncrementCounter(counters->keyed_load_generic_lookup_cache(), 1);
- __ ret(0);
-
- // Load property array property.
- __ bind(&property_array_property);
- __ movp(rax, FieldOperand(receiver, JSObject::kPropertiesOffset));
- __ movp(rax, FieldOperand(rax, rdi, times_pointer_size,
- FixedArray::kHeaderSize));
- __ IncrementCounter(counters->keyed_load_generic_lookup_cache(), 1);
- __ ret(0);
-
- // Do a quick inline probe of the receiver's dictionary, if it
- // exists.
- __ bind(&probe_dictionary);
- // rbx: elements
-
- __ movp(rax, FieldOperand(receiver, JSObject::kMapOffset));
- __ movb(rax, FieldOperand(rax, Map::kInstanceTypeOffset));
- GenerateGlobalInstanceTypeCheck(masm, rax, &slow);
-
- GenerateDictionaryLoad(masm, &slow, rbx, key, rax, rdi, rax);
- __ IncrementCounter(counters->keyed_load_generic_symbol(), 1);
- __ ret(0);
-
- __ bind(&index_name);
- __ IndexFromHash(rbx, key);
- __ jmp(&index_smi);
-}
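
The lookup-cache probe above keys buckets on (map, name): truncate the map pointer, shift both inputs, xor, and mask. A sketch with hypothetical shift and mask constants (the real values are KeyedLookupCache::kMapHashShift, String::kHashShift, and the capacity/hash masks used above):

    #include <cstdint>

    static const int kMapHashShift = 5;        // assumed value
    static const int kNameHashShift = 2;       // assumed value
    static const uint32_t kBucketMask = 0x7f;  // assumed capacity mask

    uint32_t LookupCacheBucket(uintptr_t map_ptr, uint32_t hash_field) {
      return ((static_cast<uint32_t>(map_ptr) >> kMapHashShift) ^
              (hash_field >> kNameHashShift)) &
             kBucketMask;
    }
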
-
-
-void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
- // Return address is on the stack.
- Label miss;
-
- Register receiver = ReceiverRegister();
- Register index = NameRegister();
- Register scratch = rbx;
- Register result = rax;
- DCHECK(!scratch.is(receiver) && !scratch.is(index));
-
- StringCharAtGenerator char_at_generator(receiver,
- index,
- scratch,
- result,
- &miss, // When not a string.
- &miss, // When not a number.
- &miss, // When index out of range.
- STRING_INDEX_IS_ARRAY_INDEX);
- char_at_generator.GenerateFast(masm);
- __ ret(0);
-
- StubRuntimeCallHelper call_helper;
- char_at_generator.GenerateSlow(masm, call_helper);
-
- __ bind(&miss);
- GenerateMiss(masm);
-}
-
-
-void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
- // Return address is on the stack.
- Label slow;
-
- Register receiver = ReceiverRegister();
- Register key = NameRegister();
- Register scratch = rax;
- DCHECK(!scratch.is(receiver) && !scratch.is(key));
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, &slow);
-
- // Check that the key is an array index, that is, a Uint32.
- STATIC_ASSERT(kSmiValueSize <= 32);
- __ JumpUnlessNonNegativeSmi(key, &slow);
-
- // Get the map of the receiver.
- __ movp(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
-
- // Check that it has indexed interceptor and access checks
- // are not enabled for this object.
- __ movb(scratch, FieldOperand(scratch, Map::kBitFieldOffset));
- __ andb(scratch, Immediate(kSlowCaseBitFieldMask));
- __ cmpb(scratch, Immediate(1 << Map::kHasIndexedInterceptor));
- __ j(not_zero, &slow);
-
- // Everything is fine, call runtime.
- __ PopReturnAddressTo(scratch);
- __ Push(receiver); // receiver
- __ Push(key); // key
- __ PushReturnAddressFrom(scratch);
-
- // Perform tail call to the entry.
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(kLoadElementWithInterceptor),
- masm->isolate()),
- 2, 1);
-
- __ bind(&slow);
- GenerateMiss(masm);
-}
-
-
-static void KeyedStoreGenerateGenericHelper(
- MacroAssembler* masm,
- Label* fast_object,
- Label* fast_double,
- Label* slow,
- KeyedStoreCheckMap check_map,
- KeyedStoreIncrementLength increment_length) {
- Label transition_smi_elements;
- Label finish_object_store, non_double_value, transition_double_elements;
- Label fast_double_without_map_check;
- Register receiver = KeyedStoreIC::ReceiverRegister();
- Register key = KeyedStoreIC::NameRegister();
- Register value = KeyedStoreIC::ValueRegister();
- DCHECK(receiver.is(rdx));
- DCHECK(key.is(rcx));
- DCHECK(value.is(rax));
- // Fast case: Do the store, could be either Object or double.
- __ bind(fast_object);
- // rbx: receiver's elements array (a FixedArray)
- // receiver is a JSArray.
- // r9: map of receiver
- if (check_map == kCheckMap) {
- __ movp(rdi, FieldOperand(rbx, HeapObject::kMapOffset));
- __ CompareRoot(rdi, Heap::kFixedArrayMapRootIndex);
- __ j(not_equal, fast_double);
- }
-
- // HOLECHECK: guards "A[i] = V"
- // We have to go to the runtime if the current value is the hole because
- // there may be a callback on the element
- Label holecheck_passed1;
- __ movp(kScratchRegister, FieldOperand(rbx,
- key,
- times_pointer_size,
- FixedArray::kHeaderSize));
- __ CompareRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
- __ j(not_equal, &holecheck_passed1);
- __ JumpIfDictionaryInPrototypeChain(receiver, rdi, kScratchRegister, slow);
-
- __ bind(&holecheck_passed1);
-
- // Smi stores don't require further checks.
- Label non_smi_value;
- __ JumpIfNotSmi(value, &non_smi_value);
- if (increment_length == kIncrementLength) {
- // Add 1 to receiver->length.
- __ leal(rdi, Operand(key, 1));
- __ Integer32ToSmiField(FieldOperand(receiver, JSArray::kLengthOffset), rdi);
- }
- // It's irrelevant whether array is smi-only or not when writing a smi.
- __ movp(FieldOperand(rbx, key, times_pointer_size, FixedArray::kHeaderSize),
- value);
- __ ret(0);
-
- __ bind(&non_smi_value);
- // Writing a non-smi, check whether array allows non-smi elements.
- // r9: receiver's map
- __ CheckFastObjectElements(r9, &transition_smi_elements);
-
- __ bind(&finish_object_store);
- if (increment_length == kIncrementLength) {
- // Add 1 to receiver->length.
- __ leal(rdi, Operand(key, 1));
- __ Integer32ToSmiField(FieldOperand(receiver, JSArray::kLengthOffset), rdi);
- }
- __ movp(FieldOperand(rbx, key, times_pointer_size, FixedArray::kHeaderSize),
- value);
- __ movp(rdx, value); // Preserve the value which is returned.
- __ RecordWriteArray(
- rbx, rdx, key, kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
- __ ret(0);
-
- __ bind(fast_double);
- if (check_map == kCheckMap) {
- // Check for fast double array case. If this fails, call through to the
- // runtime.
- // rdi: elements array's map
- __ CompareRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
- __ j(not_equal, slow);
- }
-
- // HOLECHECK: guards "A[i] double hole?"
- // We have to see if the double version of the hole is present. If so
- // go to the runtime.
- uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
- __ cmpl(FieldOperand(rbx, key, times_8, offset), Immediate(kHoleNanUpper32));
- __ j(not_equal, &fast_double_without_map_check);
- __ JumpIfDictionaryInPrototypeChain(receiver, rdi, kScratchRegister, slow);
-
- __ bind(&fast_double_without_map_check);
- __ StoreNumberToDoubleElements(value, rbx, key, xmm0,
- &transition_double_elements);
- if (increment_length == kIncrementLength) {
- // Add 1 to receiver->length.
- __ leal(rdi, Operand(key, 1));
- __ Integer32ToSmiField(FieldOperand(receiver, JSArray::kLengthOffset), rdi);
- }
- __ ret(0);
-
- __ bind(&transition_smi_elements);
- __ movp(rbx, FieldOperand(receiver, HeapObject::kMapOffset));
-
- // Transition the array appropriately depending on the value type.
- __ movp(r9, FieldOperand(value, HeapObject::kMapOffset));
- __ CompareRoot(r9, Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, &non_double_value);
-
- // Value is a double. Transition FAST_SMI_ELEMENTS ->
- // FAST_DOUBLE_ELEMENTS and complete the store.
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- FAST_DOUBLE_ELEMENTS,
- rbx,
- rdi,
- slow);
- AllocationSiteMode mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS,
- FAST_DOUBLE_ELEMENTS);
- ElementsTransitionGenerator::GenerateSmiToDouble(
- masm, receiver, key, value, rbx, mode, slow);
- __ movp(rbx, FieldOperand(receiver, JSObject::kElementsOffset));
- __ jmp(&fast_double_without_map_check);
-
- __ bind(&non_double_value);
- // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- FAST_ELEMENTS,
- rbx,
- rdi,
- slow);
- mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
- ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
- masm, receiver, key, value, rbx, mode, slow);
- __ movp(rbx, FieldOperand(receiver, JSObject::kElementsOffset));
- __ jmp(&finish_object_store);
-
- __ bind(&transition_double_elements);
- // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
- // HeapNumber. Make sure that the receiver is a Array with FAST_ELEMENTS and
- // transition array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS
- __ movp(rbx, FieldOperand(receiver, HeapObject::kMapOffset));
- __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS,
- FAST_ELEMENTS,
- rbx,
- rdi,
- slow);
- mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
- ElementsTransitionGenerator::GenerateDoubleToObject(
- masm, receiver, key, value, rbx, mode, slow);
- __ movp(rbx, FieldOperand(receiver, JSObject::kElementsOffset));
- __ jmp(&finish_object_store);
-}
-
-
-void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
- StrictMode strict_mode) {
- // Return address is on the stack.
- Label slow, slow_with_tagged_index, fast_object, fast_object_grow;
- Label fast_double, fast_double_grow;
- Label array, extra, check_if_double_array;
- Register receiver = ReceiverRegister();
- Register key = NameRegister();
- DCHECK(receiver.is(rdx));
- DCHECK(key.is(rcx));
-
- // Check that the object isn't a smi.
- __ JumpIfSmi(receiver, &slow_with_tagged_index);
- // Get the map from the receiver.
- __ movp(r9, FieldOperand(receiver, HeapObject::kMapOffset));
- // Check that the receiver does not require access checks and is not observed.
- // The generic stub does not perform map checks or handle observed objects.
- __ testb(FieldOperand(r9, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsAccessCheckNeeded | 1 << Map::kIsObserved));
- __ j(not_zero, &slow_with_tagged_index);
- // Check that the key is a smi.
- __ JumpIfNotSmi(key, &slow_with_tagged_index);
- __ SmiToInteger32(key, key);
-
- __ CmpInstanceType(r9, JS_ARRAY_TYPE);
- __ j(equal, &array);
- // Check that the object is some kind of JSObject.
- __ CmpInstanceType(r9, FIRST_JS_OBJECT_TYPE);
- __ j(below, &slow);
-
- // Object case: Check key against length in the elements array.
- __ movp(rbx, FieldOperand(receiver, JSObject::kElementsOffset));
- // Check array bounds.
- __ SmiCompareInteger32(FieldOperand(rbx, FixedArray::kLengthOffset), key);
- // rbx: FixedArray
- __ j(above, &fast_object);
-
- // Slow case: call runtime.
- __ bind(&slow);
- __ Integer32ToSmi(key, key);
- __ bind(&slow_with_tagged_index);
- GenerateRuntimeSetProperty(masm, strict_mode);
- // Never returns to here.
-
- // Extra capacity case: Check if there is extra capacity to
- // perform the store and update the length. Used for adding one
- // element to the array by writing to array[array.length].
- __ bind(&extra);
- // receiver is a JSArray.
- // rbx: receiver's elements array (a FixedArray)
- // flags: smicompare (receiver.length(), key)
- __ j(not_equal, &slow); // do not leave holes in the array
- __ SmiCompareInteger32(FieldOperand(rbx, FixedArray::kLengthOffset), key);
- __ j(below_equal, &slow);
- // Increment index to get new length.
- __ movp(rdi, FieldOperand(rbx, HeapObject::kMapOffset));
- __ CompareRoot(rdi, Heap::kFixedArrayMapRootIndex);
- __ j(not_equal, &check_if_double_array);
- __ jmp(&fast_object_grow);
-
- __ bind(&check_if_double_array);
- // rdi: elements array's map
- __ CompareRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
- __ j(not_equal, &slow);
- __ jmp(&fast_double_grow);
-
- // Array case: Get the length and the elements array from the JS
- // array. Check that the array is in fast mode (and writable); if it
- // is the length is always a smi.
- __ bind(&array);
- // receiver is a JSArray.
- __ movp(rbx, FieldOperand(receiver, JSObject::kElementsOffset));
-
- // Check the key against the length in the array, compute the
- // address to store into and fall through to fast case.
- __ SmiCompareInteger32(FieldOperand(receiver, JSArray::kLengthOffset), key);
- __ j(below_equal, &extra);
-
- KeyedStoreGenerateGenericHelper(masm, &fast_object, &fast_double,
- &slow, kCheckMap, kDontIncrementLength);
- KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow,
- &slow, kDontCheckMap, kIncrementLength);
-}
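
The three transition blocks in the helper above encode a one-way lattice: FAST_SMI_ELEMENTS may widen to FAST_DOUBLE_ELEMENTS or FAST_ELEMENTS, and FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS, never the reverse. A sketch of the decision the stub makes before storing:

    enum ElementsKind { FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS };

    struct Value { bool is_smi; bool is_heap_number; };

    // The elements kind needed to hold 'v', given the current kind.
    ElementsKind RequiredKind(ElementsKind current, Value v) {
      if (v.is_smi) return current;  // a smi fits in any fast kind
      if (current == FAST_SMI_ELEMENTS)
        return v.is_heap_number ? FAST_DOUBLE_ELEMENTS : FAST_ELEMENTS;
      if (current == FAST_DOUBLE_ELEMENTS && !v.is_heap_number)
        return FAST_ELEMENTS;  // a non-number object forces boxed elements
      return current;
    }
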
-
-
-static Operand GenerateMappedArgumentsLookup(MacroAssembler* masm,
- Register object,
- Register key,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* unmapped_case,
- Label* slow_case) {
- Heap* heap = masm->isolate()->heap();
-
- // Check that the receiver is a JSObject. Because of the elements
- // map check later, we do not need to check for interceptors or
- // whether it requires access checks.
- __ JumpIfSmi(object, slow_case);
- // Check that the object is some kind of JSObject.
- __ CmpObjectType(object, FIRST_JS_RECEIVER_TYPE, scratch1);
- __ j(below, slow_case);
-
- // Check that the key is a positive smi.
- Condition check = masm->CheckNonNegativeSmi(key);
- __ j(NegateCondition(check), slow_case);
-
- // Load the elements into scratch1 and check its map. If not, jump
- // to the unmapped lookup with the parameter map in scratch1.
- Handle<Map> arguments_map(heap->sloppy_arguments_elements_map());
- __ movp(scratch1, FieldOperand(object, JSObject::kElementsOffset));
- __ CheckMap(scratch1, arguments_map, slow_case, DONT_DO_SMI_CHECK);
-
- // Check if element is in the range of mapped arguments.
- __ movp(scratch2, FieldOperand(scratch1, FixedArray::kLengthOffset));
- __ SmiSubConstant(scratch2, scratch2, Smi::FromInt(2));
- __ cmpp(key, scratch2);
- __ j(greater_equal, unmapped_case);
-
- // Load element index and check whether it is the hole.
- const int kHeaderSize = FixedArray::kHeaderSize + 2 * kPointerSize;
- __ SmiToInteger64(scratch3, key);
- __ movp(scratch2, FieldOperand(scratch1,
- scratch3,
- times_pointer_size,
- kHeaderSize));
- __ CompareRoot(scratch2, Heap::kTheHoleValueRootIndex);
- __ j(equal, unmapped_case);
-
- // Load value from context and return it. We can reuse scratch1 because
- // we do not jump to the unmapped lookup (which requires the parameter
- // map in scratch1).
- __ movp(scratch1, FieldOperand(scratch1, FixedArray::kHeaderSize));
- __ SmiToInteger64(scratch3, scratch2);
- return FieldOperand(scratch1,
- scratch3,
- times_pointer_size,
- Context::kHeaderSize);
-}
-
-
-static Operand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
- Register key,
- Register parameter_map,
- Register scratch,
- Label* slow_case) {
- // Element is in arguments backing store, which is referenced by the
- // second element of the parameter_map. The parameter_map register
- // must be loaded with the parameter map of the arguments object and is
- // overwritten.
- const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
- Register backing_store = parameter_map;
- __ movp(backing_store, FieldOperand(parameter_map, kBackingStoreOffset));
- Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
- __ CheckMap(backing_store, fixed_array_map, slow_case, DONT_DO_SMI_CHECK);
- __ movp(scratch, FieldOperand(backing_store, FixedArray::kLengthOffset));
- __ cmpp(key, scratch);
- __ j(greater_equal, slow_case);
- __ SmiToInteger64(scratch, key);
- return FieldOperand(backing_store,
- scratch,
- times_pointer_size,
- FixedArray::kHeaderSize);
-}
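
Both helpers walk the sloppy-arguments structure: a parameter map whose slots 0 and 1 hold the context and the backing store, followed by one context slot index per mapped parameter, with the hole marking an unmapped one. A runnable sketch with flat vectors standing in for the heap objects (the sentinel value is an assumption):

    #include <cstdint>
    #include <vector>

    static const intptr_t kTheHole = -1;  // assumed sentinel value

    // parameter_map layout as above: [context, backing store, slot_0, ...];
    // the first two slots are passed separately here for simplicity.
    intptr_t ArgumentsLoad(const std::vector<intptr_t>& parameter_map,
                           const std::vector<intptr_t>& context,
                           const std::vector<intptr_t>& backing_store,
                           size_t key) {
      if (parameter_map.size() >= 2 && key < parameter_map.size() - 2) {
        intptr_t slot = parameter_map[2 + key];
        if (slot != kTheHole)  // mapped: read through the context
          return context[static_cast<size_t>(slot)];
      }
      // Unmapped (or beyond the mapped range): plain backing-store access.
      return key < backing_store.size() ? backing_store[key] : kTheHole;
    }
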
-
-
-void KeyedLoadIC::GenerateSloppyArguments(MacroAssembler* masm) {
- // The return address is on the stack.
- Register receiver = ReceiverRegister();
- Register key = NameRegister();
- DCHECK(receiver.is(rdx));
- DCHECK(key.is(rcx));
-
- Label slow, notin;
- Operand mapped_location =
- GenerateMappedArgumentsLookup(
- masm, receiver, key, rbx, rax, rdi, ¬in, &slow);
- __ movp(rax, mapped_location);
- __ Ret();
- __ bind(¬in);
- // The unmapped lookup expects that the parameter map is in rbx.
- Operand unmapped_location =
- GenerateUnmappedArgumentsLookup(masm, key, rbx, rax, &slow);
- __ CompareRoot(unmapped_location, Heap::kTheHoleValueRootIndex);
- __ j(equal, &slow);
- __ movp(rax, unmapped_location);
- __ Ret();
- __ bind(&slow);
- GenerateMiss(masm);
-}
-
-
-void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
- // The return address is on the stack.
- Label slow, notin;
- Register receiver = ReceiverRegister();
- Register name = NameRegister();
- Register value = ValueRegister();
- DCHECK(receiver.is(rdx));
- DCHECK(name.is(rcx));
- DCHECK(value.is(rax));
-
- Operand mapped_location = GenerateMappedArgumentsLookup(
- masm, receiver, name, rbx, rdi, r8, ¬in, &slow);
- __ movp(mapped_location, value);
- __ leap(r9, mapped_location);
- __ movp(r8, value);
- __ RecordWrite(rbx,
- r9,
- r8,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- INLINE_SMI_CHECK);
- __ Ret();
- __ bind(¬in);
- // The unmapped lookup expects that the parameter map is in rbx.
- Operand unmapped_location =
- GenerateUnmappedArgumentsLookup(masm, name, rbx, rdi, &slow);
- __ movp(unmapped_location, value);
- __ leap(r9, unmapped_location);
- __ movp(r8, value);
- __ RecordWrite(rbx,
- r9,
- r8,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- INLINE_SMI_CHECK);
- __ Ret();
- __ bind(&slow);
- GenerateMiss(masm);
-}
-
-
-void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
- // The return address is on the stack.
- Register receiver = ReceiverRegister();
- Register name = NameRegister();
- DCHECK(receiver.is(rdx));
- DCHECK(name.is(rcx));
-
- // Probe the stub cache.
- Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
- Code::ComputeHandlerFlags(Code::LOAD_IC));
- masm->isolate()->stub_cache()->GenerateProbe(
- masm, flags, receiver, name, rbx, rax);
-
- GenerateMiss(masm);
-}
-
-
-void LoadIC::GenerateNormal(MacroAssembler* masm) {
- Register dictionary = rax;
- DCHECK(!dictionary.is(ReceiverRegister()));
- DCHECK(!dictionary.is(NameRegister()));
-
- Label slow;
-
- __ movp(dictionary,
- FieldOperand(ReceiverRegister(), JSObject::kPropertiesOffset));
- GenerateDictionaryLoad(masm, &slow, dictionary, NameRegister(), rbx, rdi,
- rax);
- __ ret(0);
-
- // Dictionary load failed, go slow (but don't miss).
- __ bind(&slow);
- GenerateRuntimeGetProperty(masm);
-}
-
-
-// A register that isn't one of the parameters to the load ic.
-static const Register LoadIC_TempRegister() { return rbx; }
-
-
-static const Register KeyedLoadIC_TempRegister() {
- return rbx;
-}
-
-
-void LoadIC::GenerateMiss(MacroAssembler* masm) {
- // The return address is on the stack.
-
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->load_miss(), 1);
-
- __ PopReturnAddressTo(LoadIC_TempRegister());
- __ Push(ReceiverRegister()); // receiver
- __ Push(NameRegister()); // name
- __ PushReturnAddressFrom(LoadIC_TempRegister());
-
- // Perform tail call to the entry.
- ExternalReference ref =
- ExternalReference(IC_Utility(kLoadIC_Miss), masm->isolate());
- __ TailCallExternalReference(ref, 2, 1);
-}
-
-
-void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
- // The return address is on the stack.
-
- __ PopReturnAddressTo(LoadIC_TempRegister());
- __ Push(ReceiverRegister()); // receiver
- __ Push(NameRegister()); // name
- __ PushReturnAddressFrom(LoadIC_TempRegister());
-
- // Perform tail call to the entry.
- __ TailCallRuntime(Runtime::kGetProperty, 2, 1);
-}
-
-
-void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
- // The return address is on the stack.
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->keyed_load_miss(), 1);
-
- __ PopReturnAddressTo(KeyedLoadIC_TempRegister());
- __ Push(ReceiverRegister()); // receiver
- __ Push(NameRegister()); // name
- __ PushReturnAddressFrom(KeyedLoadIC_TempRegister());
-
- // Perform tail call to the entry.
- ExternalReference ref =
- ExternalReference(IC_Utility(kKeyedLoadIC_Miss), masm->isolate());
- __ TailCallExternalReference(ref, 2, 1);
-}
-
-
-// IC register specifications
-const Register LoadIC::ReceiverRegister() { return rdx; }
-const Register LoadIC::NameRegister() { return rcx; }
-
-
-const Register LoadIC::SlotRegister() {
- DCHECK(FLAG_vector_ics);
- return rax;
-}
-
-
-const Register LoadIC::VectorRegister() {
- DCHECK(FLAG_vector_ics);
- return rbx;
-}
-
-
-const Register StoreIC::ReceiverRegister() { return rdx; }
-const Register StoreIC::NameRegister() { return rcx; }
-const Register StoreIC::ValueRegister() { return rax; }
-
-
-const Register KeyedStoreIC::MapRegister() {
- return rbx;
-}
-
-
-void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
- // The return address is on the stack.
-
- __ PopReturnAddressTo(KeyedLoadIC_TempRegister());
- __ Push(ReceiverRegister()); // receiver
- __ Push(NameRegister()); // name
- __ PushReturnAddressFrom(KeyedLoadIC_TempRegister());
-
- // Perform tail call to the entry.
- __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
-}
-
-
-void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
- // The return address is on the stack.
-
- // Probe the stub cache; receiver and name are already in their registers.
- Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
- Code::ComputeHandlerFlags(Code::STORE_IC));
- masm->isolate()->stub_cache()->GenerateProbe(
- masm, flags, ReceiverRegister(), NameRegister(), rbx, no_reg);
-
- // Cache miss: Jump to runtime.
- GenerateMiss(masm);
-}
-
-
-static void StoreIC_PushArgs(MacroAssembler* masm) {
- Register receiver = StoreIC::ReceiverRegister();
- Register name = StoreIC::NameRegister();
- Register value = StoreIC::ValueRegister();
-
- DCHECK(!rbx.is(receiver) && !rbx.is(name) && !rbx.is(value));
-
- __ PopReturnAddressTo(rbx);
- __ Push(receiver);
- __ Push(name);
- __ Push(value);
- __ PushReturnAddressFrom(rbx);
-}
-
-
-void StoreIC::GenerateMiss(MacroAssembler* masm) {
- // Return address is on the stack.
- StoreIC_PushArgs(masm);
-
- // Perform tail call to the entry.
- ExternalReference ref =
- ExternalReference(IC_Utility(kStoreIC_Miss), masm->isolate());
- __ TailCallExternalReference(ref, 3, 1);
-}
-
-
-void StoreIC::GenerateNormal(MacroAssembler* masm) {
- Register receiver = ReceiverRegister();
- Register name = NameRegister();
- Register value = ValueRegister();
- Register dictionary = rbx;
-
- Label miss;
-
- __ movp(dictionary, FieldOperand(receiver, JSObject::kPropertiesOffset));
- GenerateDictionaryStore(masm, &miss, dictionary, name, value, r8, r9);
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->store_normal_hit(), 1);
- __ ret(0);
-
- __ bind(&miss);
- __ IncrementCounter(counters->store_normal_miss(), 1);
- GenerateMiss(masm);
-}
-
-
-void StoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
- StrictMode strict_mode) {
- // Return address is on the stack.
- DCHECK(!rbx.is(ReceiverRegister()) && !rbx.is(NameRegister()) &&
- !rbx.is(ValueRegister()));
-
- __ PopReturnAddressTo(rbx);
- __ Push(ReceiverRegister());
- __ Push(NameRegister());
- __ Push(ValueRegister());
- __ Push(Smi::FromInt(strict_mode));
- __ PushReturnAddressFrom(rbx);
-
- // Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kSetProperty, 4, 1);
-}
-
-
-void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
- StrictMode strict_mode) {
- // Return address is on the stack.
- DCHECK(!rbx.is(ReceiverRegister()) && !rbx.is(NameRegister()) &&
- !rbx.is(ValueRegister()));
-
- __ PopReturnAddressTo(rbx);
- __ Push(ReceiverRegister());
- __ Push(NameRegister());
- __ Push(ValueRegister());
- __ Push(Smi::FromInt(strict_mode)); // Strict mode.
- __ PushReturnAddressFrom(rbx);
-
- // Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kSetProperty, 4, 1);
-}
-
-
-void StoreIC::GenerateSlow(MacroAssembler* masm) {
- // Return address is on the stack.
- StoreIC_PushArgs(masm);
-
- // Do tail-call to runtime routine.
- ExternalReference ref(IC_Utility(kStoreIC_Slow), masm->isolate());
- __ TailCallExternalReference(ref, 3, 1);
-}
-
-
-void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
- // Return address is on the stack.
- StoreIC_PushArgs(masm);
-
- // Do tail-call to runtime routine.
- ExternalReference ref(IC_Utility(kKeyedStoreIC_Slow), masm->isolate());
- __ TailCallExternalReference(ref, 3, 1);
-}
-
-
-void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
- // Return address is on the stack.
- StoreIC_PushArgs(masm);
-
- // Do tail-call to runtime routine.
- ExternalReference ref =
- ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
- __ TailCallExternalReference(ref, 3, 1);
-}
-
-
-#undef __
-
-
-Condition CompareIC::ComputeCondition(Token::Value op) {
- switch (op) {
- case Token::EQ_STRICT:
- case Token::EQ:
- return equal;
- case Token::LT:
- return less;
- case Token::GT:
- return greater;
- case Token::LTE:
- return less_equal;
- case Token::GTE:
- return greater_equal;
- default:
- UNREACHABLE();
- return no_condition;
- }
-}
-
-
-bool CompareIC::HasInlinedSmiCode(Address address) {
- // The address of the instruction following the call.
- Address test_instruction_address =
- address + Assembler::kCallTargetAddressOffset;
-
- // If the instruction following the call is not a test al, nothing
- // was inlined.
- return *test_instruction_address == Assembler::kTestAlByte;
-}
-
-
-void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
- // The address of the instruction following the call.
- Address test_instruction_address =
- address + Assembler::kCallTargetAddressOffset;
-
- // If the instruction following the call is not a test al, nothing
- // was inlined.
- if (*test_instruction_address != Assembler::kTestAlByte) {
- DCHECK(*test_instruction_address == Assembler::kNopByte);
- return;
- }
-
- Address delta_address = test_instruction_address + 1;
- // The byte after the test instruction encodes the delta back to the
- // short jump whose condition code is patched.
- uint8_t delta = *reinterpret_cast<uint8_t*>(delta_address);
- if (FLAG_trace_ic) {
- PrintF("[ patching ic at %p, test=%p, delta=%d\n",
- address, test_instruction_address, delta);
- }
-
- // Patch with a short conditional jump. Enabling means switching from a short
- // jump-if-carry/not-carry to jump-if-zero/not-zero, whereas disabling is the
- // reverse operation of that.
- Address jmp_address = test_instruction_address - delta;
- DCHECK((check == ENABLE_INLINED_SMI_CHECK)
- ? (*jmp_address == Assembler::kJncShortOpcode ||
- *jmp_address == Assembler::kJcShortOpcode)
- : (*jmp_address == Assembler::kJnzShortOpcode ||
- *jmp_address == Assembler::kJzShortOpcode));
- Condition cc = (check == ENABLE_INLINED_SMI_CHECK)
- ? (*jmp_address == Assembler::kJncShortOpcode ? not_zero : zero)
- : (*jmp_address == Assembler::kJnzShortOpcode ? not_carry : carry);
- *jmp_address = static_cast<byte>(Assembler::kJccShortPrefix | cc);
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_X64
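
The deleted ic-x64.cc above ends with PatchInlinedSmiCode, which toggles an
inlined smi check by rewriting a single opcode byte: the immediate of the
'test al' marker stores the distance back to a short conditional jump, and
enabling or disabling the check swaps that jump between carry-based and
zero-based conditions. A minimal C++ model of the same byte-level patch,
assuming an illustrative kCallTargetAddressOffset and operating on a plain
byte buffer rather than live code (a sketch, not V8's implementation):

#include <cassert>
#include <cstdint>

namespace sketch {

// Assumed layout constants; the real values come from the x64 Assembler.
constexpr int kCallTargetAddressOffset = 4;  // illustrative placeholder
constexpr uint8_t kTestAlByte = 0xA8;        // 'test al, imm8' opcode
constexpr uint8_t kNopByte = 0x90;           // 'nop'
constexpr uint8_t kJccShortPrefix = 0x70;    // short Jcc opcode base
constexpr uint8_t kJncShortOpcode = 0x73;    // jnc (not carry)
constexpr uint8_t kJnzShortOpcode = 0x75;    // jnz (not zero)
// x64 condition-code nibbles OR'ed into kJccShortPrefix.
constexpr uint8_t kCarry = 0x2, kNotCarry = 0x3, kZero = 0x4, kNotZero = 0x5;

enum InlinedSmiCheck { ENABLE_INLINED_SMI_CHECK, DISABLE_INLINED_SMI_CHECK };

void PatchInlinedSmiCheck(uint8_t* call_site, InlinedSmiCheck check) {
  uint8_t* test = call_site + kCallTargetAddressOffset;
  if (*test != kTestAlByte) {
    assert(*test == kNopByte);  // no smi check was inlined at this site
    return;
  }
  // The 'test al' immediate encodes the delta back to the short jump.
  uint8_t delta = test[1];
  uint8_t* jmp = test - delta;
  // Enabling switches jc/jnc to jz/jnz; disabling is the reverse.
  uint8_t cc = (check == ENABLE_INLINED_SMI_CHECK)
                   ? (*jmp == kJncShortOpcode ? kNotZero : kZero)
                   : (*jmp == kJnzShortOpcode ? kNotCarry : kCarry);
  *jmp = static_cast<uint8_t>(kJccShortPrefix | cc);
}

}  // namespace sketch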
#include "src/code-stubs.h"
#include "src/hydrogen-osr.h"
-#include "src/stub-cache.h"
#include "src/x64/lithium-codegen-x64.h"
namespace v8 {
+++ /dev/null
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/v8.h"
-
-#if V8_TARGET_ARCH_X64
-
-#include "src/arguments.h"
-#include "src/codegen.h"
-#include "src/ic-inl.h"
-#include "src/stub-cache.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-
-static void ProbeTable(Isolate* isolate,
- MacroAssembler* masm,
- Code::Flags flags,
- StubCache::Table table,
- Register receiver,
- Register name,
- // The offset is scaled by 4, based on
- // kCacheIndexShift, which is two bits.
- Register offset) {
- // We need to scale up the pointer by 2 when the offset is scaled by less
- // than the pointer size.
- DCHECK(kPointerSize == kInt64Size
- ? kPointerSizeLog2 == StubCache::kCacheIndexShift + 1
- : kPointerSizeLog2 == StubCache::kCacheIndexShift);
- ScaleFactor scale_factor = kPointerSize == kInt64Size ? times_2 : times_1;
-
- DCHECK_EQ(3 * kPointerSize, sizeof(StubCache::Entry));
- // The offset register holds the entry offset times four (due to masking
- // and shifting optimizations).
- ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
- ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
- Label miss;
-
- // Multiply by 3 because there are 3 fields per entry (name, code, map).
- __ leap(offset, Operand(offset, offset, times_2, 0));
-
- __ LoadAddress(kScratchRegister, key_offset);
-
- // Check that the key in the entry matches the name.
- // Multiply entry offset by 16 to get the entry address. Since the
- // offset register already holds the entry offset times four, multiply
- // by a further four.
- __ cmpl(name, Operand(kScratchRegister, offset, scale_factor, 0));
- __ j(not_equal, &miss);
-
- // Get the map entry from the cache.
- // Use key_offset + kPointerSize * 2, rather than loading map_offset.
- __ movp(kScratchRegister,
- Operand(kScratchRegister, offset, scale_factor, kPointerSize * 2));
- __ cmpp(kScratchRegister, FieldOperand(receiver, HeapObject::kMapOffset));
- __ j(not_equal, &miss);
-
- // Get the code entry from the cache.
- __ LoadAddress(kScratchRegister, value_offset);
- __ movp(kScratchRegister,
- Operand(kScratchRegister, offset, scale_factor, 0));
-
- // Check that the flags match what we're looking for.
- __ movl(offset, FieldOperand(kScratchRegister, Code::kFlagsOffset));
- __ andp(offset, Immediate(~Code::kFlagsNotUsedInLookup));
- __ cmpl(offset, Immediate(flags));
- __ j(not_equal, &miss);
-
-#ifdef DEBUG
- if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
- __ jmp(&miss);
- } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
- __ jmp(&miss);
- }
-#endif
-
- // Jump to the first instruction in the code stub.
- __ addp(kScratchRegister, Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ jmp(kScratchRegister);
-
- __ bind(&miss);
-}
-
-
-void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
- MacroAssembler* masm, Label* miss_label, Register receiver,
- Handle<Name> name, Register scratch0, Register scratch1) {
- DCHECK(name->IsUniqueName());
- DCHECK(!receiver.is(scratch0));
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->negative_lookups(), 1);
- __ IncrementCounter(counters->negative_lookups_miss(), 1);
-
- __ movp(scratch0, FieldOperand(receiver, HeapObject::kMapOffset));
-
- const int kInterceptorOrAccessCheckNeededMask =
- (1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded);
-
- // Bail out if the receiver has a named interceptor or requires access checks.
- __ testb(FieldOperand(scratch0, Map::kBitFieldOffset),
- Immediate(kInterceptorOrAccessCheckNeededMask));
- __ j(not_zero, miss_label);
-
- // Check that receiver is a JSObject.
- __ CmpInstanceType(scratch0, FIRST_SPEC_OBJECT_TYPE);
- __ j(below, miss_label);
-
- // Load properties array.
- Register properties = scratch0;
- __ movp(properties, FieldOperand(receiver, JSObject::kPropertiesOffset));
-
- // Check that the properties array is a dictionary.
- __ CompareRoot(FieldOperand(properties, HeapObject::kMapOffset),
- Heap::kHashTableMapRootIndex);
- __ j(not_equal, miss_label);
-
- Label done;
- NameDictionaryLookupStub::GenerateNegativeLookup(masm,
- miss_label,
- &done,
- properties,
- name,
- scratch1);
- __ bind(&done);
- __ DecrementCounter(counters->negative_lookups_miss(), 1);
-}
-
-
-void StubCache::GenerateProbe(MacroAssembler* masm,
- Code::Flags flags,
- Register receiver,
- Register name,
- Register scratch,
- Register extra,
- Register extra2,
- Register extra3) {
- Isolate* isolate = masm->isolate();
- Label miss;
- USE(extra); // The register extra is not used on the X64 platform.
- USE(extra2); // The register extra2 is not used on the X64 platform.
- USE(extra3); // The register extra3 is not used on the X64 platform.
- // Make sure that code is valid. The multiplying code relies on the
- // entry size being 3 * kPointerSize.
- DCHECK(sizeof(Entry) == 3 * kPointerSize);
-
- // Make sure the flags do not name a specific type.
- DCHECK(Code::ExtractTypeFromFlags(flags) == 0);
-
- // Make sure that there are no register conflicts.
- DCHECK(!scratch.is(receiver));
- DCHECK(!scratch.is(name));
-
- // Check that the scratch register is valid; extra2 and extra3 must be unused.
- DCHECK(!scratch.is(no_reg));
- DCHECK(extra2.is(no_reg));
- DCHECK(extra3.is(no_reg));
-
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1);
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, &miss);
-
- // Get the map of the receiver and compute the hash.
- __ movl(scratch, FieldOperand(name, Name::kHashFieldOffset));
- // Use only the low 32 bits of the map pointer.
- __ addl(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
- __ xorp(scratch, Immediate(flags));
- // We mask out the last two bits because they are not part of the hash and
- // they are always 01 for maps. The same mask is applied by the two 'and'
- // instructions below.
- __ andp(scratch, Immediate((kPrimaryTableSize - 1) << kCacheIndexShift));
-
- // Probe the primary table.
- ProbeTable(isolate, masm, flags, kPrimary, receiver, name, scratch);
-
- // Primary miss: Compute hash for secondary probe.
- __ movl(scratch, FieldOperand(name, Name::kHashFieldOffset));
- __ addl(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
- __ xorp(scratch, Immediate(flags));
- __ andp(scratch, Immediate((kPrimaryTableSize - 1) << kCacheIndexShift));
- __ subl(scratch, name);
- __ addl(scratch, Immediate(flags));
- __ andp(scratch, Immediate((kSecondaryTableSize - 1) << kCacheIndexShift));
-
- // Probe the secondary table.
- ProbeTable(isolate, masm, flags, kSecondary, receiver, name, scratch);
-
- // Cache miss: Fall-through and let caller handle the miss by
- // entering the runtime system.
- __ bind(&miss);
- __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1);
-}
-
-
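The StubCache::GenerateProbe sequence above derives two table offsets: the
primary from the name's hash field plus the low map bits, XOR'ed with the
code flags; the secondary from the primary minus the low name bits plus the
flags, each masked to the table size times the entry stride. The same
arithmetic as scalar C++, with placeholder table sizes and shift (the real
values are StubCache constants):

#include <cstdint>

namespace sketch {

// Placeholder sizes/shift; V8 defines these on StubCache.
constexpr uint32_t kCacheIndexShift = 2;       // offsets scaled by 4
constexpr uint32_t kPrimaryTableSize = 2048;   // assumed
constexpr uint32_t kSecondaryTableSize = 512;  // assumed

// Offset into the primary table: (name_hash + map) ^ flags, masked to a
// multiple of the entry stride (hence the shifted mask).
uint32_t PrimaryOffset(uint32_t name_hash_field, uint32_t map_low32,
                       uint32_t flags) {
  uint32_t h = (name_hash_field + map_low32) ^ flags;
  return h & ((kPrimaryTableSize - 1) << kCacheIndexShift);
}

// Offset into the secondary table, derived from the primary offset so the
// two probes land on different slots: primary - name + flags, masked.
uint32_t SecondaryOffset(uint32_t primary_offset, uint32_t name_low32,
                         uint32_t flags) {
  uint32_t h = primary_offset - name_low32 + flags;
  return h & ((kSecondaryTableSize - 1) << kCacheIndexShift);
}

}  // namespace sketch
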
-void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype(
- MacroAssembler* masm, int index, Register prototype, Label* miss) {
- Isolate* isolate = masm->isolate();
- // Get the global function with the given index.
- Handle<JSFunction> function(
- JSFunction::cast(isolate->native_context()->get(index)));
-
- // Check we're still in the same context.
- Register scratch = prototype;
- const int offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX);
- __ movp(scratch, Operand(rsi, offset));
- __ movp(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));
- __ Cmp(Operand(scratch, Context::SlotOffset(index)), function);
- __ j(not_equal, miss);
-
- // Load its initial map. The global functions all have initial maps.
- __ Move(prototype, Handle<Map>(function->initial_map()));
- // Load the prototype from the initial map.
- __ movp(prototype, FieldOperand(prototype, Map::kPrototypeOffset));
-}
-
-
-void NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(
- MacroAssembler* masm, Register receiver, Register result, Register scratch,
- Label* miss_label) {
- __ TryGetFunctionPrototype(receiver, result, miss_label);
- if (!result.is(rax)) __ movp(rax, result);
- __ ret(0);
-}
-
-
-static void PushInterceptorArguments(MacroAssembler* masm,
- Register receiver,
- Register holder,
- Register name,
- Handle<JSObject> holder_obj) {
- STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0);
- STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsInfoIndex == 1);
- STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 2);
- STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 3);
- STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsLength == 4);
- __ Push(name);
- Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
- DCHECK(!masm->isolate()->heap()->InNewSpace(*interceptor));
- __ Move(kScratchRegister, interceptor);
- __ Push(kScratchRegister);
- __ Push(receiver);
- __ Push(holder);
-}
-
-
-static void CompileCallLoadPropertyWithInterceptor(
- MacroAssembler* masm,
- Register receiver,
- Register holder,
- Register name,
- Handle<JSObject> holder_obj,
- IC::UtilityId id) {
- PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
- __ CallExternalReference(ExternalReference(IC_Utility(id), masm->isolate()),
- NamedLoadHandlerCompiler::kInterceptorArgsLength);
-}
-
-
-// Generate call to api function.
-void PropertyHandlerCompiler::GenerateFastApiCall(
- MacroAssembler* masm, const CallOptimization& optimization,
- Handle<Map> receiver_map, Register receiver, Register scratch_in,
- bool is_store, int argc, Register* values) {
- DCHECK(optimization.is_simple_api_call());
-
- __ PopReturnAddressTo(scratch_in);
- // receiver
- __ Push(receiver);
- // Write the arguments to stack frame.
- for (int i = 0; i < argc; i++) {
- Register arg = values[argc-1-i];
- DCHECK(!receiver.is(arg));
- DCHECK(!scratch_in.is(arg));
- __ Push(arg);
- }
- __ PushReturnAddressFrom(scratch_in);
- // Stack now matches JSFunction abi.
-
- // Abi for CallApiFunctionStub.
- Register callee = rax;
- Register call_data = rbx;
- Register holder = rcx;
- Register api_function_address = rdx;
- Register scratch = rdi; // scratch_in is no longer valid.
-
- // Put holder in place.
- CallOptimization::HolderLookup holder_lookup;
- Handle<JSObject> api_holder = optimization.LookupHolderOfExpectedType(
- receiver_map,
- &holder_lookup);
- switch (holder_lookup) {
- case CallOptimization::kHolderIsReceiver:
- __ Move(holder, receiver);
- break;
- case CallOptimization::kHolderFound:
- __ Move(holder, api_holder);
- break;
- case CallOptimization::kHolderNotFound:
- UNREACHABLE();
- break;
- }
-
- Isolate* isolate = masm->isolate();
- Handle<JSFunction> function = optimization.constant_function();
- Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
- Handle<Object> call_data_obj(api_call_info->data(), isolate);
-
- // Put callee in place.
- __ Move(callee, function);
-
- bool call_data_undefined = false;
- // Put call_data in place.
- if (isolate->heap()->InNewSpace(*call_data_obj)) {
- __ Move(scratch, api_call_info);
- __ movp(call_data, FieldOperand(scratch, CallHandlerInfo::kDataOffset));
- } else if (call_data_obj->IsUndefined()) {
- call_data_undefined = true;
- __ LoadRoot(call_data, Heap::kUndefinedValueRootIndex);
- } else {
- __ Move(call_data, call_data_obj);
- }
-
- // Put api_function_address in place.
- Address function_address = v8::ToCData<Address>(api_call_info->callback());
- __ Move(
- api_function_address, function_address, RelocInfo::EXTERNAL_REFERENCE);
-
- // Jump to stub.
- CallApiFunctionStub stub(isolate, is_store, call_data_undefined, argc);
- __ TailCallStub(&stub);
-}
-
-
-void PropertyHandlerCompiler::GenerateCheckPropertyCell(
- MacroAssembler* masm, Handle<JSGlobalObject> global, Handle<Name> name,
- Register scratch, Label* miss) {
- Handle<PropertyCell> cell =
- JSGlobalObject::EnsurePropertyCell(global, name);
- DCHECK(cell->value()->IsTheHole());
- __ Move(scratch, cell);
- __ Cmp(FieldOperand(scratch, Cell::kValueOffset),
- masm->isolate()->factory()->the_hole_value());
- __ j(not_equal, miss);
-}
-
-
-void PropertyAccessCompiler::GenerateTailCall(MacroAssembler* masm,
- Handle<Code> code) {
- __ jmp(code, RelocInfo::CODE_TARGET);
-}
-
-
-#undef __
-#define __ ACCESS_MASM((masm()))
-
-
-void NamedStoreHandlerCompiler::GenerateRestoreName(Label* label,
- Handle<Name> name) {
- if (!label->is_unused()) {
- __ bind(label);
- __ Move(this->name(), name);
- }
-}
-
-
- // The receiver register is preserved on jumps to miss_label, but may be
- // destroyed if the store is successful.
-void NamedStoreHandlerCompiler::GenerateStoreTransition(
- Handle<Map> transition, Handle<Name> name, Register receiver_reg,
- Register storage_reg, Register value_reg, Register scratch1,
- Register scratch2, Register unused, Label* miss_label, Label* slow) {
- int descriptor = transition->LastAdded();
- DescriptorArray* descriptors = transition->instance_descriptors();
- PropertyDetails details = descriptors->GetDetails(descriptor);
- Representation representation = details.representation();
- DCHECK(!representation.IsNone());
-
- if (details.type() == CONSTANT) {
- Handle<Object> constant(descriptors->GetValue(descriptor), isolate());
- __ Cmp(value_reg, constant);
- __ j(not_equal, miss_label);
- } else if (representation.IsSmi()) {
- __ JumpIfNotSmi(value_reg, miss_label);
- } else if (representation.IsHeapObject()) {
- __ JumpIfSmi(value_reg, miss_label);
- HeapType* field_type = descriptors->GetFieldType(descriptor);
- HeapType::Iterator<Map> it = field_type->Classes();
- if (!it.Done()) {
- Label do_store;
- while (true) {
- __ CompareMap(value_reg, it.Current());
- it.Advance();
- if (it.Done()) {
- __ j(not_equal, miss_label);
- break;
- }
- __ j(equal, &do_store, Label::kNear);
- }
- __ bind(&do_store);
- }
- } else if (representation.IsDouble()) {
- Label do_store, heap_number;
- __ AllocateHeapNumber(storage_reg, scratch1, slow, MUTABLE);
-
- __ JumpIfNotSmi(value_reg, &heap_number);
- __ SmiToInteger32(scratch1, value_reg);
- __ Cvtlsi2sd(xmm0, scratch1);
- __ jmp(&do_store);
-
- __ bind(&heap_number);
- __ CheckMap(value_reg, isolate()->factory()->heap_number_map(), miss_label,
- DONT_DO_SMI_CHECK);
- __ movsd(xmm0, FieldOperand(value_reg, HeapNumber::kValueOffset));
-
- __ bind(&do_store);
- __ movsd(FieldOperand(storage_reg, HeapNumber::kValueOffset), xmm0);
- }
-
- // Stub never generated for objects that require access checks.
- DCHECK(!transition->is_access_check_needed());
-
- // Perform map transition for the receiver if necessary.
- if (details.type() == FIELD &&
- Map::cast(transition->GetBackPointer())->unused_property_fields() == 0) {
- // The properties must be extended before we can store the value.
- // We jump to a runtime call that extends the properties array.
- __ PopReturnAddressTo(scratch1);
- __ Push(receiver_reg);
- __ Push(transition);
- __ Push(value_reg);
- __ PushReturnAddressFrom(scratch1);
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage),
- isolate()),
- 3, 1);
- return;
- }
-
- // Update the map of the object.
- __ Move(scratch1, transition);
- __ movp(FieldOperand(receiver_reg, HeapObject::kMapOffset), scratch1);
-
- // Update the write barrier for the map field.
- __ RecordWriteField(receiver_reg,
- HeapObject::kMapOffset,
- scratch1,
- scratch2,
- kDontSaveFPRegs,
- OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
-
- if (details.type() == CONSTANT) {
- DCHECK(value_reg.is(rax));
- __ ret(0);
- return;
- }
-
- int index = transition->instance_descriptors()->GetFieldIndex(
- transition->LastAdded());
-
- // Adjust for the number of properties stored in the object. Even in the
- // face of a transition we can use the old map here because the size of the
- // object and the number of in-object properties are not going to change.
- index -= transition->inobject_properties();
-
- // TODO(verwaest): Share this code as a code stub.
- SmiCheck smi_check = representation.IsTagged()
- ? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
- if (index < 0) {
- // Set the property straight into the object.
- int offset = transition->instance_size() + (index * kPointerSize);
- if (representation.IsDouble()) {
- __ movp(FieldOperand(receiver_reg, offset), storage_reg);
- } else {
- __ movp(FieldOperand(receiver_reg, offset), value_reg);
- }
-
- if (!representation.IsSmi()) {
- // Update the write barrier for the array address.
- if (!representation.IsDouble()) {
- __ movp(storage_reg, value_reg);
- }
- __ RecordWriteField(
- receiver_reg, offset, storage_reg, scratch1, kDontSaveFPRegs,
- EMIT_REMEMBERED_SET, smi_check);
- }
- } else {
- // Write to the properties array.
- int offset = index * kPointerSize + FixedArray::kHeaderSize;
- // Get the properties array (optimistically).
- __ movp(scratch1, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
- if (representation.IsDouble()) {
- __ movp(FieldOperand(scratch1, offset), storage_reg);
- } else {
- __ movp(FieldOperand(scratch1, offset), value_reg);
- }
-
- if (!representation.IsSmi()) {
- // Update the write barrier for the array address.
- if (!representation.IsDouble()) {
- __ movp(storage_reg, value_reg);
- }
- __ RecordWriteField(
- scratch1, offset, storage_reg, receiver_reg, kDontSaveFPRegs,
- EMIT_REMEMBERED_SET, smi_check);
- }
- }
-
- // Return the value (register rax).
- DCHECK(value_reg.is(rax));
- __ ret(0);
-}
-
-
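GenerateStoreTransition above rebases the descriptor's field index by the
number of in-object properties: negative rebased indices address slots
inside the object (counting back from the end of the instance), while
non-negative ones index the out-of-line properties array. Restated as plain
C++ (names and the header-size constant are illustrative):

#include <cstdint>

namespace sketch {

constexpr int kPointerSize = 8;            // x64
constexpr int kFixedArrayHeaderSize = 16;  // assumed header size

struct FieldAddress {
  bool in_object;  // true: offset is relative to the object itself
  int offset;      // byte offset (untagged)
};

// Mirrors the index adjustment in GenerateStoreTransition: after rebasing,
// negative indices live inside the object, the rest in the properties array.
FieldAddress ComputeFieldAddress(int field_index, int inobject_properties,
                                 int instance_size) {
  int index = field_index - inobject_properties;
  if (index < 0) {
    // In-object slots grow backwards from the end of the instance.
    return {true, instance_size + index * kPointerSize};
  }
  return {false, index * kPointerSize + kFixedArrayHeaderSize};
}

}  // namespace sketch
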
-void NamedStoreHandlerCompiler::GenerateStoreField(LookupIterator* lookup,
- Register value_reg,
- Label* miss_label) {
- DCHECK(lookup->representation().IsHeapObject());
- __ JumpIfSmi(value_reg, miss_label);
- HeapType::Iterator<Map> it = lookup->GetFieldType()->Classes();
- Label do_store;
- while (true) {
- __ CompareMap(value_reg, it.Current());
- it.Advance();
- if (it.Done()) {
- __ j(not_equal, miss_label);
- break;
- }
- __ j(equal, &do_store, Label::kNear);
- }
- __ bind(&do_store);
-
- StoreFieldStub stub(isolate(), lookup->GetFieldIndex(),
- lookup->representation());
- GenerateTailCall(masm(), stub.GetCode());
-}
-
-
-Register PropertyHandlerCompiler::CheckPrototypes(
- Register object_reg, Register holder_reg, Register scratch1,
- Register scratch2, Handle<Name> name, Label* miss,
- PrototypeCheckType check) {
- Handle<Map> receiver_map(IC::TypeToMap(*type(), isolate()));
-
- // Make sure there's no overlap between holder and object registers.
- DCHECK(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
- DCHECK(!scratch2.is(object_reg) && !scratch2.is(holder_reg)
- && !scratch2.is(scratch1));
-
- // Keep track of the current object in register reg. On the first
- // iteration, reg is an alias for object_reg, on later iterations,
- // it is an alias for holder_reg.
- Register reg = object_reg;
- int depth = 0;
-
- Handle<JSObject> current = Handle<JSObject>::null();
- if (type()->IsConstant()) {
- current = Handle<JSObject>::cast(type()->AsConstant()->Value());
- }
- Handle<JSObject> prototype = Handle<JSObject>::null();
- Handle<Map> current_map = receiver_map;
- Handle<Map> holder_map(holder()->map());
- // Traverse the prototype chain and check the maps in the prototype chain for
- // fast and global objects or do negative lookup for normal objects.
- while (!current_map.is_identical_to(holder_map)) {
- ++depth;
-
- // Only global objects and objects that do not require access
- // checks are allowed in stubs.
- DCHECK(current_map->IsJSGlobalProxyMap() ||
- !current_map->is_access_check_needed());
-
- prototype = handle(JSObject::cast(current_map->prototype()));
- if (current_map->is_dictionary_map() &&
- !current_map->IsJSGlobalObjectMap()) {
- DCHECK(!current_map->IsJSGlobalProxyMap()); // Proxy maps are fast.
- if (!name->IsUniqueName()) {
- DCHECK(name->IsString());
- name = factory()->InternalizeString(Handle<String>::cast(name));
- }
- DCHECK(current.is_null() ||
- current->property_dictionary()->FindEntry(name) ==
- NameDictionary::kNotFound);
-
- GenerateDictionaryNegativeLookup(masm(), miss, reg, name,
- scratch1, scratch2);
-
- __ movp(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
- reg = holder_reg; // From now on the object will be in holder_reg.
- __ movp(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
- } else {
- bool in_new_space = heap()->InNewSpace(*prototype);
- // Two possible reasons for loading the prototype from the map:
- // (1) Can't store references to new space in code.
- // (2) Handler is shared for all receivers with the same prototype
- // map (but not necessarily the same prototype instance).
- bool load_prototype_from_map = in_new_space || depth == 1;
- if (load_prototype_from_map) {
- // Save the map in scratch1 for later.
- __ movp(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
- }
- if (depth != 1 || check == CHECK_ALL_MAPS) {
- __ CheckMap(reg, current_map, miss, DONT_DO_SMI_CHECK);
- }
-
- // Check access rights to the global object. This has to happen after
- // the map check so that we know that the object is actually a global
- // object.
- // This allows us to install generated handlers for accesses to the
- // global proxy (as opposed to using slow ICs). See corresponding code
- // in LookupForRead().
- if (current_map->IsJSGlobalProxyMap()) {
- __ CheckAccessGlobalProxy(reg, scratch2, miss);
- } else if (current_map->IsJSGlobalObjectMap()) {
- GenerateCheckPropertyCell(
- masm(), Handle<JSGlobalObject>::cast(current), name,
- scratch2, miss);
- }
- reg = holder_reg; // From now on the object will be in holder_reg.
-
- if (load_prototype_from_map) {
- __ movp(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
- } else {
- __ Move(reg, prototype);
- }
- }
-
- // Go to the next object in the prototype chain.
- current = prototype;
- current_map = handle(current->map());
- }
-
- // Log the check depth.
- LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
-
- if (depth != 0 || check == CHECK_ALL_MAPS) {
- // Check the holder map.
- __ CheckMap(reg, current_map, miss, DONT_DO_SMI_CHECK);
- }
-
- // Perform security check for access to the global object.
- DCHECK(current_map->IsJSGlobalProxyMap() ||
- !current_map->is_access_check_needed());
- if (current_map->IsJSGlobalProxyMap()) {
- __ CheckAccessGlobalProxy(reg, scratch1, miss);
- }
-
- // Return the register containing the holder.
- return reg;
-}
-
-
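CheckPrototypes above either embeds each prototype object directly in the
handler or reloads it from the previously saved map. The decision, restated
as a standalone predicate (illustrative names, same logic):

// Whether a handler must reload the prototype from the map rather than
// embedding the prototype object itself.
bool LoadPrototypeFromMap(bool prototype_in_new_space, int depth) {
  // (1) Code objects cannot hold references into new space.
  // (2) At depth 1 the handler is shared across all receivers with the same
  //     prototype map, so the concrete prototype instance may differ.
  return prototype_in_new_space || depth == 1;
}
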
-void NamedLoadHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
- if (!miss->is_unused()) {
- Label success;
- __ jmp(&success);
- __ bind(miss);
- TailCallBuiltin(masm(), MissBuiltin(kind()));
- __ bind(&success);
- }
-}
-
-
-void NamedStoreHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
- if (!miss->is_unused()) {
- Label success;
- __ jmp(&success);
- GenerateRestoreName(miss, name);
- TailCallBuiltin(masm(), MissBuiltin(kind()));
- __ bind(&success);
- }
-}
-
-
-void NamedLoadHandlerCompiler::GenerateLoadCallback(
- Register reg, Handle<ExecutableAccessorInfo> callback) {
- // Insert additional parameters into the stack frame above return address.
- DCHECK(!scratch4().is(reg));
- __ PopReturnAddressTo(scratch4());
-
- STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 0);
- STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 1);
- STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 2);
- STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 3);
- STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 4);
- STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 5);
- STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 6);
- __ Push(receiver()); // receiver
- if (heap()->InNewSpace(callback->data())) {
- DCHECK(!scratch2().is(reg));
- __ Move(scratch2(), callback);
- __ Push(FieldOperand(scratch2(),
- ExecutableAccessorInfo::kDataOffset)); // data
- } else {
- __ Push(Handle<Object>(callback->data(), isolate()));
- }
- DCHECK(!kScratchRegister.is(reg));
- __ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
- __ Push(kScratchRegister); // return value
- __ Push(kScratchRegister); // return value default
- __ PushAddress(ExternalReference::isolate_address(isolate()));
- __ Push(reg); // holder
- __ Push(name()); // name
- // Save a pointer to where we pushed the arguments pointer. This will be
- // passed as the const PropertyAccessorInfo& to the C++ callback.
-
- __ PushReturnAddressFrom(scratch4());
-
- // Abi for CallApiGetter
- Register api_function_address = r8;
- Address getter_address = v8::ToCData<Address>(callback->getter());
- __ Move(api_function_address, getter_address, RelocInfo::EXTERNAL_REFERENCE);
-
- CallApiGetterStub stub(isolate());
- __ TailCallStub(&stub);
-}
-
-
-void NamedLoadHandlerCompiler::GenerateLoadConstant(Handle<Object> value) {
- // Return the constant value.
- __ Move(rax, value);
- __ ret(0);
-}
-
-
-void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
- LookupIterator* it, Register holder_reg) {
- DCHECK(holder()->HasNamedInterceptor());
- DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined());
-
- // Compile the interceptor call, followed by inline code to load the
- // property from further up the prototype chain if the call fails.
- // Check that the maps haven't changed.
- DCHECK(holder_reg.is(receiver()) || holder_reg.is(scratch1()));
-
- // Preserve the receiver register explicitly whenever it is different from the
- // holder and it is needed should the interceptor return without any result.
- // The ACCESSOR case needs the receiver to be passed into C++ code; the
- // FIELD case might cause a miss during the prototype check.
- bool must_perform_prototype_check =
- !holder().is_identical_to(it->GetHolder<JSObject>());
- bool must_preserve_receiver_reg =
- !receiver().is(holder_reg) &&
- (it->property_kind() == LookupIterator::ACCESSOR ||
- must_perform_prototype_check);
-
- // Save necessary data before invoking an interceptor.
- // Requires a frame to make GC aware of pushed pointers.
- {
- FrameScope frame_scope(masm(), StackFrame::INTERNAL);
-
- if (must_preserve_receiver_reg) {
- __ Push(receiver());
- }
- __ Push(holder_reg);
- __ Push(this->name());
-
- // Invoke the interceptor. Note: the map checks from the receiver to the
- // interceptor's holder have already been compiled (see the caller of
- // this method).
- CompileCallLoadPropertyWithInterceptor(
- masm(), receiver(), holder_reg, this->name(), holder(),
- IC::kLoadPropertyWithInterceptorOnly);
-
- // Check if interceptor provided a value for property. If it's
- // the case, return immediately.
- Label interceptor_failed;
- __ CompareRoot(rax, Heap::kNoInterceptorResultSentinelRootIndex);
- __ j(equal, &interceptor_failed);
- frame_scope.GenerateLeaveFrame();
- __ ret(0);
-
- __ bind(&interceptor_failed);
- __ Pop(this->name());
- __ Pop(holder_reg);
- if (must_preserve_receiver_reg) {
- __ Pop(receiver());
- }
-
- // Leave the internal frame.
- }
-
- GenerateLoadPostInterceptor(it, holder_reg);
-}
-
-
-void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
- // Call the runtime system to load the interceptor.
- DCHECK(holder()->HasNamedInterceptor());
- DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined());
- __ PopReturnAddressTo(scratch2());
- PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(),
- holder());
- __ PushReturnAddressFrom(scratch2());
-
- ExternalReference ref = ExternalReference(
- IC_Utility(IC::kLoadPropertyWithInterceptor), isolate());
- __ TailCallExternalReference(
- ref, NamedLoadHandlerCompiler::kInterceptorArgsLength, 1);
-}
-
-
-Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
- Handle<JSObject> object, Handle<Name> name,
- Handle<ExecutableAccessorInfo> callback) {
- Register holder_reg = Frontend(receiver(), name);
-
- __ PopReturnAddressTo(scratch1());
- __ Push(receiver());
- __ Push(holder_reg);
- __ Push(callback); // callback info
- __ Push(name);
- __ Push(value());
- __ PushReturnAddressFrom(scratch1());
-
- // Do tail-call to the runtime system.
- ExternalReference store_callback_property =
- ExternalReference(IC_Utility(IC::kStoreCallbackProperty), isolate());
- __ TailCallExternalReference(store_callback_property, 5, 1);
-
- // Return the generated code.
- return GetCode(kind(), Code::FAST, name);
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm)
-
-
-void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
- MacroAssembler* masm, Handle<HeapType> type, Register receiver,
- Handle<JSFunction> setter) {
- // ----------- S t a t e -------------
- // -- rsp[0] : return address
- // -----------------------------------
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Save value register, so we can restore it later.
- __ Push(value());
-
- if (!setter.is_null()) {
- // Call the JavaScript setter with receiver and value on the stack.
- if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
- // Swap in the global receiver.
- __ movp(receiver,
- FieldOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
- }
- __ Push(receiver);
- __ Push(value());
- ParameterCount actual(1);
- ParameterCount expected(setter);
- __ InvokeFunction(setter, expected, actual,
- CALL_FUNCTION, NullCallWrapper());
- } else {
- // If we generate a global code snippet for deoptimization only, remember
- // the place to continue after deoptimization.
- masm->isolate()->heap()->SetSetterStubDeoptPCOffset(masm->pc_offset());
- }
-
- // We have to return the passed value, not the return value of the setter.
- __ Pop(rax);
-
- // Restore context register.
- __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- }
- __ ret(0);
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm())
-
-
-Handle<Code> NamedStoreHandlerCompiler::CompileStoreInterceptor(
- Handle<Name> name) {
- __ PopReturnAddressTo(scratch1());
- __ Push(receiver());
- __ Push(this->name());
- __ Push(value());
- __ PushReturnAddressFrom(scratch1());
-
- // Do tail-call to the runtime system.
- ExternalReference store_ic_property = ExternalReference(
- IC_Utility(IC::kStorePropertyWithInterceptor), isolate());
- __ TailCallExternalReference(store_ic_property, 3, 1);
-
- // Return the generated code.
- return GetCode(kind(), Code::FAST, name);
-}
-
-
-Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
- MapHandleList* receiver_maps, CodeHandleList* handler_stubs,
- MapHandleList* transitioned_maps) {
- Label miss;
- __ JumpIfSmi(receiver(), &miss, Label::kNear);
-
- __ movp(scratch1(), FieldOperand(receiver(), HeapObject::kMapOffset));
- int receiver_count = receiver_maps->length();
- for (int i = 0; i < receiver_count; ++i) {
- // Check map and tail call if there's a match.
- __ Cmp(scratch1(), receiver_maps->at(i));
- if (transitioned_maps->at(i).is_null()) {
- __ j(equal, handler_stubs->at(i), RelocInfo::CODE_TARGET);
- } else {
- Label next_map;
- __ j(not_equal, &next_map, Label::kNear);
- __ Move(transition_map(),
- transitioned_maps->at(i),
- RelocInfo::EMBEDDED_OBJECT);
- __ jmp(handler_stubs->at(i), RelocInfo::CODE_TARGET);
- __ bind(&next_map);
- }
- }
-
- __ bind(&miss);
-
- TailCallBuiltin(masm(), MissBuiltin(kind()));
-
- // Return the generated code.
- return GetCode(kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC);
-}
-
-
-Register* PropertyAccessCompiler::load_calling_convention() {
- // receiver, name, scratch1, scratch2, scratch3, scratch4.
- Register receiver = LoadIC::ReceiverRegister();
- Register name = LoadIC::NameRegister();
- static Register registers[] = { receiver, name, rax, rbx, rdi, r8 };
- return registers;
-}
-
-
-Register* PropertyAccessCompiler::store_calling_convention() {
- // receiver, name, scratch1, scratch2, scratch3.
- Register receiver = KeyedStoreIC::ReceiverRegister();
- Register name = KeyedStoreIC::NameRegister();
- DCHECK(rbx.is(KeyedStoreIC::MapRegister()));
- static Register registers[] = { receiver, name, rbx, rdi, r8 };
- return registers;
-}
-
-
-Register NamedStoreHandlerCompiler::value() { return StoreIC::ValueRegister(); }
-
-
-#undef __
-#define __ ACCESS_MASM(masm)
-
-
-void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
- MacroAssembler* masm, Handle<HeapType> type, Register receiver,
- Handle<JSFunction> getter) {
- // ----------- S t a t e -------------
- // -- rax : receiver
- // -- rcx : name
- // -- rsp[0] : return address
- // -----------------------------------
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- if (!getter.is_null()) {
- // Call the JavaScript getter with the receiver on the stack.
- if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
- // Swap in the global receiver.
- __ movp(receiver,
- FieldOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
- }
- __ Push(receiver);
- ParameterCount actual(0);
- ParameterCount expected(getter);
- __ InvokeFunction(getter, expected, actual,
- CALL_FUNCTION, NullCallWrapper());
- } else {
- // If we generate a global code snippet for deoptimization only, remember
- // the place to continue after deoptimization.
- masm->isolate()->heap()->SetGetterStubDeoptPCOffset(masm->pc_offset());
- }
-
- // Restore context register.
- __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- }
- __ ret(0);
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm())
-
-
-Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
- Handle<PropertyCell> cell, Handle<Name> name, bool is_configurable) {
- Label miss;
- FrontendHeader(receiver(), name, &miss);
-
- // Get the value from the cell.
- Register result = StoreIC::ValueRegister();
- __ Move(result, cell);
- __ movp(result, FieldOperand(result, PropertyCell::kValueOffset));
-
- // Check for deleted property if property can actually be deleted.
- if (is_configurable) {
- __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
- __ j(equal, &miss);
- } else if (FLAG_debug_code) {
- __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
- __ Check(not_equal, kDontDeleteCellsCannotContainTheHole);
- }
-
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->named_load_global_stub(), 1);
- __ ret(0);
-
- FrontendFooter(name, &miss);
-
- // Return the generated code.
- return GetCode(kind(), Code::NORMAL, name);
-}
-
-
-Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types,
- CodeHandleList* handlers,
- Handle<Name> name,
- Code::StubType type,
- IcCheckType check) {
- Label miss;
-
- if (check == PROPERTY &&
- (kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) {
- // In case we are compiling an IC for dictionary loads and stores, just
- // check whether the name is unique.
- if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) {
- __ JumpIfNotUniqueName(this->name(), &miss);
- } else {
- __ Cmp(this->name(), name);
- __ j(not_equal, &miss);
- }
- }
-
- Label number_case;
- Label* smi_target = IncludesNumberType(types) ? &number_case : &miss;
- __ JumpIfSmi(receiver(), smi_target);
-
- // Polymorphic keyed stores may use the map register.
- Register map_reg = scratch1();
- DCHECK(kind() != Code::KEYED_STORE_IC ||
- map_reg.is(KeyedStoreIC::MapRegister()));
- __ movp(map_reg, FieldOperand(receiver(), HeapObject::kMapOffset));
- int receiver_count = types->length();
- int number_of_handled_maps = 0;
- for (int current = 0; current < receiver_count; ++current) {
- Handle<HeapType> type = types->at(current);
- Handle<Map> map = IC::TypeToMap(*type, isolate());
- if (!map->is_deprecated()) {
- number_of_handled_maps++;
- // Check map and tail call if there's a match.
- __ Cmp(map_reg, map);
- if (type->Is(HeapType::Number())) {
- DCHECK(!number_case.is_unused());
- __ bind(&number_case);
- }
- __ j(equal, handlers->at(current), RelocInfo::CODE_TARGET);
- }
- }
- DCHECK(number_of_handled_maps > 0);
-
- __ bind(&miss);
- TailCallBuiltin(masm(), MissBuiltin(kind()));
-
- // Return the generated code.
- InlineCacheState state =
- number_of_handled_maps > 1 ? POLYMORPHIC : MONOMORPHIC;
- return GetCode(kind(), type, name, state);
-}
-
-
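CompilePolymorphic above emits a compare-and-jump chain over the handled
maps, skipping deprecated ones and binding the smi fast path at the heap
number map's comparison. The dispatch itself reduces to a linear map match;
a C++ model with placeholder types:

#include <cstddef>

namespace sketch {

struct Map;  // opaque stand-in for v8::internal::Map
using Handler = void (*)();

// Linear dispatch mirroring the compare-and-jump chain: return the handler
// whose map matches the receiver's map, or the miss builtin otherwise.
Handler SelectHandler(const Map* receiver_map, const Map* const* maps,
                      const Handler* handlers, size_t count, Handler miss) {
  for (size_t i = 0; i < count; ++i) {
    if (maps[i] == receiver_map) return handlers[i];
  }
  return miss;
}

}  // namespace sketch
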
-#undef __
-#define __ ACCESS_MASM(masm)
-
-
-void ElementHandlerCompiler::GenerateLoadDictionaryElement(
- MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rcx : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- DCHECK(rdx.is(LoadIC::ReceiverRegister()));
- DCHECK(rcx.is(LoadIC::NameRegister()));
- Label slow, miss;
-
- // This stub is meant to be tail-jumped to; the receiver must already
- // have been verified by the caller not to be a smi.
-
- __ JumpIfNotSmi(rcx, &miss);
- __ SmiToInteger32(rbx, rcx);
- __ movp(rax, FieldOperand(rdx, JSObject::kElementsOffset));
-
- // Check whether the elements is a number dictionary.
- // rdx: receiver
- // rcx: key
- // rbx: key as untagged int32
- // rax: elements
- __ LoadFromNumberDictionary(&slow, rax, rcx, rbx, r9, rdi, rax);
- __ ret(0);
-
- __ bind(&slow);
- // ----------- S t a t e -------------
- // -- rcx : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Slow);
-
- __ bind(&miss);
- // ----------- S t a t e -------------
- // -- rcx : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Miss);
-}
-
-
-#undef __
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_X64
#include "src/debug.h"
#include "src/deoptimizer.h"
#include "src/frames.h"
-#include "src/stub-cache.h"
#include "src/utils.h"
#include "test/cctest/cctest.h"
#include "src/debug.h"
#include "src/deoptimizer.h"
#include "src/isolate.h"
-#include "src/stub-cache.h"
#include "test/cctest/cctest.h"
using ::v8::base::OS;
#include "src/debug.h"
#include "src/disasm.h"
#include "src/disassembler.h"
+#include "src/ic/ic.h"
#include "src/macro-assembler.h"
#include "src/serialize.h"
-#include "src/stub-cache.h"
#include "test/cctest/cctest.h"
using namespace v8::internal;
#include "src/debug.h"
#include "src/disasm.h"
#include "src/disassembler.h"
+#include "src/ic/ic.h"
#include "src/macro-assembler.h"
#include "src/serialize.h"
-#include "src/stub-cache.h"
#include "test/cctest/cctest.h"
using namespace v8::internal;
#include "src/execution.h"
#include "src/factory.h"
#include "src/global-handles.h"
+#include "src/ic/ic.h"
#include "src/macro-assembler.h"
-#include "src/stub-cache.h"
#include "test/cctest/cctest.h"
using namespace v8::internal;
#include "src/compilation-cache.h"
#include "src/debug.h"
#include "src/heap/spaces.h"
-#include "src/ic-inl.h"
#include "src/natives.h"
#include "src/objects.h"
#include "src/runtime.h"
'../../src/i18n.h',
'../../src/icu_util.cc',
'../../src/icu_util.h',
- '../../src/ic-inl.h',
- '../../src/ic.cc',
- '../../src/ic.h',
+ '../../src/ic/ic-inl.h',
+ '../../src/ic/ic.cc',
+ '../../src/ic/ic.h',
+ '../../src/ic/ic-compiler.cc',
+ '../../src/ic/ic-compiler.h',
'../../src/interface.cc',
'../../src/interface.h',
'../../src/interpreter-irregexp.cc',
'../../src/string-stream.h',
'../../src/strtod.cc',
'../../src/strtod.h',
- '../../src/stub-cache.cc',
- '../../src/stub-cache.h',
+ '../../src/ic/stub-cache.cc',
+ '../../src/ic/stub-cache.h',
'../../src/token.cc',
'../../src/token.h',
'../../src/transitions-inl.h',
'../../src/arm/frames-arm.cc',
'../../src/arm/frames-arm.h',
'../../src/arm/full-codegen-arm.cc',
- '../../src/arm/ic-arm.cc',
'../../src/arm/lithium-arm.cc',
'../../src/arm/lithium-arm.h',
'../../src/arm/lithium-codegen-arm.cc',
'../../src/arm/regexp-macro-assembler-arm.cc',
'../../src/arm/regexp-macro-assembler-arm.h',
'../../src/arm/simulator-arm.cc',
- '../../src/arm/stub-cache-arm.cc',
'../../src/compiler/arm/code-generator-arm.cc',
'../../src/compiler/arm/instruction-codes-arm.h',
'../../src/compiler/arm/instruction-selector-arm.cc',
'../../src/compiler/arm/linkage-arm.cc',
+ '../../src/ic/arm/ic-arm.cc',
+ '../../src/ic/arm/ic-compiler-arm.cc',
+ '../../src/ic/arm/stub-cache-arm.cc',
],
}],
['v8_target_arch=="arm64"', {
'../../src/arm64/frames-arm64.cc',
'../../src/arm64/frames-arm64.h',
'../../src/arm64/full-codegen-arm64.cc',
- '../../src/arm64/ic-arm64.cc',
'../../src/arm64/instructions-arm64.cc',
'../../src/arm64/instructions-arm64.h',
'../../src/arm64/instrument-arm64.cc',
'../../src/arm64/regexp-macro-assembler-arm64.h',
'../../src/arm64/simulator-arm64.cc',
'../../src/arm64/simulator-arm64.h',
- '../../src/arm64/stub-cache-arm64.cc',
'../../src/arm64/utils-arm64.cc',
'../../src/arm64/utils-arm64.h',
'../../src/compiler/arm64/code-generator-arm64.cc',
'../../src/compiler/arm64/instruction-codes-arm64.h',
'../../src/compiler/arm64/instruction-selector-arm64.cc',
'../../src/compiler/arm64/linkage-arm64.cc',
+ '../../src/ic/arm64/ic-arm64.cc',
+ '../../src/ic/arm64/ic-compiler-arm64.cc',
+ '../../src/ic/arm64/stub-cache-arm64.cc',
],
}],
['v8_target_arch=="ia32"', {
'../../src/ia32/frames-ia32.cc',
'../../src/ia32/frames-ia32.h',
'../../src/ia32/full-codegen-ia32.cc',
- '../../src/ia32/ic-ia32.cc',
'../../src/ia32/lithium-codegen-ia32.cc',
'../../src/ia32/lithium-codegen-ia32.h',
'../../src/ia32/lithium-gap-resolver-ia32.cc',
'../../src/ia32/macro-assembler-ia32.h',
'../../src/ia32/regexp-macro-assembler-ia32.cc',
'../../src/ia32/regexp-macro-assembler-ia32.h',
- '../../src/ia32/stub-cache-ia32.cc',
'../../src/compiler/ia32/code-generator-ia32.cc',
'../../src/compiler/ia32/instruction-codes-ia32.h',
'../../src/compiler/ia32/instruction-selector-ia32.cc',
'../../src/compiler/ia32/linkage-ia32.cc',
+ '../../src/ic/ia32/ic-ia32.cc',
+ '../../src/ic/ia32/ic-compiler-ia32.cc',
+ '../../src/ic/ia32/stub-cache-ia32.cc',
],
}],
['v8_target_arch=="x87"', {
'../../src/x64/frames-x64.cc',
'../../src/x64/frames-x64.h',
'../../src/x64/full-codegen-x64.cc',
- '../../src/x64/ic-x64.cc',
'../../src/x64/lithium-codegen-x64.cc',
'../../src/x64/lithium-codegen-x64.h',
'../../src/x64/lithium-gap-resolver-x64.cc',
'../../src/x64/macro-assembler-x64.h',
'../../src/x64/regexp-macro-assembler-x64.cc',
'../../src/x64/regexp-macro-assembler-x64.h',
- '../../src/x64/stub-cache-x64.cc',
'../../src/compiler/x64/code-generator-x64.cc',
'../../src/compiler/x64/instruction-codes-x64.h',
'../../src/compiler/x64/instruction-selector-x64.cc',
'../../src/compiler/x64/linkage-x64.cc',
+ '../../src/ic/x64/ic-x64.cc',
+ '../../src/ic/x64/ic-compiler-x64.cc',
+ '../../src/ic/x64/stub-cache-x64.cc',
],
}],
['OS=="linux"', {