}
-static void GenerateNumberDictionaryLoad(MacroAssembler* masm,
- Label* miss,
- Register elements,
- Register key,
- Register result,
- Register t0,
- Register t1,
- Register t2) {
- // Register use:
- //
- // elements - holds the slow-case elements of the receiver on entry.
- // Unchanged unless 'result' is the same register.
- //
- // key - holds the smi key on entry.
- // Unchanged unless 'result' is the same register.
- //
- // result - holds the result on exit if the load succeeded.
- // Allowed to be the same as 'key' or 'result'.
- // Unchanged on bailout so 'key' or 'result' can be used
- // in further computation.
- //
- // Scratch registers:
- //
- // t0 - holds the untagged key on entry and holds the hash once computed.
- //
- // t1 - used to hold the capacity mask of the dictionary
- //
- // t2 - used for the index into the dictionary.
- Label done;
-
- // Compute the hash code from the untagged key. This must be kept in sync
- // with ComputeIntegerHash in utils.h.
- //
- // hash = ~hash + (hash << 15);
- __ mvn(t1, Operand(t0));
- __ add(t0, t1, Operand(t0, LSL, 15));
- // hash = hash ^ (hash >> 12);
- __ eor(t0, t0, Operand(t0, LSR, 12));
- // hash = hash + (hash << 2);
- __ add(t0, t0, Operand(t0, LSL, 2));
- // hash = hash ^ (hash >> 4);
- __ eor(t0, t0, Operand(t0, LSR, 4));
- // hash = hash * 2057;
- __ mov(t1, Operand(2057));
- __ mul(t0, t0, t1);
- // hash = hash ^ (hash >> 16);
- __ eor(t0, t0, Operand(t0, LSR, 16));
-
- // Compute the capacity mask.
- __ ldr(t1, FieldMemOperand(elements, NumberDictionary::kCapacityOffset));
- __ mov(t1, Operand(t1, ASR, kSmiTagSize)); // convert smi to int
- __ sub(t1, t1, Operand(1));
-
- // Generate an unrolled loop that performs a few probes before giving up.
- static const int kProbes = 4;
- for (int i = 0; i < kProbes; i++) {
- // Use t2 for index calculations and keep the hash intact in t0.
- __ mov(t2, t0);
- // Compute the masked index: (hash + i + i * i) & mask.
- if (i > 0) {
- __ add(t2, t2, Operand(NumberDictionary::GetProbeOffset(i)));
- }
- __ and_(t2, t2, Operand(t1));
-
- // Scale the index by multiplying by the element size.
- ASSERT(NumberDictionary::kEntrySize == 3);
- __ add(t2, t2, Operand(t2, LSL, 1)); // t2 = t2 * 3
-
- // Check if the key is identical to the name.
- __ add(t2, elements, Operand(t2, LSL, kPointerSizeLog2));
- __ ldr(ip, FieldMemOperand(t2, NumberDictionary::kElementsStartOffset));
- __ cmp(key, Operand(ip));
- if (i != kProbes - 1) {
- __ b(eq, &done);
- } else {
- __ b(ne, miss);
- }
- }
-
- __ bind(&done);
- // Check that the value is a normal property.
- // t2: elements + (index * kPointerSize)
- const int kDetailsOffset =
- NumberDictionary::kElementsStartOffset + 2 * kPointerSize;
- __ ldr(t1, FieldMemOperand(t2, kDetailsOffset));
- __ tst(t1, Operand(Smi::FromInt(PropertyDetails::TypeField::mask())));
- __ b(ne, miss);
-
- // Get the value at the masked, scaled index and return.
- const int kValueOffset =
- NumberDictionary::kElementsStartOffset + kPointerSize;
- __ ldr(result, FieldMemOperand(t2, kValueOffset));
-}
-
-
void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r2 : name
__ b(ne, &slow_load);
__ mov(r0, Operand(r2, ASR, kSmiTagSize));
// r0: untagged index
- GenerateNumberDictionaryLoad(masm, &slow_load, r4, r2, r1, r0, r3, r5);
+ __ LoadFromNumberDictionary(&slow_load, r4, r2, r1, r0, r3, r5);
__ IncrementCounter(counters->keyed_call_generic_smi_dict(), 1, r0, r3);
__ jmp(&do_call);
__ cmp(r3, ip);
__ b(ne, &slow);
__ mov(r2, Operand(r0, ASR, kSmiTagSize));
- GenerateNumberDictionaryLoad(masm, &slow, r4, r0, r0, r2, r3, r5);
+ __ LoadFromNumberDictionary(&slow, r4, r0, r0, r2, r3, r5);
__ Ret();
// Slow case, key and receiver still in r0 and r1.
}
+// Probes the NumberDictionary in |elements| for the untagged integer key
+// held in t0 and, on success, loads the associated value into |result|;
+// jumps to |miss| if the key is absent or the entry is not a normal
+// property.
+void MacroAssembler::LoadFromNumberDictionary(Label* miss,
+                                              Register elements,
+                                              Register key,
+                                              Register result,
+                                              Register t0,
+                                              Register t1,
+                                              Register t2) {
+  // Register use:
+  //
+  // elements - holds the slow-case elements of the receiver on entry.
+  //            Unchanged unless 'result' is the same register.
+  //
+  // key      - holds the smi key on entry.
+  //            Unchanged unless 'result' is the same register.
+  //
+  // result   - holds the result on exit if the load succeeded.
+  //            Allowed to be the same as 'key' or 'elements'.
+  //            Unchanged on bailout so 'key' or 'elements' can be used
+  //            in further computation.
+  //
+  // Scratch registers:
+  //
+  // t0 - holds the untagged key on entry and holds the hash once computed.
+  //
+  // t1 - used to hold the capacity mask of the dictionary.
+  //
+  // t2 - used for the index into the dictionary.
+  Label done;
+
+  // Compute the hash code from the untagged key. This must be kept in sync
+  // with ComputeIntegerHash in utils.h.
+  //
+  // hash = ~hash + (hash << 15);
+  mvn(t1, Operand(t0));
+  add(t0, t1, Operand(t0, LSL, 15));
+  // hash = hash ^ (hash >> 12);
+  eor(t0, t0, Operand(t0, LSR, 12));
+  // hash = hash + (hash << 2);
+  add(t0, t0, Operand(t0, LSL, 2));
+  // hash = hash ^ (hash >> 4);
+  eor(t0, t0, Operand(t0, LSR, 4));
+  // hash = hash * 2057;
+  mov(t1, Operand(2057));
+  mul(t0, t0, t1);
+  // hash = hash ^ (hash >> 16);
+  eor(t0, t0, Operand(t0, LSR, 16));
+
+  // Compute the capacity mask.
+  ldr(t1, FieldMemOperand(elements, NumberDictionary::kCapacityOffset));
+  mov(t1, Operand(t1, ASR, kSmiTagSize));  // convert smi to int
+  sub(t1, t1, Operand(1));
+
+  // Generate an unrolled loop that performs a few probes before giving up.
+  static const int kProbes = 4;
+  for (int i = 0; i < kProbes; i++) {
+    // Use t2 for index calculations and keep the hash intact in t0.
+    mov(t2, t0);
+    // Compute the masked index: (hash + i + i * i) & mask.
+    if (i > 0) {
+      add(t2, t2, Operand(NumberDictionary::GetProbeOffset(i)));
+    }
+    and_(t2, t2, Operand(t1));
+
+    // Scale the index by multiplying by the element size.
+    ASSERT(NumberDictionary::kEntrySize == 3);
+    add(t2, t2, Operand(t2, LSL, 1));  // t2 = t2 * 3
+
+    // Check if the key is identical to the name.
+    add(t2, elements, Operand(t2, LSL, kPointerSizeLog2));
+    ldr(ip, FieldMemOperand(t2, NumberDictionary::kElementsStartOffset));
+    cmp(key, Operand(ip));
+    // All but the last probe fall through to the next probe on a mismatch;
+    // the last probe gives up and jumps to 'miss'.
+    if (i != kProbes - 1) {
+      b(eq, &done);
+    } else {
+      b(ne, miss);
+    }
+  }
+
+  bind(&done);
+  // Check that the value is a normal property.
+  // t2: elements + (index * kPointerSize)
+  const int kDetailsOffset =
+      NumberDictionary::kElementsStartOffset + 2 * kPointerSize;
+  ldr(t1, FieldMemOperand(t2, kDetailsOffset));
+  tst(t1, Operand(Smi::FromInt(PropertyDetails::TypeField::mask())));
+  b(ne, miss);
+
+  // Get the value at the masked, scaled index and return.
+  const int kValueOffset =
+      NumberDictionary::kElementsStartOffset + kPointerSize;
+  ldr(result, FieldMemOperand(t2, kValueOffset));
+}
+
+
void MacroAssembler::AllocateInNewSpace(int object_size,
Register result,
Register scratch1,
Register scratch,
Label* miss);
+
+ void LoadFromNumberDictionary(Label* miss,
+ Register elements,
+ Register key,
+ Register result,
+ Register t0,
+ Register t1,
+ Register t2);
+
+
inline void MarkCode(NopMarkerTypes type) {
nop(type);
}
// -- r1 : receiver
// -----------------------------------
Code* stub;
- MaybeObject* maybe_stub = ComputeSharedKeyedLoadElementStub(receiver_map);
+ JSObject::ElementsKind elements_kind = receiver_map->elements_kind();
+ MaybeObject* maybe_stub = KeyedLoadElementStub(elements_kind).TryGetCode();
if (!maybe_stub->To(&stub)) return maybe_stub;
__ DispatchMap(r1,
r2,
// -- r3 : scratch
// -----------------------------------
Code* stub;
- MaybeObject* maybe_stub = ComputeSharedKeyedStoreElementStub(receiver_map);
+ JSObject::ElementsKind elements_kind = receiver_map->elements_kind();
+ bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
+ MaybeObject* maybe_stub =
+ KeyedStoreElementStub(is_js_array, elements_kind).TryGetCode();
if (!maybe_stub->To(&stub)) return maybe_stub;
__ DispatchMap(r2,
r3,
#define __ ACCESS_MASM(masm)
+// Generates the dictionary-elements fast case for a keyed load on ARM:
+// smi keys are looked up directly in the receiver's NumberDictionary;
+// anything else is deferred to the slow or generic IC.
+void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
+    MacroAssembler* masm) {
+  // ---------- S t a t e --------------
+  //  -- lr     : return address
+  //  -- r0     : key
+  //  -- r1     : receiver
+  // -----------------------------------
+  Label slow, miss_force_generic;
+
+  Register key = r0;
+  Register receiver = r1;
+
+  // Non-smi keys cannot be handled here; force the generic IC.
+  __ JumpIfNotSmi(key, &miss_force_generic);
+  __ mov(r2, Operand(key, ASR, kSmiTagSize));
+  __ ldr(r4, FieldMemOperand(receiver, JSObject::kElementsOffset));
+  // r0 serves as both key and result; LoadFromNumberDictionary permits this.
+  __ LoadFromNumberDictionary(&slow, r4, key, r0, r2, r3, r5);
+  __ Ret();
+
+  __ bind(&slow);
+  // NOTE(review): this bumps the external-array slow-case counter for
+  // dictionary loads — confirm the counter reuse is intentional.
+  __ IncrementCounter(
+      masm->isolate()->counters()->keyed_load_external_array_slow(),
+      1, r2, r3);
+
+  // ---------- S t a t e --------------
+  //  -- lr     : return address
+  //  -- r0     : key
+  //  -- r1     : receiver
+  // -----------------------------------
+  Handle<Code> slow_ic =
+      masm->isolate()->builtins()->KeyedLoadIC_Slow();
+  __ Jump(slow_ic, RelocInfo::CODE_TARGET);
+
+  // Miss case, call the runtime.
+  __ bind(&miss_force_generic);
+
+  // ---------- S t a t e --------------
+  //  -- lr     : return address
+  //  -- r0     : key
+  //  -- r1     : receiver
+  // -----------------------------------
+
+  Handle<Code> miss_ic =
+      masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
+  __ Jump(miss_ic, RelocInfo::CODE_TARGET);
+}
+
+
static bool IsElementTypeSigned(JSObject::ElementsKind elements_kind) {
switch (elements_kind) {
case JSObject::EXTERNAL_BYTE_ELEMENTS:
}
-void KeyedLoadFastElementStub::Generate(MacroAssembler* masm) {
- KeyedLoadStubCompiler::GenerateLoadFastElement(masm);
-}
-
-
-void KeyedStoreFastElementStub::Generate(MacroAssembler* masm) {
- KeyedStoreStubCompiler::GenerateStoreFastElement(masm, is_js_array_);
-}
-
-
-void KeyedLoadExternalArrayStub::Generate(MacroAssembler* masm) {
- KeyedLoadStubCompiler::GenerateLoadExternalArray(masm, elements_kind_);
+// Dispatches on this stub's elements kind to the matching
+// KeyedLoadStubCompiler code generator. FAST_DOUBLE_ELEMENTS has no
+// generator yet; NON_STRICT_ARGUMENTS_ELEMENTS never reaches a stub.
+void KeyedLoadElementStub::Generate(MacroAssembler* masm) {
+  switch (elements_kind_) {
+    case JSObject::FAST_ELEMENTS:
+      KeyedLoadStubCompiler::GenerateLoadFastElement(masm);
+      break;
+    case JSObject::FAST_DOUBLE_ELEMENTS:
+      UNIMPLEMENTED();
+      break;
+    case JSObject::EXTERNAL_BYTE_ELEMENTS:
+    case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+    case JSObject::EXTERNAL_SHORT_ELEMENTS:
+    case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+    case JSObject::EXTERNAL_INT_ELEMENTS:
+    case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS:
+    case JSObject::EXTERNAL_FLOAT_ELEMENTS:
+    case JSObject::EXTERNAL_DOUBLE_ELEMENTS:
+    case JSObject::EXTERNAL_PIXEL_ELEMENTS:
+      KeyedLoadStubCompiler::GenerateLoadExternalArray(masm, elements_kind_);
+      break;
+    case JSObject::DICTIONARY_ELEMENTS:
+      KeyedLoadStubCompiler::GenerateLoadDictionaryElement(masm);
+      break;
+    case JSObject::NON_STRICT_ARGUMENTS_ELEMENTS:
+      UNREACHABLE();
+      break;
+  }
}
-void KeyedStoreExternalArrayStub::Generate(MacroAssembler* masm) {
- KeyedStoreStubCompiler::GenerateStoreExternalArray(masm, elements_kind_);
+// Dispatches on this stub's elements kind to the matching
+// KeyedStoreStubCompiler code generator. FAST_DOUBLE_ELEMENTS has no
+// generator yet; NON_STRICT_ARGUMENTS_ELEMENTS never reaches a stub.
+void KeyedStoreElementStub::Generate(MacroAssembler* masm) {
+  switch (elements_kind_) {
+    case JSObject::FAST_ELEMENTS:
+      // is_js_array_ selects JSArray-length vs. fixed-array bounds handling.
+      KeyedStoreStubCompiler::GenerateStoreFastElement(masm, is_js_array_);
+      break;
+    case JSObject::FAST_DOUBLE_ELEMENTS:
+      UNIMPLEMENTED();
+      break;
+    case JSObject::EXTERNAL_BYTE_ELEMENTS:
+    case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+    case JSObject::EXTERNAL_SHORT_ELEMENTS:
+    case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+    case JSObject::EXTERNAL_INT_ELEMENTS:
+    case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS:
+    case JSObject::EXTERNAL_FLOAT_ELEMENTS:
+    case JSObject::EXTERNAL_DOUBLE_ELEMENTS:
+    case JSObject::EXTERNAL_PIXEL_ELEMENTS:
+      KeyedStoreStubCompiler::GenerateStoreExternalArray(masm, elements_kind_);
+      break;
+    case JSObject::DICTIONARY_ELEMENTS:
+      KeyedStoreStubCompiler::GenerateStoreDictionaryElement(masm);
+      break;
+    case JSObject::NON_STRICT_ARGUMENTS_ELEMENTS:
+      UNREACHABLE();
+      break;
+  }
}
V(NumberToString) \
V(CEntry) \
V(JSEntry) \
- V(KeyedLoadFastElement) \
- V(KeyedStoreFastElement) \
- V(KeyedLoadExternalArray) \
- V(KeyedStoreExternalArray) \
+ V(KeyedLoadElement) \
+ V(KeyedStoreElement) \
V(DebuggerStatement) \
V(StringDictionaryNegativeLookup)
};
-class KeyedLoadFastElementStub : public CodeStub {
+class KeyedLoadElementStub : public CodeStub {
public:
- explicit KeyedLoadFastElementStub() {
- }
-
- Major MajorKey() { return KeyedLoadFastElement; }
- int MinorKey() { return 0; }
-
- void Generate(MacroAssembler* masm);
-};
+ explicit KeyedLoadElementStub(JSObject::ElementsKind elements_kind)
+ : elements_kind_(elements_kind)
+ { }
-
-class KeyedStoreFastElementStub : public CodeStub {
- public:
- explicit KeyedStoreFastElementStub(bool is_js_array)
- : is_js_array_(is_js_array) { }
-
- Major MajorKey() { return KeyedStoreFastElement; }
- int MinorKey() { return is_js_array_ ? 1 : 0; }
-
- void Generate(MacroAssembler* masm);
-
- private:
- bool is_js_array_;
-};
-
-
-class KeyedLoadExternalArrayStub : public CodeStub {
- public:
- explicit KeyedLoadExternalArrayStub(JSObject::ElementsKind elements_kind)
- : elements_kind_(elements_kind) { }
-
- Major MajorKey() { return KeyedLoadExternalArray; }
+ Major MajorKey() { return KeyedLoadElement; }
int MinorKey() { return elements_kind_; }
void Generate(MacroAssembler* masm);
- protected:
+ private:
JSObject::ElementsKind elements_kind_;
+
+ DISALLOW_COPY_AND_ASSIGN(KeyedLoadElementStub);
};
-class KeyedStoreExternalArrayStub : public CodeStub {
+class KeyedStoreElementStub : public CodeStub {
public:
- explicit KeyedStoreExternalArrayStub(JSObject::ElementsKind elements_kind)
- : elements_kind_(elements_kind) { }
+ KeyedStoreElementStub(bool is_js_array,
+ JSObject::ElementsKind elements_kind)
+ : is_js_array_(is_js_array),
+ elements_kind_(elements_kind) { }
- Major MajorKey() { return KeyedStoreExternalArray; }
- int MinorKey() { return elements_kind_; }
+ Major MajorKey() { return KeyedStoreElement; }
+ int MinorKey() {
+ return (is_js_array_ ? 0 : JSObject::kElementsKindCount) + elements_kind_;
+ }
void Generate(MacroAssembler* masm);
- protected:
+ private:
+ bool is_js_array_;
JSObject::ElementsKind elements_kind_;
+
+ DISALLOW_COPY_AND_ASSIGN(KeyedStoreElementStub);
};
}
-static void GenerateNumberDictionaryLoad(MacroAssembler* masm,
- Label* miss,
- Register elements,
- Register key,
- Register r0,
- Register r1,
- Register r2,
- Register result) {
- // Register use:
- //
- // elements - holds the slow-case elements of the receiver and is unchanged.
- //
- // key - holds the smi key on entry and is unchanged.
- //
- // Scratch registers:
- //
- // r0 - holds the untagged key on entry and holds the hash once computed.
- //
- // r1 - used to hold the capacity mask of the dictionary
- //
- // r2 - used for the index into the dictionary.
- //
- // result - holds the result on exit if the load succeeds and we fall through.
-
- Label done;
-
- // Compute the hash code from the untagged key. This must be kept in sync
- // with ComputeIntegerHash in utils.h.
- //
- // hash = ~hash + (hash << 15);
- __ mov(r1, r0);
- __ not_(r0);
- __ shl(r1, 15);
- __ add(r0, Operand(r1));
- // hash = hash ^ (hash >> 12);
- __ mov(r1, r0);
- __ shr(r1, 12);
- __ xor_(r0, Operand(r1));
- // hash = hash + (hash << 2);
- __ lea(r0, Operand(r0, r0, times_4, 0));
- // hash = hash ^ (hash >> 4);
- __ mov(r1, r0);
- __ shr(r1, 4);
- __ xor_(r0, Operand(r1));
- // hash = hash * 2057;
- __ imul(r0, r0, 2057);
- // hash = hash ^ (hash >> 16);
- __ mov(r1, r0);
- __ shr(r1, 16);
- __ xor_(r0, Operand(r1));
-
- // Compute capacity mask.
- __ mov(r1, FieldOperand(elements, NumberDictionary::kCapacityOffset));
- __ shr(r1, kSmiTagSize); // convert smi to int
- __ dec(r1);
-
- // Generate an unrolled loop that performs a few probes before giving up.
- const int kProbes = 4;
- for (int i = 0; i < kProbes; i++) {
- // Use r2 for index calculations and keep the hash intact in r0.
- __ mov(r2, r0);
- // Compute the masked index: (hash + i + i * i) & mask.
- if (i > 0) {
- __ add(Operand(r2), Immediate(NumberDictionary::GetProbeOffset(i)));
- }
- __ and_(r2, Operand(r1));
-
- // Scale the index by multiplying by the entry size.
- ASSERT(NumberDictionary::kEntrySize == 3);
- __ lea(r2, Operand(r2, r2, times_2, 0)); // r2 = r2 * 3
-
- // Check if the key matches.
- __ cmp(key, FieldOperand(elements,
- r2,
- times_pointer_size,
- NumberDictionary::kElementsStartOffset));
- if (i != (kProbes - 1)) {
- __ j(equal, &done);
- } else {
- __ j(not_equal, miss);
- }
- }
-
- __ bind(&done);
- // Check that the value is a normal propety.
- const int kDetailsOffset =
- NumberDictionary::kElementsStartOffset + 2 * kPointerSize;
- ASSERT_EQ(NORMAL, 0);
- __ test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset),
- Immediate(PropertyDetails::TypeField::mask() << kSmiTagSize));
- __ j(not_zero, miss);
-
- // Get the value at the masked, scaled index.
- const int kValueOffset =
- NumberDictionary::kElementsStartOffset + kPointerSize;
- __ mov(result, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
-}
-
-
void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : receiver
// Push receiver on the stack to free up a register for the dictionary
// probing.
__ push(edx);
- GenerateNumberDictionaryLoad(masm,
- &slow_pop_receiver,
- ecx,
- eax,
- ebx,
- edx,
- edi,
- eax);
+ __ LoadFromNumberDictionary(&slow_pop_receiver,
+ ecx,
+ eax,
+ ebx,
+ edx,
+ edi,
+ eax);
// Pop receiver before returning.
__ pop(edx);
__ ret(0);
__ SmiUntag(ebx);
// ebx: untagged index
// Receiver in edx will be clobbered, need to reload it on miss.
- GenerateNumberDictionaryLoad(
- masm, &slow_reload_receiver, eax, ecx, ebx, edx, edi, edi);
+ __ LoadFromNumberDictionary(
+ &slow_reload_receiver, eax, ecx, ebx, edx, edi, edi);
__ IncrementCounter(counters->keyed_call_generic_smi_dict(), 1);
__ jmp(&do_call);
}
+// Probes the NumberDictionary in |elements| for the untagged integer key
+// held in r0 and, on success, loads the associated value into |result|;
+// jumps to |miss| if the key is absent or the entry is not a normal
+// property.
+void MacroAssembler::LoadFromNumberDictionary(Label* miss,
+                                              Register elements,
+                                              Register key,
+                                              Register r0,
+                                              Register r1,
+                                              Register r2,
+                                              Register result) {
+  // Register use:
+  //
+  // elements - holds the slow-case elements of the receiver and is unchanged.
+  //
+  // key - holds the smi key on entry and is unchanged.
+  //
+  // Scratch registers:
+  //
+  // r0 - holds the untagged key on entry and holds the hash once computed.
+  //
+  // r1 - used to hold the capacity mask of the dictionary.
+  //
+  // r2 - used for the index into the dictionary.
+  //
+  // result - holds the result on exit if the load succeeds and we fall through.
+
+  Label done;
+
+  // Compute the hash code from the untagged key. This must be kept in sync
+  // with ComputeIntegerHash in utils.h.
+  //
+  // hash = ~hash + (hash << 15);
+  mov(r1, r0);
+  not_(r0);
+  shl(r1, 15);
+  add(r0, Operand(r1));
+  // hash = hash ^ (hash >> 12);
+  mov(r1, r0);
+  shr(r1, 12);
+  xor_(r0, Operand(r1));
+  // hash = hash + (hash << 2);
+  lea(r0, Operand(r0, r0, times_4, 0));
+  // hash = hash ^ (hash >> 4);
+  mov(r1, r0);
+  shr(r1, 4);
+  xor_(r0, Operand(r1));
+  // hash = hash * 2057;
+  imul(r0, r0, 2057);
+  // hash = hash ^ (hash >> 16);
+  mov(r1, r0);
+  shr(r1, 16);
+  xor_(r0, Operand(r1));
+
+  // Compute capacity mask.
+  mov(r1, FieldOperand(elements, NumberDictionary::kCapacityOffset));
+  shr(r1, kSmiTagSize);  // convert smi to int
+  dec(r1);
+
+  // Generate an unrolled loop that performs a few probes before giving up.
+  const int kProbes = 4;
+  for (int i = 0; i < kProbes; i++) {
+    // Use r2 for index calculations and keep the hash intact in r0.
+    mov(r2, r0);
+    // Compute the masked index: (hash + i + i * i) & mask.
+    if (i > 0) {
+      add(Operand(r2), Immediate(NumberDictionary::GetProbeOffset(i)));
+    }
+    and_(r2, Operand(r1));
+
+    // Scale the index by multiplying by the entry size.
+    ASSERT(NumberDictionary::kEntrySize == 3);
+    lea(r2, Operand(r2, r2, times_2, 0));  // r2 = r2 * 3
+
+    // Check if the key matches.
+    cmp(key, FieldOperand(elements,
+                          r2,
+                          times_pointer_size,
+                          NumberDictionary::kElementsStartOffset));
+    // All but the last probe fall through to the next probe on a mismatch;
+    // the last probe gives up and jumps to 'miss'.
+    if (i != (kProbes - 1)) {
+      j(equal, &done);
+    } else {
+      j(not_equal, miss);
+    }
+  }
+
+  bind(&done);
+  // Check that the value is a normal property.
+  const int kDetailsOffset =
+      NumberDictionary::kElementsStartOffset + 2 * kPointerSize;
+  ASSERT_EQ(NORMAL, 0);
+  test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset),
+       Immediate(PropertyDetails::TypeField::mask() << kSmiTagSize));
+  j(not_zero, miss);
+
+  // Get the value at the masked, scaled index.
+  const int kValueOffset =
+      NumberDictionary::kElementsStartOffset + kPointerSize;
+  mov(result, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
+}
+
+
void MacroAssembler::LoadAllocationTopHelper(Register result,
Register scratch,
AllocationFlags flags) {
Label* miss);
+ void LoadFromNumberDictionary(Label* miss,
+ Register elements,
+ Register key,
+ Register r0,
+ Register r1,
+ Register r2,
+ Register result);
+
+
// ---------------------------------------------------------------------------
// Allocation support
// -- esp[0] : return address
// -----------------------------------
Code* stub;
- MaybeObject* maybe_stub = ComputeSharedKeyedStoreElementStub(receiver_map);
+ JSObject::ElementsKind elements_kind = receiver_map->elements_kind();
+ bool is_jsarray = receiver_map->instance_type() == JS_ARRAY_TYPE;
+ MaybeObject* maybe_stub =
+ KeyedStoreElementStub(is_jsarray, elements_kind).TryGetCode();
if (!maybe_stub->To(&stub)) return maybe_stub;
__ DispatchMap(edx,
Handle<Map>(receiver_map),
// -- esp[0] : return address
// -----------------------------------
Code* stub;
- MaybeObject* maybe_stub = ComputeSharedKeyedLoadElementStub(receiver_map);
+ JSObject::ElementsKind elements_kind = receiver_map->elements_kind();
+ MaybeObject* maybe_stub = KeyedLoadElementStub(elements_kind).TryGetCode();
if (!maybe_stub->To(&stub)) return maybe_stub;
__ DispatchMap(edx,
Handle<Map>(receiver_map),
#define __ ACCESS_MASM(masm)
+// Generates the dictionary-elements fast case for a keyed load on ia32:
+// smi keys are looked up directly in the receiver's NumberDictionary;
+// anything else is deferred to the slow or generic IC.
+void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
+    MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax    : key
+  //  -- edx    : receiver
+  //  -- esp[0] : return address
+  // -----------------------------------
+  Label slow, miss_force_generic;
+
+  // This stub is meant to be tail-jumped to, the receiver must already
+  // have been verified by the caller to not be a smi.
+  __ JumpIfNotSmi(eax, &miss_force_generic);
+  __ mov(ebx, eax);
+  __ SmiUntag(ebx);
+  __ mov(ecx, FieldOperand(edx, JSObject::kElementsOffset));
+
+  // Push receiver on the stack to free up a register for the dictionary
+  // probing; edx is passed to the probe as a scratch register.
+  __ push(edx);
+  __ LoadFromNumberDictionary(&slow,
+                              ecx,
+                              eax,
+                              ebx,
+                              edx,
+                              edi,
+                              eax);
+  // Pop receiver before returning.
+  __ pop(edx);
+  __ ret(0);
+
+  __ bind(&slow);
+  __ pop(edx);
+
+  // ----------- S t a t e -------------
+  //  -- eax    : key (unchanged by the failed dictionary probe)
+  //  -- edx    : receiver
+  //  -- esp[0] : return address
+  // -----------------------------------
+
+  Handle<Code> slow_ic =
+      masm->isolate()->builtins()->KeyedLoadIC_Slow();
+  __ jmp(slow_ic, RelocInfo::CODE_TARGET);
+
+  __ bind(&miss_force_generic);
+  // ----------- S t a t e -------------
+  //  -- eax    : key
+  //  -- edx    : receiver
+  //  -- esp[0] : return address
+  // -----------------------------------
+
+  Handle<Code> miss_force_generic_ic =
+      masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
+  __ jmp(miss_force_generic_ic, RelocInfo::CODE_TARGET);
+}
+
+
void KeyedLoadStubCompiler::GenerateLoadExternalArray(
MacroAssembler* masm,
JSObject::ElementsKind elements_kind) {
void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm,
bool is_js_array) {
// ----------- S t a t e -------------
- // -- eax : key
+ // -- eax : value
+ // -- ecx : key
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
}
-MaybeObject* KeyedLoadIC::GetFastElementStubWithoutMapCheck(
- bool is_js_array) {
- return KeyedLoadFastElementStub().TryGetCode();
-}
-
-
-MaybeObject* KeyedLoadIC::GetExternalArrayStubWithoutMapCheck(
+MaybeObject* KeyedLoadIC::GetElementStubWithoutMapCheck(
+ bool is_js_array,
JSObject::ElementsKind elements_kind) {
- return KeyedLoadExternalArrayStub(elements_kind).TryGetCode();
+ return KeyedLoadElementStub(elements_kind).TryGetCode();
}
for (int i = 0; i < target_receiver_maps.length(); ++i) {
Map* receiver_map(target_receiver_maps.at(i));
MaybeObject* maybe_cached_stub = ComputeMonomorphicStubWithoutMapCheck(
- receiver_map, strict_mode, generic_stub);
+ receiver_map, strict_mode);
Code* cached_stub;
if (!maybe_cached_stub->To(&cached_stub)) return maybe_cached_stub;
handler_ics.Add(cached_stub);
MaybeObject* KeyedIC::ComputeMonomorphicStubWithoutMapCheck(
Map* receiver_map,
- StrictModeFlag strict_mode,
- Code* generic_stub) {
+ StrictModeFlag strict_mode) {
if ((receiver_map->instance_type() & kNotStringTag) == 0) {
ASSERT(string_stub() != NULL);
return string_stub();
- } else if (receiver_map->has_external_array_elements()) {
- return GetExternalArrayStubWithoutMapCheck(receiver_map->elements_kind());
- } else if (receiver_map->has_fast_elements()) {
- bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
- return GetFastElementStubWithoutMapCheck(is_js_array);
} else {
- return generic_stub;
+ ASSERT(receiver_map->has_dictionary_elements() ||
+ receiver_map->has_fast_elements() ||
+ receiver_map->has_fast_double_elements() ||
+ receiver_map->has_external_array_elements());
+ bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
+ return GetElementStubWithoutMapCheck(is_js_array,
+ receiver_map->elements_kind());
}
}
Code* result = NULL;
if (receiver->HasFastElements() ||
receiver->HasExternalArrayElements() ||
+ receiver->HasFastDoubleElements() ||
receiver->HasDictionaryElements()) {
MaybeObject* maybe_stub =
isolate()->stub_cache()->ComputeKeyedLoadOrStoreElement(
}
-MaybeObject* KeyedStoreIC::GetFastElementStubWithoutMapCheck(
- bool is_js_array) {
- return KeyedStoreFastElementStub(is_js_array).TryGetCode();
-}
-
-
-MaybeObject* KeyedStoreIC::GetExternalArrayStubWithoutMapCheck(
+MaybeObject* KeyedStoreIC::GetElementStubWithoutMapCheck(
+ bool is_js_array,
JSObject::ElementsKind elements_kind) {
- return KeyedStoreExternalArrayStub(elements_kind).TryGetCode();
+ return KeyedStoreElementStub(is_js_array, elements_kind).TryGetCode();
}
explicit KeyedIC(Isolate* isolate) : IC(NO_EXTRA_FRAME, isolate) {}
virtual ~KeyedIC() {}
- virtual MaybeObject* GetFastElementStubWithoutMapCheck(
- bool is_js_array) = 0;
-
- virtual MaybeObject* GetExternalArrayStubWithoutMapCheck(
+ virtual MaybeObject* GetElementStubWithoutMapCheck(
+ bool is_js_array,
JSObject::ElementsKind elements_kind) = 0;
protected:
MaybeObject* ComputeMonomorphicStubWithoutMapCheck(
Map* receiver_map,
- StrictModeFlag strict_mode,
- Code* generic_stub);
+ StrictModeFlag strict_mode);
MaybeObject* ComputeMonomorphicStub(JSObject* receiver,
bool is_store,
static const int kSlowCaseBitFieldMask =
(1 << Map::kIsAccessCheckNeeded) | (1 << Map::kHasIndexedInterceptor);
- virtual MaybeObject* GetFastElementStubWithoutMapCheck(
- bool is_js_array);
-
- virtual MaybeObject* GetExternalArrayStubWithoutMapCheck(
+ virtual MaybeObject* GetElementStubWithoutMapCheck(
+ bool is_js_array,
JSObject::ElementsKind elements_kind);
protected:
static void GenerateGeneric(MacroAssembler* masm, StrictModeFlag strict_mode);
static void GenerateNonStrictArguments(MacroAssembler* masm);
- virtual MaybeObject* GetFastElementStubWithoutMapCheck(
- bool is_js_array);
-
- virtual MaybeObject* GetExternalArrayStubWithoutMapCheck(
+ virtual MaybeObject* GetElementStubWithoutMapCheck(
+ bool is_js_array,
JSObject::ElementsKind elements_kind);
protected:
}
-static void GenerateNumberDictionaryLoad(MacroAssembler* masm,
- Label* miss,
- Register elements,
- Register key,
- Register result,
- Register reg0,
- Register reg1,
- Register reg2) {
- // Register use:
- //
- // elements - holds the slow-case elements of the receiver on entry.
- // Unchanged unless 'result' is the same register.
- //
- // key - holds the smi key on entry.
- // Unchanged unless 'result' is the same register.
- //
- //
- // result - holds the result on exit if the load succeeded.
- // Allowed to be the same as 'key' or 'result'.
- // Unchanged on bailout so 'key' or 'result' can be used
- // in further computation.
- //
- // Scratch registers:
- //
- // reg0 - holds the untagged key on entry and holds the hash once computed.
- //
- // reg1 - Used to hold the capacity mask of the dictionary.
- //
- // reg2 - Used for the index into the dictionary.
- // at - Temporary (avoid MacroAssembler instructions also using 'at').
- Label done;
-
- // Compute the hash code from the untagged key. This must be kept in sync
- // with ComputeIntegerHash in utils.h.
- //
- // hash = ~hash + (hash << 15);
- __ nor(reg1, reg0, zero_reg);
- __ sll(at, reg0, 15);
- __ addu(reg0, reg1, at);
-
- // hash = hash ^ (hash >> 12);
- __ srl(at, reg0, 12);
- __ xor_(reg0, reg0, at);
-
- // hash = hash + (hash << 2);
- __ sll(at, reg0, 2);
- __ addu(reg0, reg0, at);
-
- // hash = hash ^ (hash >> 4);
- __ srl(at, reg0, 4);
- __ xor_(reg0, reg0, at);
-
- // hash = hash * 2057;
- __ li(reg1, Operand(2057));
- __ mul(reg0, reg0, reg1);
-
- // hash = hash ^ (hash >> 16);
- __ srl(at, reg0, 16);
- __ xor_(reg0, reg0, at);
-
- // Compute the capacity mask.
- __ lw(reg1, FieldMemOperand(elements, NumberDictionary::kCapacityOffset));
- __ sra(reg1, reg1, kSmiTagSize);
- __ Subu(reg1, reg1, Operand(1));
-
- // Generate an unrolled loop that performs a few probes before giving up.
- static const int kProbes = 4;
- for (int i = 0; i < kProbes; i++) {
- // Use reg2 for index calculations and keep the hash intact in reg0.
- __ mov(reg2, reg0);
- // Compute the masked index: (hash + i + i * i) & mask.
- if (i > 0) {
- __ Addu(reg2, reg2, Operand(NumberDictionary::GetProbeOffset(i)));
- }
- __ and_(reg2, reg2, reg1);
-
- // Scale the index by multiplying by the element size.
- ASSERT(NumberDictionary::kEntrySize == 3);
- __ sll(at, reg2, 1); // 2x.
- __ addu(reg2, reg2, at); // reg2 = reg2 * 3.
-
- // Check if the key is identical to the name.
- __ sll(at, reg2, kPointerSizeLog2);
- __ addu(reg2, elements, at);
-
- __ lw(at, FieldMemOperand(reg2, NumberDictionary::kElementsStartOffset));
- if (i != kProbes - 1) {
- __ Branch(&done, eq, key, Operand(at));
- } else {
- __ Branch(miss, ne, key, Operand(at));
- }
- }
-
- __ bind(&done);
- // Check that the value is a normal property.
- // reg2: elements + (index * kPointerSize).
- const int kDetailsOffset =
- NumberDictionary::kElementsStartOffset + 2 * kPointerSize;
- __ lw(reg1, FieldMemOperand(reg2, kDetailsOffset));
- __ And(at, reg1, Operand(Smi::FromInt(PropertyDetails::TypeField::mask())));
- __ Branch(miss, ne, at, Operand(zero_reg));
-
- // Get the value at the masked, scaled index and return.
- const int kValueOffset =
- NumberDictionary::kElementsStartOffset + kPointerSize;
- __ lw(result, FieldMemOperand(reg2, kValueOffset));
-}
-
-
void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a2 : name
__ Branch(&slow_load, ne, a3, Operand(at));
__ sra(a0, a2, kSmiTagSize);
// a0: untagged index
- GenerateNumberDictionaryLoad(masm, &slow_load, t0, a2, a1, a0, a3, t1);
+ __ LoadFromNumberDictionary(&slow_load, t0, a2, a1, a0, a3, t1);
__ IncrementCounter(counters->keyed_call_generic_smi_dict(), 1, a0, a3);
__ jmp(&do_call);
__ LoadRoot(at, Heap::kHashTableMapRootIndex);
__ Branch(&slow, ne, a3, Operand(at));
__ sra(a2, a0, kSmiTagSize);
- GenerateNumberDictionaryLoad(masm, &slow, t0, a0, v0, a2, a3, t1);
+ __ LoadFromNumberDictionary(&slow, t0, a0, v0, a2, a3, t1);
__ Ret();
// Slow case, key and receiver still in a0 and a1.
}
+void MacroAssembler::LoadFromNumberDictionary(Label* miss,
+ Register elements,
+ Register key,
+ Register result,
+ Register reg0,
+ Register reg1,
+ Register reg2) {
+ // Register use:
+ //
+ // elements - holds the slow-case elements of the receiver on entry.
+ // Unchanged unless 'result' is the same register.
+ //
+ // key - holds the smi key on entry.
+ // Unchanged unless 'result' is the same register.
+ //
+ //
+ // result - holds the result on exit if the load succeeded.
+ // Allowed to be the same as 'key' or 'result'.
+ // Unchanged on bailout so 'key' or 'result' can be used
+ // in further computation.
+ //
+ // Scratch registers:
+ //
+ // reg0 - holds the untagged key on entry and holds the hash once computed.
+ //
+ // reg1 - Used to hold the capacity mask of the dictionary.
+ //
+ // reg2 - Used for the index into the dictionary.
+ // at - Temporary (avoid MacroAssembler instructions also using 'at').
+ Label done;
+
+ // Compute the hash code from the untagged key. This must be kept in sync
+ // with ComputeIntegerHash in utils.h.
+ //
+ // hash = ~hash + (hash << 15);
+ nor(reg1, reg0, zero_reg);
+ sll(at, reg0, 15);
+ addu(reg0, reg1, at);
+
+ // hash = hash ^ (hash >> 12);
+ srl(at, reg0, 12);
+ xor_(reg0, reg0, at);
+
+ // hash = hash + (hash << 2);
+ sll(at, reg0, 2);
+ addu(reg0, reg0, at);
+
+ // hash = hash ^ (hash >> 4);
+ srl(at, reg0, 4);
+ xor_(reg0, reg0, at);
+
+ // hash = hash * 2057;
+ li(reg1, Operand(2057));
+ mul(reg0, reg0, reg1);
+
+ // hash = hash ^ (hash >> 16);
+ srl(at, reg0, 16);
+ xor_(reg0, reg0, at);
+
+ // Compute the capacity mask.
+ lw(reg1, FieldMemOperand(elements, NumberDictionary::kCapacityOffset));
+ sra(reg1, reg1, kSmiTagSize);
+ Subu(reg1, reg1, Operand(1));
+
+ // Generate an unrolled loop that performs a few probes before giving up.
+ static const int kProbes = 4;
+ for (int i = 0; i < kProbes; i++) {
+ // Use reg2 for index calculations and keep the hash intact in reg0.
+ mov(reg2, reg0);
+ // Compute the masked index: (hash + i + i * i) & mask.
+ if (i > 0) {
+ Addu(reg2, reg2, Operand(NumberDictionary::GetProbeOffset(i)));
+ }
+ and_(reg2, reg2, reg1);
+
+ // Scale the index by multiplying by the element size.
+ ASSERT(NumberDictionary::kEntrySize == 3);
+ sll(at, reg2, 1); // 2x.
+ addu(reg2, reg2, at); // reg2 = reg2 * 3.
+
+ // Check if the key is identical to the name.
+ sll(at, reg2, kPointerSizeLog2);
+ addu(reg2, elements, at);
+
+ lw(at, FieldMemOperand(reg2, NumberDictionary::kElementsStartOffset));
+ if (i != kProbes - 1) {
+ Branch(&done, eq, key, Operand(at));
+ } else {
+ Branch(miss, ne, key, Operand(at));
+ }
+ }
+
+ bind(&done);
+ // Check that the value is a normal property.
+ // reg2: elements + (index * kPointerSize).
+ const int kDetailsOffset =
+ NumberDictionary::kElementsStartOffset + 2 * kPointerSize;
+ lw(reg1, FieldMemOperand(reg2, kDetailsOffset));
+ And(at, reg1, Operand(Smi::FromInt(PropertyDetails::TypeField::mask())));
+ Branch(miss, ne, at, Operand(zero_reg));
+
+ // Get the value at the masked, scaled index and return.
+ const int kValueOffset =
+ NumberDictionary::kElementsStartOffset + kPointerSize;
+ lw(result, FieldMemOperand(reg2, kValueOffset));
+}
+
+
// ---------------------------------------------------------------------------
// Instruction macros.
Register scratch,
Label* miss);
+
+ void LoadFromNumberDictionary(Label* miss,
+ Register elements,
+ Register key,
+ Register result,
+ Register reg0,
+ Register reg1,
+ Register reg2);
+
+
inline void MarkCode(NopMarkerTypes type) {
nop(type);
}
// -- a1 : receiver
// -----------------------------------
Code* stub;
- MaybeObject* maybe_stub = ComputeSharedKeyedLoadElementStub(receiver_map);
+ JSObject::ElementsKind elements_kind = receiver_map->elements_kind();
+ MaybeObject* maybe_stub = KeyedLoadElementStub(elements_kind).TryGetCode();
if (!maybe_stub->To(&stub)) return maybe_stub;
__ DispatchMap(a1,
a2,
// -- a3 : scratch
// -----------------------------------
Code* stub;
- MaybeObject* maybe_stub = ComputeSharedKeyedStoreElementStub(receiver_map);
+ JSObject::ElementsKind elements_kind = receiver_map->elements_kind();
+ bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
+ MaybeObject* maybe_stub =
+ KeyedStoreElementStub(is_js_array, elements_kind).TryGetCode();
if (!maybe_stub->To(&stub)) return maybe_stub;
__ DispatchMap(a2,
a3,
#define __ ACCESS_MASM(masm)
+void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
+ MacroAssembler* masm) {
+ // ---------- S t a t e --------------
+ // -- ra : return address
+ // -- a0 : key
+ // -- a1 : receiver
+ // -----------------------------------
+ Label slow, miss_force_generic;
+
+ Register key = a0;
+ Register receiver = a1;
+
+ __ JumpIfNotSmi(key, &miss_force_generic);
+ __ lw(t0, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ sra(a2, a0, kSmiTagSize);
+ __ LoadFromNumberDictionary(&slow, t0, a0, v0, a2, a3, t1);
+ __ Ret();
+
+ // Slow case, key and receiver still in a0 and a1.
+ __ bind(&slow);
+ __ IncrementCounter(
+ masm->isolate()->counters()->keyed_load_external_array_slow(),
+ 1, a2, a3);
+ // Entry registers are intact.
+ // ---------- S t a t e --------------
+ // -- ra : return address
+ // -- a0 : key
+ // -- a1 : receiver
+ // -----------------------------------
+ Handle<Code> slow_ic =
+ masm->isolate()->builtins()->KeyedLoadIC_Slow();
+ __ Jump(slow_ic, RelocInfo::CODE_TARGET);
+
+ // Miss case, call the runtime.
+ __ bind(&miss_force_generic);
+
+ // ---------- S t a t e --------------
+ // -- ra : return address
+ // -- a0 : key
+ // -- a1 : receiver
+ // -----------------------------------
+
+ Handle<Code> miss_ic =
+ masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
+ __ Jump(miss_ic, RelocInfo::CODE_TARGET);
+}
+
+
static bool IsElementTypeSigned(JSObject::ElementsKind elements_kind) {
switch (elements_kind) {
case JSObject::EXTERNAL_BYTE_ELEMENTS:
}
-MaybeObject* KeyedLoadStubCompiler::ComputeSharedKeyedLoadElementStub(
- Map* receiver_map) {
- MaybeObject* maybe_stub = NULL;
- if (receiver_map->has_fast_elements()) {
- maybe_stub = KeyedLoadFastElementStub().TryGetCode();
- } else if (receiver_map->has_external_array_elements()) {
- JSObject::ElementsKind elements_kind = receiver_map->elements_kind();
- maybe_stub = KeyedLoadExternalArrayStub(elements_kind).TryGetCode();
- } else if (receiver_map->has_dictionary_elements()) {
- maybe_stub = isolate()->builtins()->builtin(Builtins::kKeyedLoadIC_Slow);
- } else {
- UNREACHABLE();
- }
- return maybe_stub;
-}
-
-
MaybeObject* StoreStubCompiler::GetCode(PropertyType type, String* name) {
Code::Flags flags = Code::ComputeMonomorphicFlags(
Code::STORE_IC, type, strict_mode_);
}
-MaybeObject* KeyedStoreStubCompiler::ComputeSharedKeyedStoreElementStub(
- Map* receiver_map) {
- MaybeObject* maybe_stub = NULL;
- if (receiver_map->has_fast_elements()) {
- bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
- maybe_stub = KeyedStoreFastElementStub(is_js_array).TryGetCode();
- } else if (receiver_map->has_external_array_elements()) {
- JSObject::ElementsKind elements_kind = receiver_map->elements_kind();
- maybe_stub = KeyedStoreExternalArrayStub(elements_kind).TryGetCode();
- } else if (receiver_map->has_dictionary_elements()) {
- maybe_stub = isolate()->builtins()->builtin(Builtins::kKeyedStoreIC_Slow);
- } else {
- UNREACHABLE();
- }
- return maybe_stub;
+void KeyedStoreStubCompiler::GenerateStoreDictionaryElement(
+ MacroAssembler* masm) {
+ KeyedStoreIC::GenerateSlow(masm);
}
static void GenerateLoadFastElement(MacroAssembler* masm);
+ static void GenerateLoadDictionaryElement(MacroAssembler* masm);
+
private:
MaybeObject* GetCode(PropertyType type,
String* name,
InlineCacheState state = MONOMORPHIC);
-
- MaybeObject* ComputeSharedKeyedLoadElementStub(Map* receiver_map);
};
static void GenerateStoreExternalArray(MacroAssembler* masm,
JSObject::ElementsKind elements_kind);
+ static void GenerateStoreDictionaryElement(MacroAssembler* masm);
+
private:
MaybeObject* GetCode(PropertyType type,
String* name,
InlineCacheState state = MONOMORPHIC);
- MaybeObject* ComputeSharedKeyedStoreElementStub(Map* receiver_map);
-
StrictModeFlag strict_mode_;
};
}
-static void GenerateNumberDictionaryLoad(MacroAssembler* masm,
- Label* miss,
- Register elements,
- Register key,
- Register r0,
- Register r1,
- Register r2,
- Register result) {
- // Register use:
- //
- // elements - holds the slow-case elements of the receiver on entry.
- // Unchanged unless 'result' is the same register.
- //
- // key - holds the smi key on entry.
- // Unchanged unless 'result' is the same register.
- //
- // Scratch registers:
- //
- // r0 - holds the untagged key on entry and holds the hash once computed.
- //
- // r1 - used to hold the capacity mask of the dictionary
- //
- // r2 - used for the index into the dictionary.
- //
- // result - holds the result on exit if the load succeeded.
- // Allowed to be the same as 'key' or 'result'.
- // Unchanged on bailout so 'key' or 'result' can be used
- // in further computation.
-
- Label done;
-
- // Compute the hash code from the untagged key. This must be kept in sync
- // with ComputeIntegerHash in utils.h.
- //
- // hash = ~hash + (hash << 15);
- __ movl(r1, r0);
- __ notl(r0);
- __ shll(r1, Immediate(15));
- __ addl(r0, r1);
- // hash = hash ^ (hash >> 12);
- __ movl(r1, r0);
- __ shrl(r1, Immediate(12));
- __ xorl(r0, r1);
- // hash = hash + (hash << 2);
- __ leal(r0, Operand(r0, r0, times_4, 0));
- // hash = hash ^ (hash >> 4);
- __ movl(r1, r0);
- __ shrl(r1, Immediate(4));
- __ xorl(r0, r1);
- // hash = hash * 2057;
- __ imull(r0, r0, Immediate(2057));
- // hash = hash ^ (hash >> 16);
- __ movl(r1, r0);
- __ shrl(r1, Immediate(16));
- __ xorl(r0, r1);
-
- // Compute capacity mask.
- __ SmiToInteger32(r1,
- FieldOperand(elements, NumberDictionary::kCapacityOffset));
- __ decl(r1);
-
- // Generate an unrolled loop that performs a few probes before giving up.
- const int kProbes = 4;
- for (int i = 0; i < kProbes; i++) {
- // Use r2 for index calculations and keep the hash intact in r0.
- __ movq(r2, r0);
- // Compute the masked index: (hash + i + i * i) & mask.
- if (i > 0) {
- __ addl(r2, Immediate(NumberDictionary::GetProbeOffset(i)));
- }
- __ and_(r2, r1);
-
- // Scale the index by multiplying by the entry size.
- ASSERT(NumberDictionary::kEntrySize == 3);
- __ lea(r2, Operand(r2, r2, times_2, 0)); // r2 = r2 * 3
-
- // Check if the key matches.
- __ cmpq(key, FieldOperand(elements,
- r2,
- times_pointer_size,
- NumberDictionary::kElementsStartOffset));
- if (i != (kProbes - 1)) {
- __ j(equal, &done);
- } else {
- __ j(not_equal, miss);
- }
- }
-
- __ bind(&done);
- // Check that the value is a normal propety.
- const int kDetailsOffset =
- NumberDictionary::kElementsStartOffset + 2 * kPointerSize;
- ASSERT_EQ(NORMAL, 0);
- __ Test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset),
- Smi::FromInt(PropertyDetails::TypeField::mask()));
- __ j(not_zero, miss);
-
- // Get the value at the masked, scaled index.
- const int kValueOffset =
- NumberDictionary::kElementsStartOffset + kPointerSize;
- __ movq(result, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
-}
-
-
void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : receiver
__ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
Heap::kHashTableMapRootIndex);
__ j(not_equal, &slow);
- GenerateNumberDictionaryLoad(masm, &slow, rcx, rax, rbx, r9, rdi, rax);
+ __ LoadFromNumberDictionary(&slow, rcx, rax, rbx, r9, rdi, rax);
__ ret(0);
__ bind(&slow);
__ j(not_equal, &slow_load);
__ SmiToInteger32(rbx, rcx);
// ebx: untagged index
- GenerateNumberDictionaryLoad(masm, &slow_load, rax, rcx, rbx, r9, rdi, rdi);
+ __ LoadFromNumberDictionary(&slow_load, rax, rcx, rbx, r9, rdi, rdi);
__ IncrementCounter(counters->keyed_call_generic_smi_dict(), 1);
__ jmp(&do_call);
}
+void MacroAssembler::LoadFromNumberDictionary(Label* miss,
+ Register elements,
+ Register key,
+ Register r0,
+ Register r1,
+ Register r2,
+ Register result) {
+ // Register use:
+ //
+ // elements - holds the slow-case elements of the receiver on entry.
+ // Unchanged unless 'result' is the same register.
+ //
+ // key - holds the smi key on entry.
+ // Unchanged unless 'result' is the same register.
+ //
+ // Scratch registers:
+ //
+ // r0 - holds the untagged key on entry and holds the hash once computed.
+ //
+ // r1 - used to hold the capacity mask of the dictionary
+ //
+ // r2 - used for the index into the dictionary.
+ //
+ // result - holds the result on exit if the load succeeded.
+ // Allowed to be the same as 'key' or 'result'.
+ // Unchanged on bailout so 'key' or 'result' can be used
+ // in further computation.
+
+ Label done;
+
+ // Compute the hash code from the untagged key. This must be kept in sync
+ // with ComputeIntegerHash in utils.h.
+ //
+ // hash = ~hash + (hash << 15);
+ movl(r1, r0);
+ notl(r0);
+ shll(r1, Immediate(15));
+ addl(r0, r1);
+ // hash = hash ^ (hash >> 12);
+ movl(r1, r0);
+ shrl(r1, Immediate(12));
+ xorl(r0, r1);
+ // hash = hash + (hash << 2);
+ leal(r0, Operand(r0, r0, times_4, 0));
+ // hash = hash ^ (hash >> 4);
+ movl(r1, r0);
+ shrl(r1, Immediate(4));
+ xorl(r0, r1);
+ // hash = hash * 2057;
+ imull(r0, r0, Immediate(2057));
+ // hash = hash ^ (hash >> 16);
+ movl(r1, r0);
+ shrl(r1, Immediate(16));
+ xorl(r0, r1);
+
+ // Compute capacity mask.
+ SmiToInteger32(r1,
+ FieldOperand(elements, NumberDictionary::kCapacityOffset));
+ decl(r1);
+
+ // Generate an unrolled loop that performs a few probes before giving up.
+ const int kProbes = 4;
+ for (int i = 0; i < kProbes; i++) {
+ // Use r2 for index calculations and keep the hash intact in r0.
+ movq(r2, r0);
+ // Compute the masked index: (hash + i + i * i) & mask.
+ if (i > 0) {
+ addl(r2, Immediate(NumberDictionary::GetProbeOffset(i)));
+ }
+ and_(r2, r1);
+
+ // Scale the index by multiplying by the entry size.
+ ASSERT(NumberDictionary::kEntrySize == 3);
+ lea(r2, Operand(r2, r2, times_2, 0)); // r2 = r2 * 3
+
+ // Check if the key matches.
+ cmpq(key, FieldOperand(elements,
+ r2,
+ times_pointer_size,
+ NumberDictionary::kElementsStartOffset));
+ if (i != (kProbes - 1)) {
+ j(equal, &done);
+ } else {
+ j(not_equal, miss);
+ }
+ }
+
+ bind(&done);
+  // Check that the value is a normal property.
+ const int kDetailsOffset =
+ NumberDictionary::kElementsStartOffset + 2 * kPointerSize;
+ ASSERT_EQ(NORMAL, 0);
+ Test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset),
+ Smi::FromInt(PropertyDetails::TypeField::mask()));
+ j(not_zero, miss);
+
+ // Get the value at the masked, scaled index.
+ const int kValueOffset =
+ NumberDictionary::kElementsStartOffset + kPointerSize;
+ movq(result, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
+}
+
+
void MacroAssembler::LoadAllocationTopHelper(Register result,
Register scratch,
AllocationFlags flags) {
Label* miss);
+ void LoadFromNumberDictionary(Label* miss,
+ Register elements,
+ Register key,
+ Register r0,
+ Register r1,
+ Register r2,
+ Register result);
+
+
// ---------------------------------------------------------------------------
// Allocation support
// -- rsp[0] : return address
// -----------------------------------
Code* stub;
- MaybeObject* maybe_stub = ComputeSharedKeyedStoreElementStub(receiver_map);
+ JSObject::ElementsKind elements_kind = receiver_map->elements_kind();
+ bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
+ MaybeObject* maybe_stub =
+ KeyedStoreElementStub(is_js_array, elements_kind).TryGetCode();
if (!maybe_stub->To(&stub)) return maybe_stub;
__ DispatchMap(rdx,
Handle<Map>(receiver_map),
// -- rsp[0] : return address
// -----------------------------------
Code* stub;
- MaybeObject* maybe_stub = ComputeSharedKeyedLoadElementStub(receiver_map);
+ JSObject::ElementsKind elements_kind = receiver_map->elements_kind();
+ MaybeObject* maybe_stub = KeyedLoadElementStub(elements_kind).TryGetCode();
if (!maybe_stub->To(&stub)) return maybe_stub;
__ DispatchMap(rdx,
Handle<Map>(receiver_map),
#define __ ACCESS_MASM(masm)
+void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
+ MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rax : key
+ // -- rdx : receiver
+ // -- rsp[0] : return address
+ // -----------------------------------
+ Label slow, miss_force_generic;
+
+ // This stub is meant to be tail-jumped to, the receiver must already
+ // have been verified by the caller to not be a smi.
+
+ __ JumpIfNotSmi(rax, &miss_force_generic);
+ __ SmiToInteger32(rbx, rax);
+ __ movq(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
+
+ // Check whether the elements is a number dictionary.
+ // rdx: receiver
+ // rax: key
+ // rbx: key as untagged int32
+ // rcx: elements
+ __ LoadFromNumberDictionary(&slow, rcx, rax, rbx, r9, rdi, rax);
+ __ ret(0);
+
+ __ bind(&slow);
+ // ----------- S t a t e -------------
+ // -- rax : key
+ // -- rdx : receiver
+ // -- rsp[0] : return address
+ // -----------------------------------
+ Handle<Code> slow_ic =
+ masm->isolate()->builtins()->KeyedLoadIC_Slow();
+ __ jmp(slow_ic, RelocInfo::CODE_TARGET);
+
+ __ bind(&miss_force_generic);
+ // ----------- S t a t e -------------
+ // -- rax : key
+ // -- rdx : receiver
+ // -- rsp[0] : return address
+ // -----------------------------------
+ Handle<Code> miss_ic =
+ masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
+ __ jmp(miss_ic, RelocInfo::CODE_TARGET);
+}
+
void KeyedLoadStubCompiler::GenerateLoadExternalArray(
MacroAssembler* masm,
JSObject::ElementsKind elements_kind) {
--- /dev/null
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+function init_array(a) {
+ for (var i = 0; i < 10; ++i ){
+ a[i] = i;
+ }
+}
+
+function init_sparse_array(a) {
+ for (var i = 0; i < 10; ++i ){
+ a[i] = i;
+ }
+ a[5000000] = 256;
+ assertTrue(%HasDictionaryElements(a));
+}
+
+function testPolymorphicLoads() {
+ function make_polymorphic_load_function() {
+ function load(a, i) {
+ return a[i];
+ }
+
+ var object_array = new Object;
+ var sparse_object_array = new Object;
+ var js_array = new Array(10);
+ var sparse_js_array = new Array(5000001);
+
+ init_array(object_array);
+ init_array(js_array);
+ init_sparse_array(sparse_object_array);
+ init_sparse_array(sparse_js_array);
+
+ return load;
+ }
+
+ var object_array = new Object;
+ var sparse_object_array = new Object;
+ var js_array = new Array(10);
+ var sparse_js_array = new Array(5000001);
+
+ init_array(object_array);
+ init_array(js_array);
+ init_sparse_array(sparse_object_array);
+ init_sparse_array(sparse_js_array);
+
+ // load() should now use polymorphic element loads.
+ load = make_polymorphic_load_function();
+ assertEquals(1, load(object_array, 1));
+ load = make_polymorphic_load_function();
+ assertEquals(1, load(js_array, 1));
+ load = make_polymorphic_load_function();
+ assertEquals(1, load(sparse_object_array, 1));
+ load = make_polymorphic_load_function();
+ assertEquals(1, load(sparse_js_array, 1));
+
+ load = make_polymorphic_load_function();
+ assertEquals(undefined, load(js_array, new Object()));
+ load = make_polymorphic_load_function();
+ assertEquals(undefined, load(object_array, new Object()));
+ load = make_polymorphic_load_function();
+ assertEquals(undefined, load(sparse_js_array, new Object()));
+ load = make_polymorphic_load_function();
+ assertEquals(undefined, load(sparse_object_array, new Object()));
+
+ // Try with crankshaft.
+ load = make_polymorphic_load_function();
+ %OptimizeFunctionOnNextCall(load);
+ assertEquals(1, load(object_array, 1));
+ assertEquals(1, load(js_array, 1));
+ assertEquals(1, load(sparse_object_array, 1));
+ assertEquals(1, load(sparse_js_array, 1));
+
+ load = make_polymorphic_load_function();
+ %OptimizeFunctionOnNextCall(load);
+ assertEquals(undefined, load(js_array, new Object()));
+ load = make_polymorphic_load_function();
+ %OptimizeFunctionOnNextCall(load);
+ assertEquals(undefined, load(object_array, new Object()));
+ load = make_polymorphic_load_function();
+ %OptimizeFunctionOnNextCall(load);
+ assertEquals(undefined, load(sparse_js_array, new Object()));
+ load = make_polymorphic_load_function();
+ %OptimizeFunctionOnNextCall(load);
+ assertEquals(undefined, load(sparse_object_array, new Object()));
+}
+
+function testPolymorphicStores() {
+ function make_polymorphic_store_function() {
+ function store(a, i, val) {
+ a[i] = val;
+ }
+
+ var object_array = new Object;
+ var sparse_object_array = new Object;
+ var js_array = new Array(10);
+ var sparse_js_array = new Array(5000001);
+
+ init_array(object_array);
+ init_array(js_array);
+ init_sparse_array(sparse_object_array);
+ init_sparse_array(sparse_js_array);
+
+ store(object_array, 1, 256);
+ store(js_array, 1, 256);
+ store(sparse_object_array, 1, 256);
+ store(sparse_js_array, 1, 256);
+
+ return store;
+ }
+
+ var object_array = new Object;
+ var sparse_object_array = new Object;
+ var js_array = new Array(10);
+ var sparse_js_array = new Array(5000001);
+
+ init_array(object_array);
+ init_array(js_array);
+ init_sparse_array(sparse_object_array);
+ init_sparse_array(sparse_js_array);
+
+ store = make_polymorphic_store_function();
+ store(object_array, 2, 257);
+ store = make_polymorphic_store_function();
+ store(js_array, 2, 257);
+ store = make_polymorphic_store_function();
+ store(sparse_object_array, 2, 257);
+ store = make_polymorphic_store_function();
+ store(sparse_js_array, 2, 257);
+
+ assertEquals(257, object_array[2]);
+ assertEquals(257, js_array[2]);
+ assertEquals(257, sparse_js_array[2]);
+ assertEquals(257, sparse_object_array[2]);
+
+ // Now try Crankshaft optimized polymorphic stores
+ store = make_polymorphic_store_function();
+ %OptimizeFunctionOnNextCall(store);
+ store(object_array, 3, 258);
+ store = make_polymorphic_store_function();
+ %OptimizeFunctionOnNextCall(store);
+ store(js_array, 3, 258);
+ store = make_polymorphic_store_function();
+ %OptimizeFunctionOnNextCall(store);
+ store(sparse_object_array, 3, 258);
+ store = make_polymorphic_store_function();
+ %OptimizeFunctionOnNextCall(store);
+ store(sparse_js_array, 3, 258);
+
+ assertEquals(258, object_array[3]);
+ assertEquals(258, js_array[3]);
+ assertEquals(258, sparse_js_array[3]);
+ assertEquals(258, sparse_object_array[3]);
+}
+
+testPolymorphicLoads();
+testPolymorphicStores();