From 40fbed0609ddb3e4ee4338049383004b62d13853 Mon Sep 17 00:00:00 2001 From: mvstanton Date: Fri, 4 Sep 2015 01:36:29 -0700 Subject: [PATCH] Reland Vector ICs: platform support for vector-based stores. The last changes for vector store functionality, they are in 3 areas: 1) The new vector [keyed] store code stubs - implementation. 2) IC and handler compiler adjustments 3) Odds and ends. A change in ast.cc, a test update, a small Oracle fix. TBR=bmeurer@chromium.org, jkummerow@chromium.org BUG= Review URL: https://codereview.chromium.org/1319123004 Cr-Commit-Position: refs/heads/master@{#30581} --- src/arm/code-stubs-arm.cc | 173 +++++++++++++++++- src/arm64/code-stubs-arm64.cc | 157 +++++++++++++++- src/ast.cc | 4 +- src/ia32/code-stubs-ia32.cc | 302 ++++++++++++++++++++++++++++++- src/ic/access-compiler.h | 3 - src/ic/arm/access-compiler-arm.cc | 2 +- src/ic/arm/handler-compiler-arm.cc | 27 ++- src/ic/arm/ic-arm.cc | 24 ++- src/ic/arm/ic-compiler-arm.cc | 5 +- src/ic/arm/stub-cache-arm.cc | 10 +- src/ic/arm64/access-compiler-arm64.cc | 2 +- src/ic/arm64/handler-compiler-arm64.cc | 28 ++- src/ic/arm64/ic-arm64.cc | 16 +- src/ic/arm64/ic-compiler-arm64.cc | 5 +- src/ic/arm64/stub-cache-arm64.cc | 10 +- src/ic/ia32/access-compiler-ia32.cc | 3 +- src/ic/ia32/handler-compiler-ia32.cc | 43 +++-- src/ic/ia32/ic-compiler-ia32.cc | 5 +- src/ic/ia32/ic-ia32.cc | 26 ++- src/ic/ia32/stub-cache-ia32.cc | 46 +++-- src/ic/mips/access-compiler-mips.cc | 2 +- src/ic/mips/handler-compiler-mips.cc | 27 ++- src/ic/mips/ic-compiler-mips.cc | 5 +- src/ic/mips/ic-mips.cc | 14 +- src/ic/mips/stub-cache-mips.cc | 10 +- src/ic/mips64/access-compiler-mips64.cc | 2 +- src/ic/mips64/handler-compiler-mips64.cc | 27 ++- src/ic/mips64/ic-compiler-mips64.cc | 5 +- src/ic/mips64/ic-mips64.cc | 21 ++- src/ic/mips64/stub-cache-mips64.cc | 10 +- src/ic/x64/access-compiler-x64.cc | 3 +- src/ic/x64/handler-compiler-x64.cc | 31 +++- src/ic/x64/ic-compiler-x64.cc | 5 +- src/ic/x64/ic-x64.cc | 14 +- src/ic/x64/stub-cache-x64.cc | 13 +- src/mips/code-stubs-mips.cc | 169 ++++++++++++++++- src/mips64/code-stubs-mips64.cc | 164 ++++++++++++++++- src/type-info.cc | 4 +- src/type-info.h | 1 - src/x64/code-stubs-x64.cc | 150 ++++++++++++++- test/cctest/test-feedback-vector.cc | 102 +++++++++-- 41 files changed, 1493 insertions(+), 177 deletions(-) diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc index 6b0ae54..f504b72 100644 --- a/src/arm/code-stubs-arm.cc +++ b/src/arm/code-stubs-arm.cc @@ -4445,7 +4445,6 @@ void LoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) { __ bind(&miss); LoadIC::GenerateMiss(masm); - __ bind(&load_smi_map); __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex); __ jmp(&compare_map); @@ -4546,11 +4545,54 @@ void VectorStoreICStub::GenerateForTrampoline(MacroAssembler* masm) { void VectorStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) { - Label miss; + Register receiver = VectorStoreICDescriptor::ReceiverRegister(); // r1 + Register key = VectorStoreICDescriptor::NameRegister(); // r2 + Register vector = VectorStoreICDescriptor::VectorRegister(); // r3 + Register slot = VectorStoreICDescriptor::SlotRegister(); // r4 + DCHECK(VectorStoreICDescriptor::ValueRegister().is(r0)); // r0 + Register feedback = r5; + Register receiver_map = r6; + Register scratch1 = r9; + + __ add(feedback, vector, Operand::PointerOffsetFromSmiKey(slot)); + __ ldr(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize)); + + // Try to quickly handle the monomorphic case without knowing 
for sure
+  // if we have a weak cell in feedback. We do know it's safe to look
+  // at WeakCell::kValueOffset.
+  Label try_array, load_smi_map, compare_map;
+  Label not_array, miss;
+  HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
+                        scratch1, &compare_map, &load_smi_map, &try_array);
+
+  // Is it a fixed array?
+  __ bind(&try_array);
+  __ ldr(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
+  __ CompareRoot(scratch1, Heap::kFixedArrayMapRootIndex);
+  __ b(ne, &not_array);
+
+  // We are using register r8, which is used for the embedded constant pool
+  // when FLAG_enable_embedded_constant_pool is true.
+  DCHECK(!FLAG_enable_embedded_constant_pool);
+  Register scratch2 = r8;
+  HandleArrayCases(masm, feedback, receiver_map, scratch1, scratch2, true,
+                   &miss);
+
+  __ bind(&not_array);
+  __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
+  __ b(ne, &miss);
+  Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
+      Code::ComputeHandlerFlags(Code::STORE_IC));
+  masm->isolate()->stub_cache()->GenerateProbe(
+      masm, Code::STORE_IC, code_flags, receiver, key, feedback, receiver_map,
+      scratch1, scratch2);
 
-  // TODO(mvstanton): Implement.
   __ bind(&miss);
   StoreIC::GenerateMiss(masm);
+
+  __ bind(&load_smi_map);
+  __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
+  __ jmp(&compare_map);
 }
 
 
@@ -4564,12 +4606,133 @@ void VectorKeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
 }
 
 
+static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register feedback,
+                                       Register receiver_map, Register scratch1,
+                                       Register scratch2, Label* miss) {
+  // feedback initially contains the feedback array
+  Label next_loop, prepare_next;
+  Label start_polymorphic;
+  Label transition_call;
+
+  Register cached_map = scratch1;
+  Register too_far = scratch2;
+  Register pointer_reg = feedback;
+  __ ldr(too_far, FieldMemOperand(feedback, FixedArray::kLengthOffset));
+
+  // +-----+------+------+-----+-----+-----+ ... ----+
+  // | map | len  | wm0  | wt0 | h0  | wm1 |   hN    |
+  // +-----+------+------+-----+-----+-----+ ... ----+
+  //                 0      1     2             len-1
+  //                 ^                            ^
+  //                 |                            |
+  //            pointer_reg                    too_far
+  //            aka feedback                   scratch2
+  // also need receiver_map
+  // use cached_map (scratch1) to look in the weak map values.
+  __ add(too_far, feedback, Operand::PointerOffsetFromSmiKey(too_far));
+  __ add(too_far, too_far, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ add(pointer_reg, feedback,
+         Operand(FixedArray::OffsetOfElementAt(0) - kHeapObjectTag));
+
+  __ bind(&next_loop);
+  __ ldr(cached_map, MemOperand(pointer_reg));
+  __ ldr(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
+  __ cmp(receiver_map, cached_map);
+  __ b(ne, &prepare_next);
+  // Is it a transitioning store?
+  __ ldr(too_far, MemOperand(pointer_reg, kPointerSize));
+  __ CompareRoot(too_far, Heap::kUndefinedValueRootIndex);
+  __ b(ne, &transition_call);
+  __ ldr(pointer_reg, MemOperand(pointer_reg, kPointerSize * 2));
+  __ add(pc, pointer_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
+
+  __ bind(&transition_call);
+  __ ldr(too_far, FieldMemOperand(too_far, WeakCell::kValueOffset));
+  __ JumpIfSmi(too_far, miss);
+
+  __ ldr(receiver_map, MemOperand(pointer_reg, kPointerSize * 2));
+
+  // Load the map into the correct register.
+  DCHECK(feedback.is(VectorStoreTransitionDescriptor::MapRegister()));
+  __ mov(feedback, too_far);
+
+  __ add(pc, receiver_map, Operand(Code::kHeaderSize - kHeapObjectTag));
+
+  __ bind(&prepare_next);
+  __ add(pointer_reg, pointer_reg, Operand(kPointerSize * 3));
+  __ cmp(pointer_reg, too_far);
+  __ b(lt, &next_loop);
+
+  // We exhausted our array of map handler pairs.
+  __ jmp(miss);
+}
+
+
 void VectorKeyedStoreICStub::GenerateImpl(MacroAssembler* masm,
                                           bool in_frame) {
-  Label miss;
+  Register receiver = VectorStoreICDescriptor::ReceiverRegister();  // r1
+  Register key = VectorStoreICDescriptor::NameRegister();           // r2
+  Register vector = VectorStoreICDescriptor::VectorRegister();      // r3
+  Register slot = VectorStoreICDescriptor::SlotRegister();          // r4
+  DCHECK(VectorStoreICDescriptor::ValueRegister().is(r0));          // r0
+  Register feedback = r5;
+  Register receiver_map = r6;
+  Register scratch1 = r9;
+
+  __ add(feedback, vector, Operand::PointerOffsetFromSmiKey(slot));
+  __ ldr(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
+
+  // Try to quickly handle the monomorphic case without knowing for sure
+  // if we have a weak cell in feedback. We do know it's safe to look
+  // at WeakCell::kValueOffset.
+  Label try_array, load_smi_map, compare_map;
+  Label not_array, miss;
+  HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
+                        scratch1, &compare_map, &load_smi_map, &try_array);
+
+  __ bind(&try_array);
+  // Is it a fixed array?
+  __ ldr(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
+  __ CompareRoot(scratch1, Heap::kFixedArrayMapRootIndex);
+  __ b(ne, &not_array);
+
+  // We have a polymorphic element handler.
+  Label polymorphic, try_poly_name;
+  __ bind(&polymorphic);
+
+  // We are using register r8, which is used for the embedded constant pool
+  // when FLAG_enable_embedded_constant_pool is true.
+  DCHECK(!FLAG_enable_embedded_constant_pool);
+  Register scratch2 = r8;
+
+  HandlePolymorphicStoreCase(masm, feedback, receiver_map, scratch1, scratch2,
+                             &miss);
+
+  __ bind(&not_array);
+  // Is it generic?
+  __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
+  __ b(ne, &try_poly_name);
+  Handle<Code> megamorphic_stub =
+      KeyedStoreIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
+  __ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);
+
+  __ bind(&try_poly_name);
+  // We might have a name in feedback, and a fixed array in the next slot.
+  __ cmp(key, feedback);
+  __ b(ne, &miss);
+  // If the name comparison succeeded, we know we have a fixed array with
+  // at least one map/handler pair.
+  __ add(feedback, vector, Operand::PointerOffsetFromSmiKey(slot));
+  __ ldr(feedback,
+         FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
+  HandleArrayCases(masm, feedback, receiver_map, scratch1, scratch2, false,
+                   &miss);
 
-  // TODO(mvstanton): Implement.
__ bind(&miss); KeyedStoreIC::GenerateMiss(masm); + + __ bind(&load_smi_map); + __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex); + __ jmp(&compare_map); } diff --git a/src/arm64/code-stubs-arm64.cc b/src/arm64/code-stubs-arm64.cc index 7afe46f..1720bdf 100644 --- a/src/arm64/code-stubs-arm64.cc +++ b/src/arm64/code-stubs-arm64.cc @@ -4676,11 +4676,46 @@ void VectorStoreICStub::GenerateForTrampoline(MacroAssembler* masm) { void VectorStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) { - Label miss; + Register receiver = VectorStoreICDescriptor::ReceiverRegister(); // x1 + Register key = VectorStoreICDescriptor::NameRegister(); // x2 + Register vector = VectorStoreICDescriptor::VectorRegister(); // x3 + Register slot = VectorStoreICDescriptor::SlotRegister(); // x4 + DCHECK(VectorStoreICDescriptor::ValueRegister().is(x0)); // x0 + Register feedback = x5; + Register receiver_map = x6; + Register scratch1 = x7; + + __ Add(feedback, vector, Operand::UntagSmiAndScale(slot, kPointerSizeLog2)); + __ Ldr(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize)); + + // Try to quickly handle the monomorphic case without knowing for sure + // if we have a weak cell in feedback. We do know it's safe to look + // at WeakCell::kValueOffset. + Label try_array, load_smi_map, compare_map; + Label not_array, miss; + HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot, + scratch1, &compare_map, &load_smi_map, &try_array); + + // Is it a fixed array? + __ Bind(&try_array); + __ Ldr(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset)); + __ JumpIfNotRoot(scratch1, Heap::kFixedArrayMapRootIndex, ¬_array); + HandleArrayCases(masm, feedback, receiver_map, scratch1, x8, true, &miss); + + __ Bind(¬_array); + __ JumpIfNotRoot(feedback, Heap::kmegamorphic_symbolRootIndex, &miss); + Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags( + Code::ComputeHandlerFlags(Code::STORE_IC)); + masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, code_flags, + receiver, key, feedback, + receiver_map, scratch1, x8); - // TODO(mvstanton): Implement. __ Bind(&miss); StoreIC::GenerateMiss(masm); + + __ Bind(&load_smi_map); + __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex); + __ jmp(&compare_map); } @@ -4694,12 +4729,126 @@ void VectorKeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) { } +static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register feedback, + Register receiver_map, Register scratch1, + Register scratch2, Label* miss) { + // feedback initially contains the feedback array + Label next_loop, prepare_next; + Label start_polymorphic; + Label transition_call; + + Register cached_map = scratch1; + Register too_far = scratch2; + Register pointer_reg = feedback; + + __ Ldr(too_far, FieldMemOperand(feedback, FixedArray::kLengthOffset)); + + // +-----+------+------+-----+-----+-----+ ... ----+ + // | map | len | wm0 | wt0 | h0 | wm1 | hN | + // +-----+------+------+-----+-----+ ----+ ... ----+ + // 0 1 2 len-1 + // ^ ^ + // | | + // pointer_reg too_far + // aka feedback scratch2 + // also need receiver_map + // use cached_map (scratch1) to look in the weak map values. 
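+  // too_far holds the Smi-encoded array length; untag and scale it by the
+  // pointer size to form the end-of-array limit, then point pointer_reg at
+  // element 0.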
+ __ Add(too_far, feedback, + Operand::UntagSmiAndScale(too_far, kPointerSizeLog2)); + __ Add(too_far, too_far, FixedArray::kHeaderSize - kHeapObjectTag); + __ Add(pointer_reg, feedback, + FixedArray::OffsetOfElementAt(0) - kHeapObjectTag); + + __ Bind(&next_loop); + __ Ldr(cached_map, MemOperand(pointer_reg)); + __ Ldr(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset)); + __ Cmp(receiver_map, cached_map); + __ B(ne, &prepare_next); + // Is it a transitioning store? + __ Ldr(too_far, MemOperand(pointer_reg, kPointerSize)); + __ CompareRoot(too_far, Heap::kUndefinedValueRootIndex); + __ B(ne, &transition_call); + + __ Ldr(pointer_reg, MemOperand(pointer_reg, kPointerSize * 2)); + __ Add(pointer_reg, pointer_reg, Code::kHeaderSize - kHeapObjectTag); + __ Jump(pointer_reg); + + __ Bind(&transition_call); + __ Ldr(too_far, FieldMemOperand(too_far, WeakCell::kValueOffset)); + __ JumpIfSmi(too_far, miss); + + __ Ldr(receiver_map, MemOperand(pointer_reg, kPointerSize * 2)); + // Load the map into the correct register. + DCHECK(feedback.is(VectorStoreTransitionDescriptor::MapRegister())); + __ mov(feedback, too_far); + __ Add(receiver_map, receiver_map, Code::kHeaderSize - kHeapObjectTag); + __ Jump(receiver_map); + + __ Bind(&prepare_next); + __ Add(pointer_reg, pointer_reg, kPointerSize * 3); + __ Cmp(pointer_reg, too_far); + __ B(lt, &next_loop); + + // We exhausted our array of map handler pairs. + __ jmp(miss); +} + + void VectorKeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) { - Label miss; + Register receiver = VectorStoreICDescriptor::ReceiverRegister(); // x1 + Register key = VectorStoreICDescriptor::NameRegister(); // x2 + Register vector = VectorStoreICDescriptor::VectorRegister(); // x3 + Register slot = VectorStoreICDescriptor::SlotRegister(); // x4 + DCHECK(VectorStoreICDescriptor::ValueRegister().is(x0)); // x0 + Register feedback = x5; + Register receiver_map = x6; + Register scratch1 = x7; + + __ Add(feedback, vector, Operand::UntagSmiAndScale(slot, kPointerSizeLog2)); + __ Ldr(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize)); + + // Try to quickly handle the monomorphic case without knowing for sure + // if we have a weak cell in feedback. We do know it's safe to look + // at WeakCell::kValueOffset. + Label try_array, load_smi_map, compare_map; + Label not_array, miss; + HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot, + scratch1, &compare_map, &load_smi_map, &try_array); + + __ Bind(&try_array); + // Is it a fixed array? + __ Ldr(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset)); + __ JumpIfNotRoot(scratch1, Heap::kFixedArrayMapRootIndex, ¬_array); + + // We have a polymorphic element handler. + Label try_poly_name; + HandlePolymorphicStoreCase(masm, feedback, receiver_map, scratch1, x8, &miss); + + __ Bind(¬_array); + // Is it generic? + __ JumpIfNotRoot(feedback, Heap::kmegamorphic_symbolRootIndex, + &try_poly_name); + Handle megamorphic_stub = + KeyedStoreIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState()); + __ Jump(megamorphic_stub, RelocInfo::CODE_TARGET); + + __ Bind(&try_poly_name); + // We might have a name in feedback, and a fixed array in the next slot. + __ Cmp(key, feedback); + __ B(ne, &miss); + // If the name comparison succeeded, we know we have a fixed array with + // at least one map/handler pair. 
+ __ Add(feedback, vector, Operand::UntagSmiAndScale(slot, kPointerSizeLog2)); + __ Ldr(feedback, + FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize)); + HandleArrayCases(masm, feedback, receiver_map, scratch1, x8, false, &miss); - // TODO(mvstanton): Implement. __ Bind(&miss); KeyedStoreIC::GenerateMiss(masm); + + __ Bind(&load_smi_map); + __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex); + __ jmp(&compare_map); } diff --git a/src/ast.cc b/src/ast.cc index c61c29b..19747d8 100644 --- a/src/ast.cc +++ b/src/ast.cc @@ -343,12 +343,14 @@ FeedbackVectorRequirements ObjectLiteral::ComputeFeedbackRequirements( // This logic that computes the number of slots needed for vector store // ics must mirror FullCodeGenerator::VisitObjectLiteral. int ic_slots = 0; + bool saw_computed_name = false; for (int i = 0; i < properties()->length(); i++) { ObjectLiteral::Property* property = properties()->at(i); if (property->IsCompileTimeValue()) continue; + saw_computed_name |= property->is_computed_name(); Expression* value = property->value(); - if (property->is_computed_name() && + if (saw_computed_name && property->kind() != ObjectLiteral::Property::PROTOTYPE) { if (FunctionLiteral::NeedsHomeObject(value)) ic_slots++; } else if (property->emit_store()) { diff --git a/src/ia32/code-stubs-ia32.cc b/src/ia32/code-stubs-ia32.cc index 567e1e0..e32b115 100644 --- a/src/ia32/code-stubs-ia32.cc +++ b/src/ia32/code-stubs-ia32.cc @@ -4589,11 +4589,173 @@ void VectorStoreICStub::GenerateForTrampoline(MacroAssembler* masm) { } +// value is on the stack already. +static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register receiver, + Register key, Register vector, + Register slot, Register feedback, + Label* miss) { + // feedback initially contains the feedback array + Label next, next_loop, prepare_next; + Label load_smi_map, compare_map; + Label start_polymorphic; + + __ push(receiver); + __ push(vector); + + Register receiver_map = receiver; + Register cached_map = vector; + + // Receiver might not be a heap object. + __ JumpIfSmi(receiver, &load_smi_map); + __ mov(receiver_map, FieldOperand(receiver, 0)); + __ bind(&compare_map); + __ mov(cached_map, FieldOperand(feedback, FixedArray::OffsetOfElementAt(0))); + + // A named keyed store might have a 2 element array, all other cases can count + // on an array with at least 2 {map, handler} pairs, so they can go right + // into polymorphic array handling. + __ cmp(receiver_map, FieldOperand(cached_map, WeakCell::kValueOffset)); + __ j(not_equal, &start_polymorphic); + + // found, now call handler. + Register handler = feedback; + DCHECK(handler.is(VectorStoreICDescriptor::ValueRegister())); + __ mov(handler, FieldOperand(feedback, FixedArray::OffsetOfElementAt(1))); + __ pop(vector); + __ pop(receiver); + __ lea(handler, FieldOperand(handler, Code::kHeaderSize)); + __ xchg(handler, Operand(esp, 0)); + __ ret(0); + + // Polymorphic, we have to loop from 2 to N + + // TODO(mvstanton): I think there is a bug here, we are assuming the + // array has more than one map/handler pair, but we call this function in the + // keyed store with a string key case, where it might be just an array of two + // elements. 
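+
+  // The first {map, handler} pair was already checked above, so the scan
+  // below starts at index 2 and advances two slots (one {map, handler}
+  // pair) per iteration until the array length is reached.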
+  __ bind(&start_polymorphic);
+  __ push(key);
+  Register counter = key;
+  __ mov(counter, Immediate(Smi::FromInt(2)));
+  __ bind(&next_loop);
+  __ mov(cached_map, FieldOperand(feedback, counter, times_half_pointer_size,
+                                  FixedArray::kHeaderSize));
+  __ cmp(receiver_map, FieldOperand(cached_map, WeakCell::kValueOffset));
+  __ j(not_equal, &prepare_next);
+  __ mov(handler, FieldOperand(feedback, counter, times_half_pointer_size,
+                               FixedArray::kHeaderSize + kPointerSize));
+  __ pop(key);
+  __ pop(vector);
+  __ pop(receiver);
+  __ lea(handler, FieldOperand(handler, Code::kHeaderSize));
+  __ xchg(handler, Operand(esp, 0));
+  __ ret(0);
+
+  __ bind(&prepare_next);
+  __ add(counter, Immediate(Smi::FromInt(2)));
+  __ cmp(counter, FieldOperand(feedback, FixedArray::kLengthOffset));
+  __ j(less, &next_loop);
+
+  // We exhausted our array of map handler pairs.
+  __ pop(key);
+  __ pop(vector);
+  __ pop(receiver);
+  __ jmp(miss);
+
+  __ bind(&load_smi_map);
+  __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
+  __ jmp(&compare_map);
+}
+
+
+static void HandleMonomorphicStoreCase(MacroAssembler* masm, Register receiver,
+                                       Register key, Register vector,
+                                       Register slot, Register weak_cell,
+                                       Label* miss) {
+  // The store ic value is on the stack.
+  DCHECK(weak_cell.is(VectorStoreICDescriptor::ValueRegister()));
+
+  // feedback initially contains the feedback array
+  Label compare_smi_map;
+
+  // Move the weak map into the weak_cell register.
+  Register ic_map = weak_cell;
+  __ mov(ic_map, FieldOperand(weak_cell, WeakCell::kValueOffset));
+
+  // Receiver might not be a heap object.
+  __ JumpIfSmi(receiver, &compare_smi_map);
+  __ cmp(ic_map, FieldOperand(receiver, 0));
+  __ j(not_equal, miss);
+  __ mov(weak_cell, FieldOperand(vector, slot, times_half_pointer_size,
+                                 FixedArray::kHeaderSize + kPointerSize));
+  __ lea(weak_cell, FieldOperand(weak_cell, Code::kHeaderSize));
+  // Put the store ic value back in its register.
+  __ xchg(weak_cell, Operand(esp, 0));
+  // "return" to the handler.
+  __ ret(0);
+
+  // In microbenchmarks, it made sense to unroll this code so that the call to
+  // the handler is duplicated for a HeapObject receiver and a Smi receiver.
+  __ bind(&compare_smi_map);
+  __ CompareRoot(ic_map, Heap::kHeapNumberMapRootIndex);
+  __ j(not_equal, miss);
+  __ mov(weak_cell, FieldOperand(vector, slot, times_half_pointer_size,
+                                 FixedArray::kHeaderSize + kPointerSize));
+  __ lea(weak_cell, FieldOperand(weak_cell, Code::kHeaderSize));
+  // Put the store ic value back in its register.
+  __ xchg(weak_cell, Operand(esp, 0));
+  // "return" to the handler.
+  __ ret(0);
+}
+
+
 void VectorStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
+  Register receiver = VectorStoreICDescriptor::ReceiverRegister();  // edx
+  Register key = VectorStoreICDescriptor::NameRegister();           // ecx
+  Register value = VectorStoreICDescriptor::ValueRegister();        // eax
+  Register vector = VectorStoreICDescriptor::VectorRegister();      // ebx
+  Register slot = VectorStoreICDescriptor::SlotRegister();          // edi
   Label miss;
 
-  // TODO(mvstanton): Implement.
+  __ push(value);
+
+  Register scratch = value;
+  __ mov(scratch, FieldOperand(vector, slot, times_half_pointer_size,
+                               FixedArray::kHeaderSize));
+
+  // Is it a weak cell?
+  Label try_array;
+  Label not_array, smi_key, key_okay;
+  __ CompareRoot(FieldOperand(scratch, 0), Heap::kWeakCellMapRootIndex);
+  __ j(not_equal, &try_array);
+  HandleMonomorphicStoreCase(masm, receiver, key, vector, slot, scratch,
+                             &miss);
+
+  // Is it a fixed array?
+ __ bind(&try_array); + __ CompareRoot(FieldOperand(scratch, 0), Heap::kFixedArrayMapRootIndex); + __ j(not_equal, ¬_array); + HandlePolymorphicStoreCase(masm, receiver, key, vector, slot, scratch, &miss); + + __ bind(¬_array); + __ CompareRoot(scratch, Heap::kmegamorphic_symbolRootIndex); + __ j(not_equal, &miss); + + __ pop(value); + __ push(slot); + __ push(vector); + Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags( + Code::ComputeHandlerFlags(Code::STORE_IC)); + masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, code_flags, + receiver, key, slot, no_reg); + __ pop(vector); + __ pop(slot); + Label no_pop_miss; + __ jmp(&no_pop_miss); + __ bind(&miss); + __ pop(value); + __ bind(&no_pop_miss); StoreIC::GenerateMiss(masm); } @@ -4608,11 +4770,147 @@ void VectorKeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) { } +static void HandlePolymorphicKeyedStoreCase(MacroAssembler* masm, + Register receiver, Register key, + Register vector, Register slot, + Register feedback, Label* miss) { + // feedback initially contains the feedback array + Label next, next_loop, prepare_next; + Label load_smi_map, compare_map; + Label transition_call; + Label pop_and_miss; + + __ push(receiver); + __ push(vector); + + Register receiver_map = receiver; + Register cached_map = vector; + + // Receiver might not be a heap object. + __ JumpIfSmi(receiver, &load_smi_map); + __ mov(receiver_map, FieldOperand(receiver, 0)); + __ bind(&compare_map); + + // Polymorphic, we have to loop from 0 to N - 1 + __ push(key); + // On the stack we have: + // key (esp) + // vector + // receiver + // value + Register counter = key; + __ mov(counter, Immediate(Smi::FromInt(0))); + __ bind(&next_loop); + __ mov(cached_map, FieldOperand(feedback, counter, times_half_pointer_size, + FixedArray::kHeaderSize)); + __ cmp(receiver_map, FieldOperand(cached_map, WeakCell::kValueOffset)); + __ j(not_equal, &prepare_next); + __ mov(cached_map, FieldOperand(feedback, counter, times_half_pointer_size, + FixedArray::kHeaderSize + kPointerSize)); + __ CompareRoot(cached_map, Heap::kUndefinedValueRootIndex); + __ j(not_equal, &transition_call); + __ mov(feedback, FieldOperand(feedback, counter, times_half_pointer_size, + FixedArray::kHeaderSize + 2 * kPointerSize)); + __ pop(key); + __ pop(vector); + __ pop(receiver); + __ lea(feedback, FieldOperand(feedback, Code::kHeaderSize)); + __ xchg(feedback, Operand(esp, 0)); + __ ret(0); + + __ bind(&transition_call); + // Oh holy hell this will be tough. + // The map goes in vector register. + __ mov(receiver, FieldOperand(cached_map, WeakCell::kValueOffset)); + // The weak cell may have been cleared. + __ JumpIfSmi(receiver, &pop_and_miss); + // slot goes on the stack, and holds return address. + __ xchg(slot, Operand(esp, 4 * kPointerSize)); + // Get the handler in value. + __ mov(feedback, FieldOperand(feedback, counter, times_half_pointer_size, + FixedArray::kHeaderSize + 2 * kPointerSize)); + __ lea(feedback, FieldOperand(feedback, Code::kHeaderSize)); + // Pop key into place. + __ pop(key); + // Put the return address on top of stack, vector goes in slot. + __ xchg(slot, Operand(esp, 0)); + // put the map on the stack, receiver holds receiver. + __ xchg(receiver, Operand(esp, 1 * kPointerSize)); + // put the vector on the stack, slot holds value. + __ xchg(slot, Operand(esp, 2 * kPointerSize)); + // feedback (value) = value, slot = handler. 
+ __ xchg(feedback, slot); + __ jmp(slot); + + __ bind(&prepare_next); + __ add(counter, Immediate(Smi::FromInt(3))); + __ cmp(counter, FieldOperand(feedback, FixedArray::kLengthOffset)); + __ j(less, &next_loop); + + // We exhausted our array of map handler pairs. + __ bind(&pop_and_miss); + __ pop(key); + __ pop(vector); + __ pop(receiver); + __ jmp(miss); + + __ bind(&load_smi_map); + __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex); + __ jmp(&compare_map); +} + + void VectorKeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) { + Register receiver = VectorStoreICDescriptor::ReceiverRegister(); // edx + Register key = VectorStoreICDescriptor::NameRegister(); // ecx + Register value = VectorStoreICDescriptor::ValueRegister(); // eax + Register vector = VectorStoreICDescriptor::VectorRegister(); // ebx + Register slot = VectorStoreICDescriptor::SlotRegister(); // edi Label miss; - // TODO(mvstanton): Implement. + __ push(value); + + Register scratch = value; + __ mov(scratch, FieldOperand(vector, slot, times_half_pointer_size, + FixedArray::kHeaderSize)); + + // Is it a weak cell? + Label try_array; + Label not_array, smi_key, key_okay; + __ CompareRoot(FieldOperand(scratch, 0), Heap::kWeakCellMapRootIndex); + __ j(not_equal, &try_array); + HandleMonomorphicStoreCase(masm, receiver, key, vector, slot, scratch, &miss); + + // Is it a fixed array? + __ bind(&try_array); + __ CompareRoot(FieldOperand(scratch, 0), Heap::kFixedArrayMapRootIndex); + __ j(not_equal, ¬_array); + HandlePolymorphicKeyedStoreCase(masm, receiver, key, vector, slot, scratch, + &miss); + + __ bind(¬_array); + Label try_poly_name; + __ CompareRoot(scratch, Heap::kmegamorphic_symbolRootIndex); + __ j(not_equal, &try_poly_name); + + __ pop(value); + + Handle megamorphic_stub = + KeyedStoreIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState()); + __ jmp(megamorphic_stub, RelocInfo::CODE_TARGET); + + __ bind(&try_poly_name); + // We might have a name in feedback, and a fixed array in the next slot. + __ cmp(key, scratch); + __ j(not_equal, &miss); + // If the name comparison succeeded, we know we have a fixed array with + // at least one map/handler pair. + __ mov(scratch, FieldOperand(vector, slot, times_half_pointer_size, + FixedArray::kHeaderSize + kPointerSize)); + HandlePolymorphicStoreCase(masm, receiver, key, vector, slot, scratch, &miss); + __ bind(&miss); + __ pop(value); KeyedStoreIC::GenerateMiss(masm); } diff --git a/src/ic/access-compiler.h b/src/ic/access-compiler.h index 4eb70ef..32700f4 100644 --- a/src/ic/access-compiler.h +++ b/src/ic/access-compiler.h @@ -60,9 +60,6 @@ class PropertyAccessCompiler BASE_EMBEDDED { Register scratch2() const { return registers_[3]; } Register scratch3() const { return registers_[4]; } - // Calling convention between indexed store IC and handler. - Register transition_map() const { return scratch1(); } - static Register* GetCallingConvention(Code::Kind); static Register* load_calling_convention(); static Register* store_calling_convention(); diff --git a/src/ic/arm/access-compiler-arm.cc b/src/ic/arm/access-compiler-arm.cc index 3b0c0c2..62f5547 100644 --- a/src/ic/arm/access-compiler-arm.cc +++ b/src/ic/arm/access-compiler-arm.cc @@ -31,7 +31,7 @@ Register* PropertyAccessCompiler::store_calling_convention() { // receiver, name, scratch1, scratch2, scratch3. 
Register receiver = StoreDescriptor::ReceiverRegister(); Register name = StoreDescriptor::NameRegister(); - DCHECK(r3.is(StoreTransitionDescriptor::MapRegister())); + DCHECK(FLAG_vector_stores || r3.is(StoreTransitionDescriptor::MapRegister())); static Register registers[] = {receiver, name, r3, r4, r5}; return registers; } diff --git a/src/ic/arm/handler-compiler-arm.cc b/src/ic/arm/handler-compiler-arm.cc index 1760a89..e2585fe 100644 --- a/src/ic/arm/handler-compiler-arm.cc +++ b/src/ic/arm/handler-compiler-arm.cc @@ -306,25 +306,35 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall( } +static void StoreIC_PushArgs(MacroAssembler* masm) { + if (FLAG_vector_stores) { + __ Push(StoreDescriptor::ReceiverRegister(), + StoreDescriptor::NameRegister(), StoreDescriptor::ValueRegister(), + VectorStoreICDescriptor::SlotRegister(), + VectorStoreICDescriptor::VectorRegister()); + } else { + __ Push(StoreDescriptor::ReceiverRegister(), + StoreDescriptor::NameRegister(), StoreDescriptor::ValueRegister()); + } +} + + void NamedStoreHandlerCompiler::GenerateSlow(MacroAssembler* masm) { - // Push receiver, key and value for runtime call. - __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(), - StoreDescriptor::ValueRegister()); + StoreIC_PushArgs(masm); // The slow case calls into the runtime to complete the store without causing // an IC miss that would otherwise cause a transition to the generic stub. - __ TailCallRuntime(Runtime::kStoreIC_Slow, 3, 1); + __ TailCallRuntime(Runtime::kStoreIC_Slow, FLAG_vector_stores ? 5 : 3, 1); } void ElementHandlerCompiler::GenerateStoreSlow(MacroAssembler* masm) { - // Push receiver, key and value for runtime call. - __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(), - StoreDescriptor::ValueRegister()); + StoreIC_PushArgs(masm); // The slow case calls into the runtime to complete the store without causing // an IC miss that would otherwise cause a transition to the generic stub. - __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow, 3, 1); + __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow, FLAG_vector_stores ? 5 : 3, + 1); } @@ -567,6 +577,7 @@ void NamedStoreHandlerCompiler::FrontendFooter(Handle name, Label* miss) { Label success; __ b(&success); GenerateRestoreName(miss, name); + if (IC::ICUseVector(kind())) PopVectorAndSlot(); TailCallBuiltin(masm(), MissBuiltin(kind())); __ bind(&success); } diff --git a/src/ic/arm/ic-arm.cc b/src/ic/arm/ic-arm.cc index a805f4c..de219ae 100644 --- a/src/ic/arm/ic-arm.cc +++ b/src/ic/arm/ic-arm.cc @@ -692,12 +692,20 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm, __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset)); __ JumpIfNotUniqueNameInstanceType(r4, &slow); + // We use register r8 when FLAG_vector_stores is enabled, because otherwise + // probing the megamorphic stub cache would require pushing temporaries on + // the stack. + // TODO(mvstanton): quit using register r8 when + // FLAG_enable_embedded_constant_pool is turned on. + DCHECK(!FLAG_vector_stores || !FLAG_enable_embedded_constant_pool); + Register temporary2 = FLAG_vector_stores ? r8 : r4; if (FLAG_vector_stores) { // The handlers in the stub cache expect a vector and slot. Since we won't // change the IC from any downstream misses, a dummy vector can be used. 
Register vector = VectorStoreICDescriptor::VectorRegister(); Register slot = VectorStoreICDescriptor::SlotRegister(); - DCHECK(!AreAliased(vector, slot, r3, r4, r5, r6)); + + DCHECK(!AreAliased(vector, slot, r5, temporary2, r6, r9)); Handle dummy_vector = TypeFeedbackVector::DummyVector(masm->isolate()); int slot_index = dummy_vector->GetIndex( @@ -708,8 +716,8 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm, Code::Flags flags = Code::RemoveTypeAndHolderFromFlags( Code::ComputeHandlerFlags(Code::STORE_IC)); - masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, flags, - receiver, key, r3, r4, r5, r6); + masm->isolate()->stub_cache()->GenerateProbe( + masm, Code::STORE_IC, flags, receiver, key, r5, temporary2, r6, r9); // Cache miss. __ b(&miss); @@ -792,20 +800,22 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) { Register receiver = StoreDescriptor::ReceiverRegister(); Register name = StoreDescriptor::NameRegister(); Register value = StoreDescriptor::ValueRegister(); - Register dictionary = r3; + Register dictionary = r5; DCHECK(receiver.is(r1)); DCHECK(name.is(r2)); DCHECK(value.is(r0)); + DCHECK(VectorStoreICDescriptor::VectorRegister().is(r3)); + DCHECK(VectorStoreICDescriptor::SlotRegister().is(r4)); __ ldr(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); - GenerateDictionaryStore(masm, &miss, dictionary, name, value, r4, r5); + GenerateDictionaryStore(masm, &miss, dictionary, name, value, r6, r9); Counters* counters = masm->isolate()->counters(); - __ IncrementCounter(counters->store_normal_hit(), 1, r4, r5); + __ IncrementCounter(counters->store_normal_hit(), 1, r6, r9); __ Ret(); __ bind(&miss); - __ IncrementCounter(counters->store_normal_miss(), 1, r4, r5); + __ IncrementCounter(counters->store_normal_miss(), 1, r6, r9); GenerateMiss(masm); } diff --git a/src/ic/arm/ic-compiler-arm.cc b/src/ic/arm/ic-compiler-arm.cc index ff2bcf0..9b8abd3 100644 --- a/src/ic/arm/ic-compiler-arm.cc +++ b/src/ic/arm/ic-compiler-arm.cc @@ -111,7 +111,10 @@ Handle PropertyICCompiler::CompileKeyedStorePolymorphic( Label next_map; __ b(ne, &next_map); Handle cell = Map::WeakCellForMap(transitioned_maps->at(i)); - __ LoadWeakValue(transition_map(), cell, &miss); + Register transition_map = scratch1(); + DCHECK(!FLAG_vector_stores && + transition_map.is(StoreTransitionDescriptor::MapRegister())); + __ LoadWeakValue(transition_map, cell, &miss); __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, al); __ bind(&next_map); } diff --git a/src/ic/arm/stub-cache-arm.cc b/src/ic/arm/stub-cache-arm.cc index cdd04fa..86710eb 100644 --- a/src/ic/arm/stub-cache-arm.cc +++ b/src/ic/arm/stub-cache-arm.cc @@ -120,8 +120,14 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind, // extra3 don't conflict with the vector and slot registers, which need // to be preserved for a handler call or miss. 
if (IC::ICUseVector(ic_kind)) { - Register vector = LoadWithVectorDescriptor::VectorRegister(); - Register slot = LoadWithVectorDescriptor::SlotRegister(); + Register vector, slot; + if (ic_kind == Code::STORE_IC || ic_kind == Code::KEYED_STORE_IC) { + vector = VectorStoreICDescriptor::VectorRegister(); + slot = VectorStoreICDescriptor::SlotRegister(); + } else { + vector = LoadWithVectorDescriptor::VectorRegister(); + slot = LoadWithVectorDescriptor::SlotRegister(); + } DCHECK(!AreAliased(vector, slot, scratch, extra, extra2, extra3)); } #endif diff --git a/src/ic/arm64/access-compiler-arm64.cc b/src/ic/arm64/access-compiler-arm64.cc index 14b0fa7..13b0887 100644 --- a/src/ic/arm64/access-compiler-arm64.cc +++ b/src/ic/arm64/access-compiler-arm64.cc @@ -38,7 +38,7 @@ Register* PropertyAccessCompiler::store_calling_convention() { // receiver, value, scratch1, scratch2, scratch3. Register receiver = StoreDescriptor::ReceiverRegister(); Register name = StoreDescriptor::NameRegister(); - DCHECK(x3.is(StoreTransitionDescriptor::MapRegister())); + DCHECK(FLAG_vector_stores || x3.is(StoreTransitionDescriptor::MapRegister())); static Register registers[] = {receiver, name, x3, x4, x5}; return registers; } diff --git a/src/ic/arm64/handler-compiler-arm64.cc b/src/ic/arm64/handler-compiler-arm64.cc index 5de4364..10ea1d7 100644 --- a/src/ic/arm64/handler-compiler-arm64.cc +++ b/src/ic/arm64/handler-compiler-arm64.cc @@ -299,27 +299,36 @@ void NamedLoadHandlerCompiler::GenerateLoadViaGetter( } +static void StoreIC_PushArgs(MacroAssembler* masm) { + if (FLAG_vector_stores) { + __ Push(StoreDescriptor::ReceiverRegister(), + StoreDescriptor::NameRegister(), StoreDescriptor::ValueRegister(), + VectorStoreICDescriptor::SlotRegister(), + VectorStoreICDescriptor::VectorRegister()); + } else { + __ Push(StoreDescriptor::ReceiverRegister(), + StoreDescriptor::NameRegister(), StoreDescriptor::ValueRegister()); + } +} + + void NamedStoreHandlerCompiler::GenerateSlow(MacroAssembler* masm) { - // Push receiver, name and value for runtime call. - __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(), - StoreDescriptor::ValueRegister()); + StoreIC_PushArgs(masm); // The slow case calls into the runtime to complete the store without causing // an IC miss that would otherwise cause a transition to the generic stub. - __ TailCallRuntime(Runtime::kStoreIC_Slow, 3, 1); + __ TailCallRuntime(Runtime::kStoreIC_Slow, FLAG_vector_stores ? 5 : 3, 1); } void ElementHandlerCompiler::GenerateStoreSlow(MacroAssembler* masm) { ASM_LOCATION("ElementHandlerCompiler::GenerateStoreSlow"); - - // Push receiver, key and value for runtime call. - __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(), - StoreDescriptor::ValueRegister()); + StoreIC_PushArgs(masm); // The slow case calls into the runtime to complete the store without causing // an IC miss that would otherwise cause a transition to the generic stub. - __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow, 3, 1); + __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow, FLAG_vector_stores ? 
5 : 3, + 1); } @@ -618,6 +627,7 @@ void NamedStoreHandlerCompiler::FrontendFooter(Handle name, Label* miss) { __ B(&success); GenerateRestoreName(miss, name); + if (IC::ICUseVector(kind())) PopVectorAndSlot(); TailCallBuiltin(masm(), MissBuiltin(kind())); __ Bind(&success); diff --git a/src/ic/arm64/ic-arm64.cc b/src/ic/arm64/ic-arm64.cc index 27c4f71..c4c856a 100644 --- a/src/ic/arm64/ic-arm64.cc +++ b/src/ic/arm64/ic-arm64.cc @@ -696,7 +696,7 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm, // change the IC from any downstream misses, a dummy vector can be used. Register vector = VectorStoreICDescriptor::VectorRegister(); Register slot = VectorStoreICDescriptor::SlotRegister(); - DCHECK(!AreAliased(vector, slot, x3, x4, x5, x6)); + DCHECK(!AreAliased(vector, slot, x5, x6, x7, x8)); Handle dummy_vector = TypeFeedbackVector::DummyVector(masm->isolate()); int slot_index = dummy_vector->GetIndex( @@ -708,7 +708,7 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm, Code::Flags flags = Code::RemoveTypeAndHolderFromFlags( Code::ComputeHandlerFlags(Code::STORE_IC)); masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, flags, - receiver, key, x3, x4, x5, x6); + receiver, key, x5, x6, x7, x8); // Cache miss. __ B(&miss); @@ -789,19 +789,21 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) { Register value = StoreDescriptor::ValueRegister(); Register receiver = StoreDescriptor::ReceiverRegister(); Register name = StoreDescriptor::NameRegister(); - Register dictionary = x3; - DCHECK(!AreAliased(value, receiver, name, x3, x4, x5)); + Register dictionary = x5; + DCHECK(!AreAliased(value, receiver, name, + VectorStoreICDescriptor::SlotRegister(), + VectorStoreICDescriptor::VectorRegister(), x5, x6, x7)); __ Ldr(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); - GenerateDictionaryStore(masm, &miss, dictionary, name, value, x4, x5); + GenerateDictionaryStore(masm, &miss, dictionary, name, value, x6, x7); Counters* counters = masm->isolate()->counters(); - __ IncrementCounter(counters->store_normal_hit(), 1, x4, x5); + __ IncrementCounter(counters->store_normal_hit(), 1, x6, x7); __ Ret(); // Cache miss: Jump to runtime. __ Bind(&miss); - __ IncrementCounter(counters->store_normal_miss(), 1, x4, x5); + __ IncrementCounter(counters->store_normal_miss(), 1, x6, x7); GenerateMiss(masm); } diff --git a/src/ic/arm64/ic-compiler-arm64.cc b/src/ic/arm64/ic-compiler-arm64.cc index a86b5e5..b4a4163 100644 --- a/src/ic/arm64/ic-compiler-arm64.cc +++ b/src/ic/arm64/ic-compiler-arm64.cc @@ -116,7 +116,10 @@ Handle PropertyICCompiler::CompileKeyedStorePolymorphic( // This argument is used by the handler stub. For example, see // ElementsTransitionGenerator::GenerateMapChangeElementsTransition. Handle cell = Map::WeakCellForMap(transitioned_maps->at(i)); - __ LoadWeakValue(transition_map(), cell, &miss); + Register transition_map = scratch1(); + DCHECK(!FLAG_vector_stores && + transition_map.is(StoreTransitionDescriptor::MapRegister())); + __ LoadWeakValue(transition_map, cell, &miss); } __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET); __ Bind(&skip); diff --git a/src/ic/arm64/stub-cache-arm64.cc b/src/ic/arm64/stub-cache-arm64.cc index ecd7fe1..eb82f2a 100644 --- a/src/ic/arm64/stub-cache-arm64.cc +++ b/src/ic/arm64/stub-cache-arm64.cc @@ -111,8 +111,14 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind, // extra3 don't conflict with the vector and slot registers, which need // to be preserved for a handler call or miss. 
if (IC::ICUseVector(ic_kind)) { - Register vector = LoadWithVectorDescriptor::VectorRegister(); - Register slot = LoadWithVectorDescriptor::SlotRegister(); + Register vector, slot; + if (ic_kind == Code::STORE_IC || ic_kind == Code::KEYED_STORE_IC) { + vector = VectorStoreICDescriptor::VectorRegister(); + slot = VectorStoreICDescriptor::SlotRegister(); + } else { + vector = LoadWithVectorDescriptor::VectorRegister(); + slot = LoadWithVectorDescriptor::SlotRegister(); + } DCHECK(!AreAliased(vector, slot, scratch, extra, extra2, extra3)); } #endif diff --git a/src/ic/ia32/access-compiler-ia32.cc b/src/ic/ia32/access-compiler-ia32.cc index 81579e5..acb3526 100644 --- a/src/ic/ia32/access-compiler-ia32.cc +++ b/src/ic/ia32/access-compiler-ia32.cc @@ -30,7 +30,8 @@ Register* PropertyAccessCompiler::store_calling_convention() { // receiver, name, scratch1, scratch2, scratch3. Register receiver = StoreDescriptor::ReceiverRegister(); Register name = StoreDescriptor::NameRegister(); - DCHECK(ebx.is(StoreTransitionDescriptor::MapRegister())); + DCHECK(FLAG_vector_stores || + ebx.is(StoreTransitionDescriptor::MapRegister())); static Register registers[] = {receiver, name, ebx, edi, no_reg}; return registers; } diff --git a/src/ic/ia32/handler-compiler-ia32.cc b/src/ic/ia32/handler-compiler-ia32.cc index 5845abf..1d01909 100644 --- a/src/ic/ia32/handler-compiler-ia32.cc +++ b/src/ic/ia32/handler-compiler-ia32.cc @@ -304,13 +304,24 @@ static void StoreIC_PushArgs(MacroAssembler* masm) { Register name = StoreDescriptor::NameRegister(); Register value = StoreDescriptor::ValueRegister(); - DCHECK(!ebx.is(receiver) && !ebx.is(name) && !ebx.is(value)); - - __ pop(ebx); - __ push(receiver); - __ push(name); - __ push(value); - __ push(ebx); + if (FLAG_vector_stores) { + Register slot = VectorStoreICDescriptor::SlotRegister(); + Register vector = VectorStoreICDescriptor::VectorRegister(); + + __ xchg(receiver, Operand(esp, 0)); + __ push(name); + __ push(value); + __ push(slot); + __ push(vector); + __ push(receiver); // which contains the return address. + } else { + DCHECK(!ebx.is(receiver) && !ebx.is(name) && !ebx.is(value)); + __ pop(ebx); + __ push(receiver); + __ push(name); + __ push(value); + __ push(ebx); + } } @@ -319,7 +330,7 @@ void NamedStoreHandlerCompiler::GenerateSlow(MacroAssembler* masm) { StoreIC_PushArgs(masm); // Do tail-call to runtime routine. - __ TailCallRuntime(Runtime::kStoreIC_Slow, 3, 1); + __ TailCallRuntime(Runtime::kStoreIC_Slow, FLAG_vector_stores ? 5 : 3, 1); } @@ -328,7 +339,8 @@ void ElementHandlerCompiler::GenerateStoreSlow(MacroAssembler* masm) { StoreIC_PushArgs(masm); // Do tail-call to runtime routine. - __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow, 3, 1); + __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow, FLAG_vector_stores ? 5 : 3, + 1); } @@ -352,10 +364,16 @@ void NamedStoreHandlerCompiler::GenerateRestoreName(Handle name) { void NamedStoreHandlerCompiler::GeneratePushMap(Register map_reg, Register scratch) { - // Get the return address, push the argument and then continue. 
- __ pop(scratch); + // current after GeneratePushMap + // ------------------------------------------------- + // ret addr slot + // vector vector + // sp -> slot map + // sp -> ret addr + // + __ xchg(map_reg, Operand(esp, 0)); + __ xchg(map_reg, Operand(esp, 2 * kPointerSize)); __ push(map_reg); - __ push(scratch); } @@ -575,6 +593,7 @@ void NamedStoreHandlerCompiler::FrontendFooter(Handle name, Label* miss) { Label success; __ jmp(&success); GenerateRestoreName(miss, name); + if (IC::ICUseVector(kind())) PopVectorAndSlot(); TailCallBuiltin(masm(), MissBuiltin(kind())); __ bind(&success); } diff --git a/src/ic/ia32/ic-compiler-ia32.cc b/src/ic/ia32/ic-compiler-ia32.cc index a1e2cbc..d0a2e0b 100644 --- a/src/ic/ia32/ic-compiler-ia32.cc +++ b/src/ic/ia32/ic-compiler-ia32.cc @@ -112,7 +112,10 @@ Handle PropertyICCompiler::CompileKeyedStorePolymorphic( Label next_map; __ j(not_equal, &next_map, Label::kNear); Handle cell = Map::WeakCellForMap(transitioned_maps->at(i)); - __ LoadWeakValue(transition_map(), cell, &miss); + Register transition_map = scratch1(); + DCHECK(!FLAG_vector_stores && + transition_map.is(StoreTransitionDescriptor::MapRegister())); + __ LoadWeakValue(transition_map, cell, &miss); __ jmp(handler_stubs->at(i), RelocInfo::CODE_TARGET); __ bind(&next_map); } diff --git a/src/ic/ia32/ic-ia32.cc b/src/ic/ia32/ic-ia32.cc index d683264..7a6a415 100644 --- a/src/ic/ia32/ic-ia32.cc +++ b/src/ic/ia32/ic-ia32.cc @@ -577,7 +577,7 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm, Code::Flags flags = Code::RemoveTypeAndHolderFromFlags( Code::ComputeHandlerFlags(Code::STORE_IC)); masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, flags, - receiver, key, ebx, no_reg); + receiver, key, edi, no_reg); if (FLAG_vector_stores) { __ pop(VectorStoreICDescriptor::VectorRegister()); @@ -734,6 +734,12 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm, void StoreIC::GenerateMegamorphic(MacroAssembler* masm) { + if (FLAG_vector_stores) { + // This shouldn't be called. + __ int3(); + return; + } + // Return address is on the stack. Code::Flags flags = Code::RemoveTypeAndHolderFromFlags( Code::ComputeHandlerFlags(Code::STORE_IC)); @@ -787,22 +793,32 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) { Register receiver = StoreDescriptor::ReceiverRegister(); Register name = StoreDescriptor::NameRegister(); Register value = StoreDescriptor::ValueRegister(); - Register dictionary = ebx; - - __ mov(dictionary, FieldOperand(receiver, JSObject::kPropertiesOffset)); + Register vector = VectorStoreICDescriptor::VectorRegister(); + Register slot = VectorStoreICDescriptor::SlotRegister(); // A lot of registers are needed for storing to slow case // objects. Push and restore receiver but rely on // GenerateDictionaryStore preserving the value and name. __ push(receiver); + if (FLAG_vector_stores) { + __ push(vector); + __ push(slot); + } + + Register dictionary = ebx; + __ mov(dictionary, FieldOperand(receiver, JSObject::kPropertiesOffset)); GenerateDictionaryStore(masm, &restore_miss, dictionary, name, value, receiver, edi); - __ Drop(1); + __ Drop(FLAG_vector_stores ? 
3 : 1); Counters* counters = masm->isolate()->counters(); __ IncrementCounter(counters->store_normal_hit(), 1); __ ret(0); __ bind(&restore_miss); + if (FLAG_vector_stores) { + __ pop(slot); + __ pop(vector); + } __ pop(receiver); __ IncrementCounter(counters->store_normal_miss(), 1); GenerateMiss(masm); diff --git a/src/ic/ia32/stub-cache-ia32.cc b/src/ic/ia32/stub-cache-ia32.cc index 68b30e7..e579179 100644 --- a/src/ic/ia32/stub-cache-ia32.cc +++ b/src/ic/ia32/stub-cache-ia32.cc @@ -25,6 +25,9 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm, ExternalReference map_offset(isolate->stub_cache()->map_reference(table)); Label miss; + bool is_vector_store = + IC::ICUseVector(ic_kind) && + (ic_kind == Code::STORE_IC || ic_kind == Code::KEYED_STORE_IC); // Multiply by 3 because there are 3 fields per entry (name, code, map). __ lea(offset, Operand(offset, offset, times_2, 0)); @@ -56,19 +59,28 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm, } #endif - if (IC::ICUseVector(ic_kind)) { - // The vector and slot were pushed onto the stack before starting the - // probe, and need to be dropped before calling the handler. + // The vector and slot were pushed onto the stack before starting the + // probe, and need to be dropped before calling the handler. + if (is_vector_store) { + // The overlap here is rather embarrassing. One does what one must. + Register vector = VectorStoreICDescriptor::VectorRegister(); + DCHECK(extra.is(VectorStoreICDescriptor::SlotRegister())); + __ add(extra, Immediate(Code::kHeaderSize - kHeapObjectTag)); + __ pop(vector); + __ xchg(extra, Operand(esp, 0)); + // Jump to the first instruction in the code stub. + __ ret(0); + } else { __ pop(LoadWithVectorDescriptor::VectorRegister()); __ pop(LoadDescriptor::SlotRegister()); + __ add(extra, Immediate(Code::kHeaderSize - kHeapObjectTag)); + __ jmp(extra); } - // Jump to the first instruction in the code stub. - __ add(extra, Immediate(Code::kHeaderSize - kHeapObjectTag)); - __ jmp(extra); - __ bind(&miss); } else { + DCHECK(ic_kind == Code::STORE_IC || ic_kind == Code::KEYED_STORE_IC); + // Save the offset on the stack. __ push(offset); @@ -105,21 +117,21 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm, __ pop(offset); __ mov(offset, Operand::StaticArray(offset, times_1, value_offset)); - if (IC::ICUseVector(ic_kind)) { + // Jump to the first instruction in the code stub. + if (is_vector_store) { // The vector and slot were pushed onto the stack before starting the // probe, and need to be dropped before calling the handler. - Register vector = LoadWithVectorDescriptor::VectorRegister(); - Register slot = LoadDescriptor::SlotRegister(); - DCHECK(!offset.is(vector) && !offset.is(slot)); - + Register vector = VectorStoreICDescriptor::VectorRegister(); + DCHECK(offset.is(VectorStoreICDescriptor::SlotRegister())); + __ add(offset, Immediate(Code::kHeaderSize - kHeapObjectTag)); __ pop(vector); - __ pop(slot); + __ xchg(offset, Operand(esp, 0)); + __ ret(0); + } else { + __ add(offset, Immediate(Code::kHeaderSize - kHeapObjectTag)); + __ jmp(offset); } - // Jump to the first instruction in the code stub. - __ add(offset, Immediate(Code::kHeaderSize - kHeapObjectTag)); - __ jmp(offset); - // Pop at miss. 
__ bind(&miss); __ pop(offset); diff --git a/src/ic/mips/access-compiler-mips.cc b/src/ic/mips/access-compiler-mips.cc index 9aba385..f2f6c62 100644 --- a/src/ic/mips/access-compiler-mips.cc +++ b/src/ic/mips/access-compiler-mips.cc @@ -31,7 +31,7 @@ Register* PropertyAccessCompiler::store_calling_convention() { // receiver, name, scratch1, scratch2, scratch3. Register receiver = StoreDescriptor::ReceiverRegister(); Register name = StoreDescriptor::NameRegister(); - DCHECK(a3.is(StoreTransitionDescriptor::MapRegister())); + DCHECK(FLAG_vector_stores || a3.is(StoreTransitionDescriptor::MapRegister())); static Register registers[] = {receiver, name, a3, t0, t1}; return registers; } diff --git a/src/ic/mips/handler-compiler-mips.cc b/src/ic/mips/handler-compiler-mips.cc index 7f10a8e..8c135e4 100644 --- a/src/ic/mips/handler-compiler-mips.cc +++ b/src/ic/mips/handler-compiler-mips.cc @@ -296,25 +296,35 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall( } +static void StoreIC_PushArgs(MacroAssembler* masm) { + if (FLAG_vector_stores) { + __ Push(StoreDescriptor::ReceiverRegister(), + StoreDescriptor::NameRegister(), StoreDescriptor::ValueRegister(), + VectorStoreICDescriptor::SlotRegister(), + VectorStoreICDescriptor::VectorRegister()); + } else { + __ Push(StoreDescriptor::ReceiverRegister(), + StoreDescriptor::NameRegister(), StoreDescriptor::ValueRegister()); + } +} + + void NamedStoreHandlerCompiler::GenerateSlow(MacroAssembler* masm) { - // Push receiver, key and value for runtime call. - __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(), - StoreDescriptor::ValueRegister()); + StoreIC_PushArgs(masm); // The slow case calls into the runtime to complete the store without causing // an IC miss that would otherwise cause a transition to the generic stub. - __ TailCallRuntime(Runtime::kStoreIC_Slow, 3, 1); + __ TailCallRuntime(Runtime::kStoreIC_Slow, FLAG_vector_stores ? 5 : 3, 1); } void ElementHandlerCompiler::GenerateStoreSlow(MacroAssembler* masm) { - // Push receiver, key and value for runtime call. - __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(), - StoreDescriptor::ValueRegister()); + StoreIC_PushArgs(masm); // The slow case calls into the runtime to complete the store without causing // an IC miss that would otherwise cause a transition to the generic stub. - __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow, 3, 1); + __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow, FLAG_vector_stores ? 
5 : 3, + 1); } @@ -557,6 +567,7 @@ void NamedStoreHandlerCompiler::FrontendFooter(Handle name, Label* miss) { Label success; __ Branch(&success); GenerateRestoreName(miss, name); + if (IC::ICUseVector(kind())) PopVectorAndSlot(); TailCallBuiltin(masm(), MissBuiltin(kind())); __ bind(&success); } diff --git a/src/ic/mips/ic-compiler-mips.cc b/src/ic/mips/ic-compiler-mips.cc index 80f5c47..64f1662 100644 --- a/src/ic/mips/ic-compiler-mips.cc +++ b/src/ic/mips/ic-compiler-mips.cc @@ -100,7 +100,10 @@ Handle PropertyICCompiler::CompileKeyedStorePolymorphic( Label next_map; __ Branch(&next_map, ne, match, Operand(map_reg)); Handle cell = Map::WeakCellForMap(transitioned_maps->at(i)); - __ LoadWeakValue(transition_map(), cell, &miss); + Register transition_map = scratch1(); + DCHECK(!FLAG_vector_stores && + transition_map.is(StoreTransitionDescriptor::MapRegister())); + __ LoadWeakValue(transition_map, cell, &miss); __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET); __ bind(&next_map); } diff --git a/src/ic/mips/ic-mips.cc b/src/ic/mips/ic-mips.cc index a673dbf..a1a1181 100644 --- a/src/ic/mips/ic-mips.cc +++ b/src/ic/mips/ic-mips.cc @@ -681,7 +681,7 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm, // change the IC from any downstream misses, a dummy vector can be used. Register vector = VectorStoreICDescriptor::VectorRegister(); Register slot = VectorStoreICDescriptor::SlotRegister(); - DCHECK(!AreAliased(vector, slot, a3, t0, t1, t2)); + DCHECK(!AreAliased(vector, slot, t1, t2, t4, t5)); Handle dummy_vector = TypeFeedbackVector::DummyVector(masm->isolate()); int slot_index = dummy_vector->GetIndex( @@ -693,7 +693,7 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm, Code::Flags flags = Code::RemoveTypeAndHolderFromFlags( Code::ComputeHandlerFlags(Code::STORE_IC)); masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, flags, - receiver, key, a3, t0, t1, t2); + receiver, key, t1, t2, t4, t5); // Cache miss. __ Branch(&miss); @@ -794,20 +794,22 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) { Register receiver = StoreDescriptor::ReceiverRegister(); Register name = StoreDescriptor::NameRegister(); Register value = StoreDescriptor::ValueRegister(); - Register dictionary = a3; + Register dictionary = t1; DCHECK(receiver.is(a1)); DCHECK(name.is(a2)); DCHECK(value.is(a0)); + DCHECK(VectorStoreICDescriptor::VectorRegister().is(a3)); + DCHECK(VectorStoreICDescriptor::SlotRegister().is(t0)); __ lw(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); - GenerateDictionaryStore(masm, &miss, dictionary, name, value, t0, t1); + GenerateDictionaryStore(masm, &miss, dictionary, name, value, t2, t5); Counters* counters = masm->isolate()->counters(); - __ IncrementCounter(counters->store_normal_hit(), 1, t0, t1); + __ IncrementCounter(counters->store_normal_hit(), 1, t2, t5); __ Ret(); __ bind(&miss); - __ IncrementCounter(counters->store_normal_miss(), 1, t0, t1); + __ IncrementCounter(counters->store_normal_miss(), 1, t2, t5); GenerateMiss(masm); } diff --git a/src/ic/mips/stub-cache-mips.cc b/src/ic/mips/stub-cache-mips.cc index 12cacc8..1a9897e 100644 --- a/src/ic/mips/stub-cache-mips.cc +++ b/src/ic/mips/stub-cache-mips.cc @@ -116,8 +116,14 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind, // extra3 don't conflict with the vector and slot registers, which need // to be preserved for a handler call or miss. 
diff --git a/src/ic/mips/stub-cache-mips.cc b/src/ic/mips/stub-cache-mips.cc
index 12cacc8..1a9897e 100644
--- a/src/ic/mips/stub-cache-mips.cc
+++ b/src/ic/mips/stub-cache-mips.cc
@@ -116,8 +116,14 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
   // extra3 don't conflict with the vector and slot registers, which need
   // to be preserved for a handler call or miss.
   if (IC::ICUseVector(ic_kind)) {
-    Register vector = LoadWithVectorDescriptor::VectorRegister();
-    Register slot = LoadWithVectorDescriptor::SlotRegister();
+    Register vector, slot;
+    if (ic_kind == Code::STORE_IC || ic_kind == Code::KEYED_STORE_IC) {
+      vector = VectorStoreICDescriptor::VectorRegister();
+      slot = VectorStoreICDescriptor::SlotRegister();
+    } else {
+      vector = LoadWithVectorDescriptor::VectorRegister();
+      slot = LoadWithVectorDescriptor::SlotRegister();
+    }
     DCHECK(!AreAliased(vector, slot, scratch, extra, extra2, extra3));
   }
 #endif
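The same store-versus-load descriptor split recurs in every stub-cache probe
touched by this patch (here, and in the mips64 and x64 hunks below). Were it
ever factored out, the shared helper would look roughly like this; the name
GetVectorAndSlot is hypothetical, not part of this patch:

    // Hypothetical refactoring of the per-platform probe preamble: pick the
    // vector/slot registers by IC kind so the probe's scratch registers can
    // be checked against them (they must survive to the handler or miss).
    static void GetVectorAndSlot(Code::Kind ic_kind, Register* vector,
                                 Register* slot) {
      if (ic_kind == Code::STORE_IC || ic_kind == Code::KEYED_STORE_IC) {
        *vector = VectorStoreICDescriptor::VectorRegister();
        *slot = VectorStoreICDescriptor::SlotRegister();
      } else {
        DCHECK(ic_kind == Code::LOAD_IC || ic_kind == Code::KEYED_LOAD_IC);
        *vector = LoadWithVectorDescriptor::VectorRegister();
        *slot = LoadWithVectorDescriptor::SlotRegister();
      }
    }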
diff --git a/src/ic/mips64/access-compiler-mips64.cc b/src/ic/mips64/access-compiler-mips64.cc
index a2e7aed..500a6d6 100644
--- a/src/ic/mips64/access-compiler-mips64.cc
+++ b/src/ic/mips64/access-compiler-mips64.cc
@@ -31,7 +31,7 @@ Register* PropertyAccessCompiler::store_calling_convention() {
   // receiver, name, scratch1, scratch2, scratch3.
   Register receiver = StoreDescriptor::ReceiverRegister();
   Register name = StoreDescriptor::NameRegister();
-  DCHECK(a3.is(StoreTransitionDescriptor::MapRegister()));
+  DCHECK(FLAG_vector_stores || a3.is(StoreTransitionDescriptor::MapRegister()));
   static Register registers[] = {receiver, name, a3, a4, a5};
   return registers;
 }
diff --git a/src/ic/mips64/handler-compiler-mips64.cc b/src/ic/mips64/handler-compiler-mips64.cc
index 6b59a99..e905506 100644
--- a/src/ic/mips64/handler-compiler-mips64.cc
+++ b/src/ic/mips64/handler-compiler-mips64.cc
@@ -297,25 +297,35 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
 }
 
 
+static void StoreIC_PushArgs(MacroAssembler* masm) {
+  if (FLAG_vector_stores) {
+    __ Push(StoreDescriptor::ReceiverRegister(),
+            StoreDescriptor::NameRegister(), StoreDescriptor::ValueRegister(),
+            VectorStoreICDescriptor::SlotRegister(),
+            VectorStoreICDescriptor::VectorRegister());
+  } else {
+    __ Push(StoreDescriptor::ReceiverRegister(),
+            StoreDescriptor::NameRegister(), StoreDescriptor::ValueRegister());
+  }
+}
+
+
 void NamedStoreHandlerCompiler::GenerateSlow(MacroAssembler* masm) {
-  // Push receiver, key and value for runtime call.
-  __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
-          StoreDescriptor::ValueRegister());
+  StoreIC_PushArgs(masm);
 
   // The slow case calls into the runtime to complete the store without causing
   // an IC miss that would otherwise cause a transition to the generic stub.
-  __ TailCallRuntime(Runtime::kStoreIC_Slow, 3, 1);
+  __ TailCallRuntime(Runtime::kStoreIC_Slow, FLAG_vector_stores ? 5 : 3, 1);
 }
 
 
 void ElementHandlerCompiler::GenerateStoreSlow(MacroAssembler* masm) {
-  // Push receiver, key and value for runtime call.
-  __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
-          StoreDescriptor::ValueRegister());
+  StoreIC_PushArgs(masm);
 
   // The slow case calls into the runtime to complete the store without causing
   // an IC miss that would otherwise cause a transition to the generic stub.
-  __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow, 3, 1);
+  __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow, FLAG_vector_stores ? 5 : 3,
+                     1);
 }
 
 
@@ -558,6 +568,7 @@ void NamedStoreHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
     Label success;
     __ Branch(&success);
     GenerateRestoreName(miss, name);
+    if (IC::ICUseVector(kind())) PopVectorAndSlot();
     TailCallBuiltin(masm(), MissBuiltin(kind()));
     __ bind(&success);
   }
diff --git a/src/ic/mips64/ic-compiler-mips64.cc b/src/ic/mips64/ic-compiler-mips64.cc
index a834430..8cdd8f0 100644
--- a/src/ic/mips64/ic-compiler-mips64.cc
+++ b/src/ic/mips64/ic-compiler-mips64.cc
@@ -100,7 +100,10 @@ Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
       Label next_map;
       __ Branch(&next_map, ne, match, Operand(map_reg));
       Handle<WeakCell> cell = Map::WeakCellForMap(transitioned_maps->at(i));
-      __ LoadWeakValue(transition_map(), cell, &miss);
+      Register transition_map = scratch1();
+      DCHECK(!FLAG_vector_stores &&
+             transition_map.is(StoreTransitionDescriptor::MapRegister()));
+      __ LoadWeakValue(transition_map, cell, &miss);
       __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET);
       __ bind(&next_map);
     }
diff --git a/src/ic/mips64/ic-mips64.cc b/src/ic/mips64/ic-mips64.cc
index 6f3916d..cacc95c 100644
--- a/src/ic/mips64/ic-mips64.cc
+++ b/src/ic/mips64/ic-mips64.cc
@@ -677,9 +677,10 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
   if (FLAG_vector_stores) {
     // The handlers in the stub cache expect a vector and slot. Since we won't
     // change the IC from any downstream misses, a dummy vector can be used.
-    Register vector = LoadWithVectorDescriptor::VectorRegister();
-    Register slot = LoadWithVectorDescriptor::SlotRegister();
-    DCHECK(!AreAliased(vector, slot, a3, a4, a5, a6));
+    Register vector = VectorStoreICDescriptor::VectorRegister();
+    Register slot = VectorStoreICDescriptor::SlotRegister();
+
+    DCHECK(!AreAliased(vector, slot, a5, a6, a7, t0));
     Handle<TypeFeedbackVector> dummy_vector =
         TypeFeedbackVector::DummyVector(masm->isolate());
     int slot_index = dummy_vector->GetIndex(
@@ -691,7 +692,7 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
   Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
       Code::ComputeHandlerFlags(Code::STORE_IC));
   masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, flags,
-                                               receiver, key, a3, a4, a5, a6);
+                                               receiver, key, a5, a6, a7, t0);
 
   // Cache miss.
   __ Branch(&miss);
@@ -792,18 +793,20 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) {
   Register receiver = StoreDescriptor::ReceiverRegister();
   Register name = StoreDescriptor::NameRegister();
   Register value = StoreDescriptor::ValueRegister();
-  Register dictionary = a3;
-  DCHECK(!AreAliased(value, receiver, name, dictionary, a4, a5));
+  Register dictionary = a5;
+  DCHECK(!AreAliased(
+      value, receiver, name, VectorStoreICDescriptor::VectorRegister(),
+      VectorStoreICDescriptor::SlotRegister(), dictionary, a6, a7));
 
   __ ld(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
 
-  GenerateDictionaryStore(masm, &miss, a3, name, value, a4, a5);
+  GenerateDictionaryStore(masm, &miss, dictionary, name, value, a6, a7);
   Counters* counters = masm->isolate()->counters();
-  __ IncrementCounter(counters->store_normal_hit(), 1, a4, a5);
+  __ IncrementCounter(counters->store_normal_hit(), 1, a6, a7);
   __ Ret();
 
   __ bind(&miss);
-  __ IncrementCounter(counters->store_normal_miss(), 1, a4, a5);
+  __ IncrementCounter(counters->store_normal_miss(), 1, a6, a7);
   GenerateMiss(masm);
 }
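The register reshuffles in the two GenerateNormal hunks above all have the
same cause: the vector and slot now occupy registers that these functions
previously used as scratch. Restating the constraints the mips hunk asserts
(mips64 is analogous, with a4 as the slot register), purely as illustration:

    // Illustrative only: the register budget that forces the reshuffle.
    DCHECK(StoreDescriptor::ReceiverRegister().is(a1));
    DCHECK(StoreDescriptor::NameRegister().is(a2));
    DCHECK(StoreDescriptor::ValueRegister().is(a0));
    DCHECK(VectorStoreICDescriptor::VectorRegister().is(a3));  // was dictionary
    DCHECK(VectorStoreICDescriptor::SlotRegister().is(t0));    // was scratch

Hence the dictionary register moves off a3 and the scratch pairs move to
t2/t5 (mips) and a6/a7 (mips64).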
diff --git a/src/ic/mips64/stub-cache-mips64.cc b/src/ic/mips64/stub-cache-mips64.cc
index b1ec640..4ab9f8e 100644
--- a/src/ic/mips64/stub-cache-mips64.cc
+++ b/src/ic/mips64/stub-cache-mips64.cc
@@ -119,8 +119,14 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
   // extra3 don't conflict with the vector and slot registers, which need
   // to be preserved for a handler call or miss.
   if (IC::ICUseVector(ic_kind)) {
-    Register vector = LoadWithVectorDescriptor::VectorRegister();
-    Register slot = LoadWithVectorDescriptor::SlotRegister();
+    Register vector, slot;
+    if (ic_kind == Code::STORE_IC || ic_kind == Code::KEYED_STORE_IC) {
+      vector = VectorStoreICDescriptor::VectorRegister();
+      slot = VectorStoreICDescriptor::SlotRegister();
+    } else {
+      vector = LoadWithVectorDescriptor::VectorRegister();
+      slot = LoadWithVectorDescriptor::SlotRegister();
+    }
     DCHECK(!AreAliased(vector, slot, scratch, extra, extra2, extra3));
   }
 #endif
diff --git a/src/ic/x64/access-compiler-x64.cc b/src/ic/x64/access-compiler-x64.cc
index 63e60f0..85b44ef 100644
--- a/src/ic/x64/access-compiler-x64.cc
+++ b/src/ic/x64/access-compiler-x64.cc
@@ -31,7 +31,8 @@ Register* PropertyAccessCompiler::store_calling_convention() {
   // receiver, name, scratch1, scratch2, scratch3.
   Register receiver = StoreDescriptor::ReceiverRegister();
   Register name = StoreDescriptor::NameRegister();
-  DCHECK(rbx.is(StoreTransitionDescriptor::MapRegister()));
+  DCHECK(FLAG_vector_stores ||
+         rbx.is(StoreTransitionDescriptor::MapRegister()));
   static Register registers[] = {receiver, name, rbx, rdi, r8};
   return registers;
 }
diff --git a/src/ic/x64/handler-compiler-x64.cc b/src/ic/x64/handler-compiler-x64.cc
index fe6d168..1490c92 100644
--- a/src/ic/x64/handler-compiler-x64.cc
+++ b/src/ic/x64/handler-compiler-x64.cc
@@ -304,13 +304,26 @@ static void StoreIC_PushArgs(MacroAssembler* masm) {
   Register name = StoreDescriptor::NameRegister();
   Register value = StoreDescriptor::ValueRegister();
 
-  DCHECK(!rbx.is(receiver) && !rbx.is(name) && !rbx.is(value));
+  if (FLAG_vector_stores) {
+    Register slot = VectorStoreICDescriptor::SlotRegister();
+    Register vector = VectorStoreICDescriptor::VectorRegister();
+
+    __ PopReturnAddressTo(r11);
+    __ Push(receiver);
+    __ Push(name);
+    __ Push(value);
+    __ Push(slot);
+    __ Push(vector);
+    __ PushReturnAddressFrom(r11);
+  } else {
+    DCHECK(!rbx.is(receiver) && !rbx.is(name) && !rbx.is(value));
 
-  __ PopReturnAddressTo(rbx);
-  __ Push(receiver);
-  __ Push(name);
-  __ Push(value);
-  __ PushReturnAddressFrom(rbx);
+    __ PopReturnAddressTo(rbx);
+    __ Push(receiver);
+    __ Push(name);
+    __ Push(value);
+    __ PushReturnAddressFrom(rbx);
+  }
 }
 
 
@@ -319,7 +332,7 @@ void NamedStoreHandlerCompiler::GenerateSlow(MacroAssembler* masm) {
   StoreIC_PushArgs(masm);
 
   // Do tail-call to runtime routine.
-  __ TailCallRuntime(Runtime::kStoreIC_Slow, 3, 1);
+  __ TailCallRuntime(Runtime::kStoreIC_Slow, FLAG_vector_stores ? 5 : 3, 1);
 }
 
 
@@ -328,7 +341,8 @@ void ElementHandlerCompiler::GenerateStoreSlow(MacroAssembler* masm) {
   StoreIC_PushArgs(masm);
 
   // Do tail-call to runtime routine.
-  __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow, 3, 1);
+  __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow, FLAG_vector_stores ? 5 : 3,
+                     1);
 }
 
 
@@ -575,6 +589,7 @@ void NamedStoreHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
     Label success;
     __ jmp(&success);
     GenerateRestoreName(miss, name);
+    if (IC::ICUseVector(kind())) PopVectorAndSlot();
     TailCallBuiltin(masm(), MissBuiltin(kind()));
     __ bind(&success);
   }
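Unlike the MIPS ports, x64 passes these slow-call arguments on the stack, so
StoreIC_PushArgs must hold the return address aside while pushing. Assuming
the push order shown above and the x64 descriptor assignments noted later in
this patch (receiver rdx, name rcx, value rax, slot rdi, vector rbx), the
resulting layout is, as a sketch:

    // Stack on entry to the runtime after StoreIC_PushArgs (vector stores):
    //
    //   [rsp +  0]  return address  (re-pushed from r11)
    //   [rsp +  8]  vector          (rbx per VectorStoreICDescriptor)
    //   [rsp + 16]  slot            (rdi)
    //   [rsp + 24]  value           (rax)
    //   [rsp + 32]  name            (rcx)
    //   [rsp + 40]  receiver        (rdx)
    //
    // r11 replaces rbx as the return-address scratch because rbx now
    // carries the feedback vector and must not be clobbered.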
diff --git a/src/ic/x64/ic-compiler-x64.cc b/src/ic/x64/ic-compiler-x64.cc
index d5e5484..fd92cca 100644
--- a/src/ic/x64/ic-compiler-x64.cc
+++ b/src/ic/x64/ic-compiler-x64.cc
@@ -55,7 +55,10 @@ Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
       Label next_map;
       __ j(not_equal, &next_map, Label::kNear);
       Handle<WeakCell> cell = Map::WeakCellForMap(transitioned_maps->at(i));
-      __ LoadWeakValue(transition_map(), cell, &miss);
+      Register transition_map = scratch1();
+      DCHECK(!FLAG_vector_stores &&
+             transition_map.is(StoreTransitionDescriptor::MapRegister()));
+      __ LoadWeakValue(transition_map, cell, &miss);
       __ jmp(handler_stubs->at(i), RelocInfo::CODE_TARGET);
       __ bind(&next_map);
     }
diff --git a/src/ic/x64/ic-x64.cc b/src/ic/x64/ic-x64.cc
index 8d33480..ff74a96 100644
--- a/src/ic/x64/ic-x64.cc
+++ b/src/ic/x64/ic-x64.cc
@@ -582,7 +582,7 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
   Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
       Code::ComputeHandlerFlags(Code::STORE_IC));
   masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, flags,
-                                               receiver, key, rbx, no_reg);
+                                               receiver, key, r9, no_reg);
 
   // Cache miss.
   __ jmp(&miss);
@@ -735,8 +735,13 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm,
 
 
 void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
-  // The return address is on the stack.
+  if (FLAG_vector_stores) {
+    // This shouldn't be called.
+    __ int3();
+    return;
+  }
+
+  // The return address is on the stack.
   // Get the receiver from the stack and probe the stub cache.
   Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
       Code::ComputeHandlerFlags(Code::STORE_IC));
@@ -785,7 +790,10 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) {
   Register receiver = StoreDescriptor::ReceiverRegister();
   Register name = StoreDescriptor::NameRegister();
   Register value = StoreDescriptor::ValueRegister();
-  Register dictionary = rbx;
+  Register dictionary = r11;
+  DCHECK(!FLAG_vector_stores ||
+         !AreAliased(dictionary, VectorStoreICDescriptor::VectorRegister(),
+                     VectorStoreICDescriptor::SlotRegister()));
 
   Label miss;
 
diff --git a/src/ic/x64/stub-cache-x64.cc b/src/ic/x64/stub-cache-x64.cc
index 3908018..9a9dfe9 100644
--- a/src/ic/x64/stub-cache-x64.cc
+++ b/src/ic/x64/stub-cache-x64.cc
@@ -110,9 +110,16 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
   // the vector and slot registers, which need to be preserved for a handler
   // call or miss.
   if (IC::ICUseVector(ic_kind)) {
-    Register vector = LoadWithVectorDescriptor::VectorRegister();
-    Register slot = LoadDescriptor::SlotRegister();
-    DCHECK(!AreAliased(vector, slot, scratch));
+    if (ic_kind == Code::LOAD_IC || ic_kind == Code::KEYED_LOAD_IC) {
+      Register vector = LoadWithVectorDescriptor::VectorRegister();
+      Register slot = LoadDescriptor::SlotRegister();
+      DCHECK(!AreAliased(vector, slot, scratch));
+    } else {
+      DCHECK(ic_kind == Code::STORE_IC || ic_kind == Code::KEYED_STORE_IC);
+      Register vector = VectorStoreICDescriptor::VectorRegister();
+      Register slot = VectorStoreICDescriptor::SlotRegister();
+      DCHECK(!AreAliased(vector, slot, scratch));
+    }
   }
 #endif
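Note the int3 trap above: once vector stores are on, the old non-vector
StoreIC::GenerateMegamorphic path is dead, and its job moves into the
VectorStoreICStub implemented in the three code-stubs files that follow. All
three platforms realize the same decision tree over the feedback slot; a
self-contained model of that dispatch order (plain C++, not V8 code, with
illustrative names) is:

    #include <cstdio>

    enum class Feedback {
      kWeakCellMatch,      // WeakCell whose value equals the receiver map
      kPolymorphicArray,   // FixedArray of (map, handler) entries
      kMegamorphicSymbol,  // sentinel symbol: too many maps seen
      kOther               // uninitialized or unexpected
    };

    static const char* Dispatch(Feedback f) {
      // 1. Monomorphic fast path, checked first without knowing the type.
      if (f == Feedback::kWeakCellMatch) return "jump to cached handler";
      // 2. Polymorphic: search the map/handler array.
      if (f == Feedback::kPolymorphicArray) return "search map/handler array";
      // 3. Megamorphic: probe the isolate-wide stub cache.
      if (f == Feedback::kMegamorphicSymbol) return "probe stub cache";
      // 4. Everything else takes the IC miss to (re)compute feedback.
      return "StoreIC::GenerateMiss";
    }

    int main() {
      std::printf("%s\n", Dispatch(Feedback::kMegamorphicSymbol));
      return 0;
    }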
diff --git a/src/mips/code-stubs-mips.cc b/src/mips/code-stubs-mips.cc
index ead4bc2..11b14be 100644
--- a/src/mips/code-stubs-mips.cc
+++ b/src/mips/code-stubs-mips.cc
@@ -4771,11 +4771,52 @@ void VectorStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
 
 
 void VectorStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
-  Label miss;
+  Register receiver = VectorStoreICDescriptor::ReceiverRegister();  // a1
+  Register key = VectorStoreICDescriptor::NameRegister();           // a2
+  Register vector = VectorStoreICDescriptor::VectorRegister();      // a3
+  Register slot = VectorStoreICDescriptor::SlotRegister();          // t0
+  DCHECK(VectorStoreICDescriptor::ValueRegister().is(a0));          // a0
+  Register feedback = t1;
+  Register receiver_map = t2;
+  Register scratch1 = t5;
+
+  __ sll(scratch1, slot, kPointerSizeLog2 - kSmiTagSize);
+  __ Addu(feedback, vector, Operand(scratch1));
+  __ lw(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
+
+  // Try to quickly handle the monomorphic case without knowing for sure
+  // if we have a weak cell in feedback. We do know it's safe to look
+  // at WeakCell::kValueOffset.
+  Label try_array, load_smi_map, compare_map;
+  Label not_array, miss;
+  HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
+                        scratch1, &compare_map, &load_smi_map, &try_array);
+
+  // Is it a fixed array?
+  __ bind(&try_array);
+  __ lw(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
+  __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
+  __ Branch(&not_array, ne, scratch1, Operand(at));
+
+  Register scratch2 = t4;
+  HandleArrayCases(masm, feedback, receiver_map, scratch1, scratch2, true,
+                   &miss);
+
+  __ bind(&not_array);
+  __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
+  __ Branch(&miss, ne, feedback, Operand(at));
+  Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
+      Code::ComputeHandlerFlags(Code::STORE_IC));
+  masm->isolate()->stub_cache()->GenerateProbe(
+      masm, Code::STORE_IC, code_flags, receiver, key, feedback, receiver_map,
+      scratch1, scratch2);
 
-  // TODO(mvstanton): Implement.
   __ bind(&miss);
   StoreIC::GenerateMiss(masm);
+
+  __ bind(&load_smi_map);
+  __ Branch(USE_DELAY_SLOT, &compare_map);
+  __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);  // In delay slot.
 }
 
 
@@ -4789,12 +4830,132 @@ void VectorKeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
 }
 
 
+static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register feedback,
+                                       Register receiver_map, Register scratch1,
+                                       Register scratch2, Label* miss) {
+  // feedback initially contains the feedback array
+  Label next_loop, prepare_next;
+  Label start_polymorphic;
+  Label transition_call;
+
+  Register cached_map = scratch1;
+  Register too_far = scratch2;
+  Register pointer_reg = feedback;
+  __ lw(too_far, FieldMemOperand(feedback, FixedArray::kLengthOffset));
+
+  // +-----+------+------+-----+-----+-----+ ... ----+
+  // | map | len  | wm0  | wt0 | h0  | wm1 |   hN    |
+  // +-----+------+------+-----+-----+ ----+ ... ----+
+  //                 0      1     2              len-1
+  //                     ^                    ^
+  //                     |                    |
+  //                 pointer_reg          too_far
+  //                 aka feedback         scratch2
+  // also need receiver_map
+  // use cached_map (scratch1) to look in the weak map values.
+  __ sll(scratch1, too_far, kPointerSizeLog2 - kSmiTagSize);
+  __ Addu(too_far, feedback, Operand(scratch1));
+  __ Addu(too_far, too_far, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ Addu(pointer_reg, feedback,
+          Operand(FixedArray::OffsetOfElementAt(0) - kHeapObjectTag));
+
+  __ bind(&next_loop);
+  __ lw(cached_map, MemOperand(pointer_reg));
+  __ lw(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
+  __ Branch(&prepare_next, ne, receiver_map, Operand(cached_map));
+  // Is it a transitioning store?
+  __ lw(too_far, MemOperand(pointer_reg, kPointerSize));
+  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+  __ Branch(&transition_call, ne, too_far, Operand(at));
+  __ lw(pointer_reg, MemOperand(pointer_reg, kPointerSize * 2));
+  __ Addu(t9, pointer_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ Jump(t9);
+
+  __ bind(&transition_call);
+  __ lw(too_far, FieldMemOperand(too_far, WeakCell::kValueOffset));
+  __ JumpIfSmi(too_far, miss);
+
+  __ lw(receiver_map, MemOperand(pointer_reg, kPointerSize * 2));
+
+  // Load the map into the correct register.
+  DCHECK(feedback.is(VectorStoreTransitionDescriptor::MapRegister()));
+  __ mov(feedback, too_far);
+
+  __ Addu(t9, receiver_map, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ Jump(t9);
+
+  __ bind(&prepare_next);
+  __ Addu(pointer_reg, pointer_reg, Operand(kPointerSize * 3));
+  __ Branch(&next_loop, lt, pointer_reg, Operand(too_far));
+
+  // We exhausted our array of map handler pairs.
+  __ jmp(miss);
+}
+
+
 void VectorKeyedStoreICStub::GenerateImpl(MacroAssembler* masm,
                                           bool in_frame) {
-  Label miss;
+  Register receiver = VectorStoreICDescriptor::ReceiverRegister();  // a1
+  Register key = VectorStoreICDescriptor::NameRegister();           // a2
+  Register vector = VectorStoreICDescriptor::VectorRegister();      // a3
+  Register slot = VectorStoreICDescriptor::SlotRegister();          // t0
+  DCHECK(VectorStoreICDescriptor::ValueRegister().is(a0));          // a0
+  Register feedback = t1;
+  Register receiver_map = t2;
+  Register scratch1 = t5;
+
+  __ sll(scratch1, slot, kPointerSizeLog2 - kSmiTagSize);
+  __ Addu(feedback, vector, Operand(scratch1));
+  __ lw(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
+
+  // Try to quickly handle the monomorphic case without knowing for sure
+  // if we have a weak cell in feedback. We do know it's safe to look
+  // at WeakCell::kValueOffset.
+  Label try_array, load_smi_map, compare_map;
+  Label not_array, miss;
+  HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
+                        scratch1, &compare_map, &load_smi_map, &try_array);
+
+  __ bind(&try_array);
+  // Is it a fixed array?
+  __ lw(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
+  __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
+  __ Branch(&not_array, ne, scratch1, Operand(at));
+
+  // We have a polymorphic element handler.
+  Label polymorphic, try_poly_name;
+  __ bind(&polymorphic);
+
+  Register scratch2 = t4;
+
+  HandlePolymorphicStoreCase(masm, feedback, receiver_map, scratch1, scratch2,
+                             &miss);
+
+  __ bind(&not_array);
+  // Is it generic?
+  __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
+  __ Branch(&try_poly_name, ne, feedback, Operand(at));
+  Handle<Code> megamorphic_stub =
+      KeyedStoreIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
+  __ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);
+
+  __ bind(&try_poly_name);
+  // We might have a name in feedback, and a fixed array in the next slot.
+  __ Branch(&miss, ne, key, Operand(feedback));
+  // If the name comparison succeeded, we know we have a fixed array with
+  // at least one map/handler pair.
+  __ sll(scratch1, slot, kPointerSizeLog2 - kSmiTagSize);
+  __ Addu(feedback, vector, Operand(scratch1));
+  __ lw(feedback,
+        FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
+  HandleArrayCases(masm, feedback, receiver_map, scratch1, scratch2, false,
+                   &miss);
 
-  // TODO(mvstanton): Implement.
   __ bind(&miss);
   KeyedStoreIC::GenerateMiss(masm);
+
+  __ bind(&load_smi_map);
+  __ Branch(USE_DELAY_SLOT, &compare_map);
+  __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);  // In delay slot.
 }
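The feedback array walked by HandlePolymorphicStoreCase stores triples, not
pairs: (weak map, transition map or undefined, handler), which is why the
loop advances by kPointerSize * 3. A self-contained model of the same search
(plain C++, names illustrative, not V8 code):

    #include <cstddef>

    struct Entry {
      const void* map;         // weak cell value: receiver map
      const void* transition;  // weak cell value: new map, or nullptr
      const void* handler;     // code object for the store
    };

    // Walks entries with a stride of one triple, mirroring the
    // pointer_reg/too_far loop above.  Returns the handler or nullptr
    // (miss); *out_transition is set for a transitioning store.
    static const void* FindHandler(const Entry* entries, size_t len,
                                   const void* receiver_map,
                                   const void** out_transition) {
      for (size_t i = 0; i < len; i++) {
        if (entries[i].map != receiver_map) continue;  // prepare_next
        *out_transition = entries[i].transition;       // may be "undefined"
        return entries[i].handler;  // jump past Code::kHeaderSize
      }
      return nullptr;  // exhausted: __ jmp(miss)
    }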
diff --git a/src/mips64/code-stubs-mips64.cc b/src/mips64/code-stubs-mips64.cc
index 828ffcd..8360b07 100644
--- a/src/mips64/code-stubs-mips64.cc
+++ b/src/mips64/code-stubs-mips64.cc
@@ -4801,11 +4801,50 @@ void VectorStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
 
 
 void VectorStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
-  Label miss;
+  Register receiver = VectorStoreICDescriptor::ReceiverRegister();  // a1
+  Register key = VectorStoreICDescriptor::NameRegister();           // a2
+  Register vector = VectorStoreICDescriptor::VectorRegister();      // a3
+  Register slot = VectorStoreICDescriptor::SlotRegister();          // a4
+  DCHECK(VectorStoreICDescriptor::ValueRegister().is(a0));          // a0
+  Register feedback = a5;
+  Register receiver_map = a6;
+  Register scratch1 = a7;
+
+  __ SmiScale(scratch1, slot, kPointerSizeLog2);
+  __ Daddu(feedback, vector, Operand(scratch1));
+  __ ld(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
+
+  // Try to quickly handle the monomorphic case without knowing for sure
+  // if we have a weak cell in feedback. We do know it's safe to look
+  // at WeakCell::kValueOffset.
+  Label try_array, load_smi_map, compare_map;
+  Label not_array, miss;
+  HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
+                        scratch1, &compare_map, &load_smi_map, &try_array);
+
+  // Is it a fixed array?
+  __ bind(&try_array);
+  __ ld(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
+  __ Branch(&not_array, ne, scratch1, Heap::kFixedArrayMapRootIndex);
+
+  Register scratch2 = t0;
+  HandleArrayCases(masm, feedback, receiver_map, scratch1, scratch2, true,
+                   &miss);
+
+  __ bind(&not_array);
+  __ Branch(&miss, ne, feedback, Heap::kmegamorphic_symbolRootIndex);
+  Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
+      Code::ComputeHandlerFlags(Code::STORE_IC));
+  masm->isolate()->stub_cache()->GenerateProbe(
+      masm, Code::STORE_IC, code_flags, receiver, key, feedback, receiver_map,
+      scratch1, scratch2);
 
-  // TODO(mvstanton): Implement.
   __ bind(&miss);
   StoreIC::GenerateMiss(masm);
+
+  __ bind(&load_smi_map);
+  __ Branch(USE_DELAY_SLOT, &compare_map);
+  __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);  // In delay slot.
 }
 
 
@@ -4819,12 +4858,129 @@ void VectorKeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
 }
 
 
+static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register feedback,
+                                       Register receiver_map, Register scratch1,
+                                       Register scratch2, Label* miss) {
+  // feedback initially contains the feedback array
+  Label next_loop, prepare_next;
+  Label start_polymorphic;
+  Label transition_call;
+
+  Register cached_map = scratch1;
+  Register too_far = scratch2;
+  Register pointer_reg = feedback;
+
+  __ ld(too_far, FieldMemOperand(feedback, FixedArray::kLengthOffset));
+
+  // +-----+------+------+-----+-----+-----+ ... ----+
+  // | map | len  | wm0  | wt0 | h0  | wm1 |   hN    |
+  // +-----+------+------+-----+-----+ ----+ ... ----+
+  //                 0      1     2              len-1
+  //                     ^                    ^
+  //                     |                    |
+  //                 pointer_reg          too_far
+  //                 aka feedback         scratch2
+  // also need receiver_map
+  // use cached_map (scratch1) to look in the weak map values.
+  __ SmiScale(too_far, too_far, kPointerSizeLog2);
+  __ Daddu(too_far, feedback, Operand(too_far));
+  __ Daddu(too_far, too_far, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ Daddu(pointer_reg, feedback,
+           Operand(FixedArray::OffsetOfElementAt(0) - kHeapObjectTag));
+
+  __ bind(&next_loop);
+  __ ld(cached_map, MemOperand(pointer_reg));
+  __ ld(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
+  __ Branch(&prepare_next, ne, receiver_map, Operand(cached_map));
+  // Is it a transitioning store?
+  __ ld(too_far, MemOperand(pointer_reg, kPointerSize));
+  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+  __ Branch(&transition_call, ne, too_far, Operand(at));
+
+  __ ld(pointer_reg, MemOperand(pointer_reg, kPointerSize * 2));
+  __ Daddu(t9, pointer_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ Jump(t9);
+
+  __ bind(&transition_call);
+  __ ld(too_far, FieldMemOperand(too_far, WeakCell::kValueOffset));
+  __ JumpIfSmi(too_far, miss);
+
+  __ ld(receiver_map, MemOperand(pointer_reg, kPointerSize * 2));
+  // Load the map into the correct register.
+  DCHECK(feedback.is(VectorStoreTransitionDescriptor::MapRegister()));
+  __ Move(feedback, too_far);
+  __ Daddu(t9, receiver_map, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ Jump(t9);
+
+  __ bind(&prepare_next);
+  __ Daddu(pointer_reg, pointer_reg, Operand(kPointerSize * 3));
+  __ Branch(&next_loop, lt, pointer_reg, Operand(too_far));
+
+  // We exhausted our array of map handler pairs.
+  __ Branch(miss);
+}
+
+
 void VectorKeyedStoreICStub::GenerateImpl(MacroAssembler* masm,
                                           bool in_frame) {
-  Label miss;
+  Register receiver = VectorStoreICDescriptor::ReceiverRegister();  // a1
+  Register key = VectorStoreICDescriptor::NameRegister();           // a2
+  Register vector = VectorStoreICDescriptor::VectorRegister();      // a3
+  Register slot = VectorStoreICDescriptor::SlotRegister();          // a4
+  DCHECK(VectorStoreICDescriptor::ValueRegister().is(a0));          // a0
+  Register feedback = a5;
+  Register receiver_map = a6;
+  Register scratch1 = a7;
+
+  __ SmiScale(scratch1, slot, kPointerSizeLog2);
+  __ Daddu(feedback, vector, Operand(scratch1));
+  __ ld(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
+
+  // Try to quickly handle the monomorphic case without knowing for sure
+  // if we have a weak cell in feedback. We do know it's safe to look
+  // at WeakCell::kValueOffset.
+  Label try_array, load_smi_map, compare_map;
+  Label not_array, miss;
+  HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
+                        scratch1, &compare_map, &load_smi_map, &try_array);
+
+  __ bind(&try_array);
+  // Is it a fixed array?
+  __ ld(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
+  __ Branch(&not_array, ne, scratch1, Heap::kFixedArrayMapRootIndex);
+
+  // We have a polymorphic element handler.
+  Label try_poly_name;
+
+  Register scratch2 = t0;
+
+  HandlePolymorphicStoreCase(masm, feedback, receiver_map, scratch1, scratch2,
+                             &miss);
+
+  __ bind(&not_array);
+  // Is it generic?
+  __ Branch(&try_poly_name, ne, feedback, Heap::kmegamorphic_symbolRootIndex);
+  Handle<Code> megamorphic_stub =
+      KeyedStoreIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
+  __ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);
+
+  __ bind(&try_poly_name);
+  // We might have a name in feedback, and a fixed array in the next slot.
+  __ Branch(&miss, ne, key, Operand(feedback));
+  // If the name comparison succeeded, we know we have a fixed array with
+  // at least one map/handler pair.
+  __ SmiScale(scratch1, slot, kPointerSizeLog2);
+  __ Daddu(feedback, vector, Operand(scratch1));
+  __ ld(feedback,
+        FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
+  HandleArrayCases(masm, feedback, receiver_map, scratch1, scratch2, false,
+                   &miss);
 
-  // TODO(mvstanton): Implement.
   __ bind(&miss);
   KeyedStoreIC::GenerateMiss(masm);
+
+  __ bind(&load_smi_map);
+  __ Branch(USE_DELAY_SLOT, &compare_map);
+  __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);  // In delay slot.
 }
diff --git a/src/type-info.cc b/src/type-info.cc
index 15a64ef..317b4e3 100644
--- a/src/type-info.cc
+++ b/src/type-info.cc
@@ -136,10 +136,10 @@ bool TypeFeedbackOracle::StoreIsUninitialized(FeedbackVectorICSlot slot) {
     Code::Kind kind = feedback_vector_->GetKind(slot);
     if (kind == Code::STORE_IC) {
       StoreICNexus nexus(feedback_vector_, slot);
-      return nexus.StateFromFeedback();
+      return nexus.StateFromFeedback() == UNINITIALIZED;
     } else if (kind == Code::KEYED_STORE_IC) {
       KeyedStoreICNexus nexus(feedback_vector_, slot);
-      return nexus.StateFromFeedback();
+      return nexus.StateFromFeedback() == UNINITIALIZED;
     }
   }
   return true;
 }
diff --git a/src/type-info.h b/src/type-info.h
index 96539c9..9698e4a 100644
--- a/src/type-info.h
+++ b/src/type-info.h
@@ -46,7 +46,6 @@ class TypeFeedbackOracle: public ZoneObject {
   void GetStoreModeAndKeyType(FeedbackVectorICSlot slot,
                               KeyedAccessStoreMode* store_mode,
                               IcCheckType* key_type);
-  void GetLoadKeyType(TypeFeedbackId id, IcCheckType* key_type);
   void PropertyReceiverTypes(FeedbackVectorICSlot slot, Handle<Name> name,
                              SmallMapList* receiver_types);
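The type-info.cc change above is a genuine bug fix ("a small Oracle fix" from
the commit message), not a cosmetic one: StateFromFeedback() returns an
InlineCacheState, and returning it from a bool function coerces the enum.
Assuming UNINITIALIZED is the enum's first (zero) enumerator, as in V8's
InlineCacheState, the old code answered false exactly when the store was
uninitialized, the inverse of the question asked. A minimal illustration with
a stand-in enum:

    // Stand-in for InlineCacheState; UNINITIALIZED is likewise zero in V8.
    enum InlineCacheState { UNINITIALIZED, PREMONOMORPHIC, MONOMORPHIC };

    bool StoreIsUninitializedOld(InlineCacheState state) {
      return state;  // UNINITIALIZED (0) -> false: the answer is inverted!
    }

    bool StoreIsUninitializedNew(InlineCacheState state) {
      return state == UNINITIALIZED;  // true exactly when no feedback yet
    }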
diff --git a/src/x64/code-stubs-x64.cc b/src/x64/code-stubs-x64.cc
index cdde6a8..55891d5 100644
--- a/src/x64/code-stubs-x64.cc
+++ b/src/x64/code-stubs-x64.cc
@@ -4504,11 +4504,50 @@ void VectorStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
 
 
 void VectorStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
-  Label miss;
+  Register receiver = VectorStoreICDescriptor::ReceiverRegister();  // rdx
+  Register key = VectorStoreICDescriptor::NameRegister();           // rcx
+  Register vector = VectorStoreICDescriptor::VectorRegister();      // rbx
+  Register slot = VectorStoreICDescriptor::SlotRegister();          // rdi
+  DCHECK(VectorStoreICDescriptor::ValueRegister().is(rax));         // rax
+  Register feedback = r8;
+  Register integer_slot = r9;
+  Register receiver_map = r11;
+  DCHECK(!AreAliased(feedback, integer_slot, vector, slot, receiver_map));
+
+  __ SmiToInteger32(integer_slot, slot);
+  __ movp(feedback, FieldOperand(vector, integer_slot, times_pointer_size,
+                                 FixedArray::kHeaderSize));
+
+  // Try to quickly handle the monomorphic case without knowing for sure
+  // if we have a weak cell in feedback. We do know it's safe to look
+  // at WeakCell::kValueOffset.
+  Label try_array, load_smi_map, compare_map;
+  Label not_array, miss;
+  HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector,
+                        integer_slot, &compare_map, &load_smi_map, &try_array);
+
+  // Is it a fixed array?
+  __ bind(&try_array);
+  __ CompareRoot(FieldOperand(feedback, 0), Heap::kFixedArrayMapRootIndex);
+  __ j(not_equal, &not_array);
+  HandleArrayCases(masm, feedback, receiver_map, integer_slot, r14, r15, true,
+                   &miss);
+
+  __ bind(&not_array);
+  __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
+  __ j(not_equal, &miss);
+
+  Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
+      Code::ComputeHandlerFlags(Code::STORE_IC));
+  masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, code_flags,
+                                               receiver, key, feedback, no_reg);
 
-  // TODO(mvstanton): Implement.
   __ bind(&miss);
   StoreIC::GenerateMiss(masm);
+
+  __ bind(&load_smi_map);
+  __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
+  __ jmp(&compare_map);
 }
 
 
@@ -4522,12 +4561,115 @@ void VectorKeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
 }
 
 
+static void HandlePolymorphicKeyedStoreCase(MacroAssembler* masm,
+                                            Register receiver_map,
+                                            Register feedback, Register scratch,
+                                            Register scratch1,
+                                            Register scratch2, Label* miss) {
+  // feedback initially contains the feedback array
+  Label next, next_loop, prepare_next;
+  Label transition_call;
+
+  Register cached_map = scratch;
+  Register counter = scratch1;
+  Register length = scratch2;
+
+  // Polymorphic, we have to loop from 0 to N - 1
+  __ movp(counter, Immediate(0));
+  __ movp(length, FieldOperand(feedback, FixedArray::kLengthOffset));
+  __ SmiToInteger32(length, length);
+
+  __ bind(&next_loop);
+  __ movp(cached_map, FieldOperand(feedback, counter, times_pointer_size,
+                                   FixedArray::kHeaderSize));
+  __ cmpp(receiver_map, FieldOperand(cached_map, WeakCell::kValueOffset));
+  __ j(not_equal, &prepare_next);
+  __ movp(cached_map, FieldOperand(feedback, counter, times_pointer_size,
+                                   FixedArray::kHeaderSize + kPointerSize));
+  __ CompareRoot(cached_map, Heap::kUndefinedValueRootIndex);
+  __ j(not_equal, &transition_call);
+  __ movp(feedback, FieldOperand(feedback, counter, times_pointer_size,
+                                 FixedArray::kHeaderSize + 2 * kPointerSize));
+  __ leap(feedback, FieldOperand(feedback, Code::kHeaderSize));
+  __ jmp(feedback);
+
+  __ bind(&transition_call);
+  DCHECK(receiver_map.is(VectorStoreTransitionDescriptor::MapRegister()));
+  __ movp(receiver_map, FieldOperand(cached_map, WeakCell::kValueOffset));
+  // The weak cell may have been cleared.
+  __ JumpIfSmi(receiver_map, miss);
+  // Get the handler in value.
+  __ movp(feedback, FieldOperand(feedback, counter, times_pointer_size,
+                                 FixedArray::kHeaderSize + 2 * kPointerSize));
+  __ leap(feedback, FieldOperand(feedback, Code::kHeaderSize));
+  __ jmp(feedback);
+
+  __ bind(&prepare_next);
+  __ addl(counter, Immediate(3));
+  __ cmpl(counter, length);
+  __ j(less, &next_loop);
+
+  // We exhausted our array of map handler pairs.
+  __ jmp(miss);
+}
+
+
 void VectorKeyedStoreICStub::GenerateImpl(MacroAssembler* masm,
                                           bool in_frame) {
-  Label miss;
+  Register receiver = VectorStoreICDescriptor::ReceiverRegister();  // rdx
+  Register key = VectorStoreICDescriptor::NameRegister();           // rcx
+  Register vector = VectorStoreICDescriptor::VectorRegister();      // rbx
+  Register slot = VectorStoreICDescriptor::SlotRegister();          // rdi
+  DCHECK(VectorStoreICDescriptor::ValueRegister().is(rax));         // rax
+  Register feedback = r8;
+  Register integer_slot = r9;
+  Register receiver_map = r11;
+  DCHECK(!AreAliased(feedback, integer_slot, vector, slot, receiver_map));
+
+  __ SmiToInteger32(integer_slot, slot);
+  __ movp(feedback, FieldOperand(vector, integer_slot, times_pointer_size,
+                                 FixedArray::kHeaderSize));
+
+  // Try to quickly handle the monomorphic case without knowing for sure
+  // if we have a weak cell in feedback. We do know it's safe to look
+  // at WeakCell::kValueOffset.
+  Label try_array, load_smi_map, compare_map;
+  Label not_array, miss;
+  HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector,
+                        integer_slot, &compare_map, &load_smi_map, &try_array);
+
+  // Is it a fixed array?
+  __ bind(&try_array);
+  __ CompareRoot(FieldOperand(feedback, 0), Heap::kFixedArrayMapRootIndex);
+  __ j(not_equal, &not_array);
+  HandlePolymorphicKeyedStoreCase(masm, receiver_map, feedback, integer_slot,
+                                  r15, r14, &miss);
+
+  __ bind(&not_array);
+  Label try_poly_name;
+  __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
+  __ j(not_equal, &try_poly_name);
+
+  Handle<Code> megamorphic_stub =
+      KeyedStoreIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
+  __ jmp(megamorphic_stub, RelocInfo::CODE_TARGET);
+
+  __ bind(&try_poly_name);
+  // We might have a name in feedback, and a fixed array in the next slot.
+  __ cmpp(key, feedback);
+  __ j(not_equal, &miss);
+  // If the name comparison succeeded, we know we have a fixed array with
+  // at least one map/handler pair.
+  __ movp(feedback, FieldOperand(vector, integer_slot, times_pointer_size,
+                                 FixedArray::kHeaderSize + kPointerSize));
+  HandleArrayCases(masm, feedback, receiver_map, integer_slot, r14, r15, false,
+                   &miss);
 
-  // TODO(mvstanton): Implement.
   __ bind(&miss);
   KeyedStoreIC::GenerateMiss(masm);
+
+  __ bind(&load_smi_map);
+  __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
+  __ jmp(&compare_map);
 }
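All three polymorphic store loops share one contract: before tail-calling a
transitioning handler, the new map must already sit in
VectorStoreTransitionDescriptor::MapRegister(), and each port chooses its
register assignments so this holds without an extra move (the map register
coincides with receiver_map on x64 and with feedback on mips/mips64, per the
DCHECKs above). Stated as a hypothetical checked helper, x64-flavored and
assuming the file's usual `#define __ ACCESS_MASM(masm)`:

    // Hypothetical helper, not part of this patch: checked jump to a
    // transitioning-store handler with the new map in the descriptor's
    // map register.
    static void JumpToTransitionHandler(MacroAssembler* masm, Register handler,
                                        Register transition_map) {
      DCHECK(transition_map.is(VectorStoreTransitionDescriptor::MapRegister()));
      __ leap(handler, FieldOperand(handler, Code::kHeaderSize));
      __ jmp(handler);
    }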
diff --git a/test/cctest/test-feedback-vector.cc b/test/cctest/test-feedback-vector.cc
index b982c0f..2f7f961 100644
--- a/test/cctest/test-feedback-vector.cc
+++ b/test/cctest/test-feedback-vector.cc
@@ -416,9 +416,17 @@ TEST(ReferenceContextAllocatesNoSlots) {
   // There should be two LOAD_ICs, one for a and one for y at the end.
   Handle<TypeFeedbackVector> feedback_vector =
       handle(f->shared()->feedback_vector(), isolate);
-  CHECK_EQ(2, feedback_vector->ICSlots());
-  CHECK(feedback_vector->GetKind(FeedbackVectorICSlot(0)) == Code::LOAD_IC);
-  CHECK(feedback_vector->GetKind(FeedbackVectorICSlot(1)) == Code::LOAD_IC);
+  if (FLAG_vector_stores) {
+    CHECK_EQ(4, feedback_vector->ICSlots());
+    CHECK(feedback_vector->GetKind(FeedbackVectorICSlot(0)) == Code::STORE_IC);
+    CHECK(feedback_vector->GetKind(FeedbackVectorICSlot(1)) == Code::LOAD_IC);
+    CHECK(feedback_vector->GetKind(FeedbackVectorICSlot(2)) == Code::STORE_IC);
+    CHECK(feedback_vector->GetKind(FeedbackVectorICSlot(3)) == Code::LOAD_IC);
+  } else {
+    CHECK_EQ(2, feedback_vector->ICSlots());
+    CHECK(feedback_vector->GetKind(FeedbackVectorICSlot(0)) == Code::LOAD_IC);
+    CHECK(feedback_vector->GetKind(FeedbackVectorICSlot(1)) == Code::LOAD_IC);
+  }
 
   CompileRun(
       "function testprop(x) {"
@@ -430,7 +438,11 @@ TEST(ReferenceContextAllocatesNoSlots) {
 
   // There should be one LOAD_IC, for the load of a.
   feedback_vector = handle(f->shared()->feedback_vector(), isolate);
-  CHECK_EQ(1, feedback_vector->ICSlots());
+  if (FLAG_vector_stores) {
+    CHECK_EQ(2, feedback_vector->ICSlots());
+  } else {
+    CHECK_EQ(1, feedback_vector->ICSlots());
+  }
 
   CompileRun(
       "function testpropfunc(x) {"
@@ -444,11 +456,20 @@ TEST(ReferenceContextAllocatesNoSlots) {
 
   // There should be 2 LOAD_ICs and 2 CALL_ICs.
   feedback_vector = handle(f->shared()->feedback_vector(), isolate);
-  CHECK_EQ(4, feedback_vector->ICSlots());
-  CHECK(feedback_vector->GetKind(FeedbackVectorICSlot(0)) == Code::CALL_IC);
-  CHECK(feedback_vector->GetKind(FeedbackVectorICSlot(1)) == Code::LOAD_IC);
-  CHECK(feedback_vector->GetKind(FeedbackVectorICSlot(2)) == Code::CALL_IC);
-  CHECK(feedback_vector->GetKind(FeedbackVectorICSlot(3)) == Code::LOAD_IC);
+  if (FLAG_vector_stores) {
+    CHECK_EQ(5, feedback_vector->ICSlots());
+    CHECK(feedback_vector->GetKind(FeedbackVectorICSlot(0)) == Code::CALL_IC);
+    CHECK(feedback_vector->GetKind(FeedbackVectorICSlot(1)) == Code::LOAD_IC);
+    CHECK(feedback_vector->GetKind(FeedbackVectorICSlot(2)) == Code::STORE_IC);
+    CHECK(feedback_vector->GetKind(FeedbackVectorICSlot(3)) == Code::CALL_IC);
+    CHECK(feedback_vector->GetKind(FeedbackVectorICSlot(4)) == Code::LOAD_IC);
+  } else {
+    CHECK_EQ(4, feedback_vector->ICSlots());
+    CHECK(feedback_vector->GetKind(FeedbackVectorICSlot(0)) == Code::CALL_IC);
+    CHECK(feedback_vector->GetKind(FeedbackVectorICSlot(1)) == Code::LOAD_IC);
+    CHECK(feedback_vector->GetKind(FeedbackVectorICSlot(2)) == Code::CALL_IC);
+    CHECK(feedback_vector->GetKind(FeedbackVectorICSlot(3)) == Code::LOAD_IC);
+  }
 
   CompileRun(
       "function testkeyedprop(x) {"
@@ -462,10 +483,19 @@ TEST(ReferenceContextAllocatesNoSlots) {
 
   // There should be 1 LOAD_IC for the load of a, and one KEYED_LOAD_IC for the
   // load of x[0] in the return statement.
   feedback_vector = handle(f->shared()->feedback_vector(), isolate);
-  CHECK_EQ(2, feedback_vector->ICSlots());
-  CHECK(feedback_vector->GetKind(FeedbackVectorICSlot(0)) == Code::LOAD_IC);
-  CHECK(feedback_vector->GetKind(FeedbackVectorICSlot(1)) ==
-        Code::KEYED_LOAD_IC);
+  if (FLAG_vector_stores) {
+    CHECK_EQ(3, feedback_vector->ICSlots());
+    CHECK(feedback_vector->GetKind(FeedbackVectorICSlot(0)) == Code::LOAD_IC);
+    CHECK(feedback_vector->GetKind(FeedbackVectorICSlot(1)) ==
+          Code::KEYED_STORE_IC);
+    CHECK(feedback_vector->GetKind(FeedbackVectorICSlot(2)) ==
+          Code::KEYED_LOAD_IC);
+  } else {
+    CHECK_EQ(2, feedback_vector->ICSlots());
+    CHECK(feedback_vector->GetKind(FeedbackVectorICSlot(0)) == Code::LOAD_IC);
+    CHECK(feedback_vector->GetKind(FeedbackVectorICSlot(1)) ==
+          Code::KEYED_LOAD_IC);
+  }
 
   CompileRun(
       "function testcompound(x) {"
@@ -478,9 +508,47 @@ TEST(ReferenceContextAllocatesNoSlots) {
 
   // There should be 3 LOAD_ICs, for load of a and load of x.old and x.young.
   feedback_vector = handle(f->shared()->feedback_vector(), isolate);
-  CHECK_EQ(3, feedback_vector->ICSlots());
-  CHECK(feedback_vector->GetKind(FeedbackVectorICSlot(0)) == Code::LOAD_IC);
-  CHECK(feedback_vector->GetKind(FeedbackVectorICSlot(1)) == Code::LOAD_IC);
-  CHECK(feedback_vector->GetKind(FeedbackVectorICSlot(2)) == Code::LOAD_IC);
+  if (FLAG_vector_stores) {
+    CHECK_EQ(6, feedback_vector->ICSlots());
+    CHECK(feedback_vector->GetKind(FeedbackVectorICSlot(0)) == Code::LOAD_IC);
+    CHECK(feedback_vector->GetKind(FeedbackVectorICSlot(1)) == Code::STORE_IC);
+    CHECK(feedback_vector->GetKind(FeedbackVectorICSlot(2)) == Code::STORE_IC);
+    CHECK(feedback_vector->GetKind(FeedbackVectorICSlot(3)) == Code::STORE_IC);
+    CHECK(feedback_vector->GetKind(FeedbackVectorICSlot(4)) == Code::LOAD_IC);
+    CHECK(feedback_vector->GetKind(FeedbackVectorICSlot(5)) == Code::LOAD_IC);
+  } else {
+    CHECK_EQ(3, feedback_vector->ICSlots());
+    CHECK(feedback_vector->GetKind(FeedbackVectorICSlot(0)) == Code::LOAD_IC);
+    CHECK(feedback_vector->GetKind(FeedbackVectorICSlot(1)) == Code::LOAD_IC);
+    CHECK(feedback_vector->GetKind(FeedbackVectorICSlot(2)) == Code::LOAD_IC);
+  }
 }
 
 
+TEST(VectorStoreICBasic) {
+  if (i::FLAG_always_opt) return;
+  if (!i::FLAG_vector_stores) return;
+
+  CcTest::InitializeVM();
+  LocalContext context;
+  v8::HandleScope scope(context->GetIsolate());
+  Isolate* isolate = CcTest::i_isolate();
+
+  CompileRun(
+      "function f(a) {"
+      "  a.foo = 5;"
+      "}"
+      "var a = { foo: 3 };"
+      "f(a);"
+      "f(a);"
+      "f(a);");
+  Handle<JSFunction> f = GetFunction("f");
+  // There should be one IC slot.
+  Handle<TypeFeedbackVector> feedback_vector =
+      Handle<TypeFeedbackVector>(f->shared()->feedback_vector(), isolate);
+  CHECK_EQ(1, feedback_vector->ICSlots());
+  FeedbackVectorICSlot slot(0);
+  StoreICNexus nexus(feedback_vector, slot);
+  CHECK_EQ(MONOMORPHIC, nexus.StateFromFeedback());
+}
 }
-- 
2.7.4
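VectorStoreICBasic checks only the monomorphic case: three stores through one
site with one receiver shape should leave MONOMORPHIC feedback. A natural
follow-on, sketched here with the same cctest helpers the test already uses
(CompileRun, GetFunction, StoreICNexus), would drive the site with two shapes;
whether two object-literal shapes suffice to reach POLYMORPHIC here is an
assumption worth verifying before relying on it:

    CompileRun(
        "function g(o) { o.foo = 7; }"
        "g({ foo: 1 });"
        "g({ foo: 2, bar: 3 });");
    Handle<JSFunction> g = GetFunction("g");
    // Same store site, two receiver maps: expect polymorphic feedback.
    Handle<TypeFeedbackVector> vector(g->shared()->feedback_vector(), isolate);
    StoreICNexus nexus(vector, FeedbackVectorICSlot(0));
    CHECK_EQ(POLYMORPHIC, nexus.StateFromFeedback());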