void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
Label miss;
Register receiver = LoadDescriptor::ReceiverRegister();
-
- NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, r3,
- r4, &miss);
+ // Ensure that the vector and slot registers won't be clobbered before
+ // calling the miss handler.
+ DCHECK(!FLAG_vector_ics ||
+ !AreAliased(r4, r5, VectorLoadICDescriptor::VectorRegister(),
+ VectorLoadICDescriptor::SlotRegister()));
+
+ NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, r4,
+ r5, &miss);
__ bind(&miss);
PropertyAccessCompiler::TailCallBuiltin(
masm, PropertyAccessCompiler::MissBuiltin(Code::LOAD_IC));
Register receiver = LoadDescriptor::ReceiverRegister();
Register index = LoadDescriptor::NameRegister();
- Register scratch = r3;
+ Register scratch = r5;
Register result = r0;
DCHECK(!scratch.is(receiver) && !scratch.is(index));
+ DCHECK(!FLAG_vector_ics ||
+ (!scratch.is(VectorLoadICDescriptor::VectorRegister()) &&
+ result.is(VectorLoadICDescriptor::SlotRegister())));
+ // StringCharAtGenerator doesn't use the result register until it's passed
+ // the different miss possibilities. If it did, we would have a conflict
+ // when FLAG_vector_ics is true.
StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
&miss, // When not a string.
&miss, // When not a number.
UseFixed(instr->receiver(), LoadDescriptor::ReceiverRegister());
LOperand* name_register =
UseFixed(instr->name(), LoadDescriptor::NameRegister());
+ LOperand* slot = NULL;
+ LOperand* vector = NULL;
+ if (FLAG_vector_ics) {
+ slot = UseFixed(instr->slot(), VectorLoadICDescriptor::SlotRegister());
+ vector =
+ UseFixed(instr->vector(), VectorLoadICDescriptor::VectorRegister());
+ }
+
// Not marked as call. It can't deoptimize, and it never returns.
return new (zone()) LTailCallThroughMegamorphicCache(
- context, receiver_register, name_register);
+ context, receiver_register, name_register, slot, vector);
}
LOperand* global_object =
UseFixed(instr->global_object(), LoadDescriptor::ReceiverRegister());
LOperand* vector = NULL;
- if (FLAG_vector_ics) {
+ if (instr->HasVectorAndSlot()) {
vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
}
LLoadGlobalGeneric* result =
LOperand* object =
UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
LOperand* vector = NULL;
- if (FLAG_vector_ics) {
+ if (instr->HasVectorAndSlot()) {
vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
}
UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
LOperand* vector = NULL;
- if (FLAG_vector_ics) {
+ if (instr->HasVectorAndSlot()) {
vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
}
// NOTE(review): this file is raw diff residue; leading "+"/"-" are patch
// markers, not code. This hunk widens the lithium instruction from 3 inputs
// (context, receiver, name) to 5, adding the IC feedback slot and vector.
// The constructor loses `explicit` because it now takes multiple arguments.
class LTailCallThroughMegamorphicCache FINAL
-    : public LTemplateInstruction<0, 3, 0> {
+    : public LTemplateInstruction<0, 5, 0> {
 public:
-  explicit LTailCallThroughMegamorphicCache(LOperand* context,
-                                            LOperand* receiver,
-                                            LOperand* name) {
+  LTailCallThroughMegamorphicCache(LOperand* context, LOperand* receiver,
+                                   LOperand* name, LOperand* slot,
+                                   LOperand* vector) {
    inputs_[0] = context;
    inputs_[1] = receiver;
    inputs_[2] = name;
+    inputs_[3] = slot;
+    inputs_[4] = vector;
  }
  LOperand* context() { return inputs_[0]; }
  LOperand* receiver() { return inputs_[1]; }
  LOperand* name() { return inputs_[2]; }
+  LOperand* slot() { return inputs_[3]; }
+  LOperand* vector() { return inputs_[4]; }
  DECLARE_CONCRETE_INSTRUCTION(TailCallThroughMegamorphicCache,
                               "tail-call-through-megamorphic-cache")
  DECLARE_HYDROGEN_ACCESSOR(TailCallThroughMegamorphicCache)
};
+
class LUnknownOSRValue FINAL : public LTemplateInstruction<1, 0, 0> {
public:
bool HasInterestingComment(LCodeGen* gen) const OVERRIDE { return false; }
const CallInterfaceDescriptor descriptor() { return descriptor_; }
+ DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)
+
private:
DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor")
- DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)
void PrintDataTo(StringStream* stream) OVERRIDE;
// (ARM) Materializes the type-feedback vector and its slot index (as a Smi)
// into the fixed VectorLoadIC descriptor registers before an IC call.
// NOTE(review): diff residue — "+"/"-" are patch markers.
void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
DCHECK(FLAG_vector_ics);
Register vector_register = ToRegister(instr->temp_vector());
+  Register slot_register = VectorLoadICDescriptor::SlotRegister();
DCHECK(vector_register.is(VectorLoadICDescriptor::VectorRegister()));
// The slot register is fixed by the calling convention (asserted to be r0
// below), so the register allocator never needs to allocate it.
+  DCHECK(slot_register.is(r0));
+
// Scope allowing the deferred feedback-vector handle to be dereferenced so
// GetIndex() can read it below — presumably required by handle-deref checks;
// TODO(review): confirm against AllowDeferredHandleDereference semantics.
+  AllowDeferredHandleDereference vector_structure_check;
Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
__ Move(vector_register, vector);
// No need to allocate this register.
-  DCHECK(VectorLoadICDescriptor::SlotRegister().is(r0));
-  int index = vector->GetIndex(instr->hydrogen()->slot());
-  __ mov(VectorLoadICDescriptor::SlotRegister(), Operand(Smi::FromInt(index)));
+  FeedbackVectorICSlot slot = instr->hydrogen()->slot();
+  int index = vector->GetIndex(slot);
+  __ mov(slot_register, Operand(Smi::FromInt(index)));
}
DCHECK(name.is(LoadDescriptor::NameRegister()));
DCHECK(receiver.is(r1));
DCHECK(name.is(r2));
+ Register scratch = r4;
+ Register extra = r5;
+ Register extra2 = r6;
+ Register extra3 = r9;
- Register scratch = r3;
- Register extra = r4;
- Register extra2 = r5;
- Register extra3 = r6;
+#ifdef DEBUG
+ Register slot = FLAG_vector_ics ? ToRegister(instr->slot()) : no_reg;
+ Register vector = FLAG_vector_ics ? ToRegister(instr->vector()) : no_reg;
+ DCHECK(!FLAG_vector_ics ||
+ !AreAliased(slot, vector, scratch, extra, extra2, extra3));
+#endif
// Important for the tail-call.
bool must_teardown_frame = NeedsEagerFrame();
- // The probe will tail call to a handler if found.
- isolate()->stub_cache()->GenerateProbe(masm(), instr->hydrogen()->flags(),
- must_teardown_frame, receiver, name,
- scratch, extra, extra2, extra3);
+ if (!instr->hydrogen()->is_just_miss()) {
+ DCHECK(!instr->hydrogen()->is_keyed_load());
+
+ // The probe will tail call to a handler if found.
+ isolate()->stub_cache()->GenerateProbe(
+ masm(), Code::LOAD_IC, instr->hydrogen()->flags(), must_teardown_frame,
+ receiver, name, scratch, extra, extra2, extra3);
+ }
// Tail call to miss if we ended up here.
if (must_teardown_frame) __ LeaveFrame(StackFrame::INTERNAL);
- LoadIC::GenerateMiss(masm());
+ if (instr->hydrogen()->is_keyed_load()) {
+ KeyedLoadIC::GenerateMiss(masm());
+ } else {
+ LoadIC::GenerateMiss(masm());
+ }
}
// (ARM) Emits a descriptor-based call. This hunk adds a tail-call path:
// when the hydrogen instruction IsTailCall(), the frame is torn down and a
// Jump is emitted instead of a Call — no safepoint/pointer map is recorded
// because control never returns. The non-tail path keeps the original
// SafepointGenerator-wrapped Call. NOTE(review): diff residue; "+"/"-" are
// patch markers.
void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
DCHECK(ToRegister(instr->result()).is(r0));
-  LPointerMap* pointers = instr->pointer_map();
-  SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
-
-  if (instr->target()->IsConstantOperand()) {
-    LConstantOperand* target = LConstantOperand::cast(instr->target());
-    Handle<Code> code = Handle<Code>::cast(ToHandle(target));
-    generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
-    PlatformInterfaceDescriptor* call_descriptor =
-        instr->descriptor().platform_specific_descriptor();
-    __ Call(code, RelocInfo::CODE_TARGET, TypeFeedbackId::None(), al,
-            call_descriptor->storage_mode());
+  if (instr->hydrogen()->IsTailCall()) {
+    if (NeedsEagerFrame()) __ LeaveFrame(StackFrame::INTERNAL);
+
+    if (instr->target()->IsConstantOperand()) {
+      LConstantOperand* target = LConstantOperand::cast(instr->target());
+      Handle<Code> code = Handle<Code>::cast(ToHandle(target));
+      __ Jump(code, RelocInfo::CODE_TARGET);
+    } else {
+      DCHECK(instr->target()->IsRegister());
+      Register target = ToRegister(instr->target());
+      // Make sure we don't emit any additional entries in the constant pool
+      // before the call to ensure that the CallCodeSize() calculated the
+      // correct
+      // number of instructions for the constant pool load.
+      {
+        ConstantPoolUnavailableScope constant_pool_unavailable(masm_);
+        __ add(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
+      }
+      __ Jump(target);
+    }
  } else {
-    DCHECK(instr->target()->IsRegister());
-    Register target = ToRegister(instr->target());
-    generator.BeforeCall(__ CallSize(target));
-    // Make sure we don't emit any additional entries in the constant pool
-    // before the call to ensure that the CallCodeSize() calculated the correct
-    // number of instructions for the constant pool load.
-    {
-      ConstantPoolUnavailableScope constant_pool_unavailable(masm_);
-      __ add(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
+    LPointerMap* pointers = instr->pointer_map();
+    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+
+    if (instr->target()->IsConstantOperand()) {
+      LConstantOperand* target = LConstantOperand::cast(instr->target());
+      Handle<Code> code = Handle<Code>::cast(ToHandle(target));
+      generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
+      PlatformInterfaceDescriptor* call_descriptor =
+          instr->descriptor().platform_specific_descriptor();
+      __ Call(code, RelocInfo::CODE_TARGET, TypeFeedbackId::None(), al,
+              call_descriptor->storage_mode());
+    } else {
+      DCHECK(instr->target()->IsRegister());
+      Register target = ToRegister(instr->target());
+      generator.BeforeCall(__ CallSize(target));
+      // Make sure we don't emit any additional entries in the constant pool
+      // before the call to ensure that the CallCodeSize() calculated the
+      // correct
+      // number of instructions for the constant pool load.
+      {
+        ConstantPoolUnavailableScope constant_pool_unavailable(masm_);
+        __ add(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
+      }
+      __ Call(target);
    }
-    __ Call(target);
+    generator.AfterCall();
  }
-  generator.AfterCall();
}
void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
Label miss;
Register receiver = LoadDescriptor::ReceiverRegister();
+ // Ensure that the vector and slot registers won't be clobbered before
+ // calling the miss handler.
+ DCHECK(!FLAG_vector_ics ||
+ !AreAliased(x10, x11, VectorLoadICDescriptor::VectorRegister(),
+ VectorLoadICDescriptor::SlotRegister()));
NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, x10,
x11, &miss);
Register receiver = LoadDescriptor::ReceiverRegister();
Register index = LoadDescriptor::NameRegister();
Register result = x0;
- Register scratch = x3;
+ Register scratch = x10;
DCHECK(!scratch.is(receiver) && !scratch.is(index));
+ DCHECK(!FLAG_vector_ics ||
+ (!scratch.is(VectorLoadICDescriptor::VectorRegister()) &&
+ result.is(VectorLoadICDescriptor::SlotRegister())));
+ // StringCharAtGenerator doesn't use the result register until it's passed
+ // the different miss possibilities. If it did, we would have a conflict
+ // when FLAG_vector_ics is true.
StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
&miss, // When not a string.
&miss, // When not a number.
UseFixed(instr->receiver(), LoadDescriptor::ReceiverRegister());
LOperand* name_register =
UseFixed(instr->name(), LoadDescriptor::NameRegister());
+ LOperand* slot = NULL;
+ LOperand* vector = NULL;
+ if (FLAG_vector_ics) {
+ slot = UseFixed(instr->slot(), VectorLoadICDescriptor::SlotRegister());
+ vector =
+ UseFixed(instr->vector(), VectorLoadICDescriptor::VectorRegister());
+ }
+
// Not marked as call. It can't deoptimize, and it never returns.
return new (zone()) LTailCallThroughMegamorphicCache(
- context, receiver_register, name_register);
+ context, receiver_register, name_register, slot, vector);
}
LOperand* global_object =
UseFixed(instr->global_object(), LoadDescriptor::ReceiverRegister());
LOperand* vector = NULL;
- if (FLAG_vector_ics) {
+ if (instr->HasVectorAndSlot()) {
vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
}
UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
LOperand* vector = NULL;
- if (FLAG_vector_ics) {
+ if (instr->HasVectorAndSlot()) {
vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
}
LOperand* object =
UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
LOperand* vector = NULL;
- if (FLAG_vector_ics) {
+ if (instr->HasVectorAndSlot()) {
vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
}
class LTailCallThroughMegamorphicCache FINAL
- : public LTemplateInstruction<0, 3, 0> {
+ : public LTemplateInstruction<0, 5, 0> {
public:
- explicit LTailCallThroughMegamorphicCache(LOperand* context,
- LOperand* receiver,
- LOperand* name) {
+ LTailCallThroughMegamorphicCache(LOperand* context, LOperand* receiver,
+ LOperand* name, LOperand* slot,
+ LOperand* vector) {
inputs_[0] = context;
inputs_[1] = receiver;
inputs_[2] = name;
+ inputs_[3] = slot;
+ inputs_[4] = vector;
}
LOperand* context() { return inputs_[0]; }
LOperand* receiver() { return inputs_[1]; }
LOperand* name() { return inputs_[2]; }
+ LOperand* slot() { return inputs_[3]; }
+ LOperand* vector() { return inputs_[4]; }
DECLARE_CONCRETE_INSTRUCTION(TailCallThroughMegamorphicCache,
"tail-call-through-megamorphic-cache")
CallInterfaceDescriptor descriptor() { return descriptor_; }
+ DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)
+
private:
DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor")
- DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)
void PrintDataTo(StringStream* stream) OVERRIDE;
DCHECK(name.is(LoadDescriptor::NameRegister()));
DCHECK(receiver.is(x1));
DCHECK(name.is(x2));
-
- Register scratch = x3;
- Register extra = x4;
- Register extra2 = x5;
- Register extra3 = x6;
+ Register scratch = x4;
+ Register extra = x5;
+ Register extra2 = x6;
+ Register extra3 = x7;
+ DCHECK(!FLAG_vector_ics ||
+ !AreAliased(ToRegister(instr->slot()), ToRegister(instr->vector()),
+ scratch, extra, extra2, extra3));
// Important for the tail-call.
bool must_teardown_frame = NeedsEagerFrame();
- // The probe will tail call to a handler if found.
- isolate()->stub_cache()->GenerateProbe(masm(), instr->hydrogen()->flags(),
- must_teardown_frame, receiver, name,
- scratch, extra, extra2, extra3);
+ if (!instr->hydrogen()->is_just_miss()) {
+ DCHECK(!instr->hydrogen()->is_keyed_load());
+
+ // The probe will tail call to a handler if found.
+ isolate()->stub_cache()->GenerateProbe(
+ masm(), Code::LOAD_IC, instr->hydrogen()->flags(), must_teardown_frame,
+ receiver, name, scratch, extra, extra2, extra3);
+ }
// Tail call to miss if we ended up here.
if (must_teardown_frame) __ LeaveFrame(StackFrame::INTERNAL);
- LoadIC::GenerateMiss(masm());
+ if (instr->hydrogen()->is_keyed_load()) {
+ KeyedLoadIC::GenerateMiss(masm());
+ } else {
+ LoadIC::GenerateMiss(masm());
+ }
}
DCHECK(instr->IsMarkedAsCall());
DCHECK(ToRegister(instr->result()).Is(x0));
- LPointerMap* pointers = instr->pointer_map();
- SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
-
- if (instr->target()->IsConstantOperand()) {
- LConstantOperand* target = LConstantOperand::cast(instr->target());
- Handle<Code> code = Handle<Code>::cast(ToHandle(target));
- generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
- // TODO(all): on ARM we use a call descriptor to specify a storage mode
- // but on ARM64 we only have one storage mode so it isn't necessary. Check
- // this understanding is correct.
- __ Call(code, RelocInfo::CODE_TARGET, TypeFeedbackId::None());
+ if (instr->hydrogen()->IsTailCall()) {
+ if (NeedsEagerFrame()) __ LeaveFrame(StackFrame::INTERNAL);
+
+ if (instr->target()->IsConstantOperand()) {
+ LConstantOperand* target = LConstantOperand::cast(instr->target());
+ Handle<Code> code = Handle<Code>::cast(ToHandle(target));
+ // TODO(all): on ARM we use a call descriptor to specify a storage mode
+ // but on ARM64 we only have one storage mode so it isn't necessary. Check
+ // this understanding is correct.
+ __ Jump(code, RelocInfo::CODE_TARGET);
+ } else {
+ DCHECK(instr->target()->IsRegister());
+ Register target = ToRegister(instr->target());
+ __ Add(target, target, Code::kHeaderSize - kHeapObjectTag);
+ __ Br(target);
+ }
} else {
- DCHECK(instr->target()->IsRegister());
- Register target = ToRegister(instr->target());
- generator.BeforeCall(__ CallSize(target));
- __ Add(target, target, Code::kHeaderSize - kHeapObjectTag);
- __ Call(target);
+ LPointerMap* pointers = instr->pointer_map();
+ SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+
+ if (instr->target()->IsConstantOperand()) {
+ LConstantOperand* target = LConstantOperand::cast(instr->target());
+ Handle<Code> code = Handle<Code>::cast(ToHandle(target));
+ generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
+ // TODO(all): on ARM we use a call descriptor to specify a storage mode
+ // but on ARM64 we only have one storage mode so it isn't necessary. Check
+ // this understanding is correct.
+ __ Call(code, RelocInfo::CODE_TARGET, TypeFeedbackId::None());
+ } else {
+ DCHECK(instr->target()->IsRegister());
+ Register target = ToRegister(instr->target());
+ generator.BeforeCall(__ CallSize(target));
+ __ Add(target, target, Code::kHeaderSize - kHeapObjectTag);
+ __ Call(target);
+ }
+ generator.AfterCall();
}
- generator.AfterCall();
+
after_push_argument_ = false;
}
// (ARM64) Counterpart of the ARM EmitVectorLoadICRegisters: loads the
// feedback vector handle and the slot index (as a Smi) into the fixed
// VectorLoadIC descriptor registers (slot register asserted to be x0).
// NOTE(review): diff residue — "+"/"-" are patch markers.
void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
DCHECK(FLAG_vector_ics);
Register vector_register = ToRegister(instr->temp_vector());
+  Register slot_register = VectorLoadICDescriptor::SlotRegister();
DCHECK(vector_register.is(VectorLoadICDescriptor::VectorRegister()));
+  DCHECK(slot_register.is(x0));
+
// Allows GetIndex() below to read through the deferred vector handle.
+  AllowDeferredHandleDereference vector_structure_check;
Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
__ Mov(vector_register, vector);
// No need to allocate this register.
-  DCHECK(VectorLoadICDescriptor::SlotRegister().is(x0));
-  int index = vector->GetIndex(instr->hydrogen()->slot());
-  __ Mov(VectorLoadICDescriptor::SlotRegister(), Smi::FromInt(index));
+  FeedbackVectorICSlot slot = instr->hydrogen()->slot();
+  int index = vector->GetIndex(slot);
+  __ Mov(slot_register, Smi::FromInt(index));
}
#include "src/code-stubs.h"
#include "src/field-index.h"
#include "src/hydrogen.h"
+#include "src/ic/ic.h"
#include "src/lithium.h"
namespace v8 {
HValue* shared_info,
HValue* native_context);
+ // Tail calls handler found at array[map_index + 1].
+ void TailCallHandler(HValue* receiver, HValue* name, HValue* array,
+ HValue* map_index, HValue* slot, HValue* vector);
+
+ // Tail calls handler_code.
+ void TailCallHandler(HValue* receiver, HValue* name, HValue* slot,
+ HValue* vector, HValue* handler_code);
+
+ void TailCallMiss(HValue* receiver, HValue* name, HValue* slot,
+ HValue* vector, bool keyed_load);
+
+ // Handle MONOMORPHIC and POLYMORPHIC LoadIC and KeyedLoadIC cases.
+ void HandleArrayCases(HValue* array, HValue* receiver, HValue* name,
+ HValue* slot, HValue* vector, bool keyed_load);
+
private:
HValue* BuildArraySingleArgumentConstructor(JSArrayBuilder* builder);
HValue* BuildArrayNArgumentsConstructor(JSArrayBuilder* builder,
}
// Loads the IC handler stored at array[map_index + 1] and tail-calls it via
// the five-argument overload below. The "+1" element is reached by adding
// kPointerSize to HLoadKeyed's base offset instead of materializing
// map_index + 1. NOTE(review): diff residue — "+" is a patch marker.
+void CodeStubGraphBuilderBase::TailCallHandler(HValue* receiver, HValue* name,
+                                               HValue* array, HValue* map_index,
+                                               HValue* slot, HValue* vector) {
+  // The handler is at array[map_index + 1]. Compute this with a custom offset
+  // to HLoadKeyed.
+  int offset =
+      GetDefaultHeaderSizeForElementsKind(FAST_ELEMENTS) + kPointerSize;
+  HValue* handler_code =
+      Add<HLoadKeyed>(array, map_index, static_cast<HValue*>(NULL),
+                      FAST_ELEMENTS, NEVER_RETURN_HOLE, offset);
+  TailCallHandler(receiver, name, slot, vector, handler_code);
+}
+
+
// Tail-calls handler_code directly using the VectorLoadIC calling convention
// (context, receiver, name, slot, vector = 5 operands). argument_count is 0
// because a TAIL_CALL HCallWithDescriptor takes no stack arguments.
// NOTE(review): diff residue — "+" is a patch marker.
+void CodeStubGraphBuilderBase::TailCallHandler(HValue* receiver, HValue* name,
+                                               HValue* slot, HValue* vector,
+                                               HValue* handler_code) {
+  VectorLoadICDescriptor descriptor(isolate());
+  HValue* op_vals[] = {context(), receiver, name, slot, vector};
+  Add<HCallWithDescriptor>(handler_code, 0, descriptor,
+                           Vector<HValue*>(op_vals, 5), TAIL_CALL);
+  // We never return here, it is a tail call.
+}
+
+
// Tail-calls the IC miss handler. The second ComputeFlags argument (true)
// sets PERFORM_MISS_ONLY, so codegen skips the stub-cache probe and goes
// straight to the (Keyed)LoadIC miss builtin; keyed_load selects which one.
// NOTE(review): diff residue — "+" is a patch marker.
+void CodeStubGraphBuilderBase::TailCallMiss(HValue* receiver, HValue* name,
+                                            HValue* slot, HValue* vector,
+                                            bool keyed_load) {
+  DCHECK(FLAG_vector_ics);
+  Add<HTailCallThroughMegamorphicCache>(
+      receiver, name, slot, vector,
+      HTailCallThroughMegamorphicCache::ComputeFlags(keyed_load, true));
+  // We never return here, it is a tail call.
+}
+
+
// Dispatches the MONOMORPHIC/POLYMORPHIC cases of a (Keyed)LoadIC whose
// feedback is a FixedArray of (map, handler) pairs. For keyed loads the map
// entries start at index 1 — element 0 holds the recorded name (see the
// VectorKeyedLoadStub builder) — otherwise at index 0. The first map is
// checked inline; on mismatch the remaining pairs are scanned in a loop
// stepping by 2. Smi receivers and unmatched maps simply fall through
// (the caller tail-calls miss afterwards).
// NOTE(review): diff residue — "+" is a patch marker.
+void CodeStubGraphBuilderBase::HandleArrayCases(HValue* array, HValue* receiver,
+                                                HValue* name, HValue* slot,
+                                                HValue* vector,
+                                                bool keyed_load) {
+  IfBuilder if_receiver_heap_object(this);
+  if_receiver_heap_object.IfNot<HIsSmiAndBranch>(receiver);
+  if_receiver_heap_object.Then();
+  {
+    HConstant* constant_two = Add<HConstant>(2);
+    HConstant* constant_three = Add<HConstant>(3);
+
+    HValue* receiver_map = AddLoadMap(receiver, static_cast<HValue*>(NULL));
+    HValue* start =
+        keyed_load ? graph()->GetConstant1() : graph()->GetConstant0();
+    HValue* array_map =
+        Add<HLoadKeyed>(array, start, static_cast<HValue*>(NULL), FAST_ELEMENTS,
+                        ALLOW_RETURN_HOLE);
+    IfBuilder if_correct_map(this);
+    if_correct_map.If<HCompareObjectEqAndBranch>(receiver_map, array_map);
+    if_correct_map.Then();
+    { TailCallHandler(receiver, name, array, start, slot, vector); }
+    if_correct_map.Else();
+    {
+      // If our array has more elements, the ic is polymorphic. Look for the
+      // receiver map in the rest of the array.
+      HValue* length =
+          AddLoadFixedArrayLength(array, static_cast<HValue*>(NULL));
+      LoopBuilder builder(this, context(), LoopBuilder::kPostIncrement,
+                          constant_two);
+      start = keyed_load ? constant_three : constant_two;
+      HValue* key = builder.BeginBody(start, length, Token::LT);
+      {
+        HValue* array_map =
+            Add<HLoadKeyed>(array, key, static_cast<HValue*>(NULL),
+                            FAST_ELEMENTS, ALLOW_RETURN_HOLE);
+        IfBuilder if_correct_poly_map(this);
+        if_correct_poly_map.If<HCompareObjectEqAndBranch>(receiver_map,
+                                                          array_map);
+        if_correct_poly_map.Then();
+        { TailCallHandler(receiver, name, array, key, slot, vector); }
+      }
+      builder.EndBody();
+    }
+    if_correct_map.End();
+  }
+}
+
+
// VectorLoadStub graph: replaces the previous always-deopt placeholder (the
// removed "-" lines) with the real LoadIC dispatcher. Feedback is read from
// vector[slot]: a FixedArray means mono/polymorphic (HandleArrayCases); the
// megamorphic sentinel symbol probes the stub cache; anything else (or any
// fall-through) tail-calls miss. The final return is unreachable — every
// path ends in a tail call. NOTE(review): diff residue; "+"/"-" are patch
// markers.
template <>
HValue* CodeStubGraphBuilder<VectorLoadStub>::BuildCodeStub() {
HValue* receiver = GetParameter(VectorLoadICDescriptor::kReceiverIndex);
-  Add<HDeoptimize>("Always deopt", Deoptimizer::EAGER);
-  return receiver;
+  HValue* name = GetParameter(VectorLoadICDescriptor::kNameIndex);
+  HValue* slot = GetParameter(VectorLoadICDescriptor::kSlotIndex);
+  HValue* vector = GetParameter(VectorLoadICDescriptor::kVectorIndex);
+
+  // If the feedback is an array, then the IC is in the monomorphic or
+  // polymorphic state.
+  HValue* feedback = Add<HLoadKeyed>(vector, slot, static_cast<HValue*>(NULL),
+                                     FAST_ELEMENTS, ALLOW_RETURN_HOLE);
+  IfBuilder array_checker(this);
+  array_checker.If<HCompareMap>(feedback,
+                                isolate()->factory()->fixed_array_map());
+  array_checker.Then();
+  { HandleArrayCases(feedback, receiver, name, slot, vector, false); }
+  array_checker.Else();
+  {
+    // Is the IC megamorphic?
+    IfBuilder mega_checker(this);
+    HConstant* megamorphic_symbol =
+        Add<HConstant>(isolate()->factory()->megamorphic_symbol());
+    mega_checker.If<HCompareObjectEqAndBranch>(feedback, megamorphic_symbol);
+    mega_checker.Then();
+    {
+      // Probe the stub cache.
+      Add<HTailCallThroughMegamorphicCache>(
+          receiver, name, slot, vector,
+          HTailCallThroughMegamorphicCache::ComputeFlags(false, false));
+    }
+    mega_checker.End();
+  }
+  array_checker.End();
+
+  TailCallMiss(receiver, name, slot, vector, false);
+  return graph()->GetConstant0();
}
// VectorKeyedLoadStub graph: keyed analogue of the VectorLoadStub builder.
// Extra wrinkles vs. the named-load version: (1) feedback[0] encodes the
// recorded key — 0 means element handlers (name must be a Smi index), a
// string must match name exactly or we go to miss; (2) the generic-state
// sentinel tail-calls the generic KeyedLoadIC stub using the 3-argument
// LoadDescriptor convention. All paths end in a tail call; the trailing
// return is unreachable. NOTE(review): diff residue; "+"/"-" are patch
// markers.
template <>
HValue* CodeStubGraphBuilder<VectorKeyedLoadStub>::BuildCodeStub() {
HValue* receiver = GetParameter(VectorLoadICDescriptor::kReceiverIndex);
-  Add<HDeoptimize>("Always deopt", Deoptimizer::EAGER);
-  return receiver;
+  HValue* name = GetParameter(VectorLoadICDescriptor::kNameIndex);
+  HValue* slot = GetParameter(VectorLoadICDescriptor::kSlotIndex);
+  HValue* vector = GetParameter(VectorLoadICDescriptor::kVectorIndex);
+  HConstant* zero = graph()->GetConstant0();
+
+  // If the feedback is an array, then the IC is in the monomorphic or
+  // polymorphic state.
+  HValue* feedback = Add<HLoadKeyed>(vector, slot, static_cast<HValue*>(NULL),
+                                     FAST_ELEMENTS, ALLOW_RETURN_HOLE);
+  IfBuilder array_checker(this);
+  array_checker.If<HCompareMap>(feedback,
+                                isolate()->factory()->fixed_array_map());
+  array_checker.Then();
+  {
+    // If feedback[0] is 0, then the IC has element handlers and name should be
+    // a smi. If feedback[0] is a string, verify that it matches name.
+    HValue* recorded_name =
+        Add<HLoadKeyed>(feedback, zero, static_cast<HValue*>(NULL),
+                        FAST_ELEMENTS, ALLOW_RETURN_HOLE);
+
+    IfBuilder recorded_name_is_zero(this);
+    recorded_name_is_zero.If<HCompareObjectEqAndBranch>(recorded_name, zero);
+    recorded_name_is_zero.Then();
+    { Add<HCheckSmi>(name); }
+    recorded_name_is_zero.Else();
+    {
+      IfBuilder strings_match(this);
+      strings_match.IfNot<HCompareObjectEqAndBranch>(name, recorded_name);
+      strings_match.Then();
+      TailCallMiss(receiver, name, slot, vector, true);
+      strings_match.End();
+    }
+    recorded_name_is_zero.End();
+
+    HandleArrayCases(feedback, receiver, name, slot, vector, true);
+  }
+  array_checker.Else();
+  {
+    // Check if the IC is in generic state.
+    IfBuilder generic_checker(this);
+    HConstant* generic_symbol =
+        Add<HConstant>(isolate()->factory()->generic_symbol());
+    generic_checker.If<HCompareObjectEqAndBranch>(feedback, generic_symbol);
+    generic_checker.Then();
+    {
+      // Tail-call to the generic KeyedLoadIC, treating it like a handler.
+      Handle<Code> stub = KeyedLoadIC::generic_stub(isolate());
+      HValue* constant_stub = Add<HConstant>(stub);
+      LoadDescriptor descriptor(isolate());
+      HValue* op_vals[] = {context(), receiver, name};
+      Add<HCallWithDescriptor>(constant_stub, 0, descriptor,
+                               Vector<HValue*>(op_vals, 3), TAIL_CALL);
+      // We never return here, it is a tail call.
+    }
+    generic_checker.End();
+  }
+  array_checker.End();
+
+  TailCallMiss(receiver, name, slot, vector, true);
+  return zero;
}
template <>
HValue* CodeStubGraphBuilder<MegamorphicLoadStub>::BuildCodeStub() {
- // The return address is on the stack.
HValue* receiver = GetParameter(LoadDescriptor::kReceiverIndex);
HValue* name = GetParameter(LoadDescriptor::kNameIndex);
+ // We shouldn't generate this when FLAG_vector_ics is true because the
+ // megamorphic case is handled as part of the default stub.
+ DCHECK(!FLAG_vector_ics);
+
// Probe the stub cache.
- Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
- Code::ComputeHandlerFlags(Code::LOAD_IC));
- Add<HTailCallThroughMegamorphicCache>(receiver, name, flags);
+ Add<HTailCallThroughMegamorphicCache>(receiver, name);
// We never continue.
return graph()->GetConstant0();
case HValue::kCallNew:
case HValue::kCallNewArray:
case HValue::kCallStub:
- case HValue::kCallWithDescriptor:
case HValue::kCapturedObject:
case HValue::kClassOfTestAndBranch:
case HValue::kCompareGeneric:
case HValue::kBranch:
case HValue::kCallJSFunction:
case HValue::kCallRuntime:
+ case HValue::kCallWithDescriptor:
case HValue::kChange:
case HValue::kCheckHeapObject:
case HValue::kCheckInstanceType:
}
// Computes the stub-cache probe flags (handler flags for LOAD_IC) on demand.
// Previously these were stored in a Code::Flags field; with slot/vector
// operands added, the instruction derives them here instead.
// NOTE(review): diff residue — "+" is a patch marker.
+Code::Flags HTailCallThroughMegamorphicCache::flags() const {
+  Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
+      Code::ComputeHandlerFlags(Code::LOAD_IC));
+  return code_flags;
+}
+
+
std::ostream& HTailCallThroughMegamorphicCache::PrintDataTo(
std::ostream& os) const { // NOLINT
for (int i = 0; i < OperandCount(); i++) {
};
+enum CallMode { NORMAL_CALL, TAIL_CALL };
+
+
class HCallWithDescriptor FINAL : public HInstruction {
public:
static HCallWithDescriptor* New(Zone* zone, HValue* context, HValue* target,
int argument_count,
CallInterfaceDescriptor descriptor,
- const Vector<HValue*>& operands) {
+ const Vector<HValue*>& operands,
+ CallMode call_mode = NORMAL_CALL) {
DCHECK(operands.length() == descriptor.GetEnvironmentLength());
- HCallWithDescriptor* res = new (zone)
- HCallWithDescriptor(target, argument_count, descriptor, operands, zone);
+ HCallWithDescriptor* res = new (zone) HCallWithDescriptor(
+ target, argument_count, descriptor, operands, call_mode, zone);
return res;
}
HType CalculateInferredType() FINAL { return HType::Tagged(); }
+ bool IsTailCall() const { return call_mode_ == TAIL_CALL; }
+
virtual int argument_count() const {
return argument_count_;
}
// The argument count includes the receiver.
HCallWithDescriptor(HValue* target, int argument_count,
CallInterfaceDescriptor descriptor,
- const Vector<HValue*>& operands, Zone* zone)
+ const Vector<HValue*>& operands, CallMode call_mode,
+ Zone* zone)
: descriptor_(descriptor),
- values_(descriptor.GetEnvironmentLength() + 1, zone) {
- argument_count_ = argument_count;
+ values_(descriptor.GetEnvironmentLength() + 1, zone),
+ argument_count_(argument_count),
+ call_mode_(call_mode) {
+ // We can only tail call without any stack arguments.
+ DCHECK(call_mode != TAIL_CALL || argument_count == 0);
AddOperand(target, zone);
for (int i = 0; i < operands.length(); i++) {
AddOperand(operands[i], zone);
CallInterfaceDescriptor descriptor_;
ZoneList<HValue*> values_;
int argument_count_;
+ CallMode call_mode_;
};
};
// Hydrogen instruction hunk: the base class changes from
// HTemplateInstruction<3> to plain HInstruction with hand-rolled operand
// storage (EmbeddedContainer<HValue*, 5>) so OperandCount() can report 5
// when FLAG_vector_ics is on and 3 otherwise. Two factories exist: the P5
// form takes slot/vector plus a Flags bitmask; the P2 form is the legacy
// 3-operand (context, receiver, name) shape with flags_ = NONE.
// NOTE(review): diff residue — "+"/"-" are patch markers.
-class HTailCallThroughMegamorphicCache FINAL : public HTemplateInstruction<3> {
+class HTailCallThroughMegamorphicCache FINAL : public HInstruction {
 public:
-  DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P3(HTailCallThroughMegamorphicCache,
-                                              HValue*, HValue*, Code::Flags);
+  enum Flags {
+    NONE = 0,
+    CALLED_FROM_KEYED_LOAD = 1 << 0,
+    PERFORM_MISS_ONLY = 1 << 1
+  };
+
// Packs the two booleans into the Flags bitmask used by the P5 factory.
+  static Flags ComputeFlags(bool called_from_keyed_load,
+                            bool perform_miss_only) {
+    Flags flags = NONE;
+    if (called_from_keyed_load) {
+      flags = static_cast<Flags>(flags | CALLED_FROM_KEYED_LOAD);
+    }
+    if (perform_miss_only) {
+      flags = static_cast<Flags>(flags | PERFORM_MISS_ONLY);
+    }
+    return flags;
+  }
+
+  DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P5(
+      HTailCallThroughMegamorphicCache, HValue*, HValue*, HValue*, HValue*,
+      HTailCallThroughMegamorphicCache::Flags);
+
+  DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HTailCallThroughMegamorphicCache,
+                                              HValue*, HValue*);
 Representation RequiredInputRepresentation(int index) OVERRIDE {
 return Representation::Tagged();
 }
// Operand count is dynamic: 5 with vector ICs (adds slot + vector), 3 legacy.
+  virtual int OperandCount() const FINAL OVERRIDE {
+    return FLAG_vector_ics ? 5 : 3;
+  }
+  virtual HValue* OperandAt(int i) const FINAL OVERRIDE { return inputs_[i]; }
+
 HValue* context() const { return OperandAt(0); }
 HValue* receiver() const { return OperandAt(1); }
 HValue* name() const { return OperandAt(2); }
-  Code::Flags flags() const { return flags_; }
+  HValue* slot() const {
+    DCHECK(FLAG_vector_ics);
+    return OperandAt(3);
+  }
+  HValue* vector() const {
+    DCHECK(FLAG_vector_ics);
+    return OperandAt(4);
+  }
// flags() is now computed (see the out-of-line definition) rather than stored.
+  Code::Flags flags() const;
+
+  bool is_keyed_load() const { return flags_ & CALLED_FROM_KEYED_LOAD; }
+  bool is_just_miss() const { return flags_ & PERFORM_MISS_ONLY; }
 std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
 DECLARE_CONCRETE_INSTRUCTION(TailCallThroughMegamorphicCache)
+ protected:
+  virtual void InternalSetOperandAt(int i, HValue* value) FINAL OVERRIDE {
+    inputs_[i] = value;
+  }
+
 private:
// 5-operand constructor (vector-ICs path); requires FLAG_vector_ics.
 HTailCallThroughMegamorphicCache(HValue* context, HValue* receiver,
-                                 HValue* name, Code::Flags flags)
+                                 HValue* name, HValue* slot, HValue* vector,
+                                 Flags flags)
 : flags_(flags) {
+    DCHECK(FLAG_vector_ics);
 SetOperandAt(0, context);
 SetOperandAt(1, receiver);
 SetOperandAt(2, name);
+    SetOperandAt(3, slot);
+    SetOperandAt(4, vector);
 }
-  Code::Flags flags_;
// Legacy 3-operand constructor used when vector ICs are disabled.
+  HTailCallThroughMegamorphicCache(HValue* context, HValue* receiver,
+                                   HValue* name)
+      : flags_(NONE) {
+    SetOperandAt(0, context);
+    SetOperandAt(1, receiver);
+    SetOperandAt(2, name);
+  }
+
+  EmbeddedContainer<HValue*, 5> inputs_;
+  Flags flags_;
};
HValue* global_object() { return OperandAt(1); }
Handle<String> name() const { return name_; }
bool for_typeof() const { return for_typeof_; }
- FeedbackVectorICSlot slot() const {
- DCHECK(FLAG_vector_ics && !slot_.IsInvalid());
- return slot_;
- }
+ FeedbackVectorICSlot slot() const { return slot_; }
Handle<TypeFeedbackVector> feedback_vector() const {
return feedback_vector_;
}
+ bool HasVectorAndSlot() const { return FLAG_vector_ics; }
void SetVectorAndSlot(Handle<TypeFeedbackVector> vector,
FeedbackVectorICSlot slot) {
DCHECK(FLAG_vector_ics);
HValue* object() const { return OperandAt(1); }
Handle<Object> name() const { return name_; }
- FeedbackVectorICSlot slot() const {
- DCHECK(FLAG_vector_ics && !slot_.IsInvalid());
- return slot_;
- }
+ FeedbackVectorICSlot slot() const { return slot_; }
Handle<TypeFeedbackVector> feedback_vector() const {
return feedback_vector_;
}
+ bool HasVectorAndSlot() const { return FLAG_vector_ics; }
void SetVectorAndSlot(Handle<TypeFeedbackVector> vector,
FeedbackVectorICSlot slot) {
DCHECK(FLAG_vector_ics);
if (!other->IsLoadKeyed()) return false;
HLoadKeyed* other_load = HLoadKeyed::cast(other);
- if (IsDehoisted() && base_offset() != other_load->base_offset())
- return false;
+ if (base_offset() != other_load->base_offset()) return false;
return elements_kind() == other_load->elements_kind();
}
HValue* object() const { return OperandAt(0); }
HValue* key() const { return OperandAt(1); }
HValue* context() const { return OperandAt(2); }
- FeedbackVectorICSlot slot() const {
- DCHECK(FLAG_vector_ics && !slot_.IsInvalid());
- return slot_;
- }
+ FeedbackVectorICSlot slot() const { return slot_; }
Handle<TypeFeedbackVector> feedback_vector() const {
return feedback_vector_;
}
+ bool HasVectorAndSlot() const { return FLAG_vector_ics; }
void SetVectorAndSlot(Handle<TypeFeedbackVector> vector,
FeedbackVectorICSlot slot) {
DCHECK(FLAG_vector_ics);
if (FLAG_vector_ics) {
Handle<SharedFunctionInfo> current_shared =
function_state()->compilation_info()->shared_info();
- result->SetVectorAndSlot(
- handle(current_shared->feedback_vector(), isolate()),
- expr->AsProperty()->PropertyFeedbackSlot());
+ Handle<TypeFeedbackVector> vector =
+ handle(current_shared->feedback_vector(), isolate());
+ FeedbackVectorICSlot slot = expr->AsProperty()->PropertyFeedbackSlot();
+ result->SetVectorAndSlot(vector, slot);
}
return result;
} else {
if (FLAG_vector_ics) {
Handle<SharedFunctionInfo> current_shared =
function_state()->compilation_info()->shared_info();
- result->SetVectorAndSlot(
- handle(current_shared->feedback_vector(), isolate()),
- expr->AsProperty()->PropertyFeedbackSlot());
+ Handle<TypeFeedbackVector> vector =
+ handle(current_shared->feedback_vector(), isolate());
+ FeedbackVectorICSlot slot = expr->AsProperty()->PropertyFeedbackSlot();
+ result->SetVectorAndSlot(vector, slot);
}
return result;
} else {
HValue* obj, HValue* key, HValue* val, Expression* expr, BailoutId ast_id,
BailoutId return_id, PropertyAccessType access_type,
bool* has_side_effects) {
- if (key->ActualValue()->IsConstant()) {
+ // TODO(mvstanton): This optimization causes trouble for vector-based
+ // KeyedLoadICs, turn it off for now.
+ if (!FLAG_vector_ics && key->ActualValue()->IsConstant()) {
Handle<Object> constant =
HConstant::cast(key->ActualValue())->handle(isolate());
uint32_t array_index;
void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
Label miss;
Register receiver = LoadDescriptor::ReceiverRegister();
+ if (FLAG_vector_ics) {
+ // With careful management, we won't have to save slot and vector on
+ // the stack. Simply handle the possibly missing case first.
+ // TODO(mvstanton): this code can be more efficient.
+ __ cmp(FieldOperand(receiver, JSFunction::kPrototypeOrInitialMapOffset),
+ Immediate(isolate()->factory()->the_hole_value()));
+ __ j(equal, &miss);
+ __ TryGetFunctionPrototype(receiver, eax, ebx, &miss);
+ __ ret(0);
+ } else {
+ NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, eax,
+ ebx, &miss);
+ }
- NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, eax,
- ebx, &miss);
__ bind(&miss);
PropertyAccessCompiler::TailCallBuiltin(
masm, PropertyAccessCompiler::MissBuiltin(Code::LOAD_IC));
Register receiver = LoadDescriptor::ReceiverRegister();
Register index = LoadDescriptor::NameRegister();
- Register scratch = ebx;
+ Register scratch = edi;
DCHECK(!scratch.is(receiver) && !scratch.is(index));
Register result = eax;
DCHECK(!result.is(scratch));
+ DCHECK(!FLAG_vector_ics ||
+ (!scratch.is(VectorLoadICDescriptor::VectorRegister()) &&
+ result.is(VectorLoadICDescriptor::SlotRegister())));
+ // StringCharAtGenerator doesn't use the result register until after it has
+ // passed the various miss cases. If it did, we would have a conflict when
+ // FLAG_vector_ics is true.
StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
&miss, // When not a string.
&miss, // When not a number.
void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
DCHECK(FLAG_vector_ics);
Register vector_register = ToRegister(instr->temp_vector());
+ Register slot_register = VectorLoadICDescriptor::SlotRegister();
DCHECK(vector_register.is(VectorLoadICDescriptor::VectorRegister()));
+ DCHECK(slot_register.is(eax));
+
+ AllowDeferredHandleDereference vector_structure_check;
Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
__ mov(vector_register, vector);
// No need to allocate this register.
- DCHECK(VectorLoadICDescriptor::SlotRegister().is(eax));
- int index = vector->GetIndex(instr->hydrogen()->slot());
- __ mov(VectorLoadICDescriptor::SlotRegister(),
- Immediate(Smi::FromInt(index)));
+ FeedbackVectorICSlot slot = instr->hydrogen()->slot();
+ int index = vector->GetIndex(slot);
+ __ mov(slot_register, Immediate(Smi::FromInt(index)));
}
Register name = ToRegister(instr->name());
DCHECK(receiver.is(LoadDescriptor::ReceiverRegister()));
DCHECK(name.is(LoadDescriptor::NameRegister()));
+ Register slot = FLAG_vector_ics ? ToRegister(instr->slot()) : no_reg;
+ Register vector = FLAG_vector_ics ? ToRegister(instr->vector()) : no_reg;
Register scratch = ebx;
- Register extra = eax;
+ Register extra = edi;
+ DCHECK(!extra.is(slot) && !extra.is(vector));
DCHECK(!scratch.is(receiver) && !scratch.is(name));
DCHECK(!extra.is(receiver) && !extra.is(name));
// Important for the tail-call.
bool must_teardown_frame = NeedsEagerFrame();
- // The probe will tail call to a handler if found.
- isolate()->stub_cache()->GenerateProbe(masm(), instr->hydrogen()->flags(),
- must_teardown_frame, receiver, name,
- scratch, extra);
+ if (!instr->hydrogen()->is_just_miss()) {
+ if (FLAG_vector_ics) {
+ __ push(slot);
+ __ push(vector);
+ }
+
+ // The probe will tail call to a handler if found.
+ // If --vector-ics is on, then it knows to pop the two args first.
+ DCHECK(!instr->hydrogen()->is_keyed_load());
+ isolate()->stub_cache()->GenerateProbe(
+ masm(), Code::LOAD_IC, instr->hydrogen()->flags(), must_teardown_frame,
+ receiver, name, scratch, extra);
+
+ if (FLAG_vector_ics) {
+ __ pop(vector);
+ __ pop(slot);
+ }
+ }
// Tail call to miss if we ended up here.
if (must_teardown_frame) __ leave();
- LoadIC::GenerateMiss(masm());
+ if (instr->hydrogen()->is_keyed_load()) {
+ KeyedLoadIC::GenerateMiss(masm());
+ } else {
+ LoadIC::GenerateMiss(masm());
+ }
}
void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
DCHECK(ToRegister(instr->result()).is(eax));
- LPointerMap* pointers = instr->pointer_map();
- SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+ if (instr->hydrogen()->IsTailCall()) {
+ if (NeedsEagerFrame()) __ leave();
- if (instr->target()->IsConstantOperand()) {
- LConstantOperand* target = LConstantOperand::cast(instr->target());
- Handle<Code> code = Handle<Code>::cast(ToHandle(target));
- generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
- __ call(code, RelocInfo::CODE_TARGET);
+ if (instr->target()->IsConstantOperand()) {
+ LConstantOperand* target = LConstantOperand::cast(instr->target());
+ Handle<Code> code = Handle<Code>::cast(ToHandle(target));
+ __ jmp(code, RelocInfo::CODE_TARGET);
+ } else {
+ DCHECK(instr->target()->IsRegister());
+ Register target = ToRegister(instr->target());
+ __ add(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ jmp(target);
+ }
} else {
- DCHECK(instr->target()->IsRegister());
- Register target = ToRegister(instr->target());
- generator.BeforeCall(__ CallSize(Operand(target)));
- __ add(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ call(target);
+ LPointerMap* pointers = instr->pointer_map();
+ SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+
+ if (instr->target()->IsConstantOperand()) {
+ LConstantOperand* target = LConstantOperand::cast(instr->target());
+ Handle<Code> code = Handle<Code>::cast(ToHandle(target));
+ generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
+ __ call(code, RelocInfo::CODE_TARGET);
+ } else {
+ DCHECK(instr->target()->IsRegister());
+ Register target = ToRegister(instr->target());
+ generator.BeforeCall(__ CallSize(Operand(target)));
+ __ add(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ call(target);
+ }
+ generator.AfterCall();
}
- generator.AfterCall();
}
UseFixed(instr->receiver(), LoadDescriptor::ReceiverRegister());
LOperand* name_register =
UseFixed(instr->name(), LoadDescriptor::NameRegister());
+ LOperand* slot = NULL;
+ LOperand* vector = NULL;
+ if (FLAG_vector_ics) {
+ slot = UseFixed(instr->slot(), VectorLoadICDescriptor::SlotRegister());
+ vector =
+ UseFixed(instr->vector(), VectorLoadICDescriptor::VectorRegister());
+ }
+
// Not marked as call. It can't deoptimize, and it never returns.
return new (zone()) LTailCallThroughMegamorphicCache(
- context, receiver_register, name_register);
+ context, receiver_register, name_register, slot, vector);
}
LOperand* global_object =
UseFixed(instr->global_object(), LoadDescriptor::ReceiverRegister());
LOperand* vector = NULL;
- if (FLAG_vector_ics) {
+ if (instr->HasVectorAndSlot()) {
vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
}
LOperand* object =
UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
LOperand* vector = NULL;
- if (FLAG_vector_ics) {
+ if (instr->HasVectorAndSlot()) {
vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
}
LLoadNamedGeneric* result = new(zone()) LLoadNamedGeneric(
UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
LOperand* vector = NULL;
- if (FLAG_vector_ics) {
+ if (instr->HasVectorAndSlot()) {
vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
}
LLoadKeyedGeneric* result =
class LTailCallThroughMegamorphicCache FINAL
- : public LTemplateInstruction<0, 3, 0> {
+ : public LTemplateInstruction<0, 5, 0> {
public:
- explicit LTailCallThroughMegamorphicCache(LOperand* context,
- LOperand* receiver,
- LOperand* name) {
+ LTailCallThroughMegamorphicCache(LOperand* context, LOperand* receiver,
+ LOperand* name, LOperand* slot,
+ LOperand* vector) {
inputs_[0] = context;
inputs_[1] = receiver;
inputs_[2] = name;
+ inputs_[3] = slot;
+ inputs_[4] = vector;
}
LOperand* context() { return inputs_[0]; }
LOperand* receiver() { return inputs_[1]; }
LOperand* name() { return inputs_[2]; }
+ LOperand* slot() { return inputs_[3]; }
+ LOperand* vector() { return inputs_[4]; }
DECLARE_CONCRETE_INSTRUCTION(TailCallThroughMegamorphicCache,
"tail-call-through-megamorphic-cache")
Register receiver() const { return registers_[0]; }
Register name() const { return registers_[1]; }
+ Register slot() const {
+ DCHECK(FLAG_vector_ics);
+ return VectorLoadICDescriptor::SlotRegister();
+ }
+ Register vector() const {
+ DCHECK(FLAG_vector_ics);
+ return VectorLoadICDescriptor::VectorRegister();
+ }
Register scratch1() const { return registers_[2]; }
Register scratch2() const { return registers_[3]; }
Register scratch3() const { return registers_[4]; }
}
+void PropertyHandlerCompiler::PushVectorAndSlot(Register vector,
+ Register slot) {
+ MacroAssembler* masm = this->masm();
+ __ push(vector);
+ __ push(slot);
+}
+
+
+void PropertyHandlerCompiler::PopVectorAndSlot(Register vector, Register slot) {
+ MacroAssembler* masm = this->masm();
+ __ pop(slot);
+ __ pop(vector);
+}
+
+
+void PropertyHandlerCompiler::DiscardVectorAndSlot() {
+ MacroAssembler* masm = this->masm();
+ // Remove vector and slot.
+ __ add(sp, sp, Operand(2 * kPointerSize));
+}
+
+
void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
MacroAssembler* masm, Label* miss_label, Register receiver,
Handle<Name> name, Register scratch0, Register scratch1) {
Label success;
__ b(&success);
__ bind(miss);
+ if (IC::ICUseVector(kind())) {
+ DCHECK(kind() == Code::LOAD_IC);
+ PopVectorAndSlot();
+ }
TailCallBuiltin(masm(), MissBuiltin(kind()));
__ bind(&success);
}
} else {
__ Push(holder_reg, this->name());
}
+ InterceptorVectorSlotPush(holder_reg);
// Invoke an interceptor. Note: map checks from receiver to
// interceptor's holder has been compiled before (see a caller
// of this method.)
__ Ret();
__ bind(&interceptor_failed);
+ InterceptorVectorSlotPop(holder_reg);
__ pop(this->name());
__ pop(holder_reg);
if (must_preserve_receiver_reg) {
Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
Handle<JSObject> object, Handle<Name> name,
Handle<ExecutableAccessorInfo> callback) {
- Register holder_reg = Frontend(receiver(), name);
+ Register holder_reg = Frontend(name);
__ push(receiver()); // receiver
__ push(holder_reg);
Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
Handle<PropertyCell> cell, Handle<Name> name, bool is_configurable) {
Label miss;
+ if (IC::ICUseVector(kind())) {
+ PushVectorAndSlot();
+ }
FrontendHeader(receiver(), name, &miss);
// Get the value from the cell.
Counters* counters = isolate()->counters();
__ IncrementCounter(counters->named_load_global_stub(), 1, r1, r3);
+ if (IC::ICUseVector(kind())) {
+ DiscardVectorAndSlot();
+ }
__ Ret();
FrontendFooter(name, &miss);
// The return address is in lr.
Isolate* isolate = masm->isolate();
- __ IncrementCounter(isolate->counters()->load_miss(), 1, r3, r4);
+ DCHECK(!FLAG_vector_ics ||
+ !AreAliased(r4, r5, VectorLoadICDescriptor::SlotRegister(),
+ VectorLoadICDescriptor::VectorRegister()));
+ __ IncrementCounter(isolate->counters()->load_miss(), 1, r4, r5);
LoadIC_PushArgs(masm);
// The return address is in lr.
Isolate* isolate = masm->isolate();
- __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, r3, r4);
+ DCHECK(!FLAG_vector_ics ||
+ !AreAliased(r4, r5, VectorLoadICDescriptor::SlotRegister(),
+ VectorLoadICDescriptor::VectorRegister()));
+ __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, r4, r5);
LoadIC_PushArgs(masm);
__ JumpIfNotUniqueNameInstanceType(r4, &slow);
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
- masm->isolate()->stub_cache()->GenerateProbe(masm, flags, false, receiver,
- key, r3, r4, r5, r6);
+ masm->isolate()->stub_cache()->GenerateProbe(
+ masm, Code::STORE_IC, flags, false, receiver, key, r3, r4, r5, r6);
// Cache miss.
__ b(&miss);
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
- masm->isolate()->stub_cache()->GenerateProbe(masm, flags, false, receiver,
- name, r3, r4, r5, r6);
+ masm->isolate()->stub_cache()->GenerateProbe(
+ masm, Code::STORE_IC, flags, false, receiver, name, r3, r4, r5, r6);
// Cache miss: Jump to runtime.
GenerateMiss(masm);
if (check == PROPERTY &&
(kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) {
- // In case we are compiling an IC for dictionary loads and stores, just
+ // In case we are compiling an IC for dictionary loads or stores, just
// check whether the name is unique.
if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) {
+ // Keyed loads with dictionaries shouldn't be here; they go generic.
+ // The DCHECK is to protect assumptions when --vector-ics is on.
+ DCHECK(kind() != Code::KEYED_LOAD_IC);
Register tmp = scratch1();
__ JumpIfSmi(this->name(), &miss);
__ ldr(tmp, FieldMemOperand(this->name(), HeapObject::kMapOffset));
#if V8_TARGET_ARCH_ARM
#include "src/codegen.h"
+#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
+#include "src/interface-descriptors.h"
namespace v8 {
namespace internal {
static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
- Code::Flags flags, bool leave_frame,
+ Code::Kind ic_kind, Code::Flags flags, bool leave_frame,
StubCache::Table table, Register receiver, Register name,
// Number of the cache entry, not scaled.
Register offset, Register scratch, Register scratch2,
}
-void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
- bool leave_frame, Register receiver,
- Register name, Register scratch, Register extra,
- Register extra2, Register extra3) {
+void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
+ Code::Flags flags, bool leave_frame,
+ Register receiver, Register name,
+ Register scratch, Register extra, Register extra2,
+ Register extra3) {
Isolate* isolate = masm->isolate();
Label miss;
DCHECK(Code::ExtractTypeFromFlags(flags) == 0);
// Make sure that there are no register conflicts.
- DCHECK(!scratch.is(receiver));
- DCHECK(!scratch.is(name));
- DCHECK(!extra.is(receiver));
- DCHECK(!extra.is(name));
- DCHECK(!extra.is(scratch));
- DCHECK(!extra2.is(receiver));
- DCHECK(!extra2.is(name));
- DCHECK(!extra2.is(scratch));
- DCHECK(!extra2.is(extra));
+ DCHECK(!AreAliased(receiver, name, scratch, extra, extra2, extra3));
// Check scratch, extra and extra2 registers are valid.
DCHECK(!scratch.is(no_reg));
DCHECK(!extra2.is(no_reg));
DCHECK(!extra3.is(no_reg));
+#ifdef DEBUG
+ // If vector-based ics are in use, ensure that scratch, extra, extra2 and
+ // extra3 don't conflict with the vector and slot registers, which need
+ // to be preserved for a handler call or miss.
+ if (IC::ICUseVector(ic_kind)) {
+ Register vector = VectorLoadICDescriptor::VectorRegister();
+ Register slot = VectorLoadICDescriptor::SlotRegister();
+ DCHECK(!AreAliased(vector, slot, scratch, extra, extra2, extra3));
+ }
+#endif
+
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1, extra2,
extra3);
__ and_(scratch, scratch, Operand(mask));
// Probe the primary table.
- ProbeTable(isolate, masm, flags, leave_frame, kPrimary, receiver, name,
- scratch, extra, extra2, extra3);
+ ProbeTable(isolate, masm, ic_kind, flags, leave_frame, kPrimary, receiver,
+ name, scratch, extra, extra2, extra3);
// Primary miss: Compute hash for secondary probe.
__ sub(scratch, scratch, Operand(name, LSR, kCacheIndexShift));
__ and_(scratch, scratch, Operand(mask2));
// Probe the secondary table.
- ProbeTable(isolate, masm, flags, leave_frame, kSecondary, receiver, name,
- scratch, extra, extra2, extra3);
+ ProbeTable(isolate, masm, ic_kind, flags, leave_frame, kSecondary, receiver,
+ name, scratch, extra, extra2, extra3);
// Cache miss: Fall-through and let caller handle the miss by
// entering the runtime system.
#define __ ACCESS_MASM(masm)
+void PropertyHandlerCompiler::PushVectorAndSlot(Register vector,
+ Register slot) {
+ MacroAssembler* masm = this->masm();
+ __ Push(vector);
+ __ Push(slot);
+}
+
+
+void PropertyHandlerCompiler::PopVectorAndSlot(Register vector, Register slot) {
+ MacroAssembler* masm = this->masm();
+ __ Pop(slot);
+ __ Pop(vector);
+}
+
+
+void PropertyHandlerCompiler::DiscardVectorAndSlot() {
+ MacroAssembler* masm = this->masm();
+ // Remove vector and slot.
+ __ Drop(2);
+}
+
void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
MacroAssembler* masm, Label* miss_label, Register receiver,
Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
Handle<PropertyCell> cell, Handle<Name> name, bool is_configurable) {
Label miss;
+ if (IC::ICUseVector(kind())) {
+ PushVectorAndSlot();
+ }
FrontendHeader(receiver(), name, &miss);
// Get the value from the cell.
Counters* counters = isolate()->counters();
__ IncrementCounter(counters->named_load_global_stub(), 1, x1, x3);
+ if (IC::ICUseVector(kind())) {
+ DiscardVectorAndSlot();
+ }
__ Ret();
FrontendFooter(name, &miss);
__ B(&success);
__ Bind(miss);
+ if (IC::ICUseVector(kind())) {
+ DCHECK(kind() == Code::LOAD_IC);
+ PopVectorAndSlot();
+ }
TailCallBuiltin(masm(), MissBuiltin(kind()));
__ Bind(&success);
} else {
__ Push(holder_reg, this->name());
}
+ InterceptorVectorSlotPush(holder_reg);
// Invoke an interceptor. Note: map checks from receiver to
// interceptor's holder has been compiled before (see a caller
// of this method.)
__ Ret();
__ Bind(&interceptor_failed);
+ InterceptorVectorSlotPop(holder_reg);
if (must_preserve_receiver_reg) {
__ Pop(this->name(), holder_reg, receiver());
} else {
Handle<JSObject> object, Handle<Name> name,
Handle<ExecutableAccessorInfo> callback) {
ASM_LOCATION("NamedStoreHandlerCompiler::CompileStoreCallback");
- Register holder_reg = Frontend(receiver(), name);
+ Register holder_reg = Frontend(name);
// Stub never generated for non-global objects that require access checks.
DCHECK(holder()->IsJSGlobalProxy() || !holder()->IsAccessCheckNeeded());
Isolate* isolate = masm->isolate();
ASM_LOCATION("LoadIC::GenerateMiss");
- __ IncrementCounter(isolate->counters()->load_miss(), 1, x3, x4);
+ DCHECK(!FLAG_vector_ics ||
+ !AreAliased(x4, x5, VectorLoadICDescriptor::SlotRegister(),
+ VectorLoadICDescriptor::VectorRegister()));
+ __ IncrementCounter(isolate->counters()->load_miss(), 1, x4, x5);
// Perform tail call to the entry.
if (FLAG_vector_ics) {
// The return address is in lr.
Isolate* isolate = masm->isolate();
+ DCHECK(!FLAG_vector_ics ||
+ !AreAliased(x10, x11, VectorLoadICDescriptor::SlotRegister(),
+ VectorLoadICDescriptor::VectorRegister()));
__ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, x10, x11);
if (FLAG_vector_ics) {
__ JumpIfNotUniqueNameInstanceType(x10, &slow);
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
- masm->isolate()->stub_cache()->GenerateProbe(masm, flags, false, receiver,
- key, x3, x4, x5, x6);
+ masm->isolate()->stub_cache()->GenerateProbe(
+ masm, Code::STORE_IC, flags, false, receiver, key, x3, x4, x5, x6);
// Cache miss.
__ B(&miss);
// Probe the stub cache.
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
- masm->isolate()->stub_cache()->GenerateProbe(masm, flags, false, receiver,
- name, x3, x4, x5, x6);
+ masm->isolate()->stub_cache()->GenerateProbe(
+ masm, Code::STORE_IC, flags, false, receiver, name, x3, x4, x5, x6);
// Cache miss: Jump to runtime.
GenerateMiss(masm);
if (check == PROPERTY &&
(kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) {
- // In case we are compiling an IC for dictionary loads and stores, just
+ // In case we are compiling an IC for dictionary loads or stores, just
// check whether the name is unique.
if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) {
+ // Keyed loads with dictionaries shouldn't be here; they go generic.
+ // The DCHECK is to protect assumptions when --vector-ics is on.
+ DCHECK(kind() != Code::KEYED_LOAD_IC);
Register tmp = scratch1();
__ JumpIfSmi(this->name(), &miss);
__ Ldr(tmp, FieldMemOperand(this->name(), HeapObject::kMapOffset));
#if V8_TARGET_ARCH_ARM64
#include "src/codegen.h"
+#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
+#include "src/interface-descriptors.h"
namespace v8 {
namespace internal {
//
// 'receiver', 'name' and 'offset' registers are preserved on miss.
static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
- Code::Flags flags, bool leave_frame,
+ Code::Kind ic_kind, Code::Flags flags, bool leave_frame,
StubCache::Table table, Register receiver, Register name,
Register offset, Register scratch, Register scratch2,
Register scratch3) {
}
-void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
- bool leave_frame, Register receiver,
- Register name, Register scratch, Register extra,
- Register extra2, Register extra3) {
+void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
+ Code::Flags flags, bool leave_frame,
+ Register receiver, Register name,
+ Register scratch, Register extra, Register extra2,
+ Register extra3) {
Isolate* isolate = masm->isolate();
Label miss;
DCHECK(!extra2.is(no_reg));
DCHECK(!extra3.is(no_reg));
+#ifdef DEBUG
+ // If vector-based ics are in use, ensure that scratch, extra, extra2 and
+ // extra3 don't conflict with the vector and slot registers, which need
+ // to be preserved for a handler call or miss.
+ if (IC::ICUseVector(ic_kind)) {
+ Register vector = VectorLoadICDescriptor::VectorRegister();
+ Register slot = VectorLoadICDescriptor::SlotRegister();
+ DCHECK(!AreAliased(vector, slot, scratch, extra, extra2, extra3));
+ }
+#endif
+
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1, extra2,
extra3);
CountTrailingZeros(kPrimaryTableSize, 64));
// Probe the primary table.
- ProbeTable(isolate, masm, flags, leave_frame, kPrimary, receiver, name,
- scratch, extra, extra2, extra3);
+ ProbeTable(isolate, masm, ic_kind, flags, leave_frame, kPrimary, receiver,
+ name, scratch, extra, extra2, extra3);
// Primary miss: Compute hash for secondary table.
__ Sub(scratch, scratch, Operand(name, LSR, kCacheIndexShift));
__ And(scratch, scratch, kSecondaryTableSize - 1);
// Probe the secondary table.
- ProbeTable(isolate, masm, flags, leave_frame, kSecondary, receiver, name,
- scratch, extra, extra2, extra3);
+ ProbeTable(isolate, masm, ic_kind, flags, leave_frame, kSecondary, receiver,
+ name, scratch, extra, extra2, extra3);
// Cache miss: Fall-through and let caller handle the miss by
// entering the runtime system.
}
-Register PropertyHandlerCompiler::Frontend(Register object_reg,
- Handle<Name> name) {
+Register PropertyHandlerCompiler::Frontend(Handle<Name> name) {
Label miss;
- Register reg = FrontendHeader(object_reg, name, &miss);
+ if (IC::ICUseVector(kind())) {
+ PushVectorAndSlot();
+ }
+ Register reg = FrontendHeader(receiver(), name, &miss);
FrontendFooter(name, &miss);
+ // The footer consumes the vector and slot from the stack if a miss occurs.
+ if (IC::ICUseVector(kind())) {
+ DiscardVectorAndSlot();
+ }
return reg;
}
Handle<Code> NamedLoadHandlerCompiler::CompileLoadField(Handle<Name> name,
FieldIndex field) {
- Register reg = Frontend(receiver(), name);
+ Register reg = Frontend(name);
__ Move(receiver(), reg);
LoadFieldStub stub(isolate(), field);
GenerateTailCall(masm(), stub.GetCode());
Handle<Code> NamedLoadHandlerCompiler::CompileLoadConstant(Handle<Name> name,
int constant_index) {
- Register reg = Frontend(receiver(), name);
+ Register reg = Frontend(name);
__ Move(receiver(), reg);
LoadConstantStub stub(isolate(), constant_index);
GenerateTailCall(masm(), stub.GetCode());
Handle<Code> NamedLoadHandlerCompiler::CompileLoadNonexistent(
Handle<Name> name) {
Label miss;
+ if (IC::ICUseVector(kind())) {
+ DCHECK(kind() == Code::LOAD_IC);
+ PushVectorAndSlot();
+ }
NonexistentFrontendHeader(name, &miss, scratch2(), scratch3());
+ if (IC::ICUseVector(kind())) {
+ DiscardVectorAndSlot();
+ }
GenerateLoadConstant(isolate()->factory()->undefined_value());
FrontendFooter(name, &miss);
return GetCode(kind(), Code::FAST, name);
Handle<Code> NamedLoadHandlerCompiler::CompileLoadCallback(
Handle<Name> name, Handle<ExecutableAccessorInfo> callback) {
- Register reg = Frontend(receiver(), name);
+ Register reg = Frontend(name);
GenerateLoadCallback(reg, callback);
return GetCode(kind(), Code::FAST, name);
}
Handle<Code> NamedLoadHandlerCompiler::CompileLoadCallback(
Handle<Name> name, const CallOptimization& call_optimization) {
DCHECK(call_optimization.is_simple_api_call());
- Frontend(receiver(), name);
+ Frontend(name);
Handle<Map> receiver_map = IC::TypeToMap(*type(), isolate());
GenerateFastApiCall(masm(), call_optimization, receiver_map, receiver(),
scratch1(), false, 0, NULL);
}
+void NamedLoadHandlerCompiler::InterceptorVectorSlotPush(Register holder_reg) {
+ if (IC::ICUseVector(kind())) {
+ if (holder_reg.is(receiver())) {
+ PushVectorAndSlot();
+ } else {
+ DCHECK(holder_reg.is(scratch1()));
+ PushVectorAndSlot(scratch2(), scratch3());
+ }
+ }
+}
+
+
+void NamedLoadHandlerCompiler::InterceptorVectorSlotPop(Register holder_reg,
+ PopMode mode) {
+ if (IC::ICUseVector(kind())) {
+ if (mode == DISCARD) {
+ DiscardVectorAndSlot();
+ } else {
+ if (holder_reg.is(receiver())) {
+ PopVectorAndSlot();
+ } else {
+ DCHECK(holder_reg.is(scratch1()));
+ PopVectorAndSlot(scratch2(), scratch3());
+ }
+ }
+ }
+}
+
+
Handle<Code> NamedLoadHandlerCompiler::CompileLoadInterceptor(
LookupIterator* it) {
// So far the most popular follow ups for interceptor loads are FIELD and
}
}
- Register reg = Frontend(receiver(), it->name());
+ Label miss;
+ InterceptorVectorSlotPush(receiver());
+ Register reg = FrontendHeader(receiver(), it->name(), &miss);
+ FrontendFooter(it->name(), &miss);
+ InterceptorVectorSlotPop(reg);
+
if (inline_followup) {
// TODO(368): Compile in the whole chain: all the interceptors in
// prototypes and ultimate answer.
set_type_for_object(holder());
set_holder(real_named_property_holder);
- Register reg = Frontend(interceptor_reg, it->name());
+
+ Label miss;
+ InterceptorVectorSlotPush(interceptor_reg);
+ Register reg = FrontendHeader(interceptor_reg, it->name(), &miss);
+ FrontendFooter(it->name(), &miss);
+ // We discard the vector and slot now because we don't miss below this point.
+ InterceptorVectorSlotPop(reg, DISCARD);
switch (it->state()) {
case LookupIterator::ACCESS_CHECK:
Handle<Code> NamedLoadHandlerCompiler::CompileLoadViaGetter(
Handle<Name> name, Handle<JSFunction> getter) {
- Frontend(receiver(), name);
+ Frontend(name);
GenerateLoadViaGetter(masm(), type(), receiver(), getter);
return GetCode(kind(), Code::FAST, name);
}
Handle<Code> NamedStoreHandlerCompiler::CompileStoreViaSetter(
Handle<JSObject> object, Handle<Name> name, Handle<JSFunction> setter) {
- Frontend(receiver(), name);
+ Frontend(name);
GenerateStoreViaSetter(masm(), type(), receiver(), setter);
return GetCode(kind(), Code::FAST, name);
Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
Handle<JSObject> object, Handle<Name> name,
const CallOptimization& call_optimization) {
- Frontend(receiver(), name);
+ Frontend(name);
Register values[] = {value()};
GenerateFastApiCall(masm(), call_optimization, handle(object->map()),
receiver(), scratch1(), true, 1, values);
virtual void FrontendFooter(Handle<Name> name, Label* miss) { UNREACHABLE(); }
- Register Frontend(Register object_reg, Handle<Name> name);
+ // Frontend loads from receiver(), returns holder register which may be
+ // different.
+ Register Frontend(Handle<Name> name);
void NonexistentFrontendHeader(Handle<Name> name, Label* miss,
Register scratch1, Register scratch2);
+ // When FLAG_vector_ics is true, handlers that may miss need to save the
+ // vector and slot registers and pass them to the miss handler.
+ void PushVectorAndSlot() { PushVectorAndSlot(vector(), slot()); }
+ void PushVectorAndSlot(Register vector, Register slot);
+ void PopVectorAndSlot() { PopVectorAndSlot(vector(), slot()); }
+ void PopVectorAndSlot(Register vector, Register slot);
+
+ void DiscardVectorAndSlot();
+
// TODO(verwaest): Make non-static.
static void GenerateFastApiCall(MacroAssembler* masm,
const CallOptimization& optimization,
Handle<ExecutableAccessorInfo> callback);
void GenerateLoadCallback(const CallOptimization& call_optimization,
Handle<Map> receiver_map);
+
+ // These helpers emit no code if vector-ics are disabled.
+ void InterceptorVectorSlotPush(Register holder_reg);
+ enum PopMode { POP, DISCARD };
+ void InterceptorVectorSlotPop(Register holder_reg, PopMode mode = POP);
+
void GenerateLoadInterceptor(Register holder_reg);
void GenerateLoadInterceptorWithFollowup(LookupIterator* it,
Register holder_reg);
}
+void PropertyHandlerCompiler::PushVectorAndSlot(Register vector,
+ Register slot) {
+ MacroAssembler* masm = this->masm();
+ __ push(vector);
+ __ push(slot);
+}
+
+
+void PropertyHandlerCompiler::PopVectorAndSlot(Register vector, Register slot) {
+ MacroAssembler* masm = this->masm();
+ __ pop(slot);
+ __ pop(vector);
+}
+
+
+void PropertyHandlerCompiler::DiscardVectorAndSlot() {
+ MacroAssembler* masm = this->masm();
+ // Remove vector and slot.
+ __ add(esp, Immediate(2 * kPointerSize));
+}
+
+
void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
MacroAssembler* masm, Label* miss_label, Register receiver,
Handle<Name> name, Register scratch0, Register scratch1) {
void NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(
MacroAssembler* masm, Register receiver, Register scratch1,
Register scratch2, Label* miss_label) {
+ DCHECK(!FLAG_vector_ics);
__ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label);
__ mov(eax, scratch1);
__ ret(0);
Label success;
__ jmp(&success);
__ bind(miss);
+ if (IC::ICUseVector(kind())) {
+ DCHECK(kind() == Code::LOAD_IC);
+ PopVectorAndSlot();
+ }
TailCallBuiltin(masm(), MissBuiltin(kind()));
__ bind(&success);
}
}
__ push(holder_reg);
__ push(this->name());
-
+ InterceptorVectorSlotPush(holder_reg);
// Invoke an interceptor. Note: map checks from receiver to
// interceptor's holder has been compiled before (see a caller
// of this method.)
__ mov(this->name(), Immediate(bit_cast<int32_t>(kZapValue)));
}
+ InterceptorVectorSlotPop(holder_reg);
__ pop(this->name());
__ pop(holder_reg);
if (must_preserve_receiver_reg) {
Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
Handle<JSObject> object, Handle<Name> name,
Handle<ExecutableAccessorInfo> callback) {
- Register holder_reg = Frontend(receiver(), name);
+ Register holder_reg = Frontend(name);
__ pop(scratch1()); // remove the return address
__ push(receiver());
Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
Handle<PropertyCell> cell, Handle<Name> name, bool is_configurable) {
Label miss;
-
+ if (IC::ICUseVector(kind())) {
+ PushVectorAndSlot();
+ }
FrontendHeader(receiver(), name, &miss);
// Get the value from the cell.
Register result = StoreDescriptor::ValueRegister();
Counters* counters = isolate()->counters();
__ IncrementCounter(counters->named_load_global_stub(), 1);
// The code above already loads the result into the return register.
+ if (IC::ICUseVector(kind())) {
+ DiscardVectorAndSlot();
+ }
__ ret(0);
FrontendFooter(name, &miss);
Label miss;
if (check == PROPERTY &&
- (kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) {
- // In case we are compiling an IC for dictionary loads and stores, just
+ (kind() == Code::KEYED_STORE_IC || kind() == Code::KEYED_LOAD_IC)) {
+ // In case we are compiling an IC for dictionary loads or stores, just
// check whether the name is unique.
if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) {
+ // Keyed loads with dictionaries shouldn't be here, they go generic.
+ // The DCHECK is to protect assumptions when --vector-ics is on.
+ DCHECK(kind() != Code::KEYED_LOAD_IC);
Register tmp = scratch1();
__ JumpIfSmi(this->name(), &miss);
__ mov(tmp, FieldOperand(this->name(), HeapObject::kMapOffset));
__ JumpIfNotUniqueNameInstanceType(ebx, &slow);
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
- masm->isolate()->stub_cache()->GenerateProbe(masm, flags, false, receiver,
- key, ebx, no_reg);
+ masm->isolate()->stub_cache()->GenerateProbe(
+ masm, Code::STORE_IC, flags, false, receiver, key, ebx, no_reg);
// Cache miss.
__ jmp(&miss);
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
masm->isolate()->stub_cache()->GenerateProbe(
- masm, flags, false, StoreDescriptor::ReceiverRegister(),
+ masm, Code::STORE_IC, flags, false, StoreDescriptor::ReceiverRegister(),
StoreDescriptor::NameRegister(), ebx, no_reg);
// Cache miss: Jump to runtime.
#if V8_TARGET_ARCH_IA32
#include "src/codegen.h"
+#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
+#include "src/interface-descriptors.h"
namespace v8 {
namespace internal {
static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
- Code::Flags flags, bool leave_frame,
+ Code::Kind ic_kind, Code::Flags flags, bool leave_frame,
StubCache::Table table, Register name, Register receiver,
// Number of the cache entry pointer-size scaled.
Register offset, Register extra) {
}
#endif
+ if (IC::ICUseVector(ic_kind)) {
+ // The vector and slot were pushed onto the stack before starting the
+ // probe, and need to be dropped before calling the handler.
+ __ pop(VectorLoadICDescriptor::VectorRegister());
+ __ pop(VectorLoadICDescriptor::SlotRegister());
+ }
+
if (leave_frame) __ leave();
// Jump to the first instruction in the code stub.
__ pop(offset);
__ mov(offset, Operand::StaticArray(offset, times_1, value_offset));
+ if (IC::ICUseVector(ic_kind)) {
+ // The vector and slot were pushed onto the stack before starting the
+ // probe, and need to be dropped before calling the handler.
+ Register vector = VectorLoadICDescriptor::VectorRegister();
+ Register slot = VectorLoadICDescriptor::SlotRegister();
+ DCHECK(!offset.is(vector) && !offset.is(slot));
+
+ __ pop(vector);
+ __ pop(slot);
+ }
+
if (leave_frame) __ leave();
// Jump to the first instruction in the code stub.
}
-void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
- bool leave_frame, Register receiver,
- Register name, Register scratch, Register extra,
- Register extra2, Register extra3) {
+void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
+ Code::Flags flags, bool leave_frame,
+ Register receiver, Register name,
+ Register scratch, Register extra, Register extra2,
+ Register extra3) {
Label miss;
// Assert that code is valid. The multiplying code relies on the entry size
DCHECK(kCacheIndexShift == kPointerSizeLog2);
// Probe the primary table.
- ProbeTable(isolate(), masm, flags, leave_frame, kPrimary, name, receiver,
- offset, extra);
+ ProbeTable(isolate(), masm, ic_kind, flags, leave_frame, kPrimary, name,
+ receiver, offset, extra);
// Primary miss: Compute hash for secondary probe.
__ mov(offset, FieldOperand(name, Name::kHashFieldOffset));
__ and_(offset, (kSecondaryTableSize - 1) << kCacheIndexShift);
// Probe the secondary table.
- ProbeTable(isolate(), masm, flags, leave_frame, kSecondary, name, receiver,
- offset, extra);
+ ProbeTable(isolate(), masm, ic_kind, flags, leave_frame, kSecondary, name,
+ receiver, offset, extra);
// Cache miss: Fall-through and let caller handle the miss by
// entering the runtime system.
nexus->ConfigureGeneric();
} else if (new_state == PREMONOMORPHIC) {
nexus->ConfigurePremonomorphic();
- } else if (new_state == MEGAMORPHIC) {
- nexus->ConfigureMegamorphic();
} else {
UNREACHABLE();
}
CopyICToMegamorphicCache(name);
}
if (UseVector()) {
- ConfigureVectorState(MEGAMORPHIC);
+ ConfigureVectorState(kind() == Code::KEYED_LOAD_IC ? GENERIC
+ : MEGAMORPHIC);
} else {
set_target(*megamorphic_stub());
}
Handle<Smi> slot = args.at<Smi>(2);
Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(3);
FeedbackVectorICSlot vector_slot = vector->ToICSlot(slot->value());
- LoadICNexus nexus(vector, vector_slot);
- LoadIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
- ic.UpdateState(receiver, key);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, ic.Load(receiver, key));
+ // A monomorphic or polymorphic KeyedLoadIC with a string key can call the
+ // LoadIC miss handler if the handler misses. Since the vector Nexus is
+ // set up outside the IC, handle that here.
+ if (vector->GetKind(vector_slot) == Code::LOAD_IC) {
+ LoadICNexus nexus(vector, vector_slot);
+ LoadIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
+ ic.UpdateState(receiver, key);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
+ ic.Load(receiver, key));
+ } else {
+ DCHECK(vector->GetKind(vector_slot) == Code::KEYED_LOAD_IC);
+ KeyedLoadICNexus nexus(vector, vector_slot);
+ KeyedLoadIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
+ ic.UpdateState(receiver, key);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
+ ic.Load(receiver, key));
+ }
} else {
DCHECK(args.length() == 2);
LoadIC ic(IC::NO_EXTRA_FRAME, isolate);
Handle<Smi> slot = args.at<Smi>(2);
Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(3);
FeedbackVectorICSlot vector_slot = vector->ToICSlot(slot->value());
- LoadICNexus nexus(vector, vector_slot);
- LoadIC ic(IC::EXTRA_CALL_FRAME, isolate, &nexus);
- ic.UpdateState(receiver, key);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, ic.Load(receiver, key));
+ // A monomorphic or polymorphic KeyedLoadIC with a string key can call the
+ // LoadIC miss handler if the handler misses. Since the vector Nexus is
+ // set up outside the IC, handle that here.
+ if (vector->GetKind(vector_slot) == Code::LOAD_IC) {
+ LoadICNexus nexus(vector, vector_slot);
+ LoadIC ic(IC::EXTRA_CALL_FRAME, isolate, &nexus);
+ ic.UpdateState(receiver, key);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
+ ic.Load(receiver, key));
+ } else {
+ DCHECK(vector->GetKind(vector_slot) == Code::KEYED_LOAD_IC);
+ KeyedLoadICNexus nexus(vector, vector_slot);
+ KeyedLoadIC ic(IC::EXTRA_CALL_FRAME, isolate, &nexus);
+ ic.UpdateState(receiver, key);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
+ ic.Load(receiver, key));
+ }
} else {
DCHECK(args.length() == 2);
LoadIC ic(IC::EXTRA_CALL_FRAME, isolate);
static Handle<HeapType> CurrentTypeOf(Handle<Object> object,
Isolate* isolate);
+  // Returns true when an IC of the given kind consults a type feedback
+  // vector: LOAD_IC / KEYED_LOAD_IC only behind --vector-ics, CALL_IC
+  // unconditionally.
+  static bool ICUseVector(Code::Kind kind) {
+    return (FLAG_vector_ics &&
+            (kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC)) ||
+           kind == Code::CALL_IC;
+  }
+
protected:
// Get the call-site target; used for determining the state.
Handle<Code> target() const { return target_; }
inline void set_target(Code* code);
bool is_target_set() { return target_set_; }
- static bool ICUseVector(Code::Kind kind) {
- return (FLAG_vector_ics &&
- (kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC)) ||
- kind == Code::CALL_IC;
- }
-
bool UseVector() const {
bool use = ICUseVector(kind());
// If we are supposed to use the nexus, verify the nexus is non-null.
}
+// Spills the type feedback vector and slot registers to the stack so they
+// survive code that may clobber them; restore with PopVectorAndSlot().
+void PropertyHandlerCompiler::PushVectorAndSlot(Register vector,
+                                                Register slot) {
+  MacroAssembler* masm = this->masm();
+  __ Push(vector, slot);
+}
+
+
+// Restores the registers saved by PushVectorAndSlot() via the
+// multi-register Pop macro (assumed to mirror Push(vector, slot)
+// ordering — confirm against the MIPS MacroAssembler).
+void PropertyHandlerCompiler::PopVectorAndSlot(Register vector, Register slot) {
+  MacroAssembler* masm = this->masm();
+  __ Pop(vector, slot);
+}
+
+
+// Drops the saved vector and slot from the stack without restoring the
+// registers (for paths that succeed and no longer need them).
+void PropertyHandlerCompiler::DiscardVectorAndSlot() {
+  MacroAssembler* masm = this->masm();
+  // Remove vector and slot.
+  __ Addu(sp, sp, Operand(2 * kPointerSize));
+}
+
+
void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
MacroAssembler* masm, Label* miss_label, Register receiver,
Handle<Name> name, Register scratch0, Register scratch1) {
Label success;
__ Branch(&success);
__ bind(miss);
+ if (IC::ICUseVector(kind())) {
+ DCHECK(kind() == Code::LOAD_IC);
+ PopVectorAndSlot();
+ }
TailCallBuiltin(masm(), MissBuiltin(kind()));
__ bind(&success);
}
} else {
__ Push(holder_reg, this->name());
}
+ InterceptorVectorSlotPush(holder_reg);
// Invoke an interceptor. Note: map checks from receiver to
// interceptor's holder has been compiled before (see a caller
// of this method).
__ Ret();
__ bind(&interceptor_failed);
+ InterceptorVectorSlotPop(holder_reg);
if (must_preserve_receiver_reg) {
__ Pop(receiver(), holder_reg, this->name());
} else {
Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
Handle<JSObject> object, Handle<Name> name,
Handle<ExecutableAccessorInfo> callback) {
- Register holder_reg = Frontend(receiver(), name);
+ Register holder_reg = Frontend(name);
__ Push(receiver(), holder_reg); // Receiver.
__ li(at, Operand(callback)); // Callback info.
Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
Handle<PropertyCell> cell, Handle<Name> name, bool is_configurable) {
Label miss;
+ if (IC::ICUseVector(kind())) {
+ PushVectorAndSlot();
+ }
FrontendHeader(receiver(), name, &miss);
Counters* counters = isolate()->counters();
__ IncrementCounter(counters->named_load_global_stub(), 1, a1, a3);
+ if (IC::ICUseVector(kind())) {
+ DiscardVectorAndSlot();
+ }
__ Ret(USE_DELAY_SLOT);
__ mov(v0, result);
if (check == PROPERTY &&
(kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) {
- // In case we are compiling an IC for dictionary loads and stores, just
+ // In case we are compiling an IC for dictionary loads or stores, just
// check whether the name is unique.
if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) {
+ // Keyed loads with dictionaries shouldn't be here, they go generic.
+ // The DCHECK is to protect assumptions when --vector-ics is on.
+ DCHECK(kind() != Code::KEYED_LOAD_IC);
Register tmp = scratch1();
__ JumpIfSmi(this->name(), &miss);
__ lw(tmp, FieldMemOperand(this->name(), HeapObject::kMapOffset));
// The return address is in ra.
Isolate* isolate = masm->isolate();
- __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, a3, t0);
+ DCHECK(!FLAG_vector_ics ||
+ !AreAliased(t0, t1, VectorLoadICDescriptor::SlotRegister(),
+ VectorLoadICDescriptor::VectorRegister()));
+ __ IncrementCounter(isolate->counters()->load_miss(), 1, t0, t1);
LoadIC_PushArgs(masm);
// The return address is in ra.
Isolate* isolate = masm->isolate();
- __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, a3, t0);
+ DCHECK(!FLAG_vector_ics ||
+ !AreAliased(t0, t1, VectorLoadICDescriptor::SlotRegister(),
+ VectorLoadICDescriptor::VectorRegister()));
+ __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, t0, t1);
LoadIC_PushArgs(masm);
__ JumpIfNotUniqueNameInstanceType(t0, &slow);
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
- masm->isolate()->stub_cache()->GenerateProbe(masm, flags, false, receiver,
- key, a3, t0, t1, t2);
+ masm->isolate()->stub_cache()->GenerateProbe(
+ masm, Code::STORE_IC, flags, false, receiver, key, a3, t0, t1, t2);
// Cache miss.
__ Branch(&miss);
// Get the receiver from the stack and probe the stub cache.
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
- masm->isolate()->stub_cache()->GenerateProbe(masm, flags, false, receiver,
- name, a3, t0, t1, t2);
+ masm->isolate()->stub_cache()->GenerateProbe(
+ masm, Code::STORE_IC, flags, false, receiver, name, a3, t0, t1, t2);
// Cache miss: Jump to runtime.
GenerateMiss(masm);
#if V8_TARGET_ARCH_MIPS
#include "src/codegen.h"
+#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
+#include "src/interface-descriptors.h"
namespace v8 {
namespace internal {
static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
- Code::Flags flags, bool leave_frame,
+ Code::Kind ic_kind, Code::Flags flags, bool leave_frame,
StubCache::Table table, Register receiver, Register name,
// Number of the cache entry, not scaled.
Register offset, Register scratch, Register scratch2,
}
-void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
- bool leave_frame, Register receiver,
- Register name, Register scratch, Register extra,
- Register extra2, Register extra3) {
+void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
+ Code::Flags flags, bool leave_frame,
+ Register receiver, Register name,
+ Register scratch, Register extra, Register extra2,
+ Register extra3) {
Isolate* isolate = masm->isolate();
Label miss;
DCHECK(Code::ExtractTypeFromFlags(flags) == 0);
// Make sure that there are no register conflicts.
- DCHECK(!scratch.is(receiver));
- DCHECK(!scratch.is(name));
- DCHECK(!extra.is(receiver));
- DCHECK(!extra.is(name));
- DCHECK(!extra.is(scratch));
- DCHECK(!extra2.is(receiver));
- DCHECK(!extra2.is(name));
- DCHECK(!extra2.is(scratch));
- DCHECK(!extra2.is(extra));
+ DCHECK(!AreAliased(receiver, name, scratch, extra, extra2, extra3));
// Check register validity.
DCHECK(!scratch.is(no_reg));
DCHECK(!extra2.is(no_reg));
DCHECK(!extra3.is(no_reg));
+#ifdef DEBUG
+ // If vector-based ics are in use, ensure that scratch, extra, extra2 and
+ // extra3 don't conflict with the vector and slot registers, which need
+ // to be preserved for a handler call or miss.
+ if (IC::ICUseVector(ic_kind)) {
+ Register vector = VectorLoadICDescriptor::VectorRegister();
+ Register slot = VectorLoadICDescriptor::SlotRegister();
+ DCHECK(!AreAliased(vector, slot, scratch, extra, extra2, extra3));
+ }
+#endif
+
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1, extra2,
extra3);
__ And(scratch, scratch, Operand(mask));
// Probe the primary table.
- ProbeTable(isolate, masm, flags, leave_frame, kPrimary, receiver, name,
- scratch, extra, extra2, extra3);
+ ProbeTable(isolate, masm, ic_kind, flags, leave_frame, kPrimary, receiver,
+ name, scratch, extra, extra2, extra3);
// Primary miss: Compute hash for secondary probe.
__ srl(at, name, kCacheIndexShift);
__ And(scratch, scratch, Operand(mask2));
// Probe the secondary table.
- ProbeTable(isolate, masm, flags, leave_frame, kSecondary, receiver, name,
- scratch, extra, extra2, extra3);
+ ProbeTable(isolate, masm, ic_kind, flags, leave_frame, kSecondary, receiver,
+ name, scratch, extra, extra2, extra3);
// Cache miss: Fall-through and let caller handle the miss by
// entering the runtime system.
}
+// Spills the type feedback vector and slot registers to the stack so they
+// survive code that may clobber them; restore with PopVectorAndSlot().
+void PropertyHandlerCompiler::PushVectorAndSlot(Register vector,
+                                                Register slot) {
+  MacroAssembler* masm = this->masm();
+  __ Push(vector, slot);
+}
+
+
+// Restores the registers saved by PushVectorAndSlot() via the
+// multi-register Pop macro (assumed to mirror Push(vector, slot)
+// ordering — confirm against the MIPS64 MacroAssembler).
+void PropertyHandlerCompiler::PopVectorAndSlot(Register vector, Register slot) {
+  MacroAssembler* masm = this->masm();
+  __ Pop(vector, slot);
+}
+
+
+// Drops the saved vector and slot from the stack without restoring the
+// registers (for paths that succeed and no longer need them).
+void PropertyHandlerCompiler::DiscardVectorAndSlot() {
+  MacroAssembler* masm = this->masm();
+  // Remove vector and slot.
+  __ Daddu(sp, sp, Operand(2 * kPointerSize));
+}
+
+
void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
MacroAssembler* masm, Label* miss_label, Register receiver,
Handle<Name> name, Register scratch0, Register scratch1) {
Label success;
__ Branch(&success);
__ bind(miss);
+ if (IC::ICUseVector(kind())) {
+ DCHECK(kind() == Code::LOAD_IC);
+ PopVectorAndSlot();
+ }
TailCallBuiltin(masm(), MissBuiltin(kind()));
__ bind(&success);
}
} else {
__ Push(holder_reg, this->name());
}
+ InterceptorVectorSlotPush(holder_reg);
// Invoke an interceptor. Note: map checks from receiver to
// interceptor's holder has been compiled before (see a caller
// of this method).
__ Ret();
__ bind(&interceptor_failed);
+ InterceptorVectorSlotPop(holder_reg);
if (must_preserve_receiver_reg) {
__ Pop(receiver(), holder_reg, this->name());
} else {
Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
Handle<JSObject> object, Handle<Name> name,
Handle<ExecutableAccessorInfo> callback) {
- Register holder_reg = Frontend(receiver(), name);
+ Register holder_reg = Frontend(name);
__ Push(receiver(), holder_reg); // Receiver.
__ li(at, Operand(callback)); // Callback info.
Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
Handle<PropertyCell> cell, Handle<Name> name, bool is_configurable) {
Label miss;
+ if (IC::ICUseVector(kind())) {
+ PushVectorAndSlot();
+ }
FrontendHeader(receiver(), name, &miss);
Counters* counters = isolate()->counters();
__ IncrementCounter(counters->named_load_global_stub(), 1, a1, a3);
+ if (IC::ICUseVector(kind())) {
+ DiscardVectorAndSlot();
+ }
__ Ret(USE_DELAY_SLOT);
__ mov(v0, result);
if (check == PROPERTY &&
(kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) {
- // In case we are compiling an IC for dictionary loads and stores, just
+ // In case we are compiling an IC for dictionary loads or stores, just
// check whether the name is unique.
if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) {
+ // Keyed loads with dictionaries shouldn't be here, they go generic.
+ // The DCHECK is to protect assumptions when --vector-ics is on.
+ DCHECK(kind() != Code::KEYED_LOAD_IC);
Register tmp = scratch1();
__ JumpIfSmi(this->name(), &miss);
__ ld(tmp, FieldMemOperand(this->name(), HeapObject::kMapOffset));
// The return address is on the stack.
Isolate* isolate = masm->isolate();
- __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, a3, a4);
+ DCHECK(!FLAG_vector_ics ||
+ !AreAliased(a4, a5, VectorLoadICDescriptor::SlotRegister(),
+ VectorLoadICDescriptor::VectorRegister()));
+ __ IncrementCounter(isolate->counters()->load_miss(), 1, a4, a5);
LoadIC_PushArgs(masm);
// The return address is in ra.
Isolate* isolate = masm->isolate();
- __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, a3, a4);
+ DCHECK(!FLAG_vector_ics ||
+ !AreAliased(a4, a5, VectorLoadICDescriptor::SlotRegister(),
+ VectorLoadICDescriptor::VectorRegister()));
+ __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, a4, a5);
LoadIC_PushArgs(masm);
__ JumpIfNotUniqueNameInstanceType(a4, &slow);
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
- masm->isolate()->stub_cache()->GenerateProbe(masm, flags, false, receiver,
- key, a3, a4, a5, a6);
+ masm->isolate()->stub_cache()->GenerateProbe(
+ masm, Code::STORE_IC, flags, false, receiver, key, a3, a4, a5, a6);
// Cache miss.
__ Branch(&miss);
// Get the receiver from the stack and probe the stub cache.
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
- masm->isolate()->stub_cache()->GenerateProbe(masm, flags, false, receiver,
- name, a3, a4, a5, a6);
+ masm->isolate()->stub_cache()->GenerateProbe(
+ masm, Code::STORE_IC, flags, false, receiver, name, a3, a4, a5, a6);
// Cache miss: Jump to runtime.
GenerateMiss(masm);
#if V8_TARGET_ARCH_MIPS64
#include "src/codegen.h"
+#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
+#include "src/interface-descriptors.h"
namespace v8 {
namespace internal {
static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
- Code::Flags flags, bool leave_frame,
+ Code::Kind ic_kind, Code::Flags flags, bool leave_frame,
StubCache::Table table, Register receiver, Register name,
// Number of the cache entry, not scaled.
Register offset, Register scratch, Register scratch2,
}
-void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
- bool leave_frame, Register receiver,
- Register name, Register scratch, Register extra,
- Register extra2, Register extra3) {
+void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
+ Code::Flags flags, bool leave_frame,
+ Register receiver, Register name,
+ Register scratch, Register extra, Register extra2,
+ Register extra3) {
Isolate* isolate = masm->isolate();
Label miss;
DCHECK(Code::ExtractTypeFromFlags(flags) == 0);
// Make sure that there are no register conflicts.
- DCHECK(!scratch.is(receiver));
- DCHECK(!scratch.is(name));
- DCHECK(!extra.is(receiver));
- DCHECK(!extra.is(name));
- DCHECK(!extra.is(scratch));
- DCHECK(!extra2.is(receiver));
- DCHECK(!extra2.is(name));
- DCHECK(!extra2.is(scratch));
- DCHECK(!extra2.is(extra));
+ DCHECK(!AreAliased(receiver, name, scratch, extra, extra2, extra3));
// Check register validity.
DCHECK(!scratch.is(no_reg));
DCHECK(!extra2.is(no_reg));
DCHECK(!extra3.is(no_reg));
+#ifdef DEBUG
+ // If vector-based ics are in use, ensure that scratch, extra, extra2 and
+ // extra3 don't conflict with the vector and slot registers, which need
+ // to be preserved for a handler call or miss.
+ if (IC::ICUseVector(ic_kind)) {
+ Register vector = VectorLoadICDescriptor::VectorRegister();
+ Register slot = VectorLoadICDescriptor::SlotRegister();
+ DCHECK(!AreAliased(vector, slot, scratch, extra, extra2, extra3));
+ }
+#endif
+
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1, extra2,
extra3);
__ And(scratch, scratch, Operand(mask));
// Probe the primary table.
- ProbeTable(isolate, masm, flags, leave_frame, kPrimary, receiver, name,
- scratch, extra, extra2, extra3);
+ ProbeTable(isolate, masm, ic_kind, flags, leave_frame, kPrimary, receiver,
+ name, scratch, extra, extra2, extra3);
// Primary miss: Compute hash for secondary probe.
__ dsrl(at, name, kCacheIndexShift);
__ And(scratch, scratch, Operand(mask2));
// Probe the secondary table.
- ProbeTable(isolate, masm, flags, leave_frame, kSecondary, receiver, name,
- scratch, extra, extra2, extra3);
+ ProbeTable(isolate, masm, ic_kind, flags, leave_frame, kSecondary, receiver,
+ name, scratch, extra, extra2, extra3);
// Cache miss: Fall-through and let caller handle the miss by
// entering the runtime system.
// Arguments extra, extra2 and extra3 may be used to pass additional scratch
// registers. Set to no_reg if not needed.
// If leave_frame is true, then exit a frame before the tail call.
- void GenerateProbe(MacroAssembler* masm, Code::Flags flags, bool leave_frame,
- Register receiver, Register name, Register scratch,
- Register extra, Register extra2 = no_reg,
- Register extra3 = no_reg);
+ void GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
+ Code::Flags flags, bool leave_frame, Register receiver,
+ Register name, Register scratch, Register extra,
+ Register extra2 = no_reg, Register extra3 = no_reg);
enum Table { kPrimary, kSecondary };
#define __ ACCESS_MASM(masm)
+// Spills the type feedback vector and slot registers to the stack so they
+// survive code that may clobber them; restore with PopVectorAndSlot().
+void PropertyHandlerCompiler::PushVectorAndSlot(Register vector,
+                                                Register slot) {
+  MacroAssembler* masm = this->masm();
+  __ Push(vector);
+  __ Push(slot);
+}
+
+
+// Restores the vector and slot registers saved by PushVectorAndSlot();
+// pops in reverse (LIFO) order of the pushes.
+void PropertyHandlerCompiler::PopVectorAndSlot(Register vector, Register slot) {
+  MacroAssembler* masm = this->masm();
+  __ Pop(slot);
+  __ Pop(vector);
+}
+
+
+// Drops the saved vector and slot from the stack without restoring the
+// registers (for paths that succeed and no longer need them).
+void PropertyHandlerCompiler::DiscardVectorAndSlot() {
+  MacroAssembler* masm = this->masm();
+  // Remove vector and slot.
+  __ addp(rsp, Immediate(2 * kPointerSize));
+}
+
+
void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
MacroAssembler* masm, Label* miss_label, Register receiver,
Handle<Name> name, Register scratch0, Register scratch1) {
Label success;
__ jmp(&success);
__ bind(miss);
+ if (IC::ICUseVector(kind())) {
+ DCHECK(kind() == Code::LOAD_IC);
+ PopVectorAndSlot();
+ }
TailCallBuiltin(masm(), MissBuiltin(kind()));
__ bind(&success);
}
}
__ Push(holder_reg);
__ Push(this->name());
+ InterceptorVectorSlotPush(holder_reg);
// Invoke an interceptor. Note: map checks from receiver to
// interceptor's holder has been compiled before (see a caller
__ ret(0);
__ bind(&interceptor_failed);
+ InterceptorVectorSlotPop(holder_reg);
__ Pop(this->name());
__ Pop(holder_reg);
if (must_preserve_receiver_reg) {
Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
Handle<JSObject> object, Handle<Name> name,
Handle<ExecutableAccessorInfo> callback) {
- Register holder_reg = Frontend(receiver(), name);
+ Register holder_reg = Frontend(name);
__ PopReturnAddressTo(scratch1());
__ Push(receiver());
Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
Handle<PropertyCell> cell, Handle<Name> name, bool is_configurable) {
Label miss;
+ if (IC::ICUseVector(kind())) {
+ PushVectorAndSlot();
+ }
FrontendHeader(receiver(), name, &miss);
// Get the value from the cell.
Counters* counters = isolate()->counters();
__ IncrementCounter(counters->named_load_global_stub(), 1);
+ if (IC::ICUseVector(kind())) {
+ DiscardVectorAndSlot();
+ }
__ ret(0);
FrontendFooter(name, &miss);
if (check == PROPERTY &&
(kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) {
- // In case we are compiling an IC for dictionary loads and stores, just
+ // In case we are compiling an IC for dictionary loads or stores, just
// check whether the name is unique.
if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) {
+ // Keyed loads with dictionaries shouldn't be here, they go generic.
+ // The DCHECK is to protect assumptions when --vector-ics is on.
+ DCHECK(kind() != Code::KEYED_LOAD_IC);
Register tmp = scratch1();
__ JumpIfSmi(this->name(), &miss);
__ movp(tmp, FieldOperand(this->name(), HeapObject::kMapOffset));
__ JumpIfNotUniqueNameInstanceType(r9, &slow_with_tagged_index);
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
- masm->isolate()->stub_cache()->GenerateProbe(masm, flags, false, receiver,
- key, rbx, no_reg);
+ masm->isolate()->stub_cache()->GenerateProbe(
+ masm, Code::STORE_IC, flags, false, receiver, key, rbx, no_reg);
// Cache miss.
__ jmp(&miss);
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
masm->isolate()->stub_cache()->GenerateProbe(
- masm, flags, false, StoreDescriptor::ReceiverRegister(),
+ masm, Code::STORE_IC, flags, false, StoreDescriptor::ReceiverRegister(),
StoreDescriptor::NameRegister(), rbx, no_reg);
// Cache miss: Jump to runtime.
#if V8_TARGET_ARCH_X64
#include "src/codegen.h"
+#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
+#include "src/interface-descriptors.h"
namespace v8 {
namespace internal {
static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
- Code::Flags flags, bool leave_frame,
+ Code::Kind ic_kind, Code::Flags flags, bool leave_frame,
StubCache::Table table, Register receiver, Register name,
// The offset is scaled by 4, based on
// kCacheIndexShift, which is two bits
}
-void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
- bool leave_frame, Register receiver,
- Register name, Register scratch, Register extra,
- Register extra2, Register extra3) {
+void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
+ Code::Flags flags, bool leave_frame,
+ Register receiver, Register name,
+ Register scratch, Register extra, Register extra2,
+ Register extra3) {
Isolate* isolate = masm->isolate();
Label miss;
USE(extra); // The register extra is not used on the X64 platform.
DCHECK(extra2.is(no_reg));
DCHECK(extra3.is(no_reg));
+#ifdef DEBUG
+ // If vector-based ics are in use, ensure that scratch doesn't conflict with
+ // the vector and slot registers, which need to be preserved for a handler
+ // call or miss.
+ if (IC::ICUseVector(ic_kind)) {
+ Register vector = VectorLoadICDescriptor::VectorRegister();
+ Register slot = VectorLoadICDescriptor::SlotRegister();
+ DCHECK(!AreAliased(vector, slot, scratch));
+ }
+#endif
+
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1);
__ andp(scratch, Immediate((kPrimaryTableSize - 1) << kCacheIndexShift));
// Probe the primary table.
- ProbeTable(isolate, masm, flags, leave_frame, kPrimary, receiver, name,
- scratch);
+ ProbeTable(isolate, masm, ic_kind, flags, leave_frame, kPrimary, receiver,
+ name, scratch);
// Primary miss: Compute hash for secondary probe.
__ movl(scratch, FieldOperand(name, Name::kHashFieldOffset));
__ andp(scratch, Immediate((kSecondaryTableSize - 1) << kCacheIndexShift));
// Probe the secondary table.
- ProbeTable(isolate, masm, flags, leave_frame, kSecondary, receiver, name,
- scratch);
+ ProbeTable(isolate, masm, ic_kind, flags, leave_frame, kSecondary, receiver,
+ name, scratch);
// Cache miss: Fall-through and let caller handle the miss by
// entering the runtime system.
}
+// Spills the type feedback vector and slot registers to the stack so they
+// survive code that may clobber them; restore with PopVectorAndSlot().
+void PropertyHandlerCompiler::PushVectorAndSlot(Register vector,
+                                                Register slot) {
+  MacroAssembler* masm = this->masm();
+  __ push(vector);
+  __ push(slot);
+}
+
+
+// Restores the vector and slot registers saved by PushVectorAndSlot();
+// pops in reverse (LIFO) order of the pushes.
+void PropertyHandlerCompiler::PopVectorAndSlot(Register vector, Register slot) {
+  MacroAssembler* masm = this->masm();
+  __ pop(slot);
+  __ pop(vector);
+}
+
+
+// Drops the saved vector and slot from the stack without restoring the
+// registers (for paths that succeed and no longer need them).
+void PropertyHandlerCompiler::DiscardVectorAndSlot() {
+  MacroAssembler* masm = this->masm();
+  // Remove vector and slot.
+  __ add(esp, Immediate(2 * kPointerSize));
+}
+
+
void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
MacroAssembler* masm, Label* miss_label, Register receiver,
Handle<Name> name, Register scratch0, Register scratch1) {
void NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(
MacroAssembler* masm, Register receiver, Register scratch1,
Register scratch2, Label* miss_label) {
+ DCHECK(!FLAG_vector_ics);
__ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label);
__ mov(eax, scratch1);
__ ret(0);
Label success;
__ jmp(&success);
__ bind(miss);
+ if (IC::ICUseVector(kind())) {
+ DCHECK(kind() == Code::LOAD_IC);
+ PopVectorAndSlot();
+ }
TailCallBuiltin(masm(), MissBuiltin(kind()));
__ bind(&success);
}
}
__ push(holder_reg);
__ push(this->name());
-
+ InterceptorVectorSlotPush(holder_reg);
// Invoke an interceptor. Note: map checks from receiver to
// interceptor's holder has been compiled before (see a caller
// of this method.)
__ mov(this->name(), Immediate(bit_cast<int32_t>(kZapValue)));
}
+ InterceptorVectorSlotPop(holder_reg);
__ pop(this->name());
__ pop(holder_reg);
if (must_preserve_receiver_reg) {
Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
Handle<JSObject> object, Handle<Name> name,
Handle<ExecutableAccessorInfo> callback) {
- Register holder_reg = Frontend(receiver(), name);
+ Register holder_reg = Frontend(name);
__ pop(scratch1()); // remove the return address
__ push(receiver());
Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
Handle<PropertyCell> cell, Handle<Name> name, bool is_configurable) {
Label miss;
-
+ if (IC::ICUseVector(kind())) {
+ PushVectorAndSlot();
+ }
FrontendHeader(receiver(), name, &miss);
// Get the value from the cell.
Register result = StoreDescriptor::ValueRegister();
Counters* counters = isolate()->counters();
__ IncrementCounter(counters->named_load_global_stub(), 1);
// The code above already loads the result into the return register.
+ if (IC::ICUseVector(kind())) {
+ DiscardVectorAndSlot();
+ }
__ ret(0);
FrontendFooter(name, &miss);
Label miss;
if (check == PROPERTY &&
- (kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) {
- // In case we are compiling an IC for dictionary loads and stores, just
+ (kind() == Code::KEYED_STORE_IC || kind() == Code::KEYED_LOAD_IC)) {
+ // In case we are compiling an IC for dictionary loads or stores, just
// check whether the name is unique.
if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) {
+ // Keyed loads with dictionaries shouldn't be here, they go generic.
+ // The DCHECK is to protect assumptions when --vector-ics is on.
+ DCHECK(kind() != Code::KEYED_LOAD_IC);
Register tmp = scratch1();
__ JumpIfSmi(this->name(), &miss);
__ mov(tmp, FieldOperand(this->name(), HeapObject::kMapOffset));
__ JumpIfNotUniqueNameInstanceType(ebx, &slow);
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
- masm->isolate()->stub_cache()->GenerateProbe(masm, flags, false, receiver,
- key, ebx, no_reg);
+ masm->isolate()->stub_cache()->GenerateProbe(
+ masm, Code::STORE_IC, flags, false, receiver, key, ebx, no_reg);
// Cache miss.
__ jmp(&miss);
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
masm->isolate()->stub_cache()->GenerateProbe(
- masm, flags, false, StoreDescriptor::ReceiverRegister(),
+ masm, Code::STORE_IC, flags, false, StoreDescriptor::ReceiverRegister(),
StoreDescriptor::NameRegister(), ebx, no_reg);
// Cache miss: Jump to runtime.
#if V8_TARGET_ARCH_X87
#include "src/codegen.h"
+#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
+#include "src/interface-descriptors.h"
namespace v8 {
namespace internal {
static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
- Code::Flags flags, bool leave_frame,
+ Code::Kind ic_kind, Code::Flags flags, bool leave_frame,
StubCache::Table table, Register name, Register receiver,
// Number of the cache entry pointer-size scaled.
Register offset, Register extra) {
}
#endif
+ if (IC::ICUseVector(ic_kind)) {
+ // The vector and slot were pushed onto the stack before starting the
+ // probe, and need to be dropped before calling the handler.
+ __ pop(VectorLoadICDescriptor::VectorRegister());
+ __ pop(VectorLoadICDescriptor::SlotRegister());
+ }
+
if (leave_frame) __ leave();
// Jump to the first instruction in the code stub.
__ pop(offset);
__ mov(offset, Operand::StaticArray(offset, times_1, value_offset));
+ if (IC::ICUseVector(ic_kind)) {
+ // The vector and slot were pushed onto the stack before starting the
+ // probe, and need to be dropped before calling the handler.
+ Register vector = VectorLoadICDescriptor::VectorRegister();
+ Register slot = VectorLoadICDescriptor::SlotRegister();
+ DCHECK(!offset.is(vector) && !offset.is(slot));
+
+ __ pop(vector);
+ __ pop(slot);
+ }
+
if (leave_frame) __ leave();
// Jump to the first instruction in the code stub.
}
-void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
- bool leave_frame, Register receiver,
- Register name, Register scratch, Register extra,
- Register extra2, Register extra3) {
+void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
+ Code::Flags flags, bool leave_frame,
+ Register receiver, Register name,
+ Register scratch, Register extra, Register extra2,
+ Register extra3) {
Label miss;
// Assert that code is valid. The multiplying code relies on the entry size
DCHECK(kCacheIndexShift == kPointerSizeLog2);
// Probe the primary table.
- ProbeTable(isolate(), masm, flags, leave_frame, kPrimary, name, receiver,
- offset, extra);
+ ProbeTable(isolate(), masm, ic_kind, flags, leave_frame, kPrimary, name,
+ receiver, offset, extra);
// Primary miss: Compute hash for secondary probe.
__ mov(offset, FieldOperand(name, Name::kHashFieldOffset));
__ and_(offset, (kSecondaryTableSize - 1) << kCacheIndexShift);
// Probe the secondary table.
- ProbeTable(isolate(), masm, flags, leave_frame, kSecondary, name, receiver,
- offset, extra);
+ ProbeTable(isolate(), masm, ic_kind, flags, leave_frame, kSecondary, name,
+ receiver, offset, extra);
// Cache miss: Fall-through and let caller handle the miss by
// entering the runtime system.
Register receiver = LoadDescriptor::ReceiverRegister();
Register index = LoadDescriptor::NameRegister();
- Register scratch = a3;
+ Register scratch = t1;
Register result = v0;
DCHECK(!scratch.is(receiver) && !scratch.is(index));
+ DCHECK(!FLAG_vector_ics ||
+ (!scratch.is(VectorLoadICDescriptor::VectorRegister()) &&
+ result.is(VectorLoadICDescriptor::SlotRegister())));
+ // StringCharAtGenerator doesn't use the result register until it's passed
+ // the different miss possibilities. If it did, we would have a conflict
+ // when FLAG_vector_ics is true.
StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
&miss, // When not a string.
&miss, // When not a number.
void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
Label miss;
Register receiver = LoadDescriptor::ReceiverRegister();
- NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, a3,
- t0, &miss);
+ // Ensure that the vector and slot registers won't be clobbered before
+ // calling the miss handler.
+ DCHECK(!FLAG_vector_ics ||
+ !AreAliased(t0, t1, VectorLoadICDescriptor::VectorRegister(),
+ VectorLoadICDescriptor::SlotRegister()));
+
+ NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, t0,
+ t1, &miss);
__ bind(&miss);
PropertyAccessCompiler::TailCallBuiltin(
masm, PropertyAccessCompiler::MissBuiltin(Code::LOAD_IC));
void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
DCHECK(FLAG_vector_ics);
Register vector_register = ToRegister(instr->temp_vector());
+ Register slot_register = VectorLoadICDescriptor::SlotRegister();
DCHECK(vector_register.is(VectorLoadICDescriptor::VectorRegister()));
+ DCHECK(slot_register.is(a0));
+
+ AllowDeferredHandleDereference vector_structure_check;
Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
__ li(vector_register, vector);
// No need to allocate this register.
- DCHECK(VectorLoadICDescriptor::SlotRegister().is(a0));
- int index = vector->GetIndex(instr->hydrogen()->slot());
- __ li(VectorLoadICDescriptor::SlotRegister(), Operand(Smi::FromInt(index)));
+ FeedbackVectorICSlot slot = instr->hydrogen()->slot();
+ int index = vector->GetIndex(slot);
+ __ li(slot_register, Operand(Smi::FromInt(index)));
}
DCHECK(receiver.is(a1));
DCHECK(name.is(a2));
- Register scratch = a3;
- Register extra = t0;
- Register extra2 = t1;
- Register extra3 = t2;
+ Register scratch = t0;
+ Register extra = t1;
+ Register extra2 = t2;
+ Register extra3 = t5;
+#ifdef DEBUG
+ Register slot = FLAG_vector_ics ? ToRegister(instr->slot()) : no_reg;
+ Register vector = FLAG_vector_ics ? ToRegister(instr->vector()) : no_reg;
+ DCHECK(!FLAG_vector_ics ||
+ !AreAliased(slot, vector, scratch, extra, extra2, extra3));
+#endif
// Important for the tail-call.
bool must_teardown_frame = NeedsEagerFrame();
- // The probe will tail call to a handler if found.
- isolate()->stub_cache()->GenerateProbe(masm(), instr->hydrogen()->flags(),
- must_teardown_frame, receiver, name,
- scratch, extra, extra2, extra3);
+ if (!instr->hydrogen()->is_just_miss()) {
+ DCHECK(!instr->hydrogen()->is_keyed_load());
+
+ // The probe will tail call to a handler if found.
+ isolate()->stub_cache()->GenerateProbe(
+ masm(), Code::LOAD_IC, instr->hydrogen()->flags(), must_teardown_frame,
+ receiver, name, scratch, extra, extra2, extra3);
+ }
// Tail call to miss if we ended up here.
if (must_teardown_frame) __ LeaveFrame(StackFrame::INTERNAL);
- LoadIC::GenerateMiss(masm());
+ if (instr->hydrogen()->is_keyed_load()) {
+ KeyedLoadIC::GenerateMiss(masm());
+ } else {
+ LoadIC::GenerateMiss(masm());
+ }
}
void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
DCHECK(ToRegister(instr->result()).is(v0));
- LPointerMap* pointers = instr->pointer_map();
- SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+ if (instr->hydrogen()->IsTailCall()) {
+ if (NeedsEagerFrame()) __ LeaveFrame(StackFrame::INTERNAL);
- if (instr->target()->IsConstantOperand()) {
- LConstantOperand* target = LConstantOperand::cast(instr->target());
- Handle<Code> code = Handle<Code>::cast(ToHandle(target));
- generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
- __ Call(code, RelocInfo::CODE_TARGET);
+ if (instr->target()->IsConstantOperand()) {
+ LConstantOperand* target = LConstantOperand::cast(instr->target());
+ Handle<Code> code = Handle<Code>::cast(ToHandle(target));
+ __ Jump(code, RelocInfo::CODE_TARGET);
+ } else {
+ DCHECK(instr->target()->IsRegister());
+ Register target = ToRegister(instr->target());
+ __ Addu(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(target);
+ }
} else {
- DCHECK(instr->target()->IsRegister());
- Register target = ToRegister(instr->target());
- generator.BeforeCall(__ CallSize(target));
- __ Addu(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Call(target);
+ LPointerMap* pointers = instr->pointer_map();
+ SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+
+ if (instr->target()->IsConstantOperand()) {
+ LConstantOperand* target = LConstantOperand::cast(instr->target());
+ Handle<Code> code = Handle<Code>::cast(ToHandle(target));
+ generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
+ __ Call(code, RelocInfo::CODE_TARGET);
+ } else {
+ DCHECK(instr->target()->IsRegister());
+ Register target = ToRegister(instr->target());
+ generator.BeforeCall(__ CallSize(target));
+ __ Addu(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Call(target);
+ }
+ generator.AfterCall();
}
- generator.AfterCall();
}
UseFixed(instr->receiver(), LoadDescriptor::ReceiverRegister());
LOperand* name_register =
UseFixed(instr->name(), LoadDescriptor::NameRegister());
+ LOperand* slot = NULL;
+ LOperand* vector = NULL;
+ if (FLAG_vector_ics) {
+ slot = UseFixed(instr->slot(), VectorLoadICDescriptor::SlotRegister());
+ vector =
+ UseFixed(instr->vector(), VectorLoadICDescriptor::VectorRegister());
+ }
+
// Not marked as call. It can't deoptimize, and it never returns.
return new (zone()) LTailCallThroughMegamorphicCache(
- context, receiver_register, name_register);
+ context, receiver_register, name_register, slot, vector);
}
LOperand* global_object =
UseFixed(instr->global_object(), LoadDescriptor::ReceiverRegister());
LOperand* vector = NULL;
- if (FLAG_vector_ics) {
+ if (instr->HasVectorAndSlot()) {
vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
}
LLoadGlobalGeneric* result =
LOperand* object =
UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
LOperand* vector = NULL;
- if (FLAG_vector_ics) {
+ if (instr->HasVectorAndSlot()) {
vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
}
UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
LOperand* vector = NULL;
- if (FLAG_vector_ics) {
+ if (instr->HasVectorAndSlot()) {
vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
}
class LTailCallThroughMegamorphicCache FINAL
- : public LTemplateInstruction<0, 3, 0> {
+ : public LTemplateInstruction<0, 5, 0> {
public:
- explicit LTailCallThroughMegamorphicCache(LOperand* context,
- LOperand* receiver,
- LOperand* name) {
+ LTailCallThroughMegamorphicCache(LOperand* context, LOperand* receiver,
+ LOperand* name, LOperand* slot,
+ LOperand* vector) {
inputs_[0] = context;
inputs_[1] = receiver;
inputs_[2] = name;
+ inputs_[3] = slot;
+ inputs_[4] = vector;
}
LOperand* context() { return inputs_[0]; }
LOperand* receiver() { return inputs_[1]; }
LOperand* name() { return inputs_[2]; }
+ LOperand* slot() { return inputs_[3]; }
+ LOperand* vector() { return inputs_[4]; }
DECLARE_CONCRETE_INSTRUCTION(TailCallThroughMegamorphicCache,
"tail-call-through-megamorphic-cache")
const CallInterfaceDescriptor descriptor() { return descriptor_; }
+ DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)
+
private:
DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor")
- DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)
void PrintDataTo(StringStream* stream) OVERRIDE;
Register receiver = LoadDescriptor::ReceiverRegister();
Register index = LoadDescriptor::NameRegister();
- Register scratch = a3;
+ Register scratch = a4;
Register result = v0;
DCHECK(!scratch.is(receiver) && !scratch.is(index));
+ DCHECK(!FLAG_vector_ics ||
+ (!scratch.is(VectorLoadICDescriptor::VectorRegister()) &&
+ result.is(VectorLoadICDescriptor::SlotRegister())));
+ // StringCharAtGenerator doesn't use the result register until it's passed
+ // the different miss possibilities. If it did, we would have a conflict
+ // when FLAG_vector_ics is true.
StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
&miss, // When not a string.
&miss, // When not a number.
void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
Label miss;
Register receiver = LoadDescriptor::ReceiverRegister();
- NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, a3,
- a4, &miss);
+ // Ensure that the vector and slot registers won't be clobbered before
+ // calling the miss handler.
+ DCHECK(!FLAG_vector_ics ||
+ !AreAliased(a4, a5, VectorLoadICDescriptor::VectorRegister(),
+ VectorLoadICDescriptor::SlotRegister()));
+
+ NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, a4,
+ a5, &miss);
__ bind(&miss);
PropertyAccessCompiler::TailCallBuiltin(
masm, PropertyAccessCompiler::MissBuiltin(Code::LOAD_IC));
void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
DCHECK(FLAG_vector_ics);
Register vector_register = ToRegister(instr->temp_vector());
+ Register slot_register = VectorLoadICDescriptor::SlotRegister();
DCHECK(vector_register.is(VectorLoadICDescriptor::VectorRegister()));
+ DCHECK(slot_register.is(a0));
+
+ AllowDeferredHandleDereference vector_structure_check;
Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
__ li(vector_register, vector);
// No need to allocate this register.
- DCHECK(VectorLoadICDescriptor::SlotRegister().is(a0));
- int index = vector->GetIndex(instr->hydrogen()->slot());
- __ li(VectorLoadICDescriptor::SlotRegister(), Operand(Smi::FromInt(index)));
+ FeedbackVectorICSlot slot = instr->hydrogen()->slot();
+ int index = vector->GetIndex(slot);
+ __ li(slot_register, Operand(Smi::FromInt(index)));
}
DCHECK(receiver.is(a1));
DCHECK(name.is(a2));
- Register scratch = a3;
- Register extra = a4;
- Register extra2 = a5;
- Register extra3 = a6;
+ Register scratch = a4;
+ Register extra = a5;
+ Register extra2 = a6;
+ Register extra3 = t1;
+#ifdef DEBUG
+ Register slot = FLAG_vector_ics ? ToRegister(instr->slot()) : no_reg;
+ Register vector = FLAG_vector_ics ? ToRegister(instr->vector()) : no_reg;
+ DCHECK(!FLAG_vector_ics ||
+ !AreAliased(slot, vector, scratch, extra, extra2, extra3));
+#endif
// Important for the tail-call.
bool must_teardown_frame = NeedsEagerFrame();
- // The probe will tail call to a handler if found.
- isolate()->stub_cache()->GenerateProbe(masm(), instr->hydrogen()->flags(),
- must_teardown_frame, receiver, name,
- scratch, extra, extra2, extra3);
+ if (!instr->hydrogen()->is_just_miss()) {
+ DCHECK(!instr->hydrogen()->is_keyed_load());
+
+ // The probe will tail call to a handler if found.
+ isolate()->stub_cache()->GenerateProbe(
+ masm(), Code::LOAD_IC, instr->hydrogen()->flags(), must_teardown_frame,
+ receiver, name, scratch, extra, extra2, extra3);
+ }
// Tail call to miss if we ended up here.
if (must_teardown_frame) __ LeaveFrame(StackFrame::INTERNAL);
- LoadIC::GenerateMiss(masm());
+ if (instr->hydrogen()->is_keyed_load()) {
+ KeyedLoadIC::GenerateMiss(masm());
+ } else {
+ LoadIC::GenerateMiss(masm());
+ }
}
void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
DCHECK(ToRegister(instr->result()).is(v0));
- LPointerMap* pointers = instr->pointer_map();
- SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+ if (instr->hydrogen()->IsTailCall()) {
+ if (NeedsEagerFrame()) __ LeaveFrame(StackFrame::INTERNAL);
- if (instr->target()->IsConstantOperand()) {
- LConstantOperand* target = LConstantOperand::cast(instr->target());
- Handle<Code> code = Handle<Code>::cast(ToHandle(target));
- generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
- __ Call(code, RelocInfo::CODE_TARGET);
+ if (instr->target()->IsConstantOperand()) {
+ LConstantOperand* target = LConstantOperand::cast(instr->target());
+ Handle<Code> code = Handle<Code>::cast(ToHandle(target));
+ __ Jump(code, RelocInfo::CODE_TARGET);
+ } else {
+ DCHECK(instr->target()->IsRegister());
+ Register target = ToRegister(instr->target());
+ __ Daddu(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(target);
+ }
} else {
- DCHECK(instr->target()->IsRegister());
- Register target = ToRegister(instr->target());
- generator.BeforeCall(__ CallSize(target));
- __ Daddu(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Call(target);
+ LPointerMap* pointers = instr->pointer_map();
+ SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+
+ if (instr->target()->IsConstantOperand()) {
+ LConstantOperand* target = LConstantOperand::cast(instr->target());
+ Handle<Code> code = Handle<Code>::cast(ToHandle(target));
+ generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
+ __ Call(code, RelocInfo::CODE_TARGET);
+ } else {
+ DCHECK(instr->target()->IsRegister());
+ Register target = ToRegister(instr->target());
+ generator.BeforeCall(__ CallSize(target));
+ __ Daddu(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Call(target);
+ }
+ generator.AfterCall();
}
- generator.AfterCall();
}
UseFixed(instr->receiver(), LoadDescriptor::ReceiverRegister());
LOperand* name_register =
UseFixed(instr->name(), LoadDescriptor::NameRegister());
+ LOperand* slot = NULL;
+ LOperand* vector = NULL;
+ if (FLAG_vector_ics) {
+ slot = UseFixed(instr->slot(), VectorLoadICDescriptor::SlotRegister());
+ vector =
+ UseFixed(instr->vector(), VectorLoadICDescriptor::VectorRegister());
+ }
+
// Not marked as call. It can't deoptimize, and it never returns.
return new (zone()) LTailCallThroughMegamorphicCache(
- context, receiver_register, name_register);
+ context, receiver_register, name_register, slot, vector);
}
LOperand* global_object =
UseFixed(instr->global_object(), LoadDescriptor::ReceiverRegister());
LOperand* vector = NULL;
- if (FLAG_vector_ics) {
+ if (instr->HasVectorAndSlot()) {
vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
}
LLoadGlobalGeneric* result =
LOperand* object =
UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
LOperand* vector = NULL;
- if (FLAG_vector_ics) {
+ if (instr->HasVectorAndSlot()) {
vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
}
UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
LOperand* vector = NULL;
- if (FLAG_vector_ics) {
+ if (instr->HasVectorAndSlot()) {
vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
}
class LTailCallThroughMegamorphicCache FINAL
- : public LTemplateInstruction<0, 3, 0> {
+ : public LTemplateInstruction<0, 5, 0> {
public:
- explicit LTailCallThroughMegamorphicCache(LOperand* context,
- LOperand* receiver,
- LOperand* name) {
+ LTailCallThroughMegamorphicCache(LOperand* context, LOperand* receiver,
+ LOperand* name, LOperand* slot,
+ LOperand* vector) {
inputs_[0] = context;
inputs_[1] = receiver;
inputs_[2] = name;
+ inputs_[3] = slot;
+ inputs_[4] = vector;
}
LOperand* context() { return inputs_[0]; }
LOperand* receiver() { return inputs_[1]; }
LOperand* name() { return inputs_[2]; }
+ LOperand* slot() { return inputs_[3]; }
+ LOperand* vector() { return inputs_[4]; }
DECLARE_CONCRETE_INSTRUCTION(TailCallThroughMegamorphicCache,
"tail-call-through-megamorphic-cache")
const CallInterfaceDescriptor descriptor() { return descriptor_; }
+ DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)
+
private:
DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor")
- DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)
void PrintDataTo(StringStream* stream) OVERRIDE;
}
array->set(kWithTypesIndex, Smi::FromInt(0));
array->set(kGenericCountIndex, Smi::FromInt(0));
+ // Fill the indexes with zeros.
+ for (int i = 0; i < index_count; i++) {
+ array->set(kReservedIndexCount + i, Smi::FromInt(0));
+ }
// Ensure we can skip the write barrier
Handle<Object> uninitialized_sentinel = UninitializedSentinel(isolate);
static bool ClearLogic(Heap* heap, int ic_age, Code::Kind kind,
InlineCacheState state) {
if (FLAG_cleanup_code_caches_at_gc &&
- (kind == Code::CALL_IC || state == MEGAMORPHIC || state == GENERIC ||
- state == POLYMORPHIC || heap->flush_monomorphic_ics() ||
+ (kind == Code::CALL_IC || heap->flush_monomorphic_ics() ||
// TODO(mvstanton): is this ic_age granular enough? it comes from
// the SharedFunctionInfo which may change on a different schedule
// than ic targets.
return UNINITIALIZED;
} else if (feedback == *vector()->PremonomorphicSentinel(isolate)) {
return PREMONOMORPHIC;
- } else if (feedback == *vector()->MegamorphicSentinel(isolate)) {
- return MEGAMORPHIC;
} else if (feedback == *vector()->GenericSentinel(isolate)) {
return GENERIC;
} else if (feedback->IsFixedArray()) {
}
-void KeyedLoadICNexus::ConfigureMegamorphic() {
- SetFeedback(*vector()->MegamorphicSentinel(GetIsolate()), SKIP_WRITE_BARRIER);
-}
-
-
void LoadICNexus::ConfigureMegamorphic() {
SetFeedback(*vector()->MegamorphicSentinel(GetIsolate()), SKIP_WRITE_BARRIER);
}
void Clear(Code* host);
- void ConfigureMegamorphic();
void ConfigureGeneric();
void ConfigurePremonomorphic();
// name can be a null handle for element loads.
void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
Label miss;
Register receiver = LoadDescriptor::ReceiverRegister();
+ // Ensure that the vector and slot registers won't be clobbered before
+ // calling the miss handler.
+ DCHECK(!FLAG_vector_ics ||
+ !AreAliased(r8, r9, VectorLoadICDescriptor::VectorRegister(),
+ VectorLoadICDescriptor::SlotRegister()));
NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, r8,
r9, &miss);
Register receiver = LoadDescriptor::ReceiverRegister();
Register index = LoadDescriptor::NameRegister();
- Register scratch = rbx;
+ Register scratch = rdi;
Register result = rax;
DCHECK(!scratch.is(receiver) && !scratch.is(index));
+ DCHECK(!FLAG_vector_ics ||
+ (!scratch.is(VectorLoadICDescriptor::VectorRegister()) &&
+ result.is(VectorLoadICDescriptor::SlotRegister())));
+ // StringCharAtGenerator doesn't use the result register until it's passed
+ // the different miss possibilities. If it did, we would have a conflict
+ // when FLAG_vector_ics is true.
StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
&miss, // When not a string.
&miss, // When not a number.
void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
DCHECK(FLAG_vector_ics);
Register vector_register = ToRegister(instr->temp_vector());
+ Register slot_register = VectorLoadICDescriptor::SlotRegister();
DCHECK(vector_register.is(VectorLoadICDescriptor::VectorRegister()));
+ DCHECK(slot_register.is(rax));
+
+ AllowDeferredHandleDereference vector_structure_check;
Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
__ Move(vector_register, vector);
// No need to allocate this register.
- DCHECK(VectorLoadICDescriptor::SlotRegister().is(rax));
- int index = vector->GetIndex(instr->hydrogen()->slot());
- __ Move(VectorLoadICDescriptor::SlotRegister(), Smi::FromInt(index));
+ FeedbackVectorICSlot slot = instr->hydrogen()->slot();
+ int index = vector->GetIndex(slot);
+ __ Move(slot_register, Smi::FromInt(index));
}
Register name = ToRegister(instr->name());
DCHECK(receiver.is(LoadDescriptor::ReceiverRegister()));
DCHECK(name.is(LoadDescriptor::NameRegister()));
-
- Register scratch = rbx;
+ Register scratch = rdi;
DCHECK(!scratch.is(receiver) && !scratch.is(name));
+ DCHECK(!FLAG_vector_ics ||
+ !AreAliased(ToRegister(instr->slot()), ToRegister(instr->vector()),
+ scratch));
// Important for the tail-call.
bool must_teardown_frame = NeedsEagerFrame();
- // The probe will tail call to a handler if found.
- isolate()->stub_cache()->GenerateProbe(masm(), instr->hydrogen()->flags(),
- must_teardown_frame, receiver, name,
- scratch, no_reg);
+ if (!instr->hydrogen()->is_just_miss()) {
+ // The probe will tail call to a handler if found.
+ DCHECK(!instr->hydrogen()->is_keyed_load());
+ isolate()->stub_cache()->GenerateProbe(
+ masm(), Code::LOAD_IC, instr->hydrogen()->flags(), must_teardown_frame,
+ receiver, name, scratch, no_reg);
+ }
// Tail call to miss if we ended up here.
if (must_teardown_frame) __ leave();
- LoadIC::GenerateMiss(masm());
+ if (instr->hydrogen()->is_keyed_load()) {
+ KeyedLoadIC::GenerateMiss(masm());
+ } else {
+ LoadIC::GenerateMiss(masm());
+ }
}
void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
DCHECK(ToRegister(instr->result()).is(rax));
- LPointerMap* pointers = instr->pointer_map();
- SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+ if (instr->hydrogen()->IsTailCall()) {
+ if (NeedsEagerFrame()) __ leave();
- if (instr->target()->IsConstantOperand()) {
- LConstantOperand* target = LConstantOperand::cast(instr->target());
- Handle<Code> code = Handle<Code>::cast(ToHandle(target));
- generator.BeforeCall(__ CallSize(code));
- __ call(code, RelocInfo::CODE_TARGET);
+ if (instr->target()->IsConstantOperand()) {
+ LConstantOperand* target = LConstantOperand::cast(instr->target());
+ Handle<Code> code = Handle<Code>::cast(ToHandle(target));
+ __ jmp(code, RelocInfo::CODE_TARGET);
+ } else {
+ DCHECK(instr->target()->IsRegister());
+ Register target = ToRegister(instr->target());
+ __ addp(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ jmp(target);
+ }
} else {
- DCHECK(instr->target()->IsRegister());
- Register target = ToRegister(instr->target());
- generator.BeforeCall(__ CallSize(target));
- __ addp(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ call(target);
+ LPointerMap* pointers = instr->pointer_map();
+ SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+
+ if (instr->target()->IsConstantOperand()) {
+ LConstantOperand* target = LConstantOperand::cast(instr->target());
+ Handle<Code> code = Handle<Code>::cast(ToHandle(target));
+ generator.BeforeCall(__ CallSize(code));
+ __ call(code, RelocInfo::CODE_TARGET);
+ } else {
+ DCHECK(instr->target()->IsRegister());
+ Register target = ToRegister(instr->target());
+ generator.BeforeCall(__ CallSize(target));
+ __ addp(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ call(target);
+ }
+ generator.AfterCall();
}
- generator.AfterCall();
}
UseFixed(instr->receiver(), LoadDescriptor::ReceiverRegister());
LOperand* name_register =
UseFixed(instr->name(), LoadDescriptor::NameRegister());
+ LOperand* slot = NULL;
+ LOperand* vector = NULL;
+ if (FLAG_vector_ics) {
+ slot = UseFixed(instr->slot(), VectorLoadICDescriptor::SlotRegister());
+ vector =
+ UseFixed(instr->vector(), VectorLoadICDescriptor::VectorRegister());
+ }
+
// Not marked as call. It can't deoptimize, and it never returns.
return new (zone()) LTailCallThroughMegamorphicCache(
- context, receiver_register, name_register);
+ context, receiver_register, name_register, slot, vector);
}
LOperand* global_object =
UseFixed(instr->global_object(), LoadDescriptor::ReceiverRegister());
LOperand* vector = NULL;
- if (FLAG_vector_ics) {
+ if (instr->HasVectorAndSlot()) {
vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
}
LOperand* object =
UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
LOperand* vector = NULL;
- if (FLAG_vector_ics) {
+ if (instr->HasVectorAndSlot()) {
vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
}
LLoadNamedGeneric* result = new(zone()) LLoadNamedGeneric(
UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
LOperand* vector = NULL;
- if (FLAG_vector_ics) {
+ if (instr->HasVectorAndSlot()) {
vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
}
class LTailCallThroughMegamorphicCache FINAL
- : public LTemplateInstruction<0, 3, 0> {
+ : public LTemplateInstruction<0, 5, 0> {
public:
explicit LTailCallThroughMegamorphicCache(LOperand* context,
- LOperand* receiver,
- LOperand* name) {
+ LOperand* receiver, LOperand* name,
+ LOperand* slot, LOperand* vector) {
inputs_[0] = context;
inputs_[1] = receiver;
inputs_[2] = name;
+ inputs_[3] = slot;
+ inputs_[4] = vector;
}
LOperand* context() { return inputs_[0]; }
LOperand* receiver() { return inputs_[1]; }
LOperand* name() { return inputs_[2]; }
+ LOperand* slot() { return inputs_[3]; }
+ LOperand* vector() { return inputs_[4]; }
DECLARE_CONCRETE_INSTRUCTION(TailCallThroughMegamorphicCache,
"tail-call-through-megamorphic-cache")
Label miss;
Register receiver = LoadDescriptor::ReceiverRegister();
- NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, eax,
- ebx, &miss);
+ if (FLAG_vector_ics) {
+ // With careful management, we won't have to save slot and vector on
+ // the stack. Simply handle the possibly missing case first.
+ // TODO(mvstanton): this code can be more efficient.
+ __ cmp(FieldOperand(receiver, JSFunction::kPrototypeOrInitialMapOffset),
+ Immediate(isolate()->factory()->the_hole_value()));
+ __ j(equal, &miss);
+ __ TryGetFunctionPrototype(receiver, eax, ebx, &miss);
+ __ ret(0);
+ } else {
+ NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, eax,
+ ebx, &miss);
+ }
__ bind(&miss);
PropertyAccessCompiler::TailCallBuiltin(
masm, PropertyAccessCompiler::MissBuiltin(Code::LOAD_IC));
Register receiver = LoadDescriptor::ReceiverRegister();
Register index = LoadDescriptor::NameRegister();
- Register scratch = ebx;
+ Register scratch = edi;
DCHECK(!scratch.is(receiver) && !scratch.is(index));
Register result = eax;
DCHECK(!result.is(scratch));
+ DCHECK(!FLAG_vector_ics ||
+ (!scratch.is(VectorLoadICDescriptor::VectorRegister()) &&
+ result.is(VectorLoadICDescriptor::SlotRegister())));
+
+ // StringCharAtGenerator doesn't use the result register until it's passed
+ // the different miss possibilities. If it did, we would have a conflict
+ // when FLAG_vector_ics is true.
StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
&miss, // When not a string.
void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
DCHECK(FLAG_vector_ics);
Register vector_register = ToRegister(instr->temp_vector());
+ Register slot_register = VectorLoadICDescriptor::SlotRegister();
DCHECK(vector_register.is(VectorLoadICDescriptor::VectorRegister()));
+ DCHECK(slot_register.is(eax));
+
+ AllowDeferredHandleDereference vector_structure_check;
Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
__ mov(vector_register, vector);
// No need to allocate this register.
- DCHECK(VectorLoadICDescriptor::SlotRegister().is(eax));
- int index = vector->GetIndex(instr->hydrogen()->slot());
- __ mov(VectorLoadICDescriptor::SlotRegister(),
- Immediate(Smi::FromInt(index)));
+ FeedbackVectorICSlot slot = instr->hydrogen()->slot();
+ int index = vector->GetIndex(slot);
+ __ mov(slot_register, Immediate(Smi::FromInt(index)));
}
Register name = ToRegister(instr->name());
DCHECK(receiver.is(LoadDescriptor::ReceiverRegister()));
DCHECK(name.is(LoadDescriptor::NameRegister()));
+ Register slot = FLAG_vector_ics ? ToRegister(instr->slot()) : no_reg;
+ Register vector = FLAG_vector_ics ? ToRegister(instr->vector()) : no_reg;
Register scratch = ebx;
- Register extra = eax;
+ Register extra = edi;
+ DCHECK(!extra.is(slot) && !extra.is(vector));
DCHECK(!scratch.is(receiver) && !scratch.is(name));
DCHECK(!extra.is(receiver) && !extra.is(name));
// Important for the tail-call.
bool must_teardown_frame = NeedsEagerFrame();
- // The probe will tail call to a handler if found.
- isolate()->stub_cache()->GenerateProbe(masm(), instr->hydrogen()->flags(),
- must_teardown_frame, receiver, name,
- scratch, extra);
+ if (!instr->hydrogen()->is_just_miss()) {
+ if (FLAG_vector_ics) {
+ __ push(slot);
+ __ push(vector);
+ }
+
+ // The probe will tail call to a handler if found.
+ // If --vector-ics is on, then it knows to pop the two args first.
+ DCHECK(!instr->hydrogen()->is_keyed_load());
+ isolate()->stub_cache()->GenerateProbe(
+ masm(), Code::LOAD_IC, instr->hydrogen()->flags(), must_teardown_frame,
+ receiver, name, scratch, extra);
+
+ if (FLAG_vector_ics) {
+ __ pop(vector);
+ __ pop(slot);
+ }
+ }
// Tail call to miss if we ended up here.
if (must_teardown_frame) __ leave();
- LoadIC::GenerateMiss(masm());
+ if (instr->hydrogen()->is_keyed_load()) {
+ KeyedLoadIC::GenerateMiss(masm());
+ } else {
+ LoadIC::GenerateMiss(masm());
+ }
}
void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
DCHECK(ToRegister(instr->result()).is(eax));
- LPointerMap* pointers = instr->pointer_map();
- SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+ if (instr->hydrogen()->IsTailCall()) {
+ if (NeedsEagerFrame()) __ leave();
- if (instr->target()->IsConstantOperand()) {
- LConstantOperand* target = LConstantOperand::cast(instr->target());
- Handle<Code> code = Handle<Code>::cast(ToHandle(target));
- generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
- __ call(code, RelocInfo::CODE_TARGET);
+ if (instr->target()->IsConstantOperand()) {
+ LConstantOperand* target = LConstantOperand::cast(instr->target());
+ Handle<Code> code = Handle<Code>::cast(ToHandle(target));
+ __ jmp(code, RelocInfo::CODE_TARGET);
+ } else {
+ DCHECK(instr->target()->IsRegister());
+ Register target = ToRegister(instr->target());
+ __ add(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ jmp(target);
+ }
} else {
- DCHECK(instr->target()->IsRegister());
- Register target = ToRegister(instr->target());
- generator.BeforeCall(__ CallSize(Operand(target)));
- __ add(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ call(target);
+ LPointerMap* pointers = instr->pointer_map();
+ SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+
+ if (instr->target()->IsConstantOperand()) {
+ LConstantOperand* target = LConstantOperand::cast(instr->target());
+ Handle<Code> code = Handle<Code>::cast(ToHandle(target));
+ generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
+ __ call(code, RelocInfo::CODE_TARGET);
+ } else {
+ DCHECK(instr->target()->IsRegister());
+ Register target = ToRegister(instr->target());
+ generator.BeforeCall(__ CallSize(Operand(target)));
+ __ add(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ call(target);
+ }
+ generator.AfterCall();
}
- generator.AfterCall();
}
UseFixed(instr->receiver(), LoadDescriptor::ReceiverRegister());
LOperand* name_register =
UseFixed(instr->name(), LoadDescriptor::NameRegister());
+ LOperand* slot = NULL;
+ LOperand* vector = NULL;
+ if (FLAG_vector_ics) {
+ slot = UseFixed(instr->slot(), VectorLoadICDescriptor::SlotRegister());
+ vector =
+ UseFixed(instr->vector(), VectorLoadICDescriptor::VectorRegister());
+ }
+
// Not marked as call. It can't deoptimize, and it never returns.
return new (zone()) LTailCallThroughMegamorphicCache(
- context, receiver_register, name_register);
+ context, receiver_register, name_register, slot, vector);
}
LOperand* global_object =
UseFixed(instr->global_object(), LoadDescriptor::ReceiverRegister());
LOperand* vector = NULL;
- if (FLAG_vector_ics) {
+ if (instr->HasVectorAndSlot()) {
vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
}
LOperand* object =
UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
LOperand* vector = NULL;
- if (FLAG_vector_ics) {
+ if (instr->HasVectorAndSlot()) {
vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
}
LLoadNamedGeneric* result = new(zone()) LLoadNamedGeneric(
UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
LOperand* vector = NULL;
- if (FLAG_vector_ics) {
+ if (instr->HasVectorAndSlot()) {
vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
}
LLoadKeyedGeneric* result =
class LTailCallThroughMegamorphicCache FINAL
- : public LTemplateInstruction<0, 3, 0> {
+ : public LTemplateInstruction<0, 5, 0> {
public:
- explicit LTailCallThroughMegamorphicCache(LOperand* context,
- LOperand* receiver,
- LOperand* name) {
+ LTailCallThroughMegamorphicCache(LOperand* context, LOperand* receiver,
+ LOperand* name, LOperand* slot,
+ LOperand* vector) {
inputs_[0] = context;
inputs_[1] = receiver;
inputs_[2] = name;
+ inputs_[3] = slot;
+ inputs_[4] = vector;
}
LOperand* context() { return inputs_[0]; }
LOperand* receiver() { return inputs_[1]; }
LOperand* name() { return inputs_[2]; }
+ LOperand* slot() { return inputs_[3]; }
+ LOperand* vector() { return inputs_[4]; }
DECLARE_CONCRETE_INSTRUCTION(TailCallThroughMegamorphicCache,
"tail-call-through-megamorphic-cache")
TEST(ExternalInternalizedStringCollectedAtGC) {
+ // TODO(mvstanton): vector ics need weak support.
+ if (i::FLAG_vector_ics) return;
+
int destroyed = 0;
{ LocalContext env;
v8::HandleScope handle_scope(env->GetIsolate());
CHECK_EQ(0, vector->ICSlots());
FeedbackVectorSpec one_icslot(0, 1);
+ if (FLAG_vector_ics) {
+ one_icslot.SetKind(0, Code::CALL_IC);
+ }
vector = factory->NewTypeFeedbackVector(one_icslot);
CHECK_EQ(0, vector->Slots());
CHECK_EQ(1, vector->ICSlots());
FeedbackVectorSpec spec(3, 5);
+ if (FLAG_vector_ics) {
+ for (int i = 0; i < 5; i++) spec.SetKind(i, Code::CALL_IC);
+ }
vector = factory->NewTypeFeedbackVector(spec);
CHECK_EQ(3, vector->Slots());
CHECK_EQ(5, vector->ICSlots());
CHECK_EQ(MEGAMORPHIC, nexus.StateFromFeedback());
CHECK_EQ(NULL, nexus.FindFirstMap());
- // After a collection, state should be reset to PREMONOMORPHIC.
+ // After a collection, state should not be reset to PREMONOMORPHIC.
heap->CollectAllGarbage(i::Heap::kNoGCFlags);
- CHECK_EQ(PREMONOMORPHIC, nexus.StateFromFeedback());
+ CHECK_EQ(MEGAMORPHIC, nexus.StateFromFeedback());
}
}
TEST(IncrementalMarkingPreservesMonomorphicIC) {
if (i::FLAG_always_opt) return;
+ // TODO(mvstanton): vector-ics need to treat maps weakly.
+ if (i::FLAG_vector_ics) return;
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
*v8::Handle<v8::Function>::Cast(CcTest::global()->Get(v8_str("f"))));
Code* ic_before = FindFirstIC(f->shared()->code(), Code::LOAD_IC);
- CHECK(ic_before->ic_state() == POLYMORPHIC);
+ if (FLAG_vector_ics) {
+ CheckVectorIC(f, 0, POLYMORPHIC);
+ CHECK(ic_before->ic_state() == DEFAULT);
+ } else {
+ CHECK(ic_before->ic_state() == POLYMORPHIC);
+ }
// Fire context dispose notification.
SimulateIncrementalMarking(CcTest::heap());
CcTest::heap()->CollectAllGarbage(Heap::kNoGCFlags);
Code* ic_after = FindFirstIC(f->shared()->code(), Code::LOAD_IC);
- CHECK(ic_after->ic_state() == POLYMORPHIC);
+ if (FLAG_vector_ics) {
+ CheckVectorIC(f, 0, POLYMORPHIC);
+ CHECK(ic_after->ic_state() == DEFAULT);
+ } else {
+ CHECK(ic_after->ic_state() == POLYMORPHIC);
+ }
}
i::FLAG_stress_compaction = false;
i::FLAG_allow_natives_syntax = true;
i::FLAG_flush_code_incrementally = true;
+ // TODO(mvstanton): vector ics need weak support.
+ if (i::FLAG_vector_ics) return;
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
TEST(EnsureAllocationSiteDependentCodesProcessed) {
+ // TODO(mvstanton): vector ics need weak support!
+ if (FLAG_vector_ics) return;
if (i::FLAG_always_opt || !i::FLAG_crankshaft) return;
i::FLAG_allow_natives_syntax = true;
CcTest::InitializeVM();
TEST(CellsInOptimizedCodeAreWeak) {
if (i::FLAG_always_opt || !i::FLAG_crankshaft) return;
+ // TODO(mvstanton): vector-ics need to treat maps weakly.
+ if (i::FLAG_vector_ics) return;
i::FLAG_weak_embedded_objects_in_optimized_code = true;
i::FLAG_allow_natives_syntax = true;
CcTest::InitializeVM();
TEST(ObjectsInOptimizedCodeAreWeak) {
+ // TODO(mvstanton): vector ics need weak support!
+ if (FLAG_vector_ics) return;
if (i::FLAG_always_opt || !i::FLAG_crankshaft) return;
i::FLAG_weak_embedded_objects_in_optimized_code = true;
i::FLAG_allow_natives_syntax = true;
TEST(NoWeakHashTableLeakWithIncrementalMarking) {
if (i::FLAG_always_opt || !i::FLAG_crankshaft) return;
if (!i::FLAG_incremental_marking) return;
+ // TODO(mvstanton): vector ics need weak support.
+ if (FLAG_vector_ics) return;
i::FLAG_weak_embedded_objects_in_optimized_code = true;
i::FLAG_allow_natives_syntax = true;
i::FLAG_compilation_cache = false;
"bar%d();"
"bar%d();"
"bar%d();"
- "%%OptimizeFunctionOnNextCall(bar%d);"
- "bar%d();", i, i, i, i, i, i, i, i);
+             "%%OptimizeFunctionOnNextCall(bar%d);"
+ "bar%d();",
+ i, i, i, i, i, i, i, i);
CompileRun(source.start());
}
heap->CollectAllGarbage(i::Heap::kNoGCFlags);
TEST(WeakMapInPolymorphicLoadIC) {
+ // TODO(mvstanton): vector-ics need to treat maps weakly.
+ if (i::FLAG_vector_ics) return;
CheckWeakness(
"function loadIC(obj) {"
" return obj.name;"