StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
+ CreateWeakCellStub::GenerateAheadOfTime(isolate);
BinaryOpICStub::GenerateAheadOfTime(isolate);
BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
}
// The checks. First, does r1 match the recorded monomorphic target?
__ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
__ ldr(r4, FieldMemOperand(r4, FixedArray::kHeaderSize));
- __ cmp(r1, r4);
+
+ // We don't know that we have a weak cell. We might have a private symbol
+ // or an AllocationSite, but the memory is safe to examine.
+ // AllocationSite::kTransitionInfoOffset - contains a Smi or pointer to
+ // FixedArray.
+ // WeakCell::kValueOffset - contains a JSFunction or Smi(0)
+ // Symbol::kHashFieldSlot - if the low bit is 1, then the hash is not
+ // computed, meaning that it can't appear to be a pointer. If the low bit is
+ // 0, then hash is computed, but the 0 bit prevents the field from appearing
+ // to be a pointer.
+ STATIC_ASSERT(WeakCell::kSize >= kPointerSize);
+ STATIC_ASSERT(AllocationSite::kTransitionInfoOffset ==
+ WeakCell::kValueOffset &&
+ WeakCell::kValueOffset == Symbol::kHashFieldSlot);
+
+ __ ldr(r5, FieldMemOperand(r4, WeakCell::kValueOffset));
+ __ cmp(r1, r5);
__ b(ne, &extra_checks_or_miss);
+ // The compare above could have been a SMI/SMI comparison. Guard against this
+ // convincing us that we have a monomorphic JSFunction.
+ __ JumpIfSmi(r1, &extra_checks_or_miss);
+
__ bind(&have_js_function);
if (CallAsMethod()) {
EmitContinueIfStrictOrNative(masm, &cont);
__ add(r4, r4, Operand(Smi::FromInt(1)));
__ str(r4, FieldMemOperand(r2, with_types_offset));
- // Store the function.
- __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
- __ add(r4, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ str(r1, MemOperand(r4, 0));
+ // Store the function. Use a stub since we need a frame for allocation.
+ // r2 - vector
+ // r3 - slot
+ // r1 - function
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ CreateWeakCellStub create_stub(masm->isolate());
+ __ Push(r1);
+ __ CallStub(&create_stub);
+ __ Pop(r1);
+ }
- // Update the write barrier.
- __ mov(r5, r1);
- __ RecordWrite(r2, r4, r5, kLRHasNotBeenSaved, kDontSaveFPRegs,
- EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
__ jmp(&have_js_function);
// We are here because tracing is on or we encountered a MISS case we can't
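Aside: the safety argument in the comment block above can be checked in a standalone sketch. The types below are made up for illustration; only the offset-aliasing invariant and V8's tagging convention (heap pointers have bit 0 set, Smis have it clear) come from the patch.

    #include <cstddef>
    #include <cstdint>

    using Tagged = uintptr_t;  // bit 0 set => heap pointer, clear => Smi

    struct WeakCell       { Tagged value; };            // JSFunction or Smi(0)
    struct AllocationSite { Tagged transition_info; };  // Smi or FixedArray ptr
    struct Symbol         { Tagged hash_field; };       // never reads as a pointer

    // One load is safe for all three layouts because the fields alias:
    static_assert(offsetof(WeakCell, value) ==
                      offsetof(AllocationSite, transition_info),
                  "feedback slot field must sit at one shared offset");

    // Fast-path check modeled after the stub: load the shared field and compare
    // with the callee. The follow-up smi guard matters because both sides could
    // be Smis and compare equal (e.g. a cleared WeakCell holding Smi(0)).
    inline bool MonomorphicHit(const WeakCell* feedback, Tagged callee) {
      Tagged loaded = feedback->value;               // WeakCell::kValueOffset load
      return loaded == callee && (callee & 1) != 0;  // the JumpIfSmi equivalent
    }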
void CreateAllocationSiteDescriptor::Initialize(
CallInterfaceDescriptorData* data) {
Register registers[] = {cp, r2, r3};
- data->Initialize(arraysize(registers), registers, NULL);
+ Representation representations[] = {Representation::Tagged(),
+ Representation::Tagged(),
+ Representation::Smi()};
+ data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void CreateWeakCellDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {cp, r2, r3, r1};
+ Representation representations[] = {
+ Representation::Tagged(), Representation::Tagged(), Representation::Smi(),
+ Representation::Tagged()};
+ data->Initialize(arraysize(registers), registers, representations);
}
StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
+ CreateWeakCellStub::GenerateAheadOfTime(isolate);
BinaryOpICStub::GenerateAheadOfTime(isolate);
StoreRegistersStateStub::GenerateAheadOfTime(isolate);
RestoreRegistersStateStub::GenerateAheadOfTime(isolate);
Operand::UntagSmiAndScale(index, kPointerSizeLog2));
__ Ldr(x4, FieldMemOperand(x4, FixedArray::kHeaderSize));
- __ Cmp(x4, function);
+ // We don't know that we have a weak cell. We might have a private symbol
+ // or an AllocationSite, but the memory is safe to examine.
+ // AllocationSite::kTransitionInfoOffset - contains a Smi or pointer to
+ // FixedArray.
+ // WeakCell::kValueOffset - contains a JSFunction or Smi(0)
+ // Symbol::kHashFieldSlot - if the low bit is 1, then the hash is not
+ // computed, meaning that it can't appear to be a pointer. If the low bit is
+ // 0, then hash is computed, but the 0 bit prevents the field from appearing
+ // to be a pointer.
+ STATIC_ASSERT(WeakCell::kSize >= kPointerSize);
+ STATIC_ASSERT(AllocationSite::kTransitionInfoOffset ==
+ WeakCell::kValueOffset &&
+ WeakCell::kValueOffset == Symbol::kHashFieldSlot);
+
+ __ Ldr(x5, FieldMemOperand(x4, WeakCell::kValueOffset));
+ __ Cmp(x5, function);
__ B(ne, &extra_checks_or_miss);
+ // The compare above could have been a SMI/SMI comparison. Guard against this
+ // convincing us that we have a monomorphic JSFunction.
+ __ JumpIfSmi(function, &extra_checks_or_miss);
+
__ bind(&have_js_function);
if (CallAsMethod()) {
EmitContinueIfStrictOrNative(masm, &cont);
__ Adds(x4, x4, Operand(Smi::FromInt(1)));
__ Str(x4, FieldMemOperand(feedback_vector, with_types_offset));
- // Store the function.
- __ Add(x4, feedback_vector,
- Operand::UntagSmiAndScale(index, kPointerSizeLog2));
- __ Str(function, FieldMemOperand(x4, FixedArray::kHeaderSize));
-
- __ Add(x4, feedback_vector,
- Operand::UntagSmiAndScale(index, kPointerSizeLog2));
- __ Add(x4, x4, FixedArray::kHeaderSize - kHeapObjectTag);
- __ Str(function, MemOperand(x4, 0));
+ // Store the function. Use a stub since we need a frame for allocation.
+ // x2 - vector
+ // x3 - slot
+ // x1 - function
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ CreateWeakCellStub create_stub(masm->isolate());
+ __ Push(function);
+ __ CallStub(&create_stub);
+ __ Pop(function);
+ }
- // Update the write barrier.
- __ Mov(x5, function);
- __ RecordWrite(feedback_vector, x4, x5, kLRHasNotBeenSaved, kDontSaveFPRegs,
- EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
__ B(&have_js_function);
// We are here because tracing is on or we encountered a MISS case we can't
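The FrameScope block above (mirrored in the arm, ia32, and x64 hunks) follows the usual pattern for calling an allocating stub from a platform stub: presumably the INTERNAL frame is needed so a GC triggered by the allocation can walk this frame, and the function register is spilled so the collector can relocate the JSFunction. Schematically:

    {  // open an INTERNAL frame; allocation inside the stub may GC
      FrameScope scope(masm, StackFrame::INTERNAL);
      CreateWeakCellStub create_stub(masm->isolate());
      __ Push(function);          // keep the callee visible to the GC
      __ CallStub(&create_stub);  // consumes vector/slot/function per descriptor
      __ Pop(function);           // reload the (possibly moved) callee
    }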
// x2: feedback vector
// x3: call feedback slot
Register registers[] = {cp, x2, x3};
- data->Initialize(arraysize(registers), registers, NULL);
+ Representation representations[] = {Representation::Tagged(),
+ Representation::Tagged(),
+ Representation::Smi()};
+ data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void CreateWeakCellDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ // cp: context
+ // x2: feedback vector
+ // x3: call feedback slot
+ // x1: tagged value to put in the weak cell
+ Register registers[] = {cp, x2, x3, x1};
+ Representation representations[] = {
+ Representation::Tagged(), Representation::Tagged(), Representation::Smi(),
+ Representation::Tagged()};
+ data->Initialize(arraysize(registers), registers, representations);
}
template <>
HValue* CodeStubGraphBuilder<CreateAllocationSiteStub>::BuildCodeStub() {
+ // This stub is performance sensitive; the generated code must be tuned
+ // so that it doesn't build an eager frame.
+ info()->MarkMustNotHaveEagerFrame();
+
HValue* size = Add<HConstant>(AllocationSite::kSize);
HInstruction* object = Add<HAllocate>(size, HType::JSObject(), TENURED,
JS_OBJECT_TYPE);
}
+template <>
+HValue* CodeStubGraphBuilder<CreateWeakCellStub>::BuildCodeStub() {
+ // This stub is performance sensitive; the generated code must be tuned
+ // so that it doesn't build an eager frame.
+ info()->MarkMustNotHaveEagerFrame();
+
+ HValue* size = Add<HConstant>(WeakCell::kSize);
+ HInstruction* object =
+ Add<HAllocate>(size, HType::JSObject(), TENURED, JS_OBJECT_TYPE);
+
+ Handle<Map> weak_cell_map = isolate()->factory()->weak_cell_map();
+ AddStoreMapConstant(object, weak_cell_map);
+
+ HInstruction* value = GetParameter(CreateWeakCellDescriptor::kValueIndex);
+ Add<HStoreNamedField>(object, HObjectAccess::ForWeakCellValue(), value);
+ Add<HStoreNamedField>(object, HObjectAccess::ForWeakCellNext(),
+ graph()->GetConstantUndefined());
+
+ HInstruction* feedback_vector =
+ GetParameter(CreateWeakCellDescriptor::kVectorIndex);
+ HInstruction* slot = GetParameter(CreateWeakCellDescriptor::kSlotIndex);
+ Add<HStoreKeyed>(feedback_vector, slot, object, FAST_ELEMENTS,
+ INITIALIZING_STORE);
+ return graph()->GetConstant0();
+}
+
+
+Handle<Code> CreateWeakCellStub::GenerateCode() { return DoGenerateCode(this); }
+
+
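Read as a store sequence, the builder boils down to the following (a pseudocode summary of the hunk above, not additional API):

    cell = AllocateTenured(WeakCell::kSize)   // old space; no eager frame needed
    cell.map   = weak_cell_map
    cell.value = value                        // the function being recorded
    cell.next  = undefined                    // not yet chained by the GC
    vector[slot] = cell                       // FAST_ELEMENTS, INITIALIZING_STORE
    return Smi(0)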
template <>
HValue* CodeStubGraphBuilder<LoadScriptContextFieldStub>::BuildCodeStub() {
int context_index = casted_stub()->context_index();
void CreateAllocationSiteStub::InitializeDescriptor(CodeStubDescriptor* d) {}
+void CreateWeakCellStub::InitializeDescriptor(CodeStubDescriptor* d) {}
+
+
void RegExpConstructResultStub::InitializeDescriptor(
CodeStubDescriptor* descriptor) {
descriptor->Initialize(
}
+void CreateWeakCellStub::GenerateAheadOfTime(Isolate* isolate) {
+ CreateWeakCellStub stub(isolate);
+ stub.GetCode();
+}
+
+
void StoreElementStub::Generate(MacroAssembler* masm) {
switch (elements_kind()) {
case FAST_ELEMENTS:
V(BinaryOpWithAllocationSite) \
V(CompareNilIC) \
V(CreateAllocationSite) \
+ V(CreateWeakCell) \
V(ElementsTransitionAndStore) \
V(FastCloneShallowArray) \
V(FastCloneShallowObject) \
};
+class CreateWeakCellStub : public HydrogenCodeStub {
+ public:
+ explicit CreateWeakCellStub(Isolate* isolate) : HydrogenCodeStub(isolate) {}
+
+ static void GenerateAheadOfTime(Isolate* isolate);
+
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(CreateWeakCell);
+ DEFINE_HYDROGEN_CODE_STUB(CreateWeakCell, HydrogenCodeStub);
+};
+
+
class InstanceofStub: public PlatformCodeStub {
public:
enum Flags {
shared->ResetForNewContext(heap->global_ic_age());
}
if (FLAG_cleanup_code_caches_at_gc) {
- shared->ClearTypeFeedbackInfo();
+ shared->ClearTypeFeedbackInfoAtGCTime();
}
if (FLAG_cache_optimized_code && FLAG_flush_optimized_code_cache &&
!shared->optimized_code_map()->IsSmi()) {
return HObjectAccess(kInobject, WeakCell::kValueOffset);
}
+ static HObjectAccess ForWeakCellNext() {
+ return HObjectAccess(kInobject, WeakCell::kNextOffset);
+ }
+
static HObjectAccess ForAllocationMementoSite() {
return HObjectAccess(kInobject, AllocationMemento::kAllocationSiteOffset);
}
ParameterCount actual(argc);
// The checks. First, does edi match the recorded monomorphic target?
- __ cmp(edi, FieldOperand(ebx, edx, times_half_pointer_size,
+ __ mov(ecx, FieldOperand(ebx, edx, times_half_pointer_size,
FixedArray::kHeaderSize));
+
+ // We don't know that we have a weak cell. We might have a private symbol
+ // or an AllocationSite, but the memory is safe to examine.
+ // AllocationSite::kTransitionInfoOffset - contains a Smi or pointer to
+ // FixedArray.
+ // WeakCell::kValueOffset - contains a JSFunction or Smi(0)
+ // Symbol::kHashFieldSlot - if the low bit is 1, then the hash is not
+ // computed, meaning that it can't appear to be a pointer. If the low bit is
+ // 0, then hash is computed, but the 0 bit prevents the field from appearing
+ // to be a pointer.
+ STATIC_ASSERT(WeakCell::kSize >= kPointerSize);
+ STATIC_ASSERT(AllocationSite::kTransitionInfoOffset ==
+ WeakCell::kValueOffset &&
+ WeakCell::kValueOffset == Symbol::kHashFieldSlot);
+
+ __ cmp(edi, FieldOperand(ecx, WeakCell::kValueOffset));
__ j(not_equal, &extra_checks_or_miss);
+ // The compare above could have been a SMI/SMI comparison. Guard against this
+ // convincing us that we have a monomorphic JSFunction.
+ __ JumpIfSmi(edi, &extra_checks_or_miss);
+
__ bind(&have_js_function);
if (CallAsMethod()) {
EmitContinueIfStrictOrNative(masm, &cont);
__ bind(&extra_checks_or_miss);
Label uninitialized, miss;
- __ mov(ecx, FieldOperand(ebx, edx, times_half_pointer_size,
- FixedArray::kHeaderSize));
__ cmp(ecx, Immediate(TypeFeedbackVector::MegamorphicSentinel(isolate)));
__ j(equal, &slow_start);
// Update stats.
__ add(FieldOperand(ebx, with_types_offset), Immediate(Smi::FromInt(1)));
- // Store the function.
- __ mov(
- FieldOperand(ebx, edx, times_half_pointer_size, FixedArray::kHeaderSize),
- edi);
+ // Store the function. Use a stub since we need a frame for allocation.
+ // ebx - vector
+ // edx - slot
+ // edi - function
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ CreateWeakCellStub create_stub(isolate);
+ __ push(edi);
+ __ CallStub(&create_stub);
+ __ pop(edi);
+ }
- // Update the write barrier.
- __ mov(eax, edi);
- __ RecordWriteArray(ebx, eax, edx, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
__ jmp(&have_js_function);
// We are here because tracing is on or we encountered a MISS case we can't
// It is important that the store buffer overflow stubs are generated first.
ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
+ CreateWeakCellStub::GenerateAheadOfTime(isolate);
BinaryOpICStub::GenerateAheadOfTime(isolate);
BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
}
void CreateAllocationSiteDescriptor::Initialize(
CallInterfaceDescriptorData* data) {
Register registers[] = {esi, ebx, edx};
- data->Initialize(arraysize(registers), registers, NULL);
+ Representation representations[] = {Representation::Tagged(),
+ Representation::Tagged(),
+ Representation::Smi()};
+ data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void CreateWeakCellDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {esi, ebx, edx, edi};
+ Representation representations[] = {
+ Representation::Tagged(), Representation::Tagged(), Representation::Smi(),
+ Representation::Tagged()};
+ data->Initialize(arraysize(registers), registers, representations);
}
// Hand-coded MISS handling is easier if CallIC slots don't contain smis.
DCHECK(!feedback->IsSmi());
- if (feedback->IsJSFunction() || !function->IsJSFunction()) {
+ if (feedback->IsWeakCell() || !function->IsJSFunction()) {
// We are going generic.
nexus->ConfigureGeneric();
} else {
V(FastCloneShallowArray) \
V(FastCloneShallowObject) \
V(CreateAllocationSite) \
+ V(CreateWeakCell) \
V(CallFunction) \
V(CallFunctionWithFeedback) \
V(CallFunctionWithFeedbackAndVector) \
};
+class CreateWeakCellDescriptor : public CallInterfaceDescriptor {
+ public:
+ enum ParameterIndices {
+ kVectorIndex,
+ kSlotIndex,
+ kValueIndex,
+ kParameterCount
+ };
+
+ DECLARE_DESCRIPTOR(CreateWeakCellDescriptor, CallInterfaceDescriptor)
+};
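For cross-reference, the per-port Initialize() bodies in this patch bind these parameter indices to registers as follows (context register first, then the declared parameters):

    //          context  vector  slot  value
    // arm      cp       r2      r3    r1
    // arm64    cp       x2      x3    x1
    // ia32     esi      ebx     edx   edi
    // x64      rsi      rbx     rdx   rdi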
+
+
class CallFunctionDescriptor : public CallInterfaceDescriptor {
public:
DECLARE_DESCRIPTOR(CallFunctionDescriptor, CallInterfaceDescriptor)
void SharedFunctionInfo::ClearTypeFeedbackInfo() {
feedback_vector()->ClearSlots(this);
+ feedback_vector()->ClearICSlots(this);
+}
+
+
+void SharedFunctionInfo::ClearTypeFeedbackInfoAtGCTime() {
+ feedback_vector()->ClearSlots(this);
+ feedback_vector()->ClearICSlotsAtGCTime(this);
}
// Removed a specific optimized code object from the optimized code map.
void EvictFromOptimizedCodeMap(Code* optimized_code, const char* reason);
+ // Unconditionally clear the type feedback vector (including vector ICs).
void ClearTypeFeedbackInfo();
+ // Clear the type feedback vector with a more subtle policy at GC time.
+ void ClearTypeFeedbackInfoAtGCTime();
+
// Trims the optimized code map after entries have been removed.
void TrimOptimizedCodeMap(int shrink_by);
// This logic is copied from
// StaticMarkingVisitor<StaticVisitor>::VisitCodeTarget.
-// TODO(mvstanton): with weak handling of all vector ics, this logic should
-// actually be completely eliminated and we no longer need to clear the
-// vector ICs.
-static bool ClearLogic(Heap* heap, int ic_age, Code::Kind kind,
- InlineCacheState state) {
+static bool ClearLogic(Heap* heap, int ic_age) {
if (FLAG_cleanup_code_caches_at_gc &&
- (kind == Code::CALL_IC || heap->flush_monomorphic_ics() ||
+ (heap->flush_monomorphic_ics() ||
// TODO(mvstanton): is this ic_age granular enough? it comes from
// the SharedFunctionInfo which may change on a different schedule
// than ic targets.
}
}
}
+}
- slots = ICSlots();
- if (slots == 0) return;
- // Now clear vector-based ICs.
- // Try and pass the containing code (the "host").
- Heap* heap = isolate->heap();
- Code* host = shared->code();
+void TypeFeedbackVector::ClearICSlotsImpl(SharedFunctionInfo* shared,
+ bool force_clear) {
+ Heap* heap = GetIsolate()->heap();
+
// I'm not sure yet if this ic age is the correct one.
int ic_age = shared->ic_age();
+
+ if (!force_clear && !ClearLogic(heap, ic_age)) return;
+
+ int slots = ICSlots();
+ Code* host = shared->code();
+ Object* uninitialized_sentinel =
+ TypeFeedbackVector::RawUninitializedSentinel(heap);
for (int i = 0; i < slots; i++) {
FeedbackVectorICSlot slot(i);
    Object* obj = Get(slot);
+    // Uninitialized slots have no feedback to clear.
+    if (obj == uninitialized_sentinel) continue;
    Code::Kind kind = GetKind(slot);
if (kind == Code::CALL_IC) {
CallICNexus nexus(this, slot);
- if (ClearLogic(heap, ic_age, kind, nexus.StateFromFeedback())) {
- nexus.Clear(host);
- }
+ nexus.Clear(host);
} else if (kind == Code::LOAD_IC) {
LoadICNexus nexus(this, slot);
- if (ClearLogic(heap, ic_age, kind, nexus.StateFromFeedback())) {
- nexus.Clear(host);
- }
+ nexus.Clear(host);
} else if (kind == Code::KEYED_LOAD_IC) {
KeyedLoadICNexus nexus(this, slot);
- if (ClearLogic(heap, ic_age, kind, nexus.StateFromFeedback())) {
- nexus.Clear(host);
- }
+ nexus.Clear(host);
}
}
}
if (feedback == *vector()->MegamorphicSentinel(isolate)) {
return GENERIC;
- } else if (feedback->IsAllocationSite() || feedback->IsJSFunction()) {
+ } else if (feedback->IsAllocationSite() || feedback->IsWeakCell()) {
return MONOMORPHIC;
}
void CallICNexus::ConfigureMonomorphic(Handle<JSFunction> function) {
- SetFeedback(*function);
+ Handle<WeakCell> new_cell = GetIsolate()->factory()->NewWeakCell(function);
+ SetFeedback(*new_cell);
}
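The behavioral shift in one line: monomorphic call feedback is now held through a WeakCell instead of a strong pointer, so the vector no longer keeps call targets alive:

    // before: SetFeedback(*function);                       // strong edge to the JSFunction
    // after:  SetFeedback(*factory->NewWeakCell(function)); // weak edge; the GC
    //                                                       // may clear the cell instead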
- // Clears the vector slots and the vector ic slots.
+ // Clears the vector slots. Vector IC slots are cleared separately below.
  void ClearSlots(SharedFunctionInfo* shared);
+ void ClearICSlots(SharedFunctionInfo* shared) {
+ ClearICSlotsImpl(shared, true);
+ }
+ void ClearICSlotsAtGCTime(SharedFunctionInfo* shared) {
+ ClearICSlotsImpl(shared, false);
+ }
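Summarizing the two entry points defined above:

    // ClearICSlots(shared)         -> ClearICSlotsImpl(shared, force_clear=true)
    //   unconditional; used by SharedFunctionInfo::ClearTypeFeedbackInfo().
    // ClearICSlotsAtGCTime(shared) -> ClearICSlotsImpl(shared, force_clear=false)
    //   clears only when ClearLogic(heap, ic_age) approves; used from the GC path.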
// The object that indicates an uninitialized cache.
static inline Handle<Object> UninitializedSentinel(Isolate* isolate);
typedef BitSetComputer<VectorICKind, kVectorICKindBits, kSmiValueSize,
uint32_t> VectorICComputer;
+ void ClearICSlotsImpl(SharedFunctionInfo* shared, bool force_clear);
+
DISALLOW_IMPLICIT_CONSTRUCTORS(TypeFeedbackVector);
};
Handle<Object> TypeFeedbackOracle::GetInfo(FeedbackVectorICSlot slot) {
DCHECK(slot.ToInt() >= 0 && slot.ToInt() < feedback_vector_->length());
+ Handle<Object> undefined =
+ Handle<Object>::cast(isolate()->factory()->undefined_value());
Object* obj = feedback_vector_->Get(slot);
+
+ // Vector-based ICs do not embed direct pointers to maps or functions;
+ // a WeakCell is always used instead.
+ if (obj->IsWeakCell()) {
+ WeakCell* cell = WeakCell::cast(obj);
+ if (cell->cleared()) return undefined;
+ obj = cell->value();
+ }
+
if (!obj->IsJSFunction() ||
!CanRetainOtherContext(JSFunction::cast(obj), *native_context_)) {
return Handle<Object>(obj, isolate());
}
- return Handle<Object>::cast(isolate()->factory()->undefined_value());
+ return undefined;
}
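The resulting lookup order in GetInfo, as pseudocode of the hunk above:

    // obj = vector->Get(slot)
    // if obj is a WeakCell:                  // peel the weak indirection first
    //   if cell->cleared(): return undefined
    //   obj = cell->value()
    // if obj is a JSFunction from another native context:
    //   return undefined                     // don't leak cross-context functions
    // return obj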
// The checks. First, does rdi match the recorded monomorphic target?
__ SmiToInteger32(rdx, rdx);
- __ cmpp(rdi, FieldOperand(rbx, rdx, times_pointer_size,
- FixedArray::kHeaderSize));
+ __ movp(rcx,
+ FieldOperand(rbx, rdx, times_pointer_size, FixedArray::kHeaderSize));
+
+ // We don't know that we have a weak cell. We might have a private symbol
+ // or an AllocationSite, but the memory is safe to examine.
+ // AllocationSite::kTransitionInfoOffset - contains a Smi or pointer to
+ // FixedArray.
+ // WeakCell::kValueOffset - contains a JSFunction or Smi(0)
+ // Symbol::kHashFieldSlot - if the low bit is 1, then the hash is not
+ // computed, meaning that it can't appear to be a pointer. If the low bit is
+ // 0, then hash is computed, but the 0 bit prevents the field from appearing
+ // to be a pointer.
+ STATIC_ASSERT(WeakCell::kSize >= kPointerSize);
+ STATIC_ASSERT(AllocationSite::kTransitionInfoOffset ==
+ WeakCell::kValueOffset &&
+ WeakCell::kValueOffset == Symbol::kHashFieldSlot);
+
+ __ cmpp(rdi, FieldOperand(rcx, WeakCell::kValueOffset));
__ j(not_equal, &extra_checks_or_miss);
+ // The compare above could have been a SMI/SMI comparison. Guard against this
+ // convincing us that we have a monomorphic JSFunction.
+ __ JumpIfSmi(rdi, &extra_checks_or_miss);
+
__ bind(&have_js_function);
if (CallAsMethod()) {
EmitContinueIfStrictOrNative(masm, &cont);
__ bind(&extra_checks_or_miss);
Label uninitialized, miss;
- __ movp(rcx, FieldOperand(rbx, rdx, times_pointer_size,
- FixedArray::kHeaderSize));
__ Cmp(rcx, TypeFeedbackVector::MegamorphicSentinel(isolate));
__ j(equal, &slow_start);
// Update stats.
__ SmiAddConstant(FieldOperand(rbx, with_types_offset), Smi::FromInt(1));
- // Store the function.
- __ movp(FieldOperand(rbx, rdx, times_pointer_size, FixedArray::kHeaderSize),
- rdi);
+ // Store the function. Use a stub since we need a frame for allocation.
+ // rbx - vector
+ // rdx - slot (needs to be in smi form)
+ // rdi - function
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ CreateWeakCellStub create_stub(isolate);
+
+ __ Integer32ToSmi(rdx, rdx);
+ __ Push(rdi);
+ __ CallStub(&create_stub);
+ __ Pop(rdi);
+ }
- // Update the write barrier.
- __ movp(rax, rdi);
- __ RecordWriteArray(rbx, rax, rdx, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
__ jmp(&have_js_function);
// We are here because tracing is on or we encountered a MISS case we can't
// It is important that the store buffer overflow stubs are generated first.
ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
+ CreateWeakCellStub::GenerateAheadOfTime(isolate);
BinaryOpICStub::GenerateAheadOfTime(isolate);
BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
}
void CreateAllocationSiteDescriptor::Initialize(
CallInterfaceDescriptorData* data) {
Register registers[] = {rsi, rbx, rdx};
- data->Initialize(arraysize(registers), registers, NULL);
+ Representation representations[] = {Representation::Tagged(),
+ Representation::Tagged(),
+ Representation::Smi()};
+ data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void CreateWeakCellDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {rsi, rbx, rdx, rdi};
+ Representation representations[] = {
+ Representation::Tagged(), Representation::Tagged(), Representation::Smi(),
+ Representation::Tagged()};
+ data->Initialize(arraysize(registers), registers, representations);
}
CHECK_EQ(expected_slots, feedback_vector->Slots());
CHECK_EQ(expected_ic_slots, feedback_vector->ICSlots());
FeedbackVectorICSlot slot_for_a(0);
- CHECK(feedback_vector->Get(slot_for_a)->IsJSFunction());
+ Object* object = feedback_vector->Get(slot_for_a);
+ CHECK(object->IsWeakCell() &&
+ WeakCell::cast(object)->value()->IsJSFunction());
CompileRun("%OptimizeFunctionOnNextCall(f); f(fun1);");
// of the full code.
CHECK(f->IsOptimized());
CHECK(f->shared()->has_deoptimization_support());
- CHECK(f->shared()->feedback_vector()->Get(slot_for_a)->IsJSFunction());
+ object = f->shared()->feedback_vector()->Get(slot_for_a);
+ CHECK(object->IsWeakCell() &&
+ WeakCell::cast(object)->value()->IsJSFunction());
}
Handle<JSFunction> f = v8::Utils::OpenHandle(
*v8::Handle<v8::Function>::Cast(CcTest::global()->Get(v8_str("f"))));
// There should be one IC.
- Code* code = f->shared()->code();
+ Handle<Code> code = handle(f->shared()->code(), isolate);
TypeFeedbackInfo* feedback_info =
TypeFeedbackInfo::cast(code->type_feedback_info());
CHECK_EQ(1, feedback_info->ic_total_count());
CHECK_EQ(0, feedback_info->ic_with_type_info_count());
CHECK_EQ(0, feedback_info->ic_generic_count());
- TypeFeedbackVector* feedback_vector = f->shared()->feedback_vector();
+ Handle<TypeFeedbackVector> feedback_vector =
+ handle(f->shared()->feedback_vector(), isolate);
+ int ic_slot = 0;
+ CallICNexus nexus(feedback_vector, FeedbackVectorICSlot(ic_slot));
CHECK_EQ(1, feedback_vector->ic_with_type_info_count());
CHECK_EQ(0, feedback_vector->ic_generic_count());
// Now send the information generic.
CompileRun("f(Object);");
- feedback_vector = f->shared()->feedback_vector();
CHECK_EQ(0, feedback_vector->ic_with_type_info_count());
CHECK_EQ(1, feedback_vector->ic_generic_count());
- // A collection will make the site uninitialized again.
+ // A collection does not clear the generic state.
heap->CollectAllGarbage(i::Heap::kNoGCFlags);
- feedback_vector = f->shared()->feedback_vector();
CHECK_EQ(0, feedback_vector->ic_with_type_info_count());
- CHECK_EQ(0, feedback_vector->ic_generic_count());
+ CHECK_EQ(1, feedback_vector->ic_generic_count());
// The Array function is special. A call to array remains monomorphic
// and isn't cleared by gc because an AllocationSite is being held.
+ // Clear the IC manually in order to test this case.
+ nexus.Clear(*code);
CompileRun("f(Array);");
- feedback_vector = f->shared()->feedback_vector();
CHECK_EQ(1, feedback_vector->ic_with_type_info_count());
CHECK_EQ(0, feedback_vector->ic_generic_count());
- int ic_slot = 0;
- CHECK(
- feedback_vector->Get(FeedbackVectorICSlot(ic_slot))->IsAllocationSite());
+
+ CHECK(nexus.GetFeedback()->IsAllocationSite());
heap->CollectAllGarbage(i::Heap::kNoGCFlags);
- feedback_vector = f->shared()->feedback_vector();
CHECK_EQ(1, feedback_vector->ic_with_type_info_count());
CHECK_EQ(0, feedback_vector->ic_generic_count());
- CHECK(
- feedback_vector->Get(FeedbackVectorICSlot(ic_slot))->IsAllocationSite());
+ CHECK(nexus.GetFeedback()->IsAllocationSite());
}
CompileRun("f(function() { return 16; })");
CHECK_EQ(GENERIC, nexus.StateFromFeedback());
- // After a collection, state should be reset to UNINITIALIZED.
+ // After a collection, state should remain GENERIC.
heap->CollectAllGarbage(i::Heap::kNoGCFlags);
- CHECK_EQ(UNINITIALIZED, nexus.StateFromFeedback());
+ CHECK_EQ(GENERIC, nexus.StateFromFeedback());
- // Array is special. It will remain monomorphic across gcs and it contains an
- // AllocationSite.
+ // A call to Array is special; it records an AllocationSite as feedback.
+ // Clear the IC manually in order to test this case.
+ nexus.Clear(f->shared()->code());
CompileRun("f(Array)");
CHECK_EQ(MONOMORPHIC, nexus.StateFromFeedback());
- CHECK(feedback_vector->Get(FeedbackVectorICSlot(slot))->IsAllocationSite());
+ CHECK(nexus.GetFeedback()->IsAllocationSite());
heap->CollectAllGarbage(i::Heap::kNoGCFlags);
CHECK_EQ(MONOMORPHIC, nexus.StateFromFeedback());
}
-TEST(IncrementalMarkingClearsTypeFeedbackInfo) {
+TEST(IncrementalMarkingPreservesMonomorphicCallIC) {
if (i::FLAG_always_opt) return;
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
CHECK_EQ(expected_slots, feedback_vector->ICSlots());
int slot1 = 0;
int slot2 = 1;
- CHECK(feedback_vector->Get(FeedbackVectorICSlot(slot1))->IsJSFunction());
- CHECK(feedback_vector->Get(FeedbackVectorICSlot(slot2))->IsJSFunction());
+ CHECK(feedback_vector->Get(FeedbackVectorICSlot(slot1))->IsWeakCell());
+ CHECK(feedback_vector->Get(FeedbackVectorICSlot(slot2))->IsWeakCell());
SimulateIncrementalMarking(CcTest::heap());
CcTest::heap()->CollectAllGarbage(Heap::kNoGCFlags);
- CHECK_EQ(feedback_vector->Get(FeedbackVectorICSlot(slot1)),
- *TypeFeedbackVector::UninitializedSentinel(CcTest::i_isolate()));
- CHECK_EQ(feedback_vector->Get(FeedbackVectorICSlot(slot2)),
- *TypeFeedbackVector::UninitializedSentinel(CcTest::i_isolate()));
+ CHECK(!WeakCell::cast(feedback_vector->Get(FeedbackVectorICSlot(slot1)))
+ ->cleared());
+ CHECK(!WeakCell::cast(feedback_vector->Get(FeedbackVectorICSlot(slot2)))
+ ->cleared());
}