{
JSInterfaceJIT jit;
JumpList failures;
- failures.append(jit.branchPtr(NotEqual, Address(regT0), TrustedImmPtr(globalData->jsStringVPtr)));
- failures.append(jit.branchTest32(NonZero, Address(regT0, OBJECT_OFFSETOF(JSString, m_fiberCount))));
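+ // Identify JSString cells by ClassInfo rather than by vptr; the old fiber-count rope check is subsumed by the null value-pointer test below.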
+ failures.append(jit.branchPtr(NotEqual, Address(regT0, JSCell::classInfoOffset()), TrustedImmPtr(&JSString::s_info)));
// Load string length to regT2, and start the process of loading the data pointer into regT0
jit.load32(Address(regT0, ThunkHelpers::jsStringLengthOffset()), regT2);
jit.loadPtr(Address(regT0, ThunkHelpers::jsStringValueOffset()), regT0);
- jit.loadPtr(Address(regT0, ThunkHelpers::stringImplDataOffset()), regT0);
-
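+ // A null value pointer means the JSString is an unresolved rope; take the slow case.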
+ failures.append(jit.branchTest32(Zero, regT0));
+
// Do an unsigned compare to simultaneously filter negative indices as well as indices that are too large
failures.append(jit.branch32(AboveOrEqual, regT1, regT2));
// Load the character
+ JumpList is16Bit;
+ JumpList cont8Bit;
+ // Load the string flags
+ jit.loadPtr(Address(regT0, ThunkHelpers::stringImplFlagsOffset()), regT2);
+ jit.loadPtr(Address(regT0, ThunkHelpers::stringImplDataOffset()), regT0);
+ is16Bit.append(jit.branchTest32(Zero, regT2, TrustedImm32(ThunkHelpers::stringImpl8BitFlag())));
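+ // 8-bit string: each character occupies one byte.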
+ jit.load8(BaseIndex(regT0, regT1, TimesOne, 0), regT0);
+ cont8Bit.append(jit.jump());
+ is16Bit.link(&jit);
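+ // 16-bit string: each character occupies two bytes.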
jit.load16(BaseIndex(regT0, regT1, TimesTwo, 0), regT0);
-
+ cont8Bit.link(&jit);
+
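+ // Only characters below 0x100 have preallocated single-character strings; anything wider fails over to the slow case.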
failures.append(jit.branch32(AboveOrEqual, regT0, TrustedImm32(0x100)));
jit.move(TrustedImmPtr(globalData->smallStrings.singleCharacterStrings()), regT1);
jit.loadPtr(BaseIndex(regT1, regT0, ScalePtr, 0), regT0);
jit.ret();

failures.link(&jit);
jit.move(TrustedImm32(0), regT0);
jit.ret();
- LinkBuffer patchBuffer(*globalData, &jit);
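+ // This thunk is not owned by any CodeBlock, so the LinkBuffer is tagged with GLOBAL_THUNK_ID instead.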
+ LinkBuffer patchBuffer(*globalData, &jit, GLOBAL_THUNK_ID);
return patchBuffer.finalizeCode();
}
zeroExtend32ToPtr(regT1, regT1);
emitJumpSlowCaseIfNotJSCell(regT0, base);
- addSlowCase(branchPtr(NotEqual, Address(regT0), TrustedImmPtr(m_globalData->jsArrayVPtr)));
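+ // As with strings above, identify JSArray cells by ClassInfo rather than by vptr.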
+ addSlowCase(branchPtr(NotEqual, Address(regT0, JSCell::classInfoOffset()), TrustedImmPtr(&JSArray::s_info)));
loadPtr(Address(regT0, JSArray::storageOffset()), regT2);
addSlowCase(branch32(AboveOrEqual, regT1, Address(regT0, JSArray::vectorLengthOffset())));
loadPtr(BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), regT0);
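+ // A hole in the array storage is the empty JSValue (encoded as zero), which must take the slow case.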
addSlowCase(branchTestPtr(Zero, regT0));
- emitValueProfilingSite(FirstProfilingSite);
+ emitValueProfilingSite();
emitPutVirtualRegister(dst);
}
linkSlowCaseIfNotJSCell(iter, base); // base cell check
Jump nonCell = jump();
linkSlowCase(iter); // base array check
- Jump notString = branchPtr(NotEqual, Address(regT0), TrustedImmPtr(m_globalData->jsStringVPtr));
+ Jump notString = branchPtr(NotEqual, Address(regT0, JSCell::classInfoOffset()), TrustedImmPtr(&JSString::s_info));
emitNakedCall(CodeLocationLabel(m_globalData->getCTIStub(stringGetByValStubGenerator).code()));
Jump failed = branchTestPtr(Zero, regT0);
emitPutVirtualRegister(dst, regT0);
stubCall.addArgument(property, regT2);
stubCall.call(dst);
- emitValueProfilingSite(SubsequentProfilingSite);
+ emitValueProfilingSite();
}
void JIT::compileGetDirectOffset(RegisterID base, RegisterID result, RegisterID offset, RegisterID scratch)
// See comment in op_get_by_val.
zeroExtend32ToPtr(regT1, regT1);
emitJumpSlowCaseIfNotJSCell(regT0, base);
- addSlowCase(branchPtr(NotEqual, Address(regT0), TrustedImmPtr(m_globalData->jsArrayVPtr)));
+ addSlowCase(branchPtr(NotEqual, Address(regT0, JSCell::classInfoOffset()), TrustedImmPtr(&JSArray::s_info)));
addSlowCase(branch32(AboveOrEqual, regT1, Address(regT0, JSArray::vectorLengthOffset())));
loadPtr(Address(regT0, JSArray::storageOffset()), regT2);
stubCall.call();
}
-void JIT::emit_op_put_getter(Instruction* currentInstruction)
-{
- JITStubCall stubCall(this, cti_op_put_getter);
- stubCall.addArgument(currentInstruction[1].u.operand, regT2);
- stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
- stubCall.addArgument(currentInstruction[3].u.operand, regT2);
- stubCall.call();
-}
-
-void JIT::emit_op_put_setter(Instruction* currentInstruction)
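+// put_getter and put_setter are folded into a single put_getter_setter op whose stub call receives both accessor operands.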
+void JIT::emit_op_put_getter_setter(Instruction* currentInstruction)
{
- JITStubCall stubCall(this, cti_op_put_setter);
+ JITStubCall stubCall(this, cti_op_put_getter_setter);
stubCall.addArgument(currentInstruction[1].u.operand, regT2);
stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
stubCall.addArgument(currentInstruction[3].u.operand, regT2);
+ stubCall.addArgument(currentInstruction[4].u.operand, regT2);
stubCall.call();
}
Jump match = jump();
- ASSERT_JIT_OFFSET_UNUSED(protoObj, differenceBetween(info.structureToCompare, protoObj), patchOffsetMethodCheckProtoObj);
- ASSERT_JIT_OFFSET(differenceBetween(info.structureToCompare, protoStructureToCompare), patchOffsetMethodCheckProtoStruct);
- ASSERT_JIT_OFFSET_UNUSED(putFunction, differenceBetween(info.structureToCompare, putFunction), patchOffsetMethodCheckPutFunction);
-
// Link the failure cases here.
notCell.link(this);
structureCheck.link(this);
compileGetByIdHotPath(baseVReg, ident);
match.link(this);
- emitValueProfilingSite(FirstProfilingSite);
+ emitValueProfilingSite(m_bytecodeOffset + OPCODE_LENGTH(op_method_check));
emitPutVirtualRegister(resultVReg);
// We've already generated the following get_by_id, so make sure it's skipped over.
m_bytecodeOffset += OPCODE_LENGTH(op_get_by_id);
+
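+ // Record where the method check's patchable structure, prototype, and function loads live so they can be repatched later.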
+ m_propertyAccessCompilationInfo.last().addMethodCheckInfo(info.structureToCompare, protoObj, protoStructureToCompare, putFunction);
}
void JIT::emitSlow_op_method_check(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
compileGetByIdSlowCase(resultVReg, baseVReg, ident, iter, true);
+ emitValueProfilingSite(m_bytecodeOffset + OPCODE_LENGTH(op_method_check));
// We've already generated the following get_by_id, so make sure it's skipped over.
m_bytecodeOffset += OPCODE_LENGTH(op_get_by_id);
emitGetVirtualRegister(baseVReg, regT0);
compileGetByIdHotPath(baseVReg, ident);
- emitValueProfilingSite(FirstProfilingSite);
+ emitValueProfilingSite();
emitPutVirtualRegister(resultVReg);
}
BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath);
Label hotPathBegin(this);
- m_propertyAccessCompilationInfo.append(PropertyStubCompilationInfo());
- m_propertyAccessCompilationInfo.last().bytecodeIndex = m_bytecodeOffset;
- m_propertyAccessCompilationInfo.last().hotPathBegin = hotPathBegin;
DataLabelPtr structureToCompare;
- Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, JSCell::structureOffset()), structureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
+ PatchableJump structureCheck = patchableBranchPtrWithPatch(NotEqual, Address(regT0, JSCell::structureOffset()), structureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
addSlowCase(structureCheck);
- ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, structureToCompare), patchOffsetGetByIdStructure);
- ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, structureCheck), patchOffsetGetByIdBranchToSlowCase)
loadPtr(Address(regT0, JSObject::offsetOfPropertyStorage()), regT0);
DataLabelCompact displacementLabel = loadPtrWithCompactAddressOffsetPatch(Address(regT0, patchGetByIdDefaultOffset), regT0);
- ASSERT_JIT_OFFSET_UNUSED(displacementLabel, differenceBetween(hotPathBegin, displacementLabel), patchOffsetGetByIdPropertyMapOffset);
Label putResult(this);
END_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath);
- ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, putResult), patchOffsetGetByIdPutResult);
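+ // Gather every patchable location for this get_by_id (structure compare, slow-case branch, load offset, result store) into one record, rather than relying on fixed patch offsets.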
+ m_propertyAccessCompilationInfo.append(PropertyStubCompilationInfo(PropertyStubGetById, m_bytecodeOffset, hotPathBegin, structureToCompare, structureCheck, displacementLabel, putResult));
}
void JIT::emitSlow_op_get_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
compileGetByIdSlowCase(resultVReg, baseVReg, ident, iter, false);
- emitValueProfilingSite(SubsequentProfilingSite);
+ emitValueProfilingSite();
}
void JIT::compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter, bool isMethodCheck)
BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdSlowCase);
-#ifndef NDEBUG
Label coldPathBegin(this);
-#endif
JITStubCall stubCall(this, isMethodCheck ? cti_op_get_by_id_method_check : cti_op_get_by_id);
stubCall.addArgument(regT0);
stubCall.addArgument(TrustedImmPtr(ident));
END_UNINTERRUPTED_SEQUENCE(sequenceGetByIdSlowCase);
- ASSERT_JIT_OFFSET(differenceBetween(coldPathBegin, call), patchOffsetGetByIdSlowCaseCall);
-
// Track the location of the call; this will be used to recover patch information.
- m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex++].callReturnLocation = call;
+ m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex++].slowCaseInfo(PropertyStubGetById, coldPathBegin, call);
}
void JIT::emit_op_put_by_id(Instruction* currentInstruction)
BEGIN_UNINTERRUPTED_SEQUENCE(sequencePutById);
Label hotPathBegin(this);
- m_propertyAccessCompilationInfo.append(PropertyStubCompilationInfo());
- m_propertyAccessCompilationInfo.last().bytecodeIndex = m_bytecodeOffset;
- m_propertyAccessCompilationInfo.last().hotPathBegin = hotPathBegin;
// It is important that the following instruction plants a 32bit immediate, in order that it can be patched over.
DataLabelPtr structureToCompare;
addSlowCase(branchPtrWithPatch(NotEqual, Address(regT0, JSCell::structureOffset()), structureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure))));
- ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, structureToCompare), patchOffsetPutByIdStructure);
loadPtr(Address(regT0, JSObject::offsetOfPropertyStorage()), regT2);
DataLabel32 displacementLabel = storePtrWithAddressOffsetPatch(regT1, Address(regT2, patchPutByIdDefaultOffset));
emitWriteBarrier(regT0, regT1, regT2, regT3, ShouldFilterImmediates, WriteBarrierForPropertyAccess);
- ASSERT_JIT_OFFSET_UNUSED(displacementLabel, differenceBetween(hotPathBegin, displacementLabel), patchOffsetPutByIdPropertyMapOffset);
+ m_propertyAccessCompilationInfo.append(PropertyStubCompilationInfo(PropertyStubPutById, m_bytecodeOffset, hotPathBegin, structureToCompare, displacementLabel));
}
void JIT::emitSlow_op_put_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
Call call = stubCall.call();
// Track the location of the call; this will be used to recover patch information.
- m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex++].callReturnLocation = call;
+ m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex++].slowCaseInfo(PropertyStubPutById, call);
}
// Compile a store into an object's property storage. May overwrite the
// Check eax is an object of the right Structure.
failureCases.append(emitJumpIfNotJSCell(regT0));
failureCases.append(branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(oldStructure)));
+
testPrototype(oldStructure->storedPrototype(), failureCases);
+
+ ASSERT(oldStructure->storedPrototype().isNull() || oldStructure->storedPrototype().asCell()->structure() == chain->head()->get());
// ecx = baseObject->m_structure
if (!direct) {
- for (WriteBarrier<Structure>* it = chain->head(); *it; ++it)
+ for (WriteBarrier<Structure>* it = chain->head(); *it; ++it) {
+ ASSERT((*it)->storedPrototype().isNull() || (*it)->storedPrototype().asCell()->structure() == it[1].get());
testPrototype((*it)->storedPrototype(), failureCases);
+ }
}
- Call callTarget;
-
+ // If we succeed in all of our checks, and the code was optimizable, then make sure we
+ // decrement the rare case counter.
+#if ENABLE(VALUE_PROFILER)
+ if (m_codeBlock->canCompileWithDFG() >= DFG::MayInline) {
+ sub32(
+ TrustedImm32(1),
+ AbsoluteAddress(&m_codeBlock->rareCaseProfileForBytecodeOffset(stubInfo->bytecodeIndex)->m_counter));
+ }
+#endif
+
// emit a call only if storage realloc is needed
bool willNeedStorageRealloc = oldStructure->propertyStorageCapacity() != newStructure->propertyStorageCapacity();
if (willNeedStorageRealloc) {
stubCall.skipArgument(); // ident
stubCall.skipArgument(); // value
stubCall.addArgument(TrustedImm32(oldStructure->propertyStorageCapacity()));
- stubCall.addArgument(TrustedImm32(newStructure->propertyStorageCapacity()));
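+ // Pass the new structure itself so the realloc stub can derive the new storage capacity from it.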
+ stubCall.addArgument(TrustedImmPtr(newStructure));
stubCall.call(regT0);
emitGetJITStubArg(2, regT1);
// an unconditional barrier here.
emitWriteBarrier(regT0, regT1, regT2, regT3, UnconditionalWriteBarrier, WriteBarrierForPropertyAccess);
+ ASSERT(newStructure->classInfo() == oldStructure->classInfo());
storePtr(TrustedImmPtr(newStructure), Address(regT0, JSCell::structureOffset()));
compilePutDirectOffset(regT0, regT1, cachedOffset);
restoreArgumentReferenceForTrampoline();
Call failureCall = tailRecursiveCall();
- LinkBuffer patchBuffer(*m_globalData, this);
+ LinkBuffer patchBuffer(*m_globalData, this, m_codeBlock);
patchBuffer.link(failureCall, FunctionPtr(direct ? cti_op_put_by_id_direct_fail : cti_op_put_by_id_fail));
int offset = sizeof(JSValue) * cachedOffset;
// Patch the offset into the property map to load from, then patch the Structure to look for.
- repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetGetByIdStructure), structure);
- repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelCompactAtOffset(patchOffsetGetByIdPropertyMapOffset), offset);
+ repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(stubInfo->patch.baseline.u.get.structureToCompare), structure);
+ repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelCompactAtOffset(stubInfo->patch.baseline.u.get.displacementLabel), offset);
}
void JIT::patchPutByIdReplace(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress, bool direct)
int offset = sizeof(JSValue) * cachedOffset;
// Patch the offset into the property map to load from, then patch the Structure to look for.
- repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetPutByIdStructure), structure);
- repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetPutByIdPropertyMapOffset), offset);
+ repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(stubInfo->patch.baseline.u.put.structureToCompare), structure);
+ repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(stubInfo->patch.baseline.u.put.displacementLabel), offset);
}
void JIT::privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress)
StructureStubInfo* stubInfo = &m_codeBlock->getStubInfo(returnAddress);
// Check eax is an array
- Jump failureCases1 = branchPtr(NotEqual, Address(regT0), TrustedImmPtr(m_globalData->jsArrayVPtr));
+ Jump failureCases1 = branchPtr(NotEqual, Address(regT0, JSCell::classInfoOffset()), TrustedImmPtr(&JSArray::s_info));
// Checks out okay! - get the length from the storage
loadPtr(Address(regT0, JSArray::storageOffset()), regT3);
emitFastArithIntToImmNoCheck(regT2, regT0);
Jump success = jump();
- LinkBuffer patchBuffer(*m_globalData, this);
+ LinkBuffer patchBuffer(*m_globalData, this, m_codeBlock);
// Use the patch information to link the failure cases back to the original slow case routine.
- CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
+ CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-stubInfo->patch.baseline.u.get.coldPathBegin);
patchBuffer.link(failureCases1, slowCaseBegin);
patchBuffer.link(failureCases2, slowCaseBegin);
// On success return back to the hot patch code, at a point it will perform the store to dest for us.
- patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
+ patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(stubInfo->patch.baseline.u.get.putResult));
// Track the stub we have created so that it will be deleted later.
stubInfo->stubRoutine = patchBuffer.finalizeCode();
// Finally patch the jump to slow case back in the hot path to jump here instead.
- CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
+ CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck);
RepatchBuffer repatchBuffer(m_codeBlock);
repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubInfo->stubRoutine.code()));
} else
compileGetDirectOffset(protoObject, regT0, cachedOffset);
Jump success = jump();
- LinkBuffer patchBuffer(*m_globalData, this);
+ LinkBuffer patchBuffer(*m_globalData, this, m_codeBlock);
// Use the patch information to link the failure cases back to the original slow case routine.
- CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
+ CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-stubInfo->patch.baseline.u.get.coldPathBegin);
patchBuffer.link(failureCases1, slowCaseBegin);
patchBuffer.link(failureCases2, slowCaseBegin);
// On success return back to the hot patch code, at a point it will perform the store to dest for us.
- patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
+ patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(stubInfo->patch.baseline.u.get.putResult));
if (needsStubLink) {
for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
stubInfo->stubRoutine = patchBuffer.finalizeCode();
// Finally patch the jump to slow case back in the hot path to jump here instead.
- CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
+ CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck);
RepatchBuffer repatchBuffer(m_codeBlock);
repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubInfo->stubRoutine.code()));
}
Jump success = jump();
- LinkBuffer patchBuffer(*m_globalData, this);
+ LinkBuffer patchBuffer(*m_globalData, this, m_codeBlock);
if (needsStubLink) {
for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
// Use the patch information to link the failure cases back to the original slow case routine.
CodeLocationLabel lastProtoBegin = CodeLocationLabel(polymorphicStructures->list[currentIndex - 1].stubRoutine.code());
if (!lastProtoBegin)
- lastProtoBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
+ lastProtoBegin = stubInfo->callReturnLocation.labelAtOffset(-stubInfo->patch.baseline.u.get.coldPathBegin);
patchBuffer.link(failureCase, lastProtoBegin);
// On success return back to the hot patch code, at a point it will perform the store to dest for us.
- patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
+ patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(stubInfo->patch.baseline.u.get.putResult));
MacroAssemblerCodeRef stubCode = patchBuffer.finalizeCode();
polymorphicStructures->list[currentIndex].set(*m_globalData, m_codeBlock->ownerExecutable(), stubCode, structure, isDirect);
// Finally patch the jump to slow case back in the hot path to jump here instead.
- CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
+ CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck);
RepatchBuffer repatchBuffer(m_codeBlock);
repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubCode.code()));
}
Jump success = jump();
- LinkBuffer patchBuffer(*m_globalData, this);
+ LinkBuffer patchBuffer(*m_globalData, this, m_codeBlock);
if (needsStubLink) {
for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
patchBuffer.link(failureCases2, lastProtoBegin);
// On success return back to the hot patch code, at a point it will perform the store to dest for us.
- patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
+ patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(stubInfo->patch.baseline.u.get.putResult));
MacroAssemblerCodeRef stubCode = patchBuffer.finalizeCode();
prototypeStructures->list[currentIndex].set(*m_globalData, m_codeBlock->ownerExecutable(), stubCode, structure, prototypeStructure, isDirect);
// Finally patch the jump to slow case back in the hot path to jump here instead.
- CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
+ CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck);
RepatchBuffer repatchBuffer(m_codeBlock);
repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubCode.code()));
}
}
Jump success = jump();
- LinkBuffer patchBuffer(*m_globalData, this);
+ LinkBuffer patchBuffer(*m_globalData, this, m_codeBlock);
if (needsStubLink) {
for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
patchBuffer.link(bucketsOfFail, lastProtoBegin);
// On success return back to the hot patch code, at a point it will perform the store to dest for us.
- patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
+ patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(stubInfo->patch.baseline.u.get.putResult));
CodeRef stubRoutine = patchBuffer.finalizeCode();
prototypeStructures->list[currentIndex].set(callFrame->globalData(), m_codeBlock->ownerExecutable(), stubRoutine, structure, chain, isDirect);
// Finally patch the jump to slow case back in the hot path to jump here instead.
- CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
+ CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck);
RepatchBuffer repatchBuffer(m_codeBlock);
repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubRoutine.code()));
}
compileGetDirectOffset(protoObject, regT0, cachedOffset);
Jump success = jump();
- LinkBuffer patchBuffer(*m_globalData, this);
+ LinkBuffer patchBuffer(*m_globalData, this, m_codeBlock);
if (needsStubLink) {
for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
}
// Use the patch information to link the failure cases back to the original slow case routine.
- patchBuffer.link(bucketsOfFail, stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall));
+ patchBuffer.link(bucketsOfFail, stubInfo->callReturnLocation.labelAtOffset(-stubInfo->patch.baseline.u.get.coldPathBegin));
// On success return back to the hot patch code, at a point it will perform the store to dest for us.
- patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
+ patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(stubInfo->patch.baseline.u.get.putResult));
// Track the stub we have created so that it will be deleted later.
CodeRef stubRoutine = patchBuffer.finalizeCode();
stubInfo->stubRoutine = stubRoutine;
// Finally patch the jump to slow case back in the hot path to jump here instead.
- CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
+ CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck);
RepatchBuffer repatchBuffer(m_codeBlock);
repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubRoutine.code()));
loadPtr(Address(regT0, OBJECT_OFFSETOF(ScopeChainNode, object)), regT0);
loadPtr(Address(regT0, JSVariableObject::offsetOfRegisters()), regT0);
loadPtr(Address(regT0, currentInstruction[2].u.operand * sizeof(Register)), regT0);
- emitValueProfilingSite(FirstProfilingSite);
+ emitValueProfilingSite();
emitPutVirtualRegister(currentInstruction[1].u.operand);
}
JSVariableObject* globalObject = m_codeBlock->globalObject();
loadPtr(&globalObject->m_registers, regT0);
loadPtr(Address(regT0, currentInstruction[2].u.operand * sizeof(Register)), regT0);
- emitValueProfilingSite(FirstProfilingSite);
+ emitValueProfilingSite();
emitPutVirtualRegister(currentInstruction[1].u.operand);
}
emitWriteBarrier(globalObject, regT0, regT2, ShouldFilterImmediates, WriteBarrierForVariableAccess);
}
+void JIT::resetPatchGetById(RepatchBuffer& repatchBuffer, StructureStubInfo* stubInfo)
+{
+ repatchBuffer.relink(stubInfo->callReturnLocation, cti_op_get_by_id);
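+ // Reset the inline cache: a structure of -1 never matches, the offset is cleared, and the structure check jumps straight back to the slow case.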
+ repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(stubInfo->patch.baseline.u.get.structureToCompare), reinterpret_cast<void*>(-1));
+ repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelCompactAtOffset(stubInfo->patch.baseline.u.get.displacementLabel), 0);
+ repatchBuffer.relink(stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck), stubInfo->callReturnLocation.labelAtOffset(-stubInfo->patch.baseline.u.get.coldPathBegin));
+}
+
+void JIT::resetPatchPutById(RepatchBuffer& repatchBuffer, StructureStubInfo* stubInfo)
+{
+ if (isDirectPutById(stubInfo))
+ repatchBuffer.relink(stubInfo->callReturnLocation, cti_op_put_by_id_direct);
+ else
+ repatchBuffer.relink(stubInfo->callReturnLocation, cti_op_put_by_id);
+ repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(stubInfo->patch.baseline.u.put.structureToCompare), reinterpret_cast<void*>(-1));
+ repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(stubInfo->patch.baseline.u.put.displacementLabel), 0);
+}
+
#endif // USE(JSVALUE64)
void JIT::emitWriteBarrier(RegisterID owner, RegisterID value, RegisterID scratch, RegisterID scratch2, WriteBarrierMode mode, WriteBarrierUseKind useKind)
failureCases.append(branchPtr(NotEqual, Address(regT3, JSCell::structureOffset()), TrustedImmPtr(prototype.asCell()->structure())));
}
-void JIT::patchMethodCallProto(JSGlobalData& globalData, CodeBlock* codeBlock, MethodCallLinkInfo& methodCallLinkInfo, JSObject* callee, Structure* structure, JSObject* proto, ReturnAddressPtr returnAddress)
+void JIT::patchMethodCallProto(JSGlobalData& globalData, CodeBlock* codeBlock, MethodCallLinkInfo& methodCallLinkInfo, StructureStubInfo& stubInfo, JSObject* callee, Structure* structure, JSObject* proto, ReturnAddressPtr returnAddress)
{
RepatchBuffer repatchBuffer(codeBlock);
methodCallLinkInfo.cachedStructure.set(globalData, structureLocation, codeBlock->ownerExecutable(), structure);
Structure* prototypeStructure = proto->structure();
- methodCallLinkInfo.cachedPrototypeStructure.set(globalData, structureLocation.dataLabelPtrAtOffset(patchOffsetMethodCheckProtoStruct), codeBlock->ownerExecutable(), prototypeStructure);
- methodCallLinkInfo.cachedPrototype.set(globalData, structureLocation.dataLabelPtrAtOffset(patchOffsetMethodCheckProtoObj), codeBlock->ownerExecutable(), proto);
- methodCallLinkInfo.cachedFunction.set(globalData, structureLocation.dataLabelPtrAtOffset(patchOffsetMethodCheckPutFunction), codeBlock->ownerExecutable(), callee);
+ methodCallLinkInfo.cachedPrototypeStructure.set(globalData, structureLocation.dataLabelPtrAtOffset(stubInfo.patch.baseline.methodCheckProtoStructureToCompare), codeBlock->ownerExecutable(), prototypeStructure);
+ methodCallLinkInfo.cachedPrototype.set(globalData, structureLocation.dataLabelPtrAtOffset(stubInfo.patch.baseline.methodCheckProtoObj), codeBlock->ownerExecutable(), proto);
+ methodCallLinkInfo.cachedFunction.set(globalData, structureLocation.dataLabelPtrAtOffset(stubInfo.patch.baseline.methodCheckPutFunction), codeBlock->ownerExecutable(), callee);
repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_method_check_update));
}
+bool JIT::isDirectPutById(StructureStubInfo* stubInfo)
+{
+ switch (stubInfo->accessType) {
+ case access_put_by_id_transition_normal:
+ return false;
+ case access_put_by_id_transition_direct:
+ return true;
+ case access_put_by_id_replace:
+ case access_put_by_id_generic: {
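+ // Replace/generic accesses don't record directness, so infer it from which stub function the call currently targets.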
+ void* oldCall = MacroAssembler::readCallTarget(stubInfo->callReturnLocation).executableAddress();
+ if (oldCall == bitwise_cast<void*>(cti_op_put_by_id_direct)
+ || oldCall == bitwise_cast<void*>(cti_op_put_by_id_direct_generic)
+ || oldCall == bitwise_cast<void*>(cti_op_put_by_id_direct_fail))
+ return true;
+ ASSERT(oldCall == bitwise_cast<void*>(cti_op_put_by_id)
+ || oldCall == bitwise_cast<void*>(cti_op_put_by_id_generic)
+ || oldCall == bitwise_cast<void*>(cti_op_put_by_id_fail));
+ return false;
+ }
+ default:
+ ASSERT_NOT_REACHED();
+ return false;
+ }
+}
+
} // namespace JSC
#endif // ENABLE(JIT)