+2011-09-23 Oliver Hunt <oliver@apple.com>
+
+ Make write barriers actually do something when enabled
+ https://bugs.webkit.org/show_bug.cgi?id=68717
+
+ Reviewed by Geoffrey Garen.
+
+ Add a basic card marking style write barrier to JSC (currently
+ turned off). This requires two scratch registers in the JIT
+ so there was some register re-arranging to satisfy that requirement.
+ Happily this produced a minor perf bump in sunspider (~0.5%).
+
+ Turning the barriers on causes an overall regression of around 1.5%.
+
+ * JavaScriptCore.exp:
+ * JavaScriptCore.xcodeproj/project.pbxproj:
+ * assembler/MacroAssemblerX86Common.h:
+ (JSC::MacroAssemblerX86Common::store8):
+ * assembler/X86Assembler.h:
+ (JSC::X86Assembler::movb_i8m):
+ * dfg/DFGJITCodeGenerator.cpp:
+ (JSC::DFG::JITCodeGenerator::isKnownNotCell):
+ (JSC::DFG::JITCodeGenerator::writeBarrier):
+ (JSC::DFG::JITCodeGenerator::markCellCard):
+ (JSC::DFG::JITCodeGenerator::cachedPutById):
+ * dfg/DFGJITCodeGenerator.h:
+ * dfg/DFGRepatch.cpp:
+ (JSC::DFG::tryCachePutByID):
+ * dfg/DFGSpeculativeJIT.cpp:
+ (JSC::DFG::SpeculativeJIT::compile):
+ * heap/CardSet.h: Added.
+ (JSC::CardSet::CardSet):
+ (JSC::CardSet::cardForAtom):
+ (JSC::CardSet::isCardMarkedForAtom):
+ (JSC::CardSet::markCardForAtom):
+ * heap/Heap.cpp:
+ * heap/Heap.h:
+ (JSC::Heap::addressOfCardFor):
+ (JSC::Heap::writeBarrierFastCase):
+ * heap/MarkedBlock.h:
+ (JSC::MarkedBlock::setDirtyObject):
+ (JSC::MarkedBlock::addressOfCardFor):
+ (JSC::MarkedBlock::offsetOfCards):
+ * jit/JIT.h:
+ * jit/JITPropertyAccess.cpp:
+ (JSC::JIT::emit_op_put_by_val):
+ (JSC::JIT::emit_op_put_by_id):
+ (JSC::JIT::privateCompilePutByIdTransition):
+ (JSC::JIT::emit_op_put_scoped_var):
+ (JSC::JIT::emit_op_put_global_var):
+ (JSC::JIT::emitWriteBarrier):
+ * jit/JITPropertyAccess32_64.cpp:
+ (JSC::JIT::emit_op_put_by_val):
+ (JSC::JIT::emit_op_put_by_id):
+ (JSC::JIT::emitSlow_op_put_by_id):
+ (JSC::JIT::privateCompilePutByIdTransition):
+ (JSC::JIT::emit_op_put_scoped_var):
+ (JSC::JIT::emit_op_put_global_var):
+
2011-09-23 Thouraya ANDOLSI <thouraya.andolsi@st.com>
https://bugs.webkit.org/show_bug.cgi?id=68077
__ZN3JSC4Heap17isValidAllocationEm
__ZN3JSC4Heap19setActivityCallbackEN3WTF10PassOwnPtrINS_18GCActivityCallbackEEE
__ZN3JSC4Heap20protectedObjectCountEv
-__ZN3JSC4Heap20writeBarrierSlowCaseEPKNS_6JSCellEPS1_
__ZN3JSC4Heap25protectedObjectTypeCountsEv
__ZN3JSC4Heap26protectedGlobalObjectCountEv
__ZN3JSC4Heap29reportExtraMemoryCostSlowCaseEm
A7482B9411671147003B0712 /* JSWeakObjectMapRefPrivate.cpp in Sources */ = {isa = PBXBuildFile; fileRef = A7482B7A1166CDEA003B0712 /* JSWeakObjectMapRefPrivate.cpp */; };
A7482E93116A7CAD003B0712 /* JSWeakObjectMapRefInternal.h in Headers */ = {isa = PBXBuildFile; fileRef = A7482E37116A697B003B0712 /* JSWeakObjectMapRefInternal.h */; settings = {ATTRIBUTES = (Private, ); }; };
A74DE1D0120B875600D40D5B /* ARMv7Assembler.cpp in Sources */ = {isa = PBXBuildFile; fileRef = A74DE1CB120B86D600D40D5B /* ARMv7Assembler.cpp */; };
+ A7521E131429169A003C8D0C /* CardSet.h in Headers */ = {isa = PBXBuildFile; fileRef = A7521E121429169A003C8D0C /* CardSet.h */; settings = {ATTRIBUTES = (); }; };
A75706DE118A2BCF0057F88F /* JITArithmetic32_64.cpp in Sources */ = {isa = PBXBuildFile; fileRef = A75706DD118A2BCF0057F88F /* JITArithmetic32_64.cpp */; };
A766B44F0EE8DCD1009518CA /* ExecutableAllocator.h in Headers */ = {isa = PBXBuildFile; fileRef = A7B48DB50EE74CFC00DCBDB6 /* ExecutableAllocator.h */; settings = {ATTRIBUTES = (Private, ); }; };
A76C51761182748D00715B05 /* JSInterfaceJIT.h in Headers */ = {isa = PBXBuildFile; fileRef = A76C51741182748D00715B05 /* JSInterfaceJIT.h */; };
A7482B7A1166CDEA003B0712 /* JSWeakObjectMapRefPrivate.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = JSWeakObjectMapRefPrivate.cpp; sourceTree = "<group>"; };
A7482E37116A697B003B0712 /* JSWeakObjectMapRefInternal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = JSWeakObjectMapRefInternal.h; sourceTree = "<group>"; };
A74DE1CB120B86D600D40D5B /* ARMv7Assembler.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ARMv7Assembler.cpp; sourceTree = "<group>"; };
+ A7521E121429169A003C8D0C /* CardSet.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = CardSet.h; sourceTree = "<group>"; };
A75706DD118A2BCF0057F88F /* JITArithmetic32_64.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = JITArithmetic32_64.cpp; sourceTree = "<group>"; };
A76C51741182748D00715B05 /* JSInterfaceJIT.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = JSInterfaceJIT.h; sourceTree = "<group>"; };
A76F54A213B28AAB00EF2BCE /* JITWriteBarrier.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = JITWriteBarrier.h; sourceTree = "<group>"; };
children = (
0FD82F281426CA5A00179C94 /* JettisonedCodeBlocks.cpp */,
0FD82F291426CA5A00179C94 /* JettisonedCodeBlocks.h */,
- A70456AF1427FB150037DA68 /* AllocationSpace.h */,
A70456AE1427FB030037DA68 /* AllocationSpace.cpp */,
- 0F242DA513F3B1BB007ADD4C /* WeakReferenceHarvester.h */,
- 0FC815141405118D00CFA603 /* VTableSpectrum.h */,
- 0FC815121405118600CFA603 /* VTableSpectrum.cpp */,
- 0FC8150914043BD200CFA603 /* WriteBarrierSupport.h */,
- 0FC8150814043BCA00CFA603 /* WriteBarrierSupport.cpp */,
+ A70456AF1427FB150037DA68 /* AllocationSpace.h */,
+ A7521E121429169A003C8D0C /* CardSet.h */,
146B14DB12EB5B12001BEC1B /* ConservativeRoots.cpp */,
149DAAF212EB559D0083B12B /* ConservativeRoots.h */,
142E312B134FF0A600AFADB5 /* Handle.h */,
14BA78F013AAB88F005B7C2C /* SlotVisitor.h */,
142E3132134FF0A600AFADB5 /* Strong.h */,
141448CC13A1783700F5BA1A /* TinyBloomFilter.h */,
+ 0FC815121405118600CFA603 /* VTableSpectrum.cpp */,
+ 0FC815141405118D00CFA603 /* VTableSpectrum.h */,
142E3133134FF0A600AFADB5 /* Weak.h */,
+ 0F242DA513F3B1BB007ADD4C /* WeakReferenceHarvester.h */,
+ 0FC8150814043BCA00CFA603 /* WriteBarrierSupport.cpp */,
+ 0FC8150914043BD200CFA603 /* WriteBarrierSupport.h */,
);
path = heap;
sourceTree = "<group>";
1A08277A142168D70090CCAC /* BinarySemaphore.h in Headers */,
A70456B01427FB910037DA68 /* AllocationSpace.h in Headers */,
86FA9E92142BBB2E001773B7 /* JSBoundFunction.h in Headers */,
+ A7521E131429169A003C8D0C /* CardSet.h in Headers */,
);
runOnlyForDeploymentPostprocessing = 0;
};
{
m_assembler.movl_i32m(imm.m_value, address.offset, address.base);
}
+
+ void store8(TrustedImm32 imm, Address address)
+ {
+ ASSERT(-128 <= imm.m_value && imm.m_value < 128);
+ m_assembler.movb_i8m(imm.m_value, address.offset, address.base);
+ }
+
+ void store8(TrustedImm32 imm, BaseIndex address)
+ {
+ ASSERT(-128 <= imm.m_value && imm.m_value < 128);
+ m_assembler.movb_i8m(imm.m_value, address.offset, address.base, address.index, address.scale);
+ }
// Floating-point operation:
OP_MOV_EAXIv = 0xB8,
OP_GROUP2_EvIb = 0xC1,
OP_RET = 0xC3,
+ OP_GROUP11_EvIb = 0xC6,
OP_GROUP11_EvIz = 0xC7,
OP_INT3 = 0xCC,
OP_GROUP2_Ev1 = 0xD1,
m_formatter.oneByteOp(OP_GROUP11_EvIz, GROUP11_MOV, base, offset);
m_formatter.immediate32(imm);
}
+
+ void movb_i8m(int imm, int offset, RegisterID base)
+ {
+ ASSERT(-128 <= imm && imm < 128);
+ m_formatter.oneByteOp(OP_GROUP11_EvIb, GROUP11_MOV, base, offset);
+ m_formatter.immediate8(imm);
+ }
+
+ void movb_i8m(int imm, int offset, RegisterID base, RegisterID index, int scale)
+ {
+ ASSERT(-128 <= imm && imm < 128);
+ m_formatter.oneByteOp(OP_GROUP11_EvIb, GROUP11_MOV, base, index, scale, offset);
+ m_formatter.immediate8(imm);
+ }
void movl_EAXm(const void* addr)
{
return m_generationInfo[m_jit.graph()[nodeIndex].virtualRegister()].isJSCell();
}
+bool JITCodeGenerator::isKnownNotCell(NodeIndex nodeIndex)
+{
+ Node& node = m_jit.graph()[nodeIndex];
+ VirtualRegister virtualRegister = node.virtualRegister();
+ GenerationInfo& info = m_generationInfo[virtualRegister];
+ if (node.hasConstant() && !valueOfJSConstant(nodeIndex).isCell())
+ return true;
+ return !(info.isJSCell() || info.isUnknownJS());
+}
+
bool JITCodeGenerator::isKnownNotInteger(NodeIndex nodeIndex)
{
Node& node = m_jit.graph()[nodeIndex];
return functionCall;
}
-void JITCodeGenerator::writeBarrier(MacroAssembler& jit, GPRReg owner, GPRReg scratch, WriteBarrierUseKind useKind)
+void JITCodeGenerator::writeBarrier(MacroAssembler& jit, GPRReg owner, GPRReg scratch1, GPRReg scratch2, WriteBarrierUseKind useKind)
{
UNUSED_PARAM(jit);
UNUSED_PARAM(owner);
+ UNUSED_PARAM(scratch1);
+ UNUSED_PARAM(scratch2);
+ UNUSED_PARAM(useKind);
+ ASSERT(owner != scratch1);
+ ASSERT(owner != scratch2);
+ ASSERT(scratch1 != scratch2);
+
+#if ENABLE(WRITE_BARRIER_PROFILING)
+ JITCompiler::emitCount(jit, WriteBarrierCounters::jitCounterFor(useKind));
+#endif
+ markCellCard(jit, owner, scratch1, scratch2);
+}
+
+void JITCodeGenerator::markCellCard(MacroAssembler& jit, GPRReg owner, GPRReg scratch1, GPRReg scratch2)
+{
+ UNUSED_PARAM(jit);
+ UNUSED_PARAM(owner);
+ UNUSED_PARAM(scratch1);
+ UNUSED_PARAM(scratch2);
+
+#if ENABLE(GGC)
+ jit.move(owner, scratch1);
+ jit.andPtr(TrustedImm32(static_cast<int32_t>(MarkedBlock::blockMask)), scratch1);
+ jit.move(owner, scratch2);
+ jit.andPtr(TrustedImm32(static_cast<int32_t>(~MarkedBlock::blockMask)), scratch2);
+ jit.rshift32(TrustedImm32(MarkedBlock::log2CardSize), scratch2);
+ jit.store8(TrustedImm32(1), MacroAssembler::BaseIndex(scratch1, scratch2, MacroAssembler::TimesOne, MarkedBlock::offsetOfCards()));
+#endif
+}
+
+void JITCodeGenerator::writeBarrier(GPRReg ownerGPR, GPRReg valueGPR, NodeIndex valueIndex, WriteBarrierUseKind useKind, GPRReg scratch1, GPRReg scratch2)
+{
+ UNUSED_PARAM(ownerGPR);
+ UNUSED_PARAM(valueGPR);
+ UNUSED_PARAM(scratch1);
+ UNUSED_PARAM(scratch2);
+ UNUSED_PARAM(useKind);
+
+ if (isKnownNotCell(valueIndex))
+ return;
+
+#if ENABLE(WRITE_BARRIER_PROFILING)
+ JITCompiler::emitCount(jit, WriteBarrierCounters::jitCounterFor(useKind));
+#endif
+
+#if ENABLE(GGC)
+ JITCompiler::Jump rhsNotCell;
+ bool hadCellCheck = false;
+ if (!isKnownCell(valueIndex) && !isCellPrediction(m_jit.graph().getPrediction(m_jit.graph()[valueIndex]))) {
+ hadCellCheck = true;
+ rhsNotCell = m_jit.branchTestPtr(MacroAssembler::NonZero, valueGPR, GPRInfo::tagMaskRegister);
+ }
+
+ GPRTemporary temp1;
+ GPRTemporary temp2;
+ if (scratch1 == InvalidGPRReg) {
+ GPRTemporary scratchGPR(this);
+ temp1.adopt(scratchGPR);
+ scratch1 = temp1.gpr();
+ }
+ if (scratch2 == InvalidGPRReg) {
+ GPRTemporary scratchGPR(this);
+ temp2.adopt(scratchGPR);
+ scratch2 = temp2.gpr();
+ }
+
+ markCellCard(m_jit, ownerGPR, scratch1, scratch2);
+ if (hadCellCheck)
+ rhsNotCell.link(&m_jit);
+#endif
+}
+
+void JITCodeGenerator::writeBarrier(JSCell* owner, GPRReg valueGPR, NodeIndex valueIndex, WriteBarrierUseKind useKind, GPRReg scratch)
+{
+ UNUSED_PARAM(owner);
+ UNUSED_PARAM(valueGPR);
UNUSED_PARAM(scratch);
UNUSED_PARAM(useKind);
- ASSERT(owner != scratch);
+
+ if (isKnownNotCell(valueIndex))
+ return;
#if ENABLE(WRITE_BARRIER_PROFILING)
JITCompiler::emitCount(jit, WriteBarrierCounters::jitCounterFor(useKind));
#endif
+
+#if ENABLE(GGC)
+ JITCompiler::Jump rhsNotCell;
+ bool hadCellCheck = false;
+ if (!isKnownCell(valueIndex) && !isCellPrediction(m_jit.graph().getPrediction(m_jit.graph()[valueIndex]))) {
+ hadCellCheck = true;
+ rhsNotCell = m_jit.branchTestPtr(MacroAssembler::NonZero, valueGPR, GPRInfo::tagMaskRegister);
+ }
+
+ GPRTemporary temp;
+ if (scratch == InvalidGPRReg) {
+ GPRTemporary scratchGPR(this);
+ temp.adopt(scratchGPR);
+ scratch = temp.gpr();
+ }
+
+ uint8_t* cardAddress = Heap::addressOfCardFor(owner);
+ m_jit.move(JITCompiler::TrustedImmPtr(cardAddress), scratch);
+ m_jit.store8(JITCompiler::TrustedImm32(1), JITCompiler::Address(scratch));
+
+ if (hadCellCheck)
+ rhsNotCell.link(&m_jit);
+#endif
}
-void JITCodeGenerator::cachedPutById(GPRReg baseGPR, GPRReg valueGPR, GPRReg scratchGPR, unsigned identifierNumber, PutKind putKind, JITCompiler::Jump slowPathTarget)
+void JITCodeGenerator::cachedPutById(GPRReg baseGPR, GPRReg valueGPR, NodeIndex valueIndex, GPRReg scratchGPR, unsigned identifierNumber, PutKind putKind, JITCompiler::Jump slowPathTarget)
{
+
JITCompiler::DataLabelPtr structureToCompare;
JITCompiler::Jump structureCheck = m_jit.branchPtrWithPatch(JITCompiler::NotEqual, JITCompiler::Address(baseGPR, JSCell::structureOffset()), structureToCompare, JITCompiler::TrustedImmPtr(reinterpret_cast<void*>(-1)));
-
- writeBarrier(m_jit, baseGPR, scratchGPR, WriteBarrierForPropertyAccess);
+
+ writeBarrier(baseGPR, valueGPR, valueIndex, WriteBarrierForPropertyAccess, scratchGPR);
m_jit.loadPtr(JITCompiler::Address(baseGPR, JSObject::offsetOfPropertyStorage()), scratchGPR);
JITCompiler::DataLabel32 storeWithPatch = m_jit.storePtrWithAddressOffsetPatch(valueGPR, JITCompiler::Address(scratchGPR, 0));
}
#endif
+GPRTemporary::GPRTemporary()
+ : m_jit(0)
+ , m_gpr(InvalidGPRReg)
+{
+}
+
GPRTemporary::GPRTemporary(JITCodeGenerator* jit)
: m_jit(jit)
, m_gpr(InvalidGPRReg)
m_gpr = m_jit->allocate();
}
+void GPRTemporary::adopt(GPRTemporary& other)
+{
+ ASSERT(!m_jit);
+ ASSERT(m_gpr == InvalidGPRReg);
+ ASSERT(other.m_jit);
+ ASSERT(other.m_gpr != InvalidGPRReg);
+ m_jit = other.m_jit;
+ m_gpr = other.m_gpr;
+ other.m_jit = 0;
+ other.m_gpr = InvalidGPRReg;
+}
+
FPRTemporary::FPRTemporary(JITCodeGenerator* jit)
: m_jit(jit)
, m_fpr(InvalidFPRReg)
m_gprs.release(info.gpr());
}
- static void writeBarrier(MacroAssembler&, GPRReg ownerGPR, GPRReg scratchGPR, WriteBarrierUseKind);
+ static void markCellCard(MacroAssembler&, GPRReg ownerGPR, GPRReg scratchGPR1, GPRReg scratchGPR2);
+ static void writeBarrier(MacroAssembler&, GPRReg ownerGPR, GPRReg scratchGPR1, GPRReg scratchGPR2, WriteBarrierUseKind);
+
+ void writeBarrier(GPRReg ownerGPR, GPRReg valueGPR, NodeIndex valueIndex, WriteBarrierUseKind, GPRReg scratchGPR1 = InvalidGPRReg, GPRReg scratchGPR2 = InvalidGPRReg);
+ void writeBarrier(JSCell* owner, GPRReg valueGPR, NodeIndex valueIndex, WriteBarrierUseKind, GPRReg scratchGPR1 = InvalidGPRReg);
static GPRReg selectScratchGPR(GPRReg preserve1 = InvalidGPRReg, GPRReg preserve2 = InvalidGPRReg, GPRReg preserve3 = InvalidGPRReg)
{
bool isKnownNotNumber(NodeIndex);
bool isKnownBoolean(NodeIndex);
+
+ bool isKnownNotCell(NodeIndex);
// Checks/accessors for constant values.
bool isConstant(NodeIndex nodeIndex) { return m_jit.isConstant(nodeIndex); }
void nonSpeculativeInstanceOf(Node&);
JITCompiler::Call cachedGetById(GPRReg baseGPR, GPRReg resultGPR, GPRReg scratchGPR, unsigned identifierNumber, JITCompiler::Jump slowPathTarget = JITCompiler::Jump(), NodeType = GetById);
- void cachedPutById(GPRReg baseGPR, GPRReg valueGPR, GPRReg scratchGPR, unsigned identifierNumber, PutKind, JITCompiler::Jump slowPathTarget = JITCompiler::Jump());
+ void cachedPutById(GPRReg base, GPRReg value, NodeIndex valueIndex, GPRReg scratchGPR, unsigned identifierNumber, PutKind, JITCompiler::Jump slowPathTarget = JITCompiler::Jump());
void cachedGetMethod(GPRReg baseGPR, GPRReg resultGPR, GPRReg scratchGPR, unsigned identifierNumber, JITCompiler::Jump slowPathTarget = JITCompiler::Jump());
void nonSpeculativeNonPeepholeCompareNull(NodeIndex operand, bool invert = false);
class GPRTemporary {
public:
+ GPRTemporary();
GPRTemporary(JITCodeGenerator*);
GPRTemporary(JITCodeGenerator*, GPRReg specific);
GPRTemporary(JITCodeGenerator*, SpeculateIntegerOperand&);
GPRTemporary(JITCodeGenerator*, JSValueOperand&);
GPRTemporary(JITCodeGenerator*, StorageOperand&);
+ void adopt(GPRTemporary&);
+
~GPRTemporary()
{
- m_jit->unlock(gpr());
+ if (m_jit)
+ m_jit->unlock(gpr());
}
GPRReg gpr()
for (WriteBarrier<Structure>* it = prototypeChain->head(); *it; ++it)
testPrototype(stubJit, scratchGPR, (*it)->storedPrototype(), failureCases);
}
-
- JITCodeGenerator::writeBarrier(stubJit, baseGPR, scratchGPR, WriteBarrierForPropertyAccess);
-
+
+#if ENABLE(GGC) || ENABLE(WRITE_BARRIER_PROFILING)
+ // Must always emit this write barrier as the structure transition itself requires it
+ GPRReg scratch2 = JITCodeGenerator::selectScratchGPR(baseGPR, valueGPR, scratchGPR);
+ stubJit.push(scratch2);
+ JITCodeGenerator::writeBarrier(stubJit, baseGPR, scratchGPR, scratch2, WriteBarrierForPropertyAccess);
+ stubJit.pop(scratch2);
+#endif
+
stubJit.storePtr(MacroAssembler::TrustedImmPtr(structure), MacroAssembler::Address(baseGPR, JSCell::structureOffset()));
if (structure->isUsingInlineStorage())
stubJit.storePtr(valueGPR, MacroAssembler::Address(baseGPR, JSObject::offsetOfInlineStorage() + slot.cachedOffset() * sizeof(JSValue)));
*/
#include "config.h"
+
#include "DFGSpeculativeJIT.h"
#if ENABLE(DFG_JIT)
if (!m_compileOkay)
return;
-
- writeBarrier(m_jit, baseReg, scratchReg, WriteBarrierForPropertyAccess);
+
+ writeBarrier(baseReg, value.gpr(), node.child3(), WriteBarrierForPropertyAccess, scratchReg);
// Check that base is an array, and that property is contained within m_vector (< m_vectorLength).
// If we have predicted the base to be type array, we can skip the check.
GPRReg baseReg = base.gpr();
GPRReg scratchReg = scratch.gpr();
- writeBarrier(m_jit, baseReg, scratchReg, WriteBarrierForPropertyAccess);
+ writeBarrier(base.gpr(), value.gpr(), node.child3(), WriteBarrierForPropertyAccess, scratchReg);
// Get the array storage.
GPRReg storageReg = scratchReg;
m_jit.loadPtr(JITCompiler::Address(scopeChain.gpr(), JSVariableObject::offsetOfRegisters()), scratchGPR);
JSValueOperand value(this, node.child2());
m_jit.storePtr(value.gpr(), JITCompiler::Address(scratchGPR, node.varNumber() * sizeof(Register)));
- writeBarrier(m_jit, scopeChain.gpr(), scratchGPR, WriteBarrierForVariableAccess);
+ writeBarrier(scopeChain.gpr(), value.gpr(), node.child2(), WriteBarrierForVariableAccess, scratchGPR);
break;
}
case GetById: {
base.use();
value.use();
- cachedPutById(baseGPR, valueGPR, scratchGPR, node.identifierNumber(), NotDirect);
+ cachedPutById(baseGPR, valueGPR, node.child2(), scratchGPR, node.identifierNumber(), NotDirect);
noResult(m_compileIndex, UseChildrenCalledExplicitly);
break;
base.use();
value.use();
- cachedPutById(baseGPR, valueGPR, scratchGPR, node.identifierNumber(), Direct);
+ cachedPutById(baseGPR, valueGPR, node.child2(), scratchGPR, node.identifierNumber(), Direct);
noResult(m_compileIndex, UseChildrenCalledExplicitly);
break;
m_jit.move(MacroAssembler::TrustedImmPtr(m_jit.codeBlock()->globalObject()), globalObjectReg);
- writeBarrier(m_jit, globalObjectReg, scratchReg, WriteBarrierForVariableAccess);
+ writeBarrier(m_jit.codeBlock()->globalObject(), value.gpr(), node.child1(), WriteBarrierForVariableAccess, scratchReg);
m_jit.loadPtr(MacroAssembler::Address(globalObjectReg, JSVariableObject::offsetOfRegisters()), scratchReg);
m_jit.storePtr(value.gpr(), JITCompiler::addressForGlobalVar(scratchReg, node.varNumber()));
--- /dev/null
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef CardSet_h
+#define CardSet_h
+
+#include <stdint.h>
+#include <wtf/Assertions.h>
+#include <wtf/Noncopyable.h>
+
+namespace JSC {
+
+template <size_t cardSize, size_t blockSize> class CardSet {
+ WTF_MAKE_NONCOPYABLE(CardSet);
+ static const size_t cardCount = (blockSize + cardSize - 1) / cardSize;
+
+public:
+ CardSet()
+ {
+ memset(m_cards, 0, cardCount);
+ }
+
+ bool isCardMarkedForAtom(const void*);
+ void markCardForAtom(const void*);
+ uint8_t& cardForAtom(const void*);
+
+private:
+ uint8_t m_cards[cardCount];
+ COMPILE_ASSERT(!(cardSize & (cardSize - 1)), cardSet_cardSize_is_power_of_two);
+ COMPILE_ASSERT(!(cardCount & (cardCount - 1)), cardSet_cardCount_is_power_of_two);
+};
+
+template <size_t cardSize, size_t blockSize> uint8_t& CardSet<cardSize, blockSize>::cardForAtom(const void* ptr)
+{
+ ASSERT(ptr > this && ptr < (reinterpret_cast<char*>(this) + cardCount * cardSize));
+ uintptr_t card = (reinterpret_cast<uintptr_t>(ptr) / cardSize) % cardCount;
+ return m_cards[card];
+}
+
+template <size_t cardSize, size_t blockSize> bool CardSet<cardSize, blockSize>::isCardMarkedForAtom(const void* ptr)
+{
+ return cardForAtom(ptr);
+}
+
+template <size_t cardSize, size_t blockSize> void CardSet<cardSize, blockSize>::markCardForAtom(const void* ptr)
+{
+ cardForAtom(ptr) = 1;
+}
+
+}
+
+#endif
}
}
-#if ENABLE(GGC)
-void Heap::writeBarrierSlowCase(const JSCell* owner, JSCell* cell)
-{
-}
-
-#else
-
-void Heap::writeBarrierSlowCase(const JSCell*, JSCell*)
-{
-}
-#endif
-
} // namespace JSC
static void writeBarrier(const JSCell*, JSValue);
static void writeBarrier(const JSCell*, JSCell*);
+ static uint8_t* addressOfCardFor(JSCell*);
Heap(JSGlobalData*, HeapSize);
~Heap();
static const size_t minExtraCost = 256;
static const size_t maxExtraCost = 1024 * 1024;
-
-#if ENABLE(GGC)
- static void writeBarrierFastCase(const JSCell* owner, JSCell*);
-#endif
bool isValidAllocation(size_t);
void reportExtraMemoryCostSlowCase(size_t);
RegisterFile& registerFile();
- static void writeBarrierSlowCase(const JSCell*, JSCell*);
-
void waitForRelativeTimeWhileHoldingLock(double relative);
void waitForRelativeTime(double relative);
void blockFreeingThreadMain();
}
#if ENABLE(GGC)
- inline void Heap::writeBarrierFastCase(const JSCell* owner, JSCell* cell)
+ inline uint8_t* Heap::addressOfCardFor(JSCell* cell)
{
- if (MarkedBlock::blockFor(owner)->inNewSpace())
- return;
- writeBarrierSlowCase(owner, cell);
+ return MarkedBlock::blockFor(cell)->addressOfCardFor(cell);
}
- inline void Heap::writeBarrier(const JSCell* owner, JSCell* cell)
+ inline void Heap::writeBarrier(const JSCell* owner, JSCell*)
{
WriteBarrierCounters::countWriteBarrier();
- writeBarrierFastCase(owner, cell);
+ MarkedBlock::blockFor(owner)->setDirtyObject(owner);
}
inline void Heap::writeBarrier(const JSCell* owner, JSValue value)
{
- WriteBarrierCounters::countWriteBarrier();
if (!value)
return;
if (!value.isCell())
return;
- writeBarrierFastCase(owner, value.asCell());
+ writeBarrier(owner, value.asCell());
}
#else
#ifndef MarkedBlock_h
#define MarkedBlock_h
+#include "CardSet.h"
+
#include <wtf/Bitmap.h>
#include <wtf/DoublyLinkedList.h>
#include <wtf/HashFunctions.h>
// object the heap will commonly allocate is four words.
static const size_t atomSize = 4 * sizeof(void*);
static const size_t blockSize = 16 * KB;
- static const size_t atomsPerBlock = blockSize / atomSize; // ~0.4% overhead for mark bits.
+ static const size_t blockMask = ~(blockSize - 1); // blockSize must be a power of two.
+
+ static const size_t atomsPerBlock = blockSize / atomSize; // ~0.4% overhead
+ static const size_t bytesPerCard = 512; // 1.6% overhead
+ static const int log2CardSize = 9;
struct FreeCell {
FreeCell* next;
bool testAndSetMarked(const void*);
bool testAndClearMarked(const void*);
void setMarked(const void*);
+
+#if ENABLE(GGC)
+ void setDirtyObject(const void* atom)
+ {
+ m_cards.markCardForAtom(atom);
+ }
+
+ uint8_t* addressOfCardFor(const void* atom)
+ {
+ return &m_cards.cardForAtom(atom);
+ }
+
+ static inline size_t offsetOfCards()
+ {
+ return OBJECT_OFFSETOF(MarkedBlock, m_cards);
+ }
+#endif
template <typename Functor> void forEachCell(Functor&);
private:
- static const size_t blockMask = ~(blockSize - 1); // blockSize must be a power of two.
static const size_t atomMask = ~(atomSize - 1); // atomSize must be a power of two.
enum DestructorState { FreeCellsDontHaveObjects, SomeFreeCellsStillHaveObjects, AllFreeCellsHaveObjects };
{
return static_cast<DestructorState>(m_destructorState);
}
-
+
+#if ENABLE(GGC)
+ CardSet<bytesPerCard, blockSize> m_cards;
+#endif
+
size_t m_endAtom; // This is a fuzzy end. Always test for < m_endAtom.
size_t m_atomsPerCell;
WTF::Bitmap<atomsPerBlock> m_marks;
functor(reinterpret_cast<JSCell*>(&atoms()[i]));
}
}
-
+
} // namespace JSC
namespace WTF {
void testPrototype(JSValue, JumpList& failureCases);
- void emitWriteBarrier(RegisterID owner, RegisterID scratch, WriteBarrierUseKind);
+ enum WriteBarrierMode { UnconditionalWriteBarrier, ShouldFilterImmediates };
+ // value register in write barrier is used before any scratch registers
+ // so may safely be the same as either of the scratch registers.
+ void emitWriteBarrier(RegisterID owner, RegisterID valueTag, RegisterID scratch, RegisterID scratch2, WriteBarrierMode, WriteBarrierUseKind);
+ void emitWriteBarrier(JSCell* owner, RegisterID value, RegisterID scratch, WriteBarrierMode, WriteBarrierUseKind);
template<typename ClassType, typename StructureType> void emitAllocateBasicJSObject(StructureType, void* vtable, RegisterID result, RegisterID storagePtr);
template<typename T> void emitAllocateJSFinalObject(T structure, RegisterID result, RegisterID storagePtr);
Jump empty = branchTestPtr(Zero, BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
Label storeResult(this);
- emitGetVirtualRegister(value, regT0);
- storePtr(regT0, BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
+ emitGetVirtualRegister(value, regT3);
+ storePtr(regT3, BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
Jump end = jump();
empty.link(this);
add32(TrustedImm32(1), Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector)));
branch32(Below, regT1, Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_length))).linkTo(storeResult, this);
- move(regT1, regT0);
- add32(TrustedImm32(1), regT0);
- store32(regT0, Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_length)));
+ add32(TrustedImm32(1), Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_length)));
jump().linkTo(storeResult, this);
end.link(this);
+
+ emitWriteBarrier(regT0, regT3, regT1, regT3, ShouldFilterImmediates, WriteBarrierForPropertyAccess);
}
void JIT::emitSlow_op_put_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
// Jump to a slow case if either the base object is an immediate, or if the Structure does not match.
emitJumpSlowCaseIfNotJSCell(regT0, baseVReg);
- emitWriteBarrier(regT0, regT2, WriteBarrierForPropertyAccess);
-
BEGIN_UNINTERRUPTED_SEQUENCE(sequencePutById);
Label hotPathBegin(this);
addSlowCase(branchPtrWithPatch(NotEqual, Address(regT0, JSCell::structureOffset()), structureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure))));
ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, structureToCompare), patchOffsetPutByIdStructure);
- loadPtr(Address(regT0, JSObject::offsetOfPropertyStorage()), regT0);
- DataLabel32 displacementLabel = storePtrWithAddressOffsetPatch(regT1, Address(regT0, patchPutByIdDefaultOffset));
+ loadPtr(Address(regT0, JSObject::offsetOfPropertyStorage()), regT2);
+ DataLabel32 displacementLabel = storePtrWithAddressOffsetPatch(regT1, Address(regT2, patchPutByIdDefaultOffset));
END_UNINTERRUPTED_SEQUENCE(sequencePutById);
+ emitWriteBarrier(regT0, regT1, regT2, regT3, ShouldFilterImmediates, WriteBarrierForPropertyAccess);
+
ASSERT_JIT_OFFSET_UNUSED(displacementLabel, differenceBetween(hotPathBegin, displacementLabel), patchOffsetPutByIdPropertyMapOffset);
}
restoreReturnAddressBeforeReturn(regT3);
}
-
- emitWriteBarrier(regT0, regT2, WriteBarrierForPropertyAccess);
+
+ // Planting the new structure triggers the write barrier so we need
+ // an unconditional barrier here.
+ emitWriteBarrier(regT0, regT1, regT2, regT3, UnconditionalWriteBarrier, WriteBarrierForPropertyAccess);
storePtr(TrustedImmPtr(newStructure), Address(regT0, JSCell::structureOffset()));
compilePutDirectOffset(regT0, regT1, cachedOffset);
loadPtr(Address(regT1, OBJECT_OFFSETOF(ScopeChainNode, next)), regT1);
loadPtr(Address(regT1, OBJECT_OFFSETOF(ScopeChainNode, object)), regT1);
- emitWriteBarrier(regT1, regT2, WriteBarrierForVariableAccess);
+ emitWriteBarrier(regT1, regT0, regT2, regT3, ShouldFilterImmediates, WriteBarrierForVariableAccess);
loadPtr(Address(regT1, JSVariableObject::offsetOfRegisters()), regT1);
storePtr(regT0, Address(regT1, currentInstruction[1].u.operand * sizeof(Register)));
JSGlobalObject* globalObject = m_codeBlock->globalObject();
emitGetVirtualRegister(currentInstruction[2].u.operand, regT0);
- move(TrustedImmPtr(globalObject), regT1);
-
- emitWriteBarrier(regT1, regT2, WriteBarrierForVariableAccess);
+ move(TrustedImmPtr(globalObject), regT1);
loadPtr(Address(regT1, JSVariableObject::offsetOfRegisters()), regT1);
storePtr(regT0, Address(regT1, currentInstruction[1].u.operand * sizeof(Register)));
+ emitWriteBarrier(globalObject, regT0, regT2, ShouldFilterImmediates, WriteBarrierForVariableAccess);
}
-void JIT::emitWriteBarrier(RegisterID owner, RegisterID scratch, WriteBarrierUseKind useKind)
+#endif // USE(JSVALUE64)
+
+void JIT::emitWriteBarrier(RegisterID owner, RegisterID value, RegisterID scratch, RegisterID scratch2, WriteBarrierMode mode, WriteBarrierUseKind useKind)
{
UNUSED_PARAM(owner);
UNUSED_PARAM(scratch);
+ UNUSED_PARAM(scratch2);
UNUSED_PARAM(useKind);
+ UNUSED_PARAM(value);
+ UNUSED_PARAM(mode);
ASSERT(owner != scratch);
+ ASSERT(owner != scratch2);
#if ENABLE(WRITE_BARRIER_PROFILING)
emitCount(WriteBarrierCounters::jitCounterFor(useKind));
#endif
+
+#if ENABLE(GGC)
+ Jump filterCells;
+ if (mode == ShouldFilterImmediates)
+ filterCells = emitJumpIfNotJSCell(value);
+ move(owner, scratch);
+ andPtr(TrustedImm32(static_cast<int32_t>(MarkedBlock::blockMask)), scratch);
+ move(owner, scratch2);
+ andPtr(TrustedImm32(static_cast<int32_t>(~MarkedBlock::blockMask)), scratch2);
+ rshift32(TrustedImm32(MarkedBlock::log2CardSize), scratch2);
+ store8(TrustedImm32(1), BaseIndex(scratch, scratch2, TimesOne, MarkedBlock::offsetOfCards()));
+ if (mode == ShouldFilterImmediates)
+ filterCells.link(this);
+#endif
}
-#endif // USE(JSVALUE64)
+void JIT::emitWriteBarrier(JSCell* owner, RegisterID value, RegisterID scratch, WriteBarrierMode mode, WriteBarrierUseKind useKind)
+{
+ UNUSED_PARAM(owner);
+ UNUSED_PARAM(scratch);
+ UNUSED_PARAM(useKind);
+ UNUSED_PARAM(value);
+ UNUSED_PARAM(mode);
+
+#if ENABLE(WRITE_BARRIER_PROFILING)
+ emitCount(WriteBarrierCounters::jitCounterFor(useKind));
+#endif
+
+#if ENABLE(GGC)
+ Jump filterCells;
+ if (mode == ShouldFilterImmediates)
+ filterCells = emitJumpIfNotJSCell(value);
+ uint8_t* cardAddress = Heap::addressOfCardFor(owner);
+ move(TrustedImmPtr(cardAddress), scratch);
+ store8(TrustedImm32(1), Address(scratch));
+ if (mode == ShouldFilterImmediates)
+ filterCells.link(this);
+#endif
+}
void JIT::testPrototype(JSValue prototype, JumpList& failureCases)
{
emitLoad2(base, regT1, regT0, property, regT3, regT2);
addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
- emitJumpSlowCaseIfNotJSCell(base, regT1);
- emitWriteBarrier(regT0, regT1, WriteBarrierForPropertyAccess);
addSlowCase(branchPtr(NotEqual, Address(regT0), TrustedImmPtr(m_globalData->jsArrayVPtr)));
addSlowCase(branch32(AboveOrEqual, regT2, Address(regT0, JSArray::vectorLengthOffset())));
+ emitJumpSlowCaseIfNotJSCell(base, regT1);
+ emitWriteBarrier(regT0, regT1, regT1, regT3, UnconditionalWriteBarrier, WriteBarrierForPropertyAccess);
+
loadPtr(Address(regT0, JSArray::storageOffset()), regT3);
Jump empty = branch32(Equal, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), TrustedImm32(JSValue::EmptyValueTag));
emitJumpSlowCaseIfNotJSCell(base, regT1);
- emitWriteBarrier(regT0, regT1, WriteBarrierForPropertyAccess);
-
BEGIN_UNINTERRUPTED_SEQUENCE(sequencePutById);
Label hotPathBegin(this);
addSlowCase(branchPtrWithPatch(NotEqual, Address(regT0, JSCell::structureOffset()), structureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure))));
ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, structureToCompare), patchOffsetPutByIdStructure);
- loadPtr(Address(regT0, JSObject::offsetOfPropertyStorage()), regT0);
- DataLabel32 displacementLabel1 = storePtrWithAddressOffsetPatch(regT2, Address(regT0, patchPutByIdDefaultOffset)); // payload
- DataLabel32 displacementLabel2 = storePtrWithAddressOffsetPatch(regT3, Address(regT0, patchPutByIdDefaultOffset)); // tag
+ loadPtr(Address(regT0, JSObject::offsetOfPropertyStorage()), regT1);
+ DataLabel32 displacementLabel1 = storePtrWithAddressOffsetPatch(regT2, Address(regT1, patchPutByIdDefaultOffset)); // payload
+ DataLabel32 displacementLabel2 = storePtrWithAddressOffsetPatch(regT3, Address(regT1, patchPutByIdDefaultOffset)); // tag
END_UNINTERRUPTED_SEQUENCE(sequencePutById);
+
+ emitWriteBarrier(regT0, regT2, regT1, regT2, ShouldFilterImmediates, WriteBarrierForPropertyAccess);
ASSERT_JIT_OFFSET_UNUSED(displacementLabel1, differenceBetween(hotPathBegin, displacementLabel1), patchOffsetPutByIdPropertyMapOffset1);
ASSERT_JIT_OFFSET_UNUSED(displacementLabel2, differenceBetween(hotPathBegin, displacementLabel2), patchOffsetPutByIdPropertyMapOffset2);
linkSlowCase(iter);
JITStubCall stubCall(this, direct ? cti_op_put_by_id_direct : cti_op_put_by_id);
- stubCall.addArgument(regT1, regT0);
+ stubCall.addArgument(base);
stubCall.addArgument(TrustedImmPtr(&(m_codeBlock->identifier(ident))));
stubCall.addArgument(regT3, regT2);
Call call = stubCall.call();
#endif
}
- emitWriteBarrier(regT0, regT1, WriteBarrierForPropertyAccess);
+ emitWriteBarrier(regT0, regT1, regT1, regT3, UnconditionalWriteBarrier, WriteBarrierForPropertyAccess);
storePtr(TrustedImmPtr(newStructure), Address(regT0, JSCell::structureOffset()));
#if CPU(MIPS) || CPU(SH4) || CPU(ARM)
loadPtr(Address(regT2, OBJECT_OFFSETOF(ScopeChainNode, next)), regT2);
loadPtr(Address(regT2, OBJECT_OFFSETOF(ScopeChainNode, object)), regT2);
- emitWriteBarrier(regT2, regT3, WriteBarrierForVariableAccess);
-
- loadPtr(Address(regT2, JSVariableObject::offsetOfRegisters()), regT2);
- emitStore(index, regT1, regT0, regT2);
- map(m_bytecodeOffset + OPCODE_LENGTH(op_put_scoped_var), value, regT1, regT0);
+ loadPtr(Address(regT2, JSVariableObject::offsetOfRegisters()), regT3);
+ emitStore(index, regT1, regT0, regT3);
+ emitWriteBarrier(regT2, regT1, regT0, regT1, ShouldFilterImmediates, WriteBarrierForVariableAccess);
}
void JIT::emit_op_get_global_var(Instruction* currentInstruction)
emitLoad(value, regT1, regT0);
move(TrustedImmPtr(globalObject), regT2);
- emitWriteBarrier(regT2, regT3, WriteBarrierForVariableAccess);
+ emitWriteBarrier(globalObject, regT1, regT3, ShouldFilterImmediates, WriteBarrierForVariableAccess);
loadPtr(Address(regT2, JSVariableObject::offsetOfRegisters()), regT2);
emitStore(index, regT1, regT0, regT2);
map(m_bytecodeOffset + OPCODE_LENGTH(op_put_global_var), value, regT1, regT0);
}
-void JIT::emitWriteBarrier(RegisterID owner, RegisterID scratch, WriteBarrierUseKind useKind)
-{
- UNUSED_PARAM(owner);
- UNUSED_PARAM(scratch);
- UNUSED_PARAM(useKind);
- ASSERT(owner != scratch);
-
-#if ENABLE(WRITE_BARRIER_PROFILING)
- emitCount(WriteBarrierCounters::jitCounterFor(useKind));
-#endif
-}
-
} // namespace JSC
#endif // USE(JSVALUE32_64)
+2011-09-23 Oliver Hunt <oliver@apple.com>
+
+ Make write barriers actually do something when enabled
+ https://bugs.webkit.org/show_bug.cgi?id=68717
+
+ Reviewed by Geoffrey Garen.
+
+ Add a forwarding header, and fix an evaluation ordering
+ issue that shows up if you try to use write barriers.
+
+ * ForwardingHeaders/heap/CardSet.h: Added.
+ * bindings/js/JSEventListener.h:
+ (WebCore::JSEventListener::jsFunction):
+
2011-09-23 James Robinson <jamesr@chromium.org>
Avoid updating compositing state during paint
--- /dev/null
+#ifndef WebCore_FWD_CardSet_h
+#define WebCore_FWD_CardSet_h
+#include <JavaScriptCore/CardSet.h>
+#endif
inline JSC::JSObject* JSEventListener::jsFunction(ScriptExecutionContext* scriptExecutionContext) const
{
- if (!m_jsFunction)
- m_jsFunction.setMayBeNull(*scriptExecutionContext->globalData(), m_wrapper.get(), initializeJSFunction(scriptExecutionContext));
+ if (!m_jsFunction) {
+ JSC::JSObject* function = initializeJSFunction(scriptExecutionContext);
+ m_jsFunction.setMayBeNull(*scriptExecutionContext->globalData(), m_wrapper.get(), function);
+ }
// Verify that we have a valid wrapper protecting our function from
// garbage collection.