+2011-09-15 Filip Pizlo <fpizlo@apple.com>
+
+ The DFG non-speculative JIT is no longer used and should be removed.
+ https://bugs.webkit.org/show_bug.cgi?id=68177
+
+ Reviewed by Geoffrey Garen.
+
+ This removes the non-speculative JIT and everything that relied on it,
+ including the ability to turn on DFG but not tiered compilation, the
+ ability to perform speculation failure into non-speculative JIT code,
+ and the ability to statically terminate speculation.
+
+ * GNUmakefile.list.am:
+ * JavaScriptCore.pro:
+ * JavaScriptCore.xcodeproj/project.pbxproj:
+ * bytecode/CodeBlock.h:
+ * bytecompiler/BytecodeGenerator.cpp:
+ (JSC::BytecodeGenerator::emitLoopHint):
+ * dfg/DFGByteCodeParser.cpp:
+ (JSC::DFG::ByteCodeParser::ByteCodeParser):
+ (JSC::DFG::ByteCodeParser::getStrongPrediction):
+ (JSC::DFG::ByteCodeParser::parseBlock):
+ * dfg/DFGDriver.cpp:
+ (JSC::DFG::compile):
+ * dfg/DFGGenerationInfo.h:
+ * dfg/DFGGraph.cpp:
+ (JSC::DFG::Graph::predictArgumentTypes):
+ * dfg/DFGJITCodeGenerator.cpp:
+ * dfg/DFGJITCompiler.cpp:
+ (JSC::DFG::JITCompiler::linkOSRExits):
+ (JSC::DFG::JITCompiler::compileBody):
+ * dfg/DFGJITCompiler.h:
+ * dfg/DFGNode.h:
+ * dfg/DFGNonSpeculativeJIT.cpp: Removed.
+ * dfg/DFGNonSpeculativeJIT.h: Removed.
+ * dfg/DFGOSREntry.cpp:
+ (JSC::DFG::prepareOSREntry):
+ * dfg/DFGPropagator.cpp:
+ * dfg/DFGPropagator.h:
+ * dfg/DFGSpeculativeJIT.cpp:
+ (JSC::DFG::SpeculativeJIT::compile):
+ * dfg/DFGSpeculativeJIT.h:
+ (JSC::DFG::SpeculativeJIT::osrExits):
+ (JSC::DFG::SpeculativeJIT::speculationRecovery):
+ (JSC::DFG::SpeculativeJIT::speculationCheck):
+ (JSC::DFG::SpeculativeJIT::terminateSpeculativeExecution):
+ * jit/JIT.cpp:
+ (JSC::JIT::privateCompileMainPass):
+ (JSC::JIT::privateCompile):
+ * jit/JIT.h:
+ * jit/JITCode.h:
+ (JSC::JITCode::bottomTierJIT):
+ * runtime/JSGlobalData.cpp:
+ (JSC::JSGlobalData::JSGlobalData):
+ (JSC::JSGlobalData::~JSGlobalData):
+ * runtime/JSGlobalData.h:
+ * wtf/Platform.h:
+
2011-09-15 Eric Seidel <eric@webkit.org>
Remove ENABLE(SVG_AS_IMAGE) since all major ports have it on by default
Source/JavaScriptCore/dfg/DFGJITCompiler.cpp \
Source/JavaScriptCore/dfg/DFGJITCompiler.h \
Source/JavaScriptCore/dfg/DFGNode.h \
- Source/JavaScriptCore/dfg/DFGNonSpeculativeJIT.cpp \
- Source/JavaScriptCore/dfg/DFGNonSpeculativeJIT.h \
Source/JavaScriptCore/dfg/DFGOperations.cpp \
Source/JavaScriptCore/dfg/DFGOperations.h \
Source/JavaScriptCore/dfg/DFGPropagator.cpp \
dfg/DFGGraph.cpp \
dfg/DFGJITCodeGenerator.cpp \
dfg/DFGJITCompiler.cpp \
- dfg/DFGNonSpeculativeJIT.cpp \
dfg/DFGOperations.cpp \
dfg/DFGSpeculativeJIT.cpp \
interpreter/CallFrame.cpp \
86EC9DCA1328DF82002B2AD7 /* DFGJITCodeGenerator.h in Headers */ = {isa = PBXBuildFile; fileRef = 86EC9DBA1328DF82002B2AD7 /* DFGJITCodeGenerator.h */; };
86EC9DCB1328DF82002B2AD7 /* DFGJITCompiler.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 86EC9DBB1328DF82002B2AD7 /* DFGJITCompiler.cpp */; };
86EC9DCC1328DF82002B2AD7 /* DFGJITCompiler.h in Headers */ = {isa = PBXBuildFile; fileRef = 86EC9DBC1328DF82002B2AD7 /* DFGJITCompiler.h */; };
- 86EC9DCD1328DF82002B2AD7 /* DFGNonSpeculativeJIT.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 86EC9DBD1328DF82002B2AD7 /* DFGNonSpeculativeJIT.cpp */; };
- 86EC9DCE1328DF82002B2AD7 /* DFGNonSpeculativeJIT.h in Headers */ = {isa = PBXBuildFile; fileRef = 86EC9DBE1328DF82002B2AD7 /* DFGNonSpeculativeJIT.h */; };
86EC9DCF1328DF82002B2AD7 /* DFGOperations.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 86EC9DBF1328DF82002B2AD7 /* DFGOperations.cpp */; };
86EC9DD01328DF82002B2AD7 /* DFGOperations.h in Headers */ = {isa = PBXBuildFile; fileRef = 86EC9DC01328DF82002B2AD7 /* DFGOperations.h */; };
86EC9DD11328DF82002B2AD7 /* DFGRegisterBank.h in Headers */ = {isa = PBXBuildFile; fileRef = 86EC9DC11328DF82002B2AD7 /* DFGRegisterBank.h */; };
86EC9DBA1328DF82002B2AD7 /* DFGJITCodeGenerator.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = DFGJITCodeGenerator.h; path = dfg/DFGJITCodeGenerator.h; sourceTree = "<group>"; };
86EC9DBB1328DF82002B2AD7 /* DFGJITCompiler.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = DFGJITCompiler.cpp; path = dfg/DFGJITCompiler.cpp; sourceTree = "<group>"; };
86EC9DBC1328DF82002B2AD7 /* DFGJITCompiler.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = DFGJITCompiler.h; path = dfg/DFGJITCompiler.h; sourceTree = "<group>"; };
- 86EC9DBD1328DF82002B2AD7 /* DFGNonSpeculativeJIT.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = DFGNonSpeculativeJIT.cpp; path = dfg/DFGNonSpeculativeJIT.cpp; sourceTree = "<group>"; };
- 86EC9DBE1328DF82002B2AD7 /* DFGNonSpeculativeJIT.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = DFGNonSpeculativeJIT.h; path = dfg/DFGNonSpeculativeJIT.h; sourceTree = "<group>"; };
86EC9DBF1328DF82002B2AD7 /* DFGOperations.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = DFGOperations.cpp; path = dfg/DFGOperations.cpp; sourceTree = "<group>"; };
86EC9DC01328DF82002B2AD7 /* DFGOperations.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = DFGOperations.h; path = dfg/DFGOperations.h; sourceTree = "<group>"; };
86EC9DC11328DF82002B2AD7 /* DFGRegisterBank.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = DFGRegisterBank.h; path = dfg/DFGRegisterBank.h; sourceTree = "<group>"; };
86EC9DBB1328DF82002B2AD7 /* DFGJITCompiler.cpp */,
86EC9DBC1328DF82002B2AD7 /* DFGJITCompiler.h */,
86ECA3E9132DEF1C002B2AD7 /* DFGNode.h */,
- 86EC9DBD1328DF82002B2AD7 /* DFGNonSpeculativeJIT.cpp */,
- 86EC9DBE1328DF82002B2AD7 /* DFGNonSpeculativeJIT.h */,
86EC9DBF1328DF82002B2AD7 /* DFGOperations.cpp */,
86EC9DC01328DF82002B2AD7 /* DFGOperations.h */,
86EC9DC11328DF82002B2AD7 /* DFGRegisterBank.h */,
86EC9DCA1328DF82002B2AD7 /* DFGJITCodeGenerator.h in Headers */,
86EC9DCC1328DF82002B2AD7 /* DFGJITCompiler.h in Headers */,
86ECA3EA132DEF1C002B2AD7 /* DFGNode.h in Headers */,
- 86EC9DCE1328DF82002B2AD7 /* DFGNonSpeculativeJIT.h in Headers */,
142E3136134FF0A600AFADB5 /* HandleHeap.h in Headers */,
142E3138134FF0A600AFADB5 /* HandleStack.h in Headers */,
86EC9DD01328DF82002B2AD7 /* DFGOperations.h in Headers */,
86EC9DC71328DF82002B2AD7 /* DFGGraph.cpp in Sources */,
86EC9DC91328DF82002B2AD7 /* DFGJITCodeGenerator.cpp in Sources */,
86EC9DCB1328DF82002B2AD7 /* DFGJITCompiler.cpp in Sources */,
- 86EC9DCD1328DF82002B2AD7 /* DFGNonSpeculativeJIT.cpp in Sources */,
86EC9DCF1328DF82002B2AD7 /* DFGOperations.cpp in Sources */,
86EC9DD21328DF82002B2AD7 /* DFGSpeculativeJIT.cpp in Sources */,
14469DD7107EC79E00650446 /* dtoa.cpp in Sources */,
void unlinkIncomingCalls();
#endif
-#if ENABLE(TIERED_COMPILATION)
+#if ENABLE(DFG_JIT)
void setJITCodeMap(PassOwnPtr<CompactJITCodeMap> jitCodeMap)
{
m_jitCodeMap = jitCodeMap;
MacroAssemblerCodePtr m_jitCodeWithArityCheck;
SentinelLinkedList<CallLinkInfo, BasicRawSentinelNode<CallLinkInfo> > m_incomingCalls;
#endif
-#if ENABLE(TIERED_COMPILATION)
+#if ENABLE(DFG_JIT)
OwnPtr<CompactJITCodeMap> m_jitCodeMap;
#endif
#if ENABLE(VALUE_PROFILER)
void BytecodeGenerator::emitLoopHint()
{
-#if ENABLE(TIERED_COMPILATION)
+#if ENABLE(DFG_JIT)
emitOpcode(op_loop_hint);
#endif
}
namespace JSC { namespace DFG {
-#if ENABLE(DFG_JIT_RESTRICTIONS)
-// FIXME: Temporarily disable arithmetic, until we fix associated performance regressions.
-// FIXME: temporarily disable property accesses until we fix regressions.
-#define ARITHMETIC_OP() m_parseFailed = true
-#define PROPERTY_ACCESS_OP() m_parseFailed = true
-#else
-#define ARITHMETIC_OP() ((void)0)
-#define PROPERTY_ACCESS_OP() ((void)0)
-#endif
-
// === ByteCodeParser ===
//
// This class is used to compile the dataflow graph from a CodeBlock.
, m_parameterSlots(0)
, m_numPassedVarArgs(0)
{
-#if ENABLE(DYNAMIC_OPTIMIZATION)
ASSERT(m_profiledBlock);
-#endif
}
// Parse a full CodeBlock of bytecode.
UNUSED_PARAM(nodeIndex);
UNUSED_PARAM(bytecodeIndex);
-#if ENABLE(DYNAMIC_OPTIMIZATION)
ValueProfile* profile = m_profiledBlock->valueProfileForBytecodeOffset(bytecodeIndex);
ASSERT(profile);
PredictedType prediction = profile->computeUpdatedPrediction();
printf("Dynamic [%u, %u] prediction: %s\n", nodeIndex, bytecodeIndex, predictionToString(prediction));
#endif
return prediction;
-#else
- return PredictNone;
-#endif
}
void stronglyPredict(NodeIndex nodeIndex, unsigned bytecodeIndex)
// === Arithmetic operations ===
case op_add: {
- ARITHMETIC_OP();
NodeIndex op1 = get(currentInstruction[2].u.operand);
NodeIndex op2 = get(currentInstruction[3].u.operand);
// If both operands can statically be determined to the numbers, then this is an arithmetic add.
}
case op_sub: {
- ARITHMETIC_OP();
NodeIndex op1 = getToNumber(currentInstruction[2].u.operand);
NodeIndex op2 = getToNumber(currentInstruction[3].u.operand);
if (isSmallInt32Constant(op1) || isSmallInt32Constant(op2)) {
}
case op_mul: {
- ARITHMETIC_OP();
NodeIndex op1 = getToNumber(currentInstruction[2].u.operand);
NodeIndex op2 = getToNumber(currentInstruction[3].u.operand);
set(currentInstruction[1].u.operand, addToGraph(ArithMul, op1, op2));
}
case op_mod: {
- ARITHMETIC_OP();
NodeIndex op1 = getToNumber(currentInstruction[2].u.operand);
NodeIndex op2 = getToNumber(currentInstruction[3].u.operand);
set(currentInstruction[1].u.operand, addToGraph(ArithMod, op1, op2));
}
case op_div: {
- ARITHMETIC_OP();
NodeIndex op1 = getToNumber(currentInstruction[2].u.operand);
NodeIndex op2 = getToNumber(currentInstruction[3].u.operand);
set(currentInstruction[1].u.operand, addToGraph(ArithDiv, op1, op2));
}
case op_not: {
- ARITHMETIC_OP();
NodeIndex value = get(currentInstruction[2].u.operand);
set(currentInstruction[1].u.operand, addToGraph(LogicalNot, value));
NEXT_OPCODE(op_not);
}
case op_less: {
- ARITHMETIC_OP();
NodeIndex op1 = get(currentInstruction[2].u.operand);
NodeIndex op2 = get(currentInstruction[3].u.operand);
set(currentInstruction[1].u.operand, addToGraph(CompareLess, op1, op2));
}
case op_lesseq: {
- ARITHMETIC_OP();
NodeIndex op1 = get(currentInstruction[2].u.operand);
NodeIndex op2 = get(currentInstruction[3].u.operand);
set(currentInstruction[1].u.operand, addToGraph(CompareLessEq, op1, op2));
}
case op_greater: {
- ARITHMETIC_OP();
NodeIndex op1 = get(currentInstruction[2].u.operand);
NodeIndex op2 = get(currentInstruction[3].u.operand);
set(currentInstruction[1].u.operand, addToGraph(CompareGreater, op1, op2));
}
case op_greatereq: {
- ARITHMETIC_OP();
NodeIndex op1 = get(currentInstruction[2].u.operand);
NodeIndex op2 = get(currentInstruction[3].u.operand);
set(currentInstruction[1].u.operand, addToGraph(CompareGreaterEq, op1, op2));
}
case op_eq: {
- ARITHMETIC_OP();
NodeIndex op1 = get(currentInstruction[2].u.operand);
NodeIndex op2 = get(currentInstruction[3].u.operand);
set(currentInstruction[1].u.operand, addToGraph(CompareEq, op1, op2));
}
case op_eq_null: {
- ARITHMETIC_OP();
NodeIndex value = get(currentInstruction[2].u.operand);
set(currentInstruction[1].u.operand, addToGraph(CompareEq, value, constantNull()));
NEXT_OPCODE(op_eq_null);
}
case op_stricteq: {
- ARITHMETIC_OP();
NodeIndex op1 = get(currentInstruction[2].u.operand);
NodeIndex op2 = get(currentInstruction[3].u.operand);
set(currentInstruction[1].u.operand, addToGraph(CompareStrictEq, op1, op2));
}
case op_neq: {
- ARITHMETIC_OP();
NodeIndex op1 = get(currentInstruction[2].u.operand);
NodeIndex op2 = get(currentInstruction[3].u.operand);
set(currentInstruction[1].u.operand, addToGraph(LogicalNot, addToGraph(CompareEq, op1, op2)));
}
case op_neq_null: {
- ARITHMETIC_OP();
NodeIndex value = get(currentInstruction[2].u.operand);
set(currentInstruction[1].u.operand, addToGraph(LogicalNot, addToGraph(CompareEq, value, constantNull())));
NEXT_OPCODE(op_neq_null);
}
case op_nstricteq: {
- ARITHMETIC_OP();
NodeIndex op1 = get(currentInstruction[2].u.operand);
NodeIndex op2 = get(currentInstruction[3].u.operand);
set(currentInstruction[1].u.operand, addToGraph(LogicalNot, addToGraph(CompareStrictEq, op1, op2)));
}
case op_get_by_id: {
- PROPERTY_ACCESS_OP();
NodeIndex base = get(currentInstruction[2].u.operand);
unsigned identifier = currentInstruction[3].u.operand;
}
case op_put_by_id: {
- PROPERTY_ACCESS_OP();
NodeIndex value = get(currentInstruction[3].u.operand);
NodeIndex base = get(currentInstruction[1].u.operand);
unsigned identifier = currentInstruction[2].u.operand;
NEXT_OPCODE(op_call_put_result);
case op_resolve: {
- PROPERTY_ACCESS_OP();
unsigned identifier = currentInstruction[2].u.operand;
NodeIndex resolve = addToGraph(Resolve, OpInfo(identifier));
}
case op_resolve_base: {
- PROPERTY_ACCESS_OP();
unsigned identifier = currentInstruction[2].u.operand;
NodeIndex resolve = addToGraph(currentInstruction[3].u.operand ? ResolveBaseStrictPut : ResolveBase, OpInfo(identifier));
propagate(dfg, globalData, codeBlock);
-#if ENABLE(DYNAMIC_OPTIMIZATION)
// Save the predictions we've made, so that OSR entry can verify them. Predictions
// are saved in the CodeBlock from which we will be doing OSR entry, since the
// CodeBlock to which we are OSRing may be replaced at any time.
ASSERT(codeBlock->alternative());
ASSERT(codeBlock->alternative()->getJITType() == JITCode::BaselineJIT);
codeBlock->alternative()->setPredictions(predictions.release());
-#endif
JITCompiler dataFlowJIT(globalData, dfg, codeBlock);
if (compileMode == CompileFunction) {
private:
// The index of the node whose result is stored in this virtual register.
- // FIXME: Can we remove this? - this is currently only used when collecting
- // snapshots of the RegisterBank for SpeculationCheck/EntryLocation. Could
- // investigate storing NodeIndex as the name in RegsiterBank, instead of
- // VirtualRegister.
NodeIndex m_nodeIndex;
uint32_t m_useCount;
DataFormat m_registerFormat;
}
}
-#if ENABLE(DYNAMIC_OPTIMIZATION)
ASSERT(codeBlock);
ASSERT(codeBlock->alternative());
printf("Argument [%lu] prediction: %s\n", arg, predictionToString(m_predictions.getArgumentPrediction(arg)));
#endif
}
-#else
- UNUSED_PARAM(codeBlock);
-#endif
}
} } // namespace JSC::DFG
#if ENABLE(DFG_JIT)
-#include "DFGNonSpeculativeJIT.h"
#include "DFGSpeculativeJIT.h"
#include "LinkBuffer.h"
#include "CodeBlock.h"
#include "DFGJITCodeGenerator.h"
-#include "DFGNonSpeculativeJIT.h"
#include "DFGOperations.h"
#include "DFGRegisterBank.h"
#include "DFGSpeculativeJIT.h"
loadPtr(addressFor(node.virtualRegister()), gpr);
}
-#if ENABLE(DFG_OSR_EXIT)
void JITCompiler::exitSpeculativeWithOSR(const OSRExit& exit, SpeculationRecovery* recovery, Vector<BytecodeAndMachineOffset>& decodedCodeMap)
{
// 1) Pro-forma stuff.
++exitsIter;
}
}
-#else // ENABLE(DFG_OSR_EXIT)
-class GeneralizedRegister {
-public:
- GeneralizedRegister() { }
-
- static GeneralizedRegister createGPR(GPRReg gpr)
- {
- GeneralizedRegister result;
- result.m_isFPR = false;
- result.m_register.gpr = gpr;
- return result;
- }
-
- static GeneralizedRegister createFPR(FPRReg fpr)
- {
- GeneralizedRegister result;
- result.m_isFPR = true;
- result.m_register.fpr = fpr;
- return result;
- }
-
- bool isFPR() const
- {
- return m_isFPR;
- }
-
- GPRReg gpr() const
- {
- ASSERT(!m_isFPR);
- return m_register.gpr;
- }
-
- FPRReg fpr() const
- {
- ASSERT(m_isFPR);
- return m_register.fpr;
- }
-
- const SpeculationCheck::RegisterInfo& findInSpeculationCheck(const SpeculationCheck& check)
- {
- if (isFPR())
- return check.m_fprInfo[FPRInfo::toIndex(fpr())];
- return check.m_gprInfo[GPRInfo::toIndex(gpr())];
- }
-
- const EntryLocation::RegisterInfo& findInEntryLocation(const EntryLocation& entry)
- {
- if (isFPR())
- return entry.m_fprInfo[FPRInfo::toIndex(fpr())];
- return entry.m_gprInfo[GPRInfo::toIndex(gpr())];
- }
-
- DataFormat previousDataFormat(const SpeculationCheck& check)
- {
- return findInSpeculationCheck(check).format;
- }
-
- DataFormat nextDataFormat(const EntryLocation& entry)
- {
- return findInEntryLocation(entry).format;
- }
-
- void convert(DataFormat oldDataFormat, DataFormat newDataFormat, JITCompiler& jit)
- {
- if (LIKELY(!needDataFormatConversion(oldDataFormat, newDataFormat)))
- return;
-
- if (oldDataFormat == DataFormatInteger) {
- jit.orPtr(GPRInfo::tagTypeNumberRegister, gpr());
- return;
- }
-
- ASSERT(newDataFormat == DataFormatInteger);
- jit.zeroExtend32ToPtr(gpr(), gpr());
- return;
- }
-
- void moveTo(GeneralizedRegister& other, DataFormat myDataFormat, DataFormat otherDataFormat, JITCompiler& jit, FPRReg scratchFPR)
- {
- if (UNLIKELY(isFPR())) {
- if (UNLIKELY(other.isFPR())) {
- jit.moveDouble(fpr(), other.fpr());
- return;
- }
-
- JITCompiler::Jump done;
-
- if (scratchFPR != InvalidFPRReg) {
- // we have a scratch FPR, so attempt a conversion to int
- JITCompiler::JumpList notInt;
- jit.branchConvertDoubleToInt32(fpr(), other.gpr(), notInt, scratchFPR);
- jit.orPtr(GPRInfo::tagTypeNumberRegister, other.gpr());
- done = jit.jump();
- notInt.link(&jit);
- }
-
- jit.boxDouble(fpr(), other.gpr());
-
- if (done.isSet())
- done.link(&jit);
- return;
- }
-
- if (UNLIKELY(other.isFPR())) {
- jit.unboxDouble(gpr(), other.fpr());
- return;
- }
-
- if (LIKELY(!needDataFormatConversion(myDataFormat, otherDataFormat))) {
- jit.move(gpr(), other.gpr());
- return;
- }
-
- if (myDataFormat == DataFormatInteger) {
- jit.orPtr(gpr(), GPRInfo::tagTypeNumberRegister, other.gpr());
- return;
- }
-
- ASSERT(otherDataFormat == DataFormatInteger);
- jit.zeroExtend32ToPtr(gpr(), other.gpr());
- }
-
- void swapWith(GeneralizedRegister& other, DataFormat myDataFormat, DataFormat myNewDataFormat, DataFormat otherDataFormat, DataFormat otherNewDataFormat, JITCompiler& jit, GPRReg scratchGPR, FPRReg scratchFPR)
- {
- if (UNLIKELY(isFPR())) {
- if (UNLIKELY(other.isFPR())) {
- if (scratchFPR == InvalidFPRReg)
- jit.moveDoubleToPtr(fpr(), scratchGPR);
- else
- jit.moveDouble(fpr(), scratchFPR);
- jit.moveDouble(other.fpr(), fpr());
- if (scratchFPR == InvalidFPRReg)
- jit.movePtrToDouble(scratchGPR, other.fpr());
- else
- jit.moveDouble(scratchFPR, other.fpr());
- return;
- }
-
- jit.move(other.gpr(), scratchGPR);
-
- JITCompiler::Jump done;
-
- if (scratchFPR != InvalidFPRReg) {
- JITCompiler::JumpList notInt;
- jit.branchConvertDoubleToInt32(fpr(), other.gpr(), notInt, scratchFPR);
- jit.orPtr(GPRInfo::tagTypeNumberRegister, other.gpr());
- done = jit.jump();
- notInt.link(&jit);
- }
-
- jit.boxDouble(fpr(), other.gpr());
-
- if (done.isSet())
- done.link(&jit);
-
- jit.unboxDouble(scratchGPR, fpr());
- return;
- }
-
- if (UNLIKELY(other.isFPR())) {
- other.swapWith(*this, otherDataFormat, otherNewDataFormat, myDataFormat, myNewDataFormat, jit, scratchGPR, scratchFPR);
- return;
- }
-
- jit.swap(gpr(), other.gpr());
-
- if (UNLIKELY(needDataFormatConversion(otherDataFormat, myNewDataFormat))) {
- if (otherDataFormat == DataFormatInteger)
- jit.orPtr(GPRInfo::tagTypeNumberRegister, gpr());
- else if (myNewDataFormat == DataFormatInteger)
- jit.zeroExtend32ToPtr(gpr(), gpr());
- }
-
- if (UNLIKELY(needDataFormatConversion(myDataFormat, otherNewDataFormat))) {
- if (myDataFormat == DataFormatInteger)
- jit.orPtr(GPRInfo::tagTypeNumberRegister, other.gpr());
- else if (otherNewDataFormat == DataFormatInteger)
- jit.zeroExtend32ToPtr(other.gpr(), other.gpr());
- }
- }
-
-private:
- bool m_isFPR;
- union {
- GPRReg gpr;
- FPRReg fpr;
- } m_register;
-};
-
-struct ShuffledRegister {
- GeneralizedRegister reg;
- ShuffledRegister* previous;
- bool hasFrom;
- bool hasTo;
- bool handled;
-
- ShuffledRegister() { }
-
- ShuffledRegister(GeneralizedRegister reg)
- : reg(reg)
- , previous(0)
- , hasFrom(false)
- , hasTo(false)
- , handled(false)
- {
- }
-
- bool isEndOfNonCyclingPermutation()
- {
- return hasTo && !hasFrom;
- }
-
- void handleNonCyclingPermutation(const SpeculationCheck& check, const EntryLocation& entry, JITCompiler& jit, FPRReg& scratchFPR1, FPRReg& scratchFPR2)
- {
- ShuffledRegister* cur = this;
- while (cur->previous) {
- cur->previous->reg.moveTo(cur->reg, cur->previous->reg.previousDataFormat(check), cur->reg.nextDataFormat(entry), jit, scratchFPR1);
- cur->handled = true;
- if (cur->reg.isFPR()) {
- if (scratchFPR1 == InvalidFPRReg)
- scratchFPR1 = cur->reg.fpr();
- else {
- ASSERT(scratchFPR1 != cur->reg.fpr());
- scratchFPR2 = cur->reg.fpr();
- }
- }
- cur = cur->previous;
- }
- cur->handled = true;
- if (cur->reg.isFPR()) {
- if (scratchFPR1 == InvalidFPRReg)
- scratchFPR1 = cur->reg.fpr();
- else {
- ASSERT(scratchFPR1 != cur->reg.fpr());
- scratchFPR2 = cur->reg.fpr();
- }
- }
- }
-
- void handleCyclingPermutation(const SpeculationCheck& check, const EntryLocation& entry, JITCompiler& jit, GPRReg scratchGPR, FPRReg scratchFPR1, FPRReg scratchFPR2)
- {
- // first determine the cycle length
-
- unsigned cycleLength = 0;
-
- ShuffledRegister* cur = this;
- ShuffledRegister* next = 0;
- do {
- ASSERT(cur);
- cycleLength++;
- cur->handled = true;
- next = cur;
- cur = cur->previous;
- } while (cur != this);
-
- ASSERT(cycleLength);
- ASSERT(next->previous == cur);
-
- // now determine the best way to handle the permutation, depending on the
- // length.
-
- switch (cycleLength) {
- case 1:
- reg.convert(reg.previousDataFormat(check), reg.nextDataFormat(entry), jit);
- break;
-
- case 2:
- reg.swapWith(previous->reg, reg.previousDataFormat(check), reg.nextDataFormat(entry), previous->reg.previousDataFormat(check), previous->reg.nextDataFormat(entry), jit, scratchGPR, scratchFPR1);
- break;
-
- default:
- GeneralizedRegister scratch;
- if (UNLIKELY(reg.isFPR() && next->reg.isFPR())) {
- if (scratchFPR2 == InvalidFPRReg) {
- scratch = GeneralizedRegister::createGPR(scratchGPR);
- reg.moveTo(scratch, DataFormatDouble, DataFormatJSDouble, jit, scratchFPR1);
- } else {
- scratch = GeneralizedRegister::createFPR(scratchFPR2);
- reg.moveTo(scratch, DataFormatDouble, DataFormatDouble, jit, scratchFPR1);
- }
- } else {
- scratch = GeneralizedRegister::createGPR(scratchGPR);
- reg.moveTo(scratch, reg.previousDataFormat(check), next->reg.nextDataFormat(entry), jit, scratchFPR1);
- }
-
- cur = this;
- while (cur->previous != this) {
- ASSERT(cur);
- cur->previous->reg.moveTo(cur->reg, cur->previous->reg.previousDataFormat(check), cur->reg.nextDataFormat(entry), jit, scratchFPR1);
- cur = cur->previous;
- }
-
- if (UNLIKELY(reg.isFPR() && next->reg.isFPR())) {
- if (scratchFPR2 == InvalidFPRReg)
- scratch.moveTo(next->reg, DataFormatJSDouble, DataFormatDouble, jit, scratchFPR1);
- else
- scratch.moveTo(next->reg, DataFormatDouble, DataFormatDouble, jit, scratchFPR1);
- } else
- scratch.moveTo(next->reg, next->reg.nextDataFormat(entry), next->reg.nextDataFormat(entry), jit, scratchFPR1);
- break;
- }
- }
-
- static ShuffledRegister* lookup(ShuffledRegister* gprs, ShuffledRegister* fprs, GeneralizedRegister& reg)
- {
- if (reg.isFPR())
- return fprs + FPRInfo::toIndex(reg.fpr());
- return gprs + GPRInfo::toIndex(reg.gpr());
- }
-};
-
-template<typename T>
-T& lookupForRegister(T* gprs, T* fprs, unsigned index)
-{
- ASSERT(index < GPRInfo::numberOfRegisters + FPRInfo::numberOfRegisters);
- if (index < GPRInfo::numberOfRegisters)
- return gprs[index];
- return fprs[index - GPRInfo::numberOfRegisters];
-}
-
-// This is written in a way that allows for a HashMap<NodeIndex, GeneralizedRegister> to be
-// easily substituted, if it is found to be wise to do so. So far performance measurements
-// indicate that this is faster, likely because the HashMap would have never grown very big
-// and we would thus be wasting time performing complex hashing logic that, though O(1) on
-// average, would be less than the ~7 loop iterations that the find() method below would do
-// (since it's uncommon that we'd have register allocated more than 7 registers, in the
-// current scheme).
-class NodeToRegisterMap {
-public:
- struct Tuple {
- NodeIndex first;
- GeneralizedRegister second;
-
- Tuple()
- {
- }
- };
-
- typedef Tuple* iterator;
-
- NodeToRegisterMap()
- : m_occupancy(0)
- {
- }
-
- void set(NodeIndex first, GeneralizedRegister second)
- {
- m_payload[m_occupancy].first = first;
- m_payload[m_occupancy].second = second;
- m_occupancy++;
- }
-
- Tuple* end()
- {
- return 0;
- }
-
- Tuple* find(NodeIndex first)
- {
- for (unsigned i = m_occupancy; i-- > 0;) {
- if (m_payload[i].first == first)
- return m_payload + i;
- }
- return 0;
- }
-
- void clear()
- {
- m_occupancy = 0;
- }
-
-private:
- Tuple m_payload[GPRInfo::numberOfRegisters + FPRInfo::numberOfRegisters];
- unsigned m_occupancy;
-};
-
-void JITCompiler::jumpFromSpeculativeToNonSpeculative(const SpeculationCheck& check, const EntryLocation& entry, SpeculationRecovery* recovery, NodeToRegisterMap& checkNodeToRegisterMap, NodeToRegisterMap& entryNodeToRegisterMap)
-{
- ASSERT(check.m_nodeIndex == entry.m_nodeIndex);
-
- // Link the jump from the Speculative path to here.
- check.m_check.link(this);
-
-#if ENABLE(DFG_DEBUG_VERBOSE)
- fprintf(stderr, "Speculation failure for Node @%d at JIT offset 0x%x\n", (int)check.m_nodeIndex, debugOffset());
-#endif
-#if ENABLE(DFG_JIT_BREAK_ON_SPECULATION_FAILURE)
- breakpoint();
-#endif
-
-#if ENABLE(DFG_VERBOSE_SPECULATION_FAILURE)
- SpeculationFailureDebugInfo* debugInfo = new SpeculationFailureDebugInfo;
- debugInfo->codeBlock = m_codeBlock;
- debugInfo->debugOffset = debugOffset();
-
- debugCall(debugOperationPrintSpeculationFailure, debugInfo);
-#endif
-
-#if ENABLE(DFG_SUCCESS_STATS)
- static SamplingCounter counter("SpeculationFailure");
- emitCount(counter);
-#endif
-
- // Does this speculation check require any additional recovery to be performed,
- // to restore any state that has been overwritten before we enter back in to the
- // non-speculative path.
- if (recovery) {
- switch (recovery->type()) {
- case SpeculativeAdd: {
- ASSERT(check.m_gprInfo[GPRInfo::toIndex(recovery->dest())].nodeIndex != NoNode);
- // Revert the add.
- sub32(recovery->src(), recovery->dest());
-
- // If recovery->dest() should have been boxed prior to the addition, then rebox
- // it.
- DataFormat format = check.m_gprInfo[GPRInfo::toIndex(recovery->dest())].format;
- ASSERT(format == DataFormatInteger || format == DataFormatJSInteger || format == DataFormatJS);
- if (format != DataFormatInteger)
- orPtr(GPRInfo::tagTypeNumberRegister, recovery->dest());
- break;
- }
-
- case BooleanSpeculationCheck: {
- ASSERT(check.m_gprInfo[GPRInfo::toIndex(recovery->dest())].nodeIndex != NoNode);
- // Rebox the (non-)boolean
- xorPtr(TrustedImm32(static_cast<int32_t>(ValueFalse)), recovery->dest());
- break;
- }
-
- default:
- ASSERT_NOT_REACHED();
- break;
- }
- }
-
- // First, we need a reverse mapping that tells us, for a NodeIndex, which register
- // that node is in.
-
- checkNodeToRegisterMap.clear();
- entryNodeToRegisterMap.clear();
-
- GPRReg scratchGPR = InvalidGPRReg;
- FPRReg scratchFPR1 = InvalidFPRReg;
- FPRReg scratchFPR2 = InvalidFPRReg;
- bool needToRestoreTagMaskRegister = false;
-
- for (unsigned index = 0; index < GPRInfo::numberOfRegisters; ++index) {
- NodeIndex nodeIndexInCheck = check.m_gprInfo[index].nodeIndex;
- if (nodeIndexInCheck != NoNode)
- checkNodeToRegisterMap.set(nodeIndexInCheck, GeneralizedRegister::createGPR(GPRInfo::toRegister(index)));
- NodeIndex nodeIndexInEntry = entry.m_gprInfo[index].nodeIndex;
- if (nodeIndexInEntry != NoNode)
- entryNodeToRegisterMap.set(nodeIndexInEntry, GeneralizedRegister::createGPR(GPRInfo::toRegister(index)));
- else if (nodeIndexInCheck == NoNode)
- scratchGPR = GPRInfo::toRegister(index);
- }
-
- for (unsigned index = 0; index < FPRInfo::numberOfRegisters; ++index) {
- NodeIndex nodeIndexInCheck = check.m_fprInfo[index].nodeIndex;
- if (nodeIndexInCheck != NoNode)
- checkNodeToRegisterMap.set(nodeIndexInCheck, GeneralizedRegister::createFPR(FPRInfo::toRegister(index)));
- NodeIndex nodeIndexInEntry = entry.m_fprInfo[index].nodeIndex;
- if (nodeIndexInEntry != NoNode)
- entryNodeToRegisterMap.set(nodeIndexInEntry, GeneralizedRegister::createFPR(FPRInfo::toRegister(index)));
- else if (nodeIndexInCheck == NoNode) {
- if (scratchFPR1 == InvalidFPRReg)
- scratchFPR1 = FPRInfo::toRegister(index);
- else
- scratchFPR2 = FPRInfo::toRegister(index);
- }
- }
-
- ASSERT((scratchFPR1 == InvalidFPRReg && scratchFPR2 == InvalidFPRReg) || (scratchFPR1 != scratchFPR2));
-
- // How this works:
- // 1) Spill any values that are not spilled on speculative, but are spilled
- // on non-speculative.
- // 2) For the set of nodes that are in registers on both paths, perform a
- // shuffling.
- // 3) Fill any values that were spilled on speculative, but are not spilled
- // on non-speculative.
-
- // If we find registers that can be used as scratch registers along the way,
- // save them.
-
- // Part 1: spill any values that are not spilled on speculative, but are
- // spilled on non-speculative.
-
- // This also sets up some data structures that Part 2 will need.
-
- ShuffledRegister gprs[GPRInfo::numberOfRegisters];
- ShuffledRegister fprs[FPRInfo::numberOfRegisters];
-
- for (unsigned index = 0; index < GPRInfo::numberOfRegisters; ++index)
- gprs[index] = ShuffledRegister(GeneralizedRegister::createGPR(GPRInfo::toRegister(index)));
- for (unsigned index = 0; index < FPRInfo::numberOfRegisters; ++index)
- fprs[index] = ShuffledRegister(GeneralizedRegister::createFPR(FPRInfo::toRegister(index)));
-
- for (unsigned index = 0; index < GPRInfo::numberOfRegisters; ++index) {
- NodeIndex nodeIndex = check.m_gprInfo[index].nodeIndex;
-
- // Bail out if this register isn't assigned to anything.
- if (nodeIndex == NoNode)
- continue;
-
- // If the non-speculative path also has a register for the nodeIndex that this
- // register stores, link them together.
- NodeToRegisterMap::iterator mapIterator = entryNodeToRegisterMap.find(nodeIndex);
- if (mapIterator != entryNodeToRegisterMap.end()) {
- gprs[index].hasFrom = true;
-
- ShuffledRegister* next = ShuffledRegister::lookup(gprs, fprs, mapIterator->second);
- next->previous = gprs + index;
- next->hasTo = true;
-
- // If the non-speculative path has not spilled this register, then skip the spillin
- // part below regardless of whether or not the speculative path has spilled it.
- if (!mapIterator->second.findInEntryLocation(entry).isSpilled)
- continue;
- } else {
- // If the non-speculative entry isn't using this register and it does not need
- // the value in this register to be placed into any other register, then this
- // register can be used for scratch.
- if (entry.m_gprInfo[index].nodeIndex == NoNode)
- scratchGPR = GPRInfo::toRegister(index);
- }
-
- // If the speculative path has already spilled the register then there is no need to
- // spill it.
- if (check.m_gprInfo[index].isSpilled)
- continue;
-
- DataFormat dataFormat = check.m_gprInfo[index].format;
- VirtualRegister virtualRegister = graph()[nodeIndex].virtualRegister();
-
- ASSERT(dataFormat == DataFormatInteger || DataFormatCell || dataFormat & DataFormatJS);
- if (dataFormat == DataFormatInteger)
- orPtr(GPRInfo::tagTypeNumberRegister, GPRInfo::toRegister(index));
- storePtr(GPRInfo::toRegister(index), addressFor(virtualRegister));
- }
-
- if (scratchGPR == InvalidGPRReg) {
- scratchGPR = GPRInfo::tagMaskRegister;
- needToRestoreTagMaskRegister = true;
- }
-
- for (unsigned index = 0; index < FPRInfo::numberOfRegisters; ++index) {
- NodeIndex nodeIndex = check.m_fprInfo[index].nodeIndex;
- if (nodeIndex == NoNode)
- continue;
-
- NodeToRegisterMap::iterator mapIterator = entryNodeToRegisterMap.find(nodeIndex);
- if (mapIterator != entryNodeToRegisterMap.end()) {
- fprs[index].hasFrom = true;
-
- ShuffledRegister* next = ShuffledRegister::lookup(gprs, fprs, mapIterator->second);
- next->previous = fprs + index;
- next->hasTo = true;
-
- if (!mapIterator->second.findInEntryLocation(entry).isSpilled)
- continue;
- } else {
- // If the non-speculative entry isn't using this register and it does not need
- // the value in this register to be placed into any other register, then this
- // register can be used for scratch.
- if (entry.m_fprInfo[index].nodeIndex == NoNode) {
- if (scratchFPR1 == InvalidFPRReg)
- scratchFPR1 = FPRInfo::toRegister(index);
- else if (scratchFPR2)
- scratchFPR2 = FPRInfo::toRegister(index);
- ASSERT((scratchFPR1 == InvalidFPRReg && scratchFPR2 == InvalidFPRReg) || (scratchFPR1 != scratchFPR2));
- }
- }
-
- if (check.m_fprInfo[index].isSpilled)
- continue;
-
- VirtualRegister virtualRegister = graph()[nodeIndex].virtualRegister();
-
- moveDoubleToPtr(FPRInfo::toRegister(index), scratchGPR);
- subPtr(GPRInfo::tagTypeNumberRegister, scratchGPR);
- storePtr(scratchGPR, addressFor(virtualRegister));
- }
-
-#if !ASSERT_DISABLED
- // Assert that we've not assigned a scratch register to something that we're going to shuffle.
- ASSERT(scratchGPR != InvalidGPRReg);
- if (scratchGPR != GPRInfo::tagMaskRegister) {
- ASSERT(!gprs[GPRInfo::toIndex(scratchGPR)].hasTo);
- ASSERT(!gprs[GPRInfo::toIndex(scratchGPR)].hasFrom);
- }
- if (scratchFPR1 != InvalidFPRReg) {
- ASSERT(scratchFPR1 != scratchFPR2);
- ASSERT(!fprs[FPRInfo::toIndex(scratchFPR1)].hasTo);
- ASSERT(!fprs[FPRInfo::toIndex(scratchFPR1)].hasFrom);
- if (scratchFPR2 != InvalidFPRReg) {
- ASSERT(!fprs[FPRInfo::toIndex(scratchFPR2)].hasTo);
- ASSERT(!fprs[FPRInfo::toIndex(scratchFPR2)].hasFrom);
- }
- } else
- ASSERT(scratchFPR2 == InvalidFPRReg);
-#endif
-
- // Part 2: For the set of nodes that are in registers on both paths,
- // perform a shuffling.
-
- for (unsigned index = 0; index < GPRInfo::numberOfRegisters + FPRInfo::numberOfRegisters; ++index) {
- ShuffledRegister& reg = lookupForRegister(gprs, fprs, index);
- if (!reg.isEndOfNonCyclingPermutation() || reg.handled || (!reg.hasFrom && !reg.hasTo))
- continue;
-
- reg.handleNonCyclingPermutation(check, entry, *this, scratchFPR1, scratchFPR2);
- ASSERT((scratchFPR1 == InvalidFPRReg && scratchFPR2 == InvalidFPRReg) || (scratchFPR1 != scratchFPR2));
- }
-
- for (unsigned index = 0; index < GPRInfo::numberOfRegisters + FPRInfo::numberOfRegisters; ++index) {
- ShuffledRegister& reg = lookupForRegister(gprs, fprs, index);
- if (reg.handled || (!reg.hasFrom && !reg.hasTo))
- continue;
-
- reg.handleCyclingPermutation(check, entry, *this, scratchGPR, scratchFPR1, scratchFPR2);
- ASSERT((scratchFPR1 == InvalidFPRReg && scratchFPR2 == InvalidFPRReg) || (scratchFPR1 != scratchFPR2));
- }
-
-#if !ASSERT_DISABLED
- for (unsigned index = 0; index < GPRInfo::numberOfRegisters + FPRInfo::numberOfRegisters; ++index) {
- ShuffledRegister& reg = lookupForRegister(gprs, fprs, index);
- ASSERT(reg.handled || (!reg.hasFrom && !reg.hasTo));
- }
-#endif
-
- // Part 3: Fill any values that were spilled on speculative, but are not spilled
- // on non-speculative.
-
- for (unsigned index = 0; index < FPRInfo::numberOfRegisters; ++index) {
- NodeIndex nodeIndex = entry.m_fprInfo[index].nodeIndex;
- if (nodeIndex == NoNode || entry.m_fprInfo[index].isSpilled)
- continue;
-
- NodeToRegisterMap::iterator mapIterator = checkNodeToRegisterMap.find(nodeIndex);
- if (mapIterator != checkNodeToRegisterMap.end()
- && !mapIterator->second.findInSpeculationCheck(check).isSpilled)
- continue;
-
- fillNumericToDouble(nodeIndex, FPRInfo::toRegister(index), GPRInfo::regT0);
- }
-
- for (unsigned index = 0; index < GPRInfo::numberOfRegisters; ++index) {
- NodeIndex nodeIndex = entry.m_gprInfo[index].nodeIndex;
- if (nodeIndex == NoNode || entry.m_gprInfo[index].isSpilled)
- continue;
-
- NodeToRegisterMap::iterator mapIterator = checkNodeToRegisterMap.find(nodeIndex);
- if (mapIterator != checkNodeToRegisterMap.end()
- && !mapIterator->second.findInSpeculationCheck(check).isSpilled)
- continue;
-
- DataFormat dataFormat = entry.m_gprInfo[index].format;
- if (dataFormat == DataFormatInteger)
- fillInt32ToInteger(nodeIndex, GPRInfo::toRegister(index));
- else {
- ASSERT(dataFormat & DataFormatJS || dataFormat == DataFormatCell); // Treat cell as JSValue for now!
- fillToJS(nodeIndex, GPRInfo::toRegister(index));
- // FIXME: For subtypes of DataFormatJS, should jitAssert the subtype?
- }
- }
-
- if (needToRestoreTagMaskRegister)
- move(TrustedImmPtr(reinterpret_cast<void*>(TagMask)), GPRInfo::tagMaskRegister);
-
- // Jump into the non-speculative path.
- jump(entry.m_entry);
-}
-
-void JITCompiler::linkSpeculationChecks(SpeculativeJIT& speculative, NonSpeculativeJIT& nonSpeculative)
-{
- // Iterators to walk over the set of bail outs & corresponding entry points.
- SpeculationCheckVector::Iterator checksIter = speculative.speculationChecks().begin();
- SpeculationCheckVector::Iterator checksEnd = speculative.speculationChecks().end();
- NonSpeculativeJIT::EntryLocationVector::Iterator entriesIter = nonSpeculative.entryLocations().begin();
- NonSpeculativeJIT::EntryLocationVector::Iterator entriesEnd = nonSpeculative.entryLocations().end();
-
- NodeToRegisterMap checkNodeToRegisterMap;
- NodeToRegisterMap entryNodeToRegisterMap;
-
- // Iterate over the speculation checks.
- while (checksIter != checksEnd) {
- // For every bail out from the speculative path, we must have provided an entry point
- // into the non-speculative one.
- ASSERT(checksIter->m_nodeIndex == entriesIter->m_nodeIndex);
-
- // There may be multiple bail outs that map to the same entry point!
- do {
- ASSERT(checksIter != checksEnd);
- ASSERT(entriesIter != entriesEnd);
-
- // Plant code to link this speculation failure.
- const SpeculationCheck& check = *checksIter;
- const EntryLocation& entry = *entriesIter;
- jumpFromSpeculativeToNonSpeculative(check, entry, speculative.speculationRecovery(check.m_recoveryIndex), checkNodeToRegisterMap, entryNodeToRegisterMap);
- ++checksIter;
- } while (checksIter != checksEnd && checksIter->m_nodeIndex == entriesIter->m_nodeIndex);
- ++entriesIter;
- }
-
- // FIXME: https://bugs.webkit.org/show_bug.cgi?id=56289
- ASSERT(!(checksIter != checksEnd));
- ASSERT(!(entriesIter != entriesEnd));
-}
-#endif // ENABLE(DFG_OSR_EXIT)
void JITCompiler::compileEntry()
{
void JITCompiler::compileBody()
{
- // We generate the speculative code path, followed by the non-speculative
- // code for the function. Next we need to link the two together, making
- // bail-outs from the speculative path jump to the corresponding point on
- // the non-speculative one (and generating any code necessary to juggle
- // register values around, rebox values, and ensure spilled, to match the
- // non-speculative path's requirements).
+ // We generate the speculative code path, followed by OSR exit code to return
+ // to the old JIT code if speculations fail.
#if ENABLE(DFG_JIT_BREAK_ON_EVERY_FUNCTION)
// Handy debug tool!
breakpoint();
#endif
- // First generate the speculative path.
Label speculativePathBegin = label();
SpeculativeJIT speculative(*this);
-#if !ENABLE(DFG_DEBUG_LOCAL_DISBALE_SPECULATIVE)
bool compiledSpeculative = speculative.compile();
-#else
- bool compiledSpeculative = false;
-#endif
+ ASSERT_UNUSED(compiledSpeculative, compiledSpeculative);
- // Next, generate the non-speculative path. We pass this a SpeculationCheckIndexIterator
- // to allow it to check which nodes in the graph may bail out, and may need to reenter the
- // non-speculative path.
- if (compiledSpeculative) {
#if ENABLE(DFG_OSR_ENTRY)
- m_codeBlock->setJITCodeMap(m_jitCodeMapEncoder.finish());
+ m_codeBlock->setJITCodeMap(m_jitCodeMapEncoder.finish());
#endif
-
-#if ENABLE(DFG_OSR_EXIT)
- linkOSRExits(speculative);
-#else
- SpeculationCheckIndexIterator checkIterator(speculative.speculationChecks());
- NonSpeculativeJIT nonSpeculative(*this);
- nonSpeculative.compile(checkIterator);
-
- // Link the bail-outs from the speculative path to the corresponding entry points into the non-speculative one.
- linkSpeculationChecks(speculative, nonSpeculative);
-#endif
- } else {
- // If compilation through the SpeculativeJIT failed, throw away the code we generated.
- m_calls.clear();
- m_propertyAccesses.clear();
- m_jsCalls.clear();
- m_methodGets.clear();
- rewindToLabel(speculativePathBegin);
-
-#if ENABLE(DFG_OSR_EXIT)
- SpeculationCheckIndexIterator checkIterator;
-#else
- SpeculationCheckVector noChecks;
- SpeculationCheckIndexIterator checkIterator(noChecks);
-#endif
- NonSpeculativeJIT nonSpeculative(*this);
- nonSpeculative.compile(checkIterator);
- }
+
+ linkOSRExits(speculative);
// Iterate over the m_calls vector, checking for exception checks,
// and linking them to here.
class JITCodeGenerator;
class NodeToRegisterMap;
-class NonSpeculativeJIT;
class SpeculativeJIT;
class SpeculationRecovery;
struct EntryLocation;
-struct SpeculationCheck;
struct OSRExit;
#ifndef NDEBUG
void fillInt32ToInteger(NodeIndex, GPRReg);
void fillToJS(NodeIndex, GPRReg);
-#if ENABLE(DFG_OSR_EXIT)
void exitSpeculativeWithOSR(const OSRExit&, SpeculationRecovery*, Vector<BytecodeAndMachineOffset>& decodedCodeMap);
void linkOSRExits(SpeculativeJIT&);
-#else
- void jumpFromSpeculativeToNonSpeculative(const SpeculationCheck&, const EntryLocation&, SpeculationRecovery*, NodeToRegisterMap& checkNodeToRegisterMap, NodeToRegisterMap& entryNodeToRegisterMap);
- void linkSpeculationChecks(SpeculativeJIT&, NonSpeculativeJIT&);
-#endif
// The globalData, used to access constants such as the vPtrs.
JSGlobalData* m_globalData;
#define ENABLE_DFG_VERBOSE_SPECULATION_FAILURE 0
// Disable the DFG JIT without having to touch Platform.h!
#define DFG_DEBUG_LOCAL_DISBALE 0
-// Disable the SpeculativeJIT without having to touch Platform.h!
-#define DFG_DEBUG_LOCAL_DISBALE_SPECULATIVE 0
// Enable OSR entry from baseline JIT.
-#define ENABLE_DFG_OSR_ENTRY ENABLE_TIERED_COMPILATION
-// Disable the non-speculative JIT and use OSR instead.
-#define ENABLE_DFG_OSR_EXIT ENABLE_TIERED_COMPILATION
+#define ENABLE_DFG_OSR_ENTRY ENABLE_DFG_JIT
// Generate stats on how successful we were in making use of the DFG jit, and remaining on the hot path.
#define ENABLE_DFG_SUCCESS_STATS 0
+++ /dev/null
-/*
- * Copyright (C) 2011 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-#include "DFGNonSpeculativeJIT.h"
-
-#include "DFGSpeculativeJIT.h"
-
-#if ENABLE(DFG_JIT)
-
-namespace JSC { namespace DFG {
-
-EntryLocation::EntryLocation(MacroAssembler::Label entry, NonSpeculativeJIT* jit)
- : m_entry(entry)
- , m_nodeIndex(jit->m_compileIndex)
-{
- for (gpr_iterator iter = jit->m_gprs.begin(); iter != jit->m_gprs.end(); ++iter) {
- if (iter.name() != InvalidVirtualRegister) {
- GenerationInfo& info = jit->m_generationInfo[iter.name()];
- m_gprInfo[iter.index()].nodeIndex = info.nodeIndex();
- m_gprInfo[iter.index()].format = info.registerFormat();
- ASSERT(m_gprInfo[iter.index()].format != DataFormatNone);
- m_gprInfo[iter.index()].isSpilled = info.spillFormat() != DataFormatNone;
- } else
- m_gprInfo[iter.index()].nodeIndex = NoNode;
- }
- for (fpr_iterator iter = jit->m_fprs.begin(); iter != jit->m_fprs.end(); ++iter) {
- if (iter.name() != InvalidVirtualRegister) {
- GenerationInfo& info = jit->m_generationInfo[iter.name()];
- ASSERT(info.registerFormat() == DataFormatDouble);
- m_fprInfo[iter.index()].nodeIndex = info.nodeIndex();
- m_fprInfo[iter.index()].format = DataFormatDouble;
- m_fprInfo[iter.index()].isSpilled = info.spillFormat() != DataFormatNone;
- } else
- m_fprInfo[iter.index()].nodeIndex = NoNode;
- }
-}
-
-void NonSpeculativeJIT::compile(SpeculationCheckIndexIterator& checkIterator, Node& node)
-{
-#if ENABLE(DFG_OSR_EXIT)
- UNUSED_PARAM(checkIterator);
-#else
- // Check for speculation checks from the corresponding instruction in the
- // speculative path. Do not check for NodeIndex 0, since this is checked
- // in the outermost compile layer, at the head of the non-speculative path
- // (for index 0 we may need to check regardless of whether or not the node
- // will be generated, since argument type speculation checks will appear
- // as speculation checks at this index).
- if (m_compileIndex && checkIterator.hasCheckAtIndex(m_compileIndex))
- trackEntry(m_jit.label());
-#endif
-
- NodeType op = node.op;
-
- switch (op) {
- case ConvertThis: {
- JSValueOperand thisValue(this, node.child1());
- GPRReg thisGPR = thisValue.gpr();
- flushRegisters();
-
- GPRResult result(this);
- callOperation(operationConvertThis, result.gpr(), thisGPR);
- cellResult(result.gpr(), m_compileIndex);
- break;
- }
-
- case JSConstant:
- initConstantInfo(m_compileIndex);
- break;
-
- case GetLocal: {
- GPRTemporary result(this);
- m_jit.loadPtr(JITCompiler::addressFor(node.local()), result.gpr());
-
- // Like jsValueResult, but don't useChildren - our children are phi nodes,
- // and don't represent values within this dataflow with virtual registers.
- VirtualRegister virtualRegister = node.virtualRegister();
- m_gprs.retain(result.gpr(), virtualRegister, SpillOrderJS);
- m_generationInfo[virtualRegister].initJSValue(m_compileIndex, node.refCount(), result.gpr(), DataFormatJS);
- break;
- }
-
- case SetLocal: {
- JSValueOperand value(this, node.child1());
- m_jit.storePtr(value.gpr(), JITCompiler::addressFor(node.local()));
- noResult(m_compileIndex);
- break;
- }
-
- case BitAnd:
- case BitOr:
- case BitXor:
- if (isInt32Constant(node.child1())) {
- IntegerOperand op2(this, node.child2());
- GPRTemporary result(this, op2);
-
- bitOp(op, valueOfInt32Constant(node.child1()), op2.gpr(), result.gpr());
-
- integerResult(result.gpr(), m_compileIndex);
- } else if (isInt32Constant(node.child2())) {
- IntegerOperand op1(this, node.child1());
- GPRTemporary result(this, op1);
-
- bitOp(op, valueOfInt32Constant(node.child2()), op1.gpr(), result.gpr());
-
- integerResult(result.gpr(), m_compileIndex);
- } else {
- IntegerOperand op1(this, node.child1());
- IntegerOperand op2(this, node.child2());
- GPRTemporary result(this, op1, op2);
-
- GPRReg reg1 = op1.gpr();
- GPRReg reg2 = op2.gpr();
- bitOp(op, reg1, reg2, result.gpr());
-
- integerResult(result.gpr(), m_compileIndex);
- }
- break;
-
- case BitRShift:
- case BitLShift:
- case BitURShift:
- if (isInt32Constant(node.child2())) {
- IntegerOperand op1(this, node.child1());
- GPRTemporary result(this, op1);
-
- int shiftAmount = valueOfInt32Constant(node.child2()) & 0x1f;
- // Shifts by zero should have been optimized out of the graph!
- ASSERT(shiftAmount);
- shiftOp(op, op1.gpr(), shiftAmount, result.gpr());
-
- integerResult(result.gpr(), m_compileIndex);
- } else {
- // Do not allow shift amount to be used as the result, MacroAssembler does not permit this.
- IntegerOperand op1(this, node.child1());
- IntegerOperand op2(this, node.child2());
- GPRTemporary result(this, op1);
-
- GPRReg reg1 = op1.gpr();
- GPRReg reg2 = op2.gpr();
- shiftOp(op, reg1, reg2, result.gpr());
-
- integerResult(result.gpr(), m_compileIndex);
- }
- break;
-
- case UInt32ToNumber: {
- nonSpeculativeUInt32ToNumber(node);
- break;
- }
-
- case ValueToInt32: {
- nonSpeculativeValueToInt32(node);
- break;
- }
-
- case ValueToNumber:
- case ValueToDouble: {
- nonSpeculativeValueToNumber(node);
- break;
- }
-
- case ValueAdd:
- case ArithAdd: {
- nonSpeculativeAdd(op, node);
- break;
- }
-
- case ArithSub: {
- nonSpeculativeArithSub(node);
- break;
- }
-
- case ArithMul: {
- nonSpeculativeBasicArithOp(ArithMul, node);
- break;
- }
-
- case ArithDiv: {
- DoubleOperand op1(this, node.child1());
- DoubleOperand op2(this, node.child2());
- FPRTemporary result(this, op1);
- FPRReg op1FPR = op1.fpr();
- FPRReg op2FPR = op2.fpr();
- FPRReg resultFPR = result.fpr();
-
- m_jit.divDouble(op1FPR, op2FPR, resultFPR);
-
- doubleResult(resultFPR, m_compileIndex);
- break;
- }
-
- case ArithMod: {
- nonSpeculativeArithMod(node);
- break;
- }
-
- case LogicalNot: {
- nonSpeculativeLogicalNot(node);
- break;
- }
-
- case CompareLess:
- if (nonSpeculativeCompare(node, MacroAssembler::LessThan, operationCompareLess))
- return;
- break;
-
- case CompareLessEq:
- if (nonSpeculativeCompare(node, MacroAssembler::LessThanOrEqual, operationCompareLessEq))
- return;
- break;
-
- case CompareGreater:
- if (nonSpeculativeCompare(node, MacroAssembler::GreaterThan, operationCompareGreater))
- return;
- break;
-
- case CompareGreaterEq:
- if (nonSpeculativeCompare(node, MacroAssembler::GreaterThanOrEqual, operationCompareGreaterEq))
- return;
- break;
-
- case CompareEq:
- if (isNullConstant(node.child1())) {
- if (nonSpeculativeCompareNull(node, node.child2()))
- return;
- break;
- }
- if (isNullConstant(node.child2())) {
- if (nonSpeculativeCompareNull(node, node.child1()))
- return;
- break;
- }
- if (nonSpeculativeCompare(node, MacroAssembler::Equal, operationCompareEq))
- return;
- break;
-
- case CompareStrictEq:
- if (nonSpeculativeStrictEq(node))
- return;
- break;
-
- case GetByVal: {
- if (node.child3() != NoNode)
- use(node.child3());
-
- JSValueOperand base(this, node.child1());
- JSValueOperand property(this, node.child2());
-
- GPRTemporary storage(this);
- GPRTemporary cleanIndex(this);
-
- GPRReg baseGPR = base.gpr();
- GPRReg propertyGPR = property.gpr();
- GPRReg storageGPR = storage.gpr();
- GPRReg cleanIndexGPR = cleanIndex.gpr();
-
- base.use();
- property.use();
-
- JITCompiler::Jump baseNotCell = m_jit.branchTestPtr(MacroAssembler::NonZero, baseGPR, GPRInfo::tagMaskRegister);
-
- JITCompiler::Jump propertyNotInt = m_jit.branchPtr(MacroAssembler::Below, propertyGPR, GPRInfo::tagTypeNumberRegister);
-
- // Get the array storage. We haven't yet checked this is a JSArray, so this is only safe if
- // an access with offset JSArray::storageOffset() is valid for all JSCells!
- m_jit.loadPtr(MacroAssembler::Address(baseGPR, JSArray::storageOffset()), storageGPR);
-
- JITCompiler::Jump baseNotArray = m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(baseGPR), MacroAssembler::TrustedImmPtr(m_jit.globalData()->jsArrayVPtr));
-
- m_jit.zeroExtend32ToPtr(propertyGPR, cleanIndexGPR);
-
- JITCompiler::Jump outOfBounds = m_jit.branch32(MacroAssembler::AboveOrEqual, cleanIndexGPR, MacroAssembler::Address(baseGPR, JSArray::vectorLengthOffset()));
-
- m_jit.loadPtr(MacroAssembler::BaseIndex(storageGPR, cleanIndexGPR, MacroAssembler::ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), storageGPR);
-
- JITCompiler::Jump loadFailed = m_jit.branchTestPtr(MacroAssembler::Zero, storageGPR);
-
- JITCompiler::Jump done = m_jit.jump();
-
- baseNotCell.link(&m_jit);
- propertyNotInt.link(&m_jit);
- baseNotArray.link(&m_jit);
- outOfBounds.link(&m_jit);
- loadFailed.link(&m_jit);
-
- silentSpillAllRegisters(storageGPR);
- setupStubArguments(baseGPR, propertyGPR);
- m_jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
- appendCallWithExceptionCheck(operationGetByVal);
- m_jit.move(GPRInfo::returnValueGPR, storageGPR);
- silentFillAllRegisters(storageGPR);
-
- done.link(&m_jit);
-
- jsValueResult(storageGPR, m_compileIndex, UseChildrenCalledExplicitly);
- break;
- }
-
- case PutByVal:
- case PutByValAlias: {
- JSValueOperand base(this, node.child1());
- JSValueOperand property(this, node.child2());
- JSValueOperand value(this, node.child3());
- GPRTemporary storage(this);
- GPRTemporary cleanIndex(this);
- GPRReg baseGPR = base.gpr();
- GPRReg propertyGPR = property.gpr();
- GPRReg valueGPR = value.gpr();
- GPRReg storageGPR = storage.gpr();
- GPRReg cleanIndexGPR = cleanIndex.gpr();
-
- base.use();
- property.use();
- value.use();
-
- writeBarrier(m_jit, baseGPR, storageGPR, WriteBarrierForPropertyAccess);
-
- JITCompiler::Jump baseNotCell = m_jit.branchTestPtr(MacroAssembler::NonZero, baseGPR, GPRInfo::tagMaskRegister);
-
- JITCompiler::Jump propertyNotInt = m_jit.branchPtr(MacroAssembler::Below, propertyGPR, GPRInfo::tagTypeNumberRegister);
-
- m_jit.loadPtr(MacroAssembler::Address(baseGPR, JSArray::storageOffset()), storageGPR);
-
- JITCompiler::Jump baseNotArray = m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(baseGPR), MacroAssembler::TrustedImmPtr(m_jit.globalData()->jsArrayVPtr));
-
- m_jit.zeroExtend32ToPtr(propertyGPR, cleanIndexGPR);
-
- JITCompiler::Jump outOfBounds = m_jit.branch32(MacroAssembler::AboveOrEqual, cleanIndexGPR, MacroAssembler::Address(baseGPR, JSArray::vectorLengthOffset()));
-
- JITCompiler::Jump notHoleValue = m_jit.branchTestPtr(MacroAssembler::NonZero, MacroAssembler::BaseIndex(storageGPR, cleanIndexGPR, MacroAssembler::ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
-
- JITCompiler::Jump lengthDoesNotNeedUpdate = m_jit.branch32(MacroAssembler::Below, cleanIndexGPR, MacroAssembler::Address(storageGPR, OBJECT_OFFSETOF(ArrayStorage, m_length)));
-
- m_jit.add32(TrustedImm32(1), cleanIndexGPR);
- m_jit.store32(cleanIndexGPR, MacroAssembler::Address(storageGPR, OBJECT_OFFSETOF(ArrayStorage, m_length)));
- m_jit.zeroExtend32ToPtr(propertyGPR, cleanIndexGPR);
-
- lengthDoesNotNeedUpdate.link(&m_jit);
- notHoleValue.link(&m_jit);
-
- m_jit.storePtr(valueGPR, MacroAssembler::BaseIndex(storageGPR, cleanIndexGPR, MacroAssembler::ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
-
- JITCompiler::Jump done = m_jit.jump();
-
- baseNotCell.link(&m_jit);
- propertyNotInt.link(&m_jit);
- baseNotArray.link(&m_jit);
- outOfBounds.link(&m_jit);
-
- silentSpillAllRegisters(InvalidGPRReg);
- setupStubArguments(baseGPR, propertyGPR, valueGPR);
- m_jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
- JITCompiler::Call functionCall = appendCallWithExceptionCheck(m_jit.codeBlock()->isStrictMode() ? operationPutByValStrict : operationPutByValNonStrict);
- silentFillAllRegisters(InvalidGPRReg);
-
- done.link(&m_jit);
-
- noResult(m_compileIndex, UseChildrenCalledExplicitly);
- break;
- }
-
- case GetById: {
- JSValueOperand base(this, node.child1());
- GPRReg baseGPR = base.gpr();
- GPRTemporary result(this, base);
- GPRReg resultGPR = result.gpr();
- GPRReg scratchGPR;
-
- if (resultGPR == baseGPR)
- scratchGPR = tryAllocate();
- else
- scratchGPR = resultGPR;
-
- base.use();
-
- JITCompiler::Jump notCell = m_jit.branchTestPtr(MacroAssembler::NonZero, baseGPR, GPRInfo::tagMaskRegister);
-
- cachedGetById(baseGPR, resultGPR, scratchGPR, node.identifierNumber(), notCell);
-
- jsValueResult(resultGPR, m_compileIndex, UseChildrenCalledExplicitly);
- break;
- }
-
- case GetMethod: {
- JSValueOperand base(this, node.child1());
- GPRReg baseGPR = base.gpr();
- GPRTemporary result(this, base);
- GPRReg resultGPR = result.gpr();
- GPRReg scratchGPR;
- if (resultGPR == baseGPR)
- scratchGPR = tryAllocate();
- else
- scratchGPR = resultGPR;
-
- base.use();
-
- JITCompiler::Jump notCell = m_jit.branchTestPtr(MacroAssembler::NonZero, baseGPR, GPRInfo::tagMaskRegister);
-
- cachedGetMethod(baseGPR, resultGPR, scratchGPR, node.identifierNumber(), notCell);
-
- jsValueResult(resultGPR, m_compileIndex, UseChildrenCalledExplicitly);
- break;
- }
-
- case PutById: {
- JSValueOperand base(this, node.child1());
- JSValueOperand value(this, node.child2());
- GPRTemporary scratch(this);
- GPRReg valueGPR = value.gpr();
- GPRReg baseGPR = base.gpr();
- GPRReg scratchGPR = scratch.gpr();
-
- base.use();
- value.use();
-
- JITCompiler::Jump notCell = m_jit.branchTestPtr(MacroAssembler::NonZero, baseGPR, GPRInfo::tagMaskRegister);
-
- cachedPutById(baseGPR, valueGPR, scratchGPR, node.identifierNumber(), NotDirect, notCell);
-
- noResult(m_compileIndex, UseChildrenCalledExplicitly);
- break;
- }
-
- case PutByIdDirect: {
- JSValueOperand base(this, node.child1());
- JSValueOperand value(this, node.child2());
- GPRTemporary scratch(this);
- GPRReg valueGPR = value.gpr();
- GPRReg baseGPR = base.gpr();
-
- base.use();
- value.use();
-
- JITCompiler::Jump notCell = m_jit.branchTestPtr(MacroAssembler::NonZero, baseGPR, GPRInfo::tagMaskRegister);
-
- cachedPutById(baseGPR, valueGPR, scratch.gpr(), node.identifierNumber(), Direct, notCell);
-
- noResult(m_compileIndex, UseChildrenCalledExplicitly);
- break;
- }
-
- case GetGlobalVar: {
- GPRTemporary result(this);
-
- JSVariableObject* globalObject = m_jit.codeBlock()->globalObject();
- m_jit.loadPtr(globalObject->addressOfRegisters(), result.gpr());
- m_jit.loadPtr(JITCompiler::addressForGlobalVar(result.gpr(), node.varNumber()), result.gpr());
-
- jsValueResult(result.gpr(), m_compileIndex);
- break;
- }
-
- case PutGlobalVar: {
- JSValueOperand value(this, node.child1());
- GPRTemporary globalObject(this);
- GPRTemporary scratch(this);
-
- GPRReg globalObjectReg = globalObject.gpr();
- GPRReg scratchReg = scratch.gpr();
-
- m_jit.move(MacroAssembler::TrustedImmPtr(m_jit.codeBlock()->globalObject()), globalObjectReg);
-
- writeBarrier(m_jit, globalObjectReg, scratchReg, WriteBarrierForVariableAccess);
-
- m_jit.loadPtr(MacroAssembler::Address(globalObjectReg, JSVariableObject::offsetOfRegisters()), scratchReg);
- m_jit.storePtr(value.gpr(), JITCompiler::addressForGlobalVar(scratchReg, node.varNumber()));
-
- noResult(m_compileIndex);
- break;
- }
-
- case DFG::Jump: {
- BlockIndex taken = m_jit.graph().blockIndexForBytecodeOffset(node.takenBytecodeOffset());
- if (taken != (m_block + 1))
- addBranch(m_jit.jump(), taken);
- noResult(m_compileIndex);
- break;
- }
-
- case Branch:
- emitBranch(node);
- break;
-
- case Return: {
- ASSERT(GPRInfo::callFrameRegister != GPRInfo::regT1);
- ASSERT(GPRInfo::regT1 != GPRInfo::returnValueGPR);
- ASSERT(GPRInfo::returnValueGPR != GPRInfo::callFrameRegister);
-
-#if ENABLE(DFG_SUCCESS_STATS)
- static SamplingCounter counter("NonSpeculativeJIT");
- m_jit.emitCount(counter);
-#endif
-
- // Return the result in returnValueGPR.
- JSValueOperand op1(this, node.child1());
- m_jit.move(op1.gpr(), GPRInfo::returnValueGPR);
-
- // Grab the return address.
- m_jit.emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, GPRInfo::regT1);
- // Restore our caller's "r".
- m_jit.emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, GPRInfo::callFrameRegister);
- // Return.
- m_jit.restoreReturnAddressBeforeReturn(GPRInfo::regT1);
- m_jit.ret();
-
- noResult(m_compileIndex);
- break;
- }
-
- case CheckHasInstance: {
- nonSpeculativeCheckHasInstance(node);
- break;
- }
-
- case InstanceOf: {
- nonSpeculativeInstanceOf(node);
- break;
- }
-
- case Phi:
- ASSERT_NOT_REACHED();
-
- case Breakpoint:
-#if ENABLE(DEBUG_WITH_BREAKPOINT)
- m_jit.breakpoint();
-#else
- ASSERT_NOT_REACHED();
-#endif
- break;
-
- case Call:
- case Construct:
- emitCall(node);
- break;
-
- case Resolve: {
- flushRegisters();
- GPRResult result(this);
- callOperation(operationResolve, result.gpr(), identifier(node.identifierNumber()));
- jsValueResult(result.gpr(), m_compileIndex);
- break;
- }
-
- case ResolveBase: {
- flushRegisters();
- GPRResult result(this);
- callOperation(operationResolveBase, result.gpr(), identifier(node.identifierNumber()));
- jsValueResult(result.gpr(), m_compileIndex);
- break;
- }
-
- case ResolveBaseStrictPut: {
- flushRegisters();
- GPRResult result(this);
- callOperation(operationResolveBaseStrictPut, result.gpr(), identifier(node.identifierNumber()));
- jsValueResult(result.gpr(), m_compileIndex);
- break;
- }
- }
-
- if (node.hasResult() && node.mustGenerate())
- use(m_compileIndex);
-}
-
-void NonSpeculativeJIT::compile(SpeculationCheckIndexIterator& checkIterator, BasicBlock& block)
-{
- ASSERT(m_compileIndex == block.begin);
- m_blockHeads[m_block] = m_jit.label();
-
-#if ENABLE(DFG_JIT_BREAK_ON_EVERY_BLOCK)
- m_jit.breakpoint();
-#endif
-
- for (; m_compileIndex < block.end; ++m_compileIndex) {
- Node& node = m_jit.graph()[m_compileIndex];
- if (!node.shouldGenerate())
- continue;
-
-#if ENABLE(DFG_DEBUG_VERBOSE)
- fprintf(stderr, "NonSpeculativeJIT generating Node @%d at code offset 0x%x ", (int)m_compileIndex, m_jit.debugOffset());
-#endif
-#if ENABLE(DFG_JIT_BREAK_ON_EVERY_NODE)
- m_jit.breakpoint();
-#endif
-
- checkConsistency();
- compile(checkIterator, node);
-#if ENABLE(DFG_DEBUG_VERBOSE)
- if (node.hasResult())
- fprintf(stderr, "-> %s\n", dataFormatToString(m_generationInfo[node.virtualRegister()].registerFormat()));
- else
- fprintf(stderr, "\n");
-#endif
- checkConsistency();
- }
-}
-
-void NonSpeculativeJIT::compile(SpeculationCheckIndexIterator& checkIterator)
-{
- // Check for speculation checks added at function entry (checking argument types).
-#if !ENABLE(DFG_OSR_EXIT)
- if (checkIterator.hasCheckAtIndex(m_compileIndex))
- trackEntry(m_jit.label());
-#endif
-
- ASSERT(!m_compileIndex);
- for (m_block = 0; m_block < m_jit.graph().m_blocks.size(); ++m_block)
- compile(checkIterator, *m_jit.graph().m_blocks[m_block]);
- linkBranches();
-}
-
-} } // namespace JSC::DFG
-
-#endif
+++ /dev/null
-/*
- * Copyright (C) 2011 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef DFGNonSpeculativeJIT_h
-#define DFGNonSpeculativeJIT_h
-
-#if ENABLE(DFG_JIT)
-
-#include <dfg/DFGJITCodeGenerator.h>
-
-namespace JSC { namespace DFG {
-
-class SpeculationCheckIndexIterator;
-
-// === EntryLocation ===
-//
-// This structure describes an entry point into the non-speculative
-// code path. This is used in linking bail-outs from the speculative path.
-struct EntryLocation {
- EntryLocation(MacroAssembler::Label, NonSpeculativeJIT*);
-
- // The node this entry point corresponds to, and the label
- // marking the start of code for the given node.
- MacroAssembler::Label m_entry;
- NodeIndex m_nodeIndex;
-
- // For every entry point we record a map recording for every
- // machine register which, if any, values it contains. For
- // GPR registers we must also record the format of the value.
- struct RegisterInfo {
- NodeIndex nodeIndex;
- DataFormat format;
- bool isSpilled;
- };
- RegisterInfo m_gprInfo[GPRInfo::numberOfRegisters];
- RegisterInfo m_fprInfo[FPRInfo::numberOfRegisters];
-};
-
-// === NonSpeculativeJIT ===
-//
-// This class is used to generate code for the non-speculative path.
-// Code generation will take advantage of static information available
-// in the dataflow to perform safe optimizations - for example, avoiding
-// boxing numeric values between arithmetic operations, but will not
-// perform any unsafe optimizations that would render the code unable
-// to produce the correct results for any possible input.
-class NonSpeculativeJIT : public JITCodeGenerator {
- friend struct EntryLocation;
-public:
- NonSpeculativeJIT(JITCompiler& jit)
- : JITCodeGenerator(jit, false)
- {
- }
-
- void compile(SpeculationCheckIndexIterator&);
-
- typedef SegmentedVector<EntryLocation, 16> EntryLocationVector;
- EntryLocationVector& entryLocations() { return m_entryLocations; }
-
-private:
- void compile(SpeculationCheckIndexIterator&, Node&);
- void compile(SpeculationCheckIndexIterator&, BasicBlock&);
-
- // These methods are used to plant calls out to C++
- // helper routines to convert between types.
-
- // Record an entry location into the non-speculative code path;
- // for every bail-out on the speculative path we record information
- // to be able to re-enter into the non-speculative one.
- void trackEntry(MacroAssembler::Label entry)
- {
- m_entryLocations.append(EntryLocation(entry, this));
- }
-
- EntryLocationVector m_entryLocations;
-};
-
-} } // namespace JSC::DFG
-
-#endif
-#endif
-
ASSERT(codeBlock->getJITType() == JITCode::DFGJIT);
ASSERT(codeBlock->alternative());
ASSERT(codeBlock->alternative()->getJITType() == JITCode::BaselineJIT);
+ ASSERT(codeBlock->jitCodeMap());
#if ENABLE(JIT_VERBOSE_OSR)
printf("OSR in %p(%p) from bc#%u\n", codeBlock, codeBlock->alternative(), bytecodeIndex);
// should almost certainly include calling either codeBlock->optimizeAfterWarmUp()
// or codeBlock->dontOptimizeAnytimeSoon().
- // 1) Check if the DFG code set a code map. If it didn't, it means that it
- // cannot handle OSR entry. This currently only happens if we disable
- // dynamic speculation termination and end up with a DFG code block that
- // was compiled entirely with the non-speculative JIT. The non-speculative
- // JIT does not support OSR entry and probably never will, since it is
- // kind of a deprecated compiler right now.
-
-#if ENABLE(DYNAMIC_TERMINATE_SPECULATION)
- ASSERT(codeBlock->jitCodeMap());
-#else
- if (!codeBlock->jitCodeMap()) {
-#if ENABLE(JIT_VERBOSE_OSR)
- printf(" OSR failed because of a missing JIT code map.\n");
-#endif
- return 0;
- }
-#endif
-
- // 2) Verify predictions. If the predictions are inconsistent with the actual
+ // 1) Verify predictions. If the predictions are inconsistent with the actual
// values, then OSR entry is not possible at this time. It's tempting to
// assume that we could somehow avoid this case. We can certainly avoid it
// for first-time loop OSR - that is, OSR into a CodeBlock that we have just
}
}
- // 3) Check the stack height. The DFG JIT may require a taller stack than the
+ // 2) Check the stack height. The DFG JIT may require a taller stack than the
// baseline JIT, in some cases. If we can't grow the stack, then don't do
// OSR right now. That's the only option we have unless we want basic block
// boundaries to start throwing RangeErrors. Although that would be possible,
printf(" OSR should succeed.\n");
#endif
- // 4) Fix the call frame.
+ // 3) Fix the call frame.
exec->setCodeBlock(codeBlock);
- // 5) Find and return the destination machine code address. The DFG stores
+ // 4) Find and return the destination machine code address. The DFG stores
// the machine code offsets of OSR targets in a CompactJITCodeMap.
// Decoding it is not super efficient, but we expect that OSR entry
// happens sufficiently rarely, and that OSR entrypoints are sufficiently
#include "config.h"
#include "DFGPropagator.h"
-#if ENABLE(DFG_JIT) && ENABLE(DYNAMIC_OPTIMIZATION)
+#if ENABLE(DFG_JIT)
#include "DFGGraph.h"
} } // namespace JSC::DFG
-#endif // ENABLE(DFG_JIT) && ENABLE(DYNAMIC_OPTIMIZATION)
+#endif // ENABLE(DFG_JIT)
namespace DFG {
// Propagate dynamic predictions from value sources to variables.
-#if ENABLE(DYNAMIC_OPTIMIZATION)
void propagate(Graph&, JSGlobalData*, CodeBlock*);
-#else
-inline void propagate(Graph&, JSGlobalData*, CodeBlock*) { }
-#endif
} } // namespace JSC::DFG
return InvalidGPRReg;
}
-#if !ENABLE(DFG_OSR_EXIT)
-SpeculationCheck::SpeculationCheck(MacroAssembler::Jump check, SpeculativeJIT* jit, unsigned recoveryIndex)
- : m_check(check)
- , m_nodeIndex(jit->m_compileIndex)
- , m_recoveryIndex(recoveryIndex)
-{
- for (gpr_iterator iter = jit->m_gprs.begin(); iter != jit->m_gprs.end(); ++iter) {
- if (iter.name() != InvalidVirtualRegister) {
- GenerationInfo& info = jit->m_generationInfo[iter.name()];
- m_gprInfo[iter.index()].nodeIndex = info.nodeIndex();
- m_gprInfo[iter.index()].format = info.registerFormat();
- ASSERT(m_gprInfo[iter.index()].format != DataFormatNone);
- m_gprInfo[iter.index()].isSpilled = info.spillFormat() != DataFormatNone;
- } else
- m_gprInfo[iter.index()].nodeIndex = NoNode;
- }
- for (fpr_iterator iter = jit->m_fprs.begin(); iter != jit->m_fprs.end(); ++iter) {
- if (iter.name() != InvalidVirtualRegister) {
- GenerationInfo& info = jit->m_generationInfo[iter.name()];
- ASSERT(info.registerFormat() == DataFormatDouble);
- m_fprInfo[iter.index()].nodeIndex = info.nodeIndex();
- m_fprInfo[iter.index()].format = DataFormatDouble;
- m_fprInfo[iter.index()].isSpilled = info.spillFormat() != DataFormatNone;
- } else
- m_fprInfo[iter.index()].nodeIndex = NoNode;
- }
-}
-#endif
-
#ifndef NDEBUG
void ValueSource::dump(FILE* out) const
{
}
#endif
-#if ENABLE(DFG_OSR_EXIT)
OSRExit::OSRExit(MacroAssembler::Jump check, SpeculativeJIT* jit, unsigned recoveryIndex)
: m_check(check)
, m_nodeIndex(jit->m_compileIndex)
m_variables[variable].dump(out);
}
#endif
-#endif
GPRReg SpeculativeJIT::fillSpeculateInt(NodeIndex nodeIndex, DataFormat& returnFormat)
{
checkConsistency();
compile(node);
if (!m_compileOkay) {
-#if ENABLE(DYNAMIC_TERMINATE_SPECULATION)
m_compileOkay = true;
m_compileIndex = block.end;
clearGenerationInfo();
-#endif
return;
}
initializeVariableTypes();
ASSERT(!m_compileIndex);
- for (m_block = 0; m_block < m_jit.graph().m_blocks.size(); ++m_block) {
+ for (m_block = 0; m_block < m_jit.graph().m_blocks.size(); ++m_block)
compile(*m_jit.graph().m_blocks[m_block]);
-#if !ENABLE(DYNAMIC_TERMINATE_SPECULATION)
- if (!m_compileOkay)
- return false;
-#endif
- }
linkBranches();
return true;
}
GPRReg m_src;
};
-#if !ENABLE(DFG_OSR_EXIT)
-// === SpeculationCheck ===
-//
-// This structure records a bail-out from the speculative path,
-// which will need to be linked in to the non-speculative one.
-struct SpeculationCheck {
- SpeculationCheck(MacroAssembler::Jump, SpeculativeJIT*, unsigned recoveryIndex = 0);
-
- // The location of the jump out from the speculative path,
- // and the node we were generating code for.
- MacroAssembler::Jump m_check;
- NodeIndex m_nodeIndex;
- // Used to record any additional recovery to be performed; this
- // value is an index into the SpeculativeJIT's m_speculationRecoveryList
- // array, offset by 1. (m_recoveryIndex == 0) means no recovery.
- unsigned m_recoveryIndex;
-
- struct RegisterInfo {
- NodeIndex nodeIndex;
- DataFormat format;
- bool isSpilled;
- };
- RegisterInfo m_gprInfo[GPRInfo::numberOfRegisters];
- RegisterInfo m_fprInfo[FPRInfo::numberOfRegisters];
-};
-typedef SegmentedVector<SpeculationCheck, 16> SpeculationCheckVector;
-#endif // !ENABLE(DFG_OSR_EXIT)
-
class ValueSource {
public:
ValueSource()
} m_source;
};
-#if ENABLE(DFG_OSR_EXIT)
// === OSRExit ===
//
// This structure describes how to exit the speculative path by
int m_lastSetOperand;
};
typedef SegmentedVector<OSRExit, 16> OSRExitVector;
-#endif // ENABLE(DFG_OSR_EXIT)
// === SpeculativeJIT ===
//
// to propagate type information (including information that has
// only speculatively been asserted) through the dataflow.
class SpeculativeJIT : public JITCodeGenerator {
- friend struct SpeculationCheck;
friend struct OSRExit;
public:
SpeculativeJIT(JITCompiler&);
// Retrieve the list of bail-outs from the speculative path,
// and additional recovery information.
-#if !ENABLE(DFG_OSR_EXIT)
- SpeculationCheckVector& speculationChecks()
- {
- return m_speculationChecks;
- }
-#else
OSRExitVector& osrExits()
{
return m_osrExits;
}
-#endif
SpeculationRecovery* speculationRecovery(size_t index)
{
- // SpeculationCheck::m_recoveryIndex is offset by 1,
+ // OSRExit::m_recoveryIndex is offset by 1,
// 0 means no recovery.
return index ? &m_speculationRecoveryList[index - 1] : 0;
}
{
if (!m_compileOkay)
return;
-#if !ENABLE(DFG_OSR_EXIT)
- m_speculationChecks.append(SpeculationCheck(jumpToFail, this));
-#else
m_osrExits.append(OSRExit(jumpToFail, this));
-#endif
}
// Add a speculation check with additional recovery.
void speculationCheck(MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
if (!m_compileOkay)
return;
m_speculationRecoveryList.append(recovery);
-#if !ENABLE(DFG_OSR_EXIT)
- m_speculationChecks.append(SpeculationCheck(jumpToFail, this, m_speculationRecoveryList.size()));
-#else
m_osrExits.append(OSRExit(jumpToFail, this, m_speculationRecoveryList.size()));
-#endif
}
// Called when we statically determine that a speculation will fail.
#if ENABLE(DFG_DEBUG_VERBOSE)
fprintf(stderr, "SpeculativeJIT was terminated.\n");
#endif
-#if ENABLE(DYNAMIC_TERMINATE_SPECULATION)
if (!m_compileOkay)
return;
speculationCheck(m_jit.jump());
m_compileOkay = false;
-#else
- // Under static speculation, it's more profitable to give up entirely at this
- // point.
- m_compileOkay = false;
-#endif
}
template<bool strict>
// will make conflicting speculations about the same operand). In such cases this
// flag is cleared, indicating no further code generation should take place.
bool m_compileOkay;
-#if !ENABLE(DFG_OSR_EXIT)
- // This vector tracks bail-outs from the speculative path to the non-speculative one.
- SpeculationCheckVector m_speculationChecks;
-#else
// This vector tracks bail-outs from the speculative path to the old JIT.
OSRExitVector m_osrExits;
-#endif
// Some bail-outs need to record additional information recording specific recovery
// to be performed (for example, on detected overflow from an add, we may need to
// reverse the addition if an operand is being overwritten).
GPRReg m_gprOrInvalid;
};
-// === SpeculationCheckIndexIterator ===
-//
-// This class is used by the non-speculative JIT to check which
-// nodes require entry points from the speculative path.
-#if ENABLE(DFG_OSR_EXIT)
-// This becomes a stub if OSR is enabled.
-class SpeculationCheckIndexIterator {
-public:
- SpeculationCheckIndexIterator() { }
-};
-#else
-class SpeculationCheckIndexIterator {
-public:
- SpeculationCheckIndexIterator(SpeculationCheckVector& speculationChecks)
- : m_speculationChecks(speculationChecks)
- , m_iter(m_speculationChecks.begin())
- , m_end(m_speculationChecks.end())
- {
- }
-
- bool hasCheckAtIndex(NodeIndex nodeIndex)
- {
- while (m_iter != m_end) {
- NodeIndex current = m_iter->m_nodeIndex;
- if (current >= nodeIndex)
- return current == nodeIndex;
- ++m_iter;
- }
- return false;
- }
-
-private:
- SpeculationCheckVector& m_speculationChecks;
- SpeculationCheckVector::Iterator m_iter;
- SpeculationCheckVector::Iterator m_end;
-};
-#endif
-
inline SpeculativeJIT::SpeculativeJIT(JITCompiler& jit)
: JITCodeGenerator(jit, true)
, m_compileOkay(true)
{
}
-#if ENABLE(TIERED_COMPILATION)
+#if ENABLE(DFG_JIT)
void JIT::emitOptimizationCheck(OptimizationCheckKind kind)
{
if (!shouldEmitProfiling())
m_labels[m_bytecodeOffset] = label();
-#if ENABLE(TIERED_COMPILATION)
+#if ENABLE(DFG_JIT)
if (m_canBeOptimized)
m_jitCodeMapEncoder.append(m_bytecodeOffset, differenceBetween(m_startOfCode, label()));
#endif
JITCode JIT::privateCompile(CodePtr* functionEntryArityCheck)
{
-#if ENABLE(TIERED_COMPILATION)
+#if ENABLE(DFG_JIT)
m_canBeOptimized = m_codeBlock->canCompileWithDFG();
if (m_canBeOptimized)
m_startOfCode = label();
info.callReturnLocation = m_codeBlock->structureStubInfo(m_methodCallCompilationInfo[i].propertyAccessIndex).callReturnLocation;
}
-#if ENABLE(TIERED_COMPILATION)
+#if ENABLE(DFG_JIT)
if (m_canBeOptimized)
m_codeBlock->setJITCodeMap(m_jitCodeMapEncoder.finish());
#endif
void emitLoadCharacterString(RegisterID src, RegisterID dst, JumpList& failures);
enum OptimizationCheckKind { LoopOptimizationCheck, RetOptimizationCheck };
-#if ENABLE(TIERED_COMPILATION)
+#if ENABLE(DFG_JIT)
void emitOptimizationCheck(OptimizationCheckKind);
#else
void emitOptimizationCheck(OptimizationCheckKind) { }
void sampleCodeBlock(CodeBlock*) {}
#endif
-#if ENABLE(TIERED_COMPILATION)
+#if ENABLE(DFG_JIT)
bool shouldEmitProfiling() { return m_canBeOptimized; }
#else
// Enables use of value profiler with tiered compilation turned off,
WeakRandom m_randomGenerator;
static CodeRef stringGetByValStubGenerator(JSGlobalData*);
-#if ENABLE(TIERED_COMPILATION)
+#if ENABLE(DFG_JIT)
bool m_canBeOptimized;
Label m_startOfCode;
CompactJITCodeMap::Encoder m_jitCodeMapEncoder;
static JITType bottomTierJIT()
{
-#if ENABLE(TIERED_COMPILATION)
return BaselineJIT;
-#else
- return DFGJIT;
-#endif
}
static JITType topTierJIT()
, parser(new Parser)
, interpreter(0)
, heap(this, heapSize)
-#if ENABLE(TIERED_COMPILATION)
+#if ENABLE(DFG_JIT)
, sizeOfLastOSRScratchBuffer(0)
#endif
, dynamicGlobalObject(0)
delete m_rtTraceList;
#endif
-#if ENABLE(TIERED_COMPILATION)
+#if ENABLE(DFG_JIT)
for (unsigned i = 0; i < osrScratchBuffers.size(); ++i)
fastFree(osrScratchBuffers[i]);
#endif
#ifndef NDEBUG
int64_t debugDataBuffer[64];
#endif
-#if ENABLE(TIERED_COMPILATION)
+#if ENABLE(DFG_JIT)
Vector<void*> osrScratchBuffers;
size_t sizeOfLastOSRScratchBuffer;
#define ENABLE_DFG_JIT 1
#endif
-#if !defined(ENABLE_TIERED_COMPILATION) && ENABLE(DFG_JIT)
-#define ENABLE_TIERED_COMPILATION 1
-#endif
-
-/* Currently only implemented for JSVALUE64, only tested on PLATFORM(MAC) */
-#if !defined(ENABLE_VALUE_PROFILER) && ENABLE(TIERED_COMPILATION)
+/* Currently only implemented for JSVALUE64, only tested on PLATFORM(MAC). */
+#if !defined(ENABLE_VALUE_PROFILER) && ENABLE(DFG_JIT)
#define ENABLE_VALUE_PROFILER 1
#endif
-#if !defined(ENABLE_DYNAMIC_OPTIMIZATION) && ENABLE(TIERED_COMPILATION)
-#define ENABLE_DYNAMIC_OPTIMIZATION 1
-#endif
-
-#if !defined(ENABLE_DYNAMIC_TERMINATE_SPECULATION) && ENABLE(DYNAMIC_OPTIMIZATION)
-#define ENABLE_DYNAMIC_TERMINATE_SPECULATION 1
-#endif
-
#if !defined(ENABLE_VERBOSE_VALUE_PROFILE) && ENABLE(VALUE_PROFILER)
#define ENABLE_VERBOSE_VALUE_PROFILE 0
#endif