LIST(APPEND JavaScriptCore_LINK_FLAGS
${ECORE_LDFLAGS}
)
+
+IF (ENABLE_DFG_JIT)
+ LIST(APPEND JavaScriptCore_INCLUDE_DIRECTORIES
+ ${JAVASCRIPTCORE_DIR}/dfg
+ )
+ LIST(APPEND JavaScriptCore_SOURCES
+ dfg/DFGByteCodeParser.cpp
+ dfg/DFGCapabilities.cpp
+ dfg/DFGDriver.cpp
+ dfg/DFGGraph.cpp
+ dfg/DFGJITCodeGenerator.cpp
+ dfg/DFGJITCodeGenerator32_64.cpp
+ dfg/DFGJITCompiler.cpp
+ dfg/DFGJITCompiler32_64.cpp
+ dfg/DFGOperations.cpp
+ dfg/DFGOSREntry.cpp
+ dfg/DFGPropagator.cpp
+ dfg/DFGRepatch.cpp
+ dfg/DFGSpeculativeJIT.cpp
+ dfg/DFGSpeculativeJIT32_64.cpp
+ )
+ENDIF ()
+2011-09-23 Yuqiang Xian <yuqiang.xian@intel.com>
+
+ Add JSVALUE32_64 support to DFG JIT
+ https://bugs.webkit.org/show_bug.cgi?id=67460
+
+ Reviewed by Gavin Barraclough.
+
+ This is the initial attempt to add JSVALUE32_64 support to DFG JIT.
+ It's tested on IA32 Linux EFL port currently. It still cannot run
+ all the test cases and benchmarks so should be turned off now.
+
+ The major work includes:
+ 1) dealing with JSVALUE32_64 data format in DFG JIT;
+ 2) bindings between 64-bit JS Value and 32-bit registers;
+ 3) handling of function calls. Currently for DFG operation function
+ calls we follow the X86 cdecl calling convention on Linux, and the
+ implementation is in a naive way by pushing the arguments onto the stack
+ one by one.
+
+ The known issues include:
+ 1) some code duplicates unnecessarily, especially in Speculative JIT
+ code generation, where most of the operations on SpeculateInteger /
+ SpeculateDouble should be identical to the JSVALUE64 code. Refactoring
+ is needed in the future;
+ 2) lack of op_call and op_construct support, compared to the current
+ JSVALUE64 DFG;
+ 3) currently integer speculations assume to be StrictInt32;
+ 4) lack of JSBoolean speculations;
+ 5) boxing and unboxing doubles could be improved;
+ 6) DFG X86 register description is different from the baseline JIT;
+ the timeoutCheckRegister is used for general purpose usage;
+ 7) calls to runtime functions with primitive double parameters (e.g.
+ fmod) don't work. Support needs to be added to the assembler to
+ implement the mechanism of passing double parameters for X86 cdecl
+ convention.
+
+ And there should be many other hidden bugs which should be exposed and
+ resolved in later debugging process.
+
+ * CMakeListsEfl.txt:
+ * assembler/MacroAssemblerX86.h:
+ (JSC::MacroAssemblerX86::loadDouble):
+ (JSC::MacroAssemblerX86::storeDouble):
+ * assembler/X86Assembler.h:
+ (JSC::X86Assembler::movsd_rm):
+ * bytecode/StructureStubInfo.h:
+ * dfg/DFGByteCodeParser.cpp:
+ (JSC::DFG::ByteCodeParser::parseBlock):
+ * dfg/DFGCapabilities.h:
+ (JSC::DFG::canCompileOpcode):
+ * dfg/DFGFPRInfo.h:
+ (JSC::DFG::FPRInfo::debugName):
+ * dfg/DFGGPRInfo.h:
+ (JSC::DFG::GPRInfo::toRegister):
+ (JSC::DFG::GPRInfo::toIndex):
+ (JSC::DFG::GPRInfo::debugName):
+ * dfg/DFGGenerationInfo.h:
+ (JSC::DFG::needDataFormatConversion):
+ (JSC::DFG::GenerationInfo::initJSValue):
+ (JSC::DFG::GenerationInfo::initDouble):
+ (JSC::DFG::GenerationInfo::gpr):
+ (JSC::DFG::GenerationInfo::tagGPR):
+ (JSC::DFG::GenerationInfo::payloadGPR):
+ (JSC::DFG::GenerationInfo::fpr):
+ (JSC::DFG::GenerationInfo::fillJSValue):
+ (JSC::DFG::GenerationInfo::fillCell):
+ (JSC::DFG::GenerationInfo::fillDouble):
+ * dfg/DFGJITCodeGenerator.cpp:
+ * dfg/DFGJITCodeGenerator.h:
+ (JSC::DFG::JITCodeGenerator::allocate):
+ (JSC::DFG::JITCodeGenerator::use):
+ (JSC::DFG::JITCodeGenerator::registersMatched):
+ (JSC::DFG::JITCodeGenerator::silentSpillGPR):
+ (JSC::DFG::JITCodeGenerator::silentFillGPR):
+ (JSC::DFG::JITCodeGenerator::silentFillFPR):
+ (JSC::DFG::JITCodeGenerator::silentSpillAllRegisters):
+ (JSC::DFG::JITCodeGenerator::silentFillAllRegisters):
+ (JSC::DFG::JITCodeGenerator::boxDouble):
+ (JSC::DFG::JITCodeGenerator::unboxDouble):
+ (JSC::DFG::JITCodeGenerator::spill):
+ (JSC::DFG::addressOfDoubleConstant):
+ (JSC::DFG::integerResult):
+ (JSC::DFG::jsValueResult):
+ (JSC::DFG::setupResults):
+ (JSC::DFG::callOperation):
+ (JSC::JSValueOperand::JSValueOperand):
+ (JSC::JSValueOperand::~JSValueOperand):
+ (JSC::JSValueOperand::isDouble):
+ (JSC::JSValueOperand::fill):
+ (JSC::JSValueOperand::tagGPR):
+ (JSC::JSValueOperand::payloadGPR):
+ (JSC::JSValueOperand::fpr):
+ (JSC::GPRTemporary::~GPRTemporary):
+ (JSC::GPRTemporary::gpr):
+ (JSC::GPRResult2::GPRResult2):
+ * dfg/DFGJITCodeGenerator32_64.cpp: Added.
+ (JSC::DFG::JITCodeGenerator::clearGenerationInfo):
+ (JSC::DFG::JITCodeGenerator::fillInteger):
+ (JSC::DFG::JITCodeGenerator::fillDouble):
+ (JSC::DFG::JITCodeGenerator::fillJSValue):
+ (JSC::DFG::JITCodeGenerator::fillStorage):
+ (JSC::DFG::JITCodeGenerator::useChildren):
+ (JSC::DFG::JITCodeGenerator::isStrictInt32):
+ (JSC::DFG::JITCodeGenerator::isKnownInteger):
+ (JSC::DFG::JITCodeGenerator::isKnownNumeric):
+ (JSC::DFG::JITCodeGenerator::isKnownCell):
+ (JSC::DFG::JITCodeGenerator::isKnownNotInteger):
+ (JSC::DFG::JITCodeGenerator::isKnownNotNumber):
+ (JSC::DFG::JITCodeGenerator::isKnownBoolean):
+ (JSC::DFG::JITCodeGenerator::nonSpeculativeValueToNumber):
+ (JSC::DFG::JITCodeGenerator::nonSpeculativeValueToInt32):
+ (JSC::DFG::JITCodeGenerator::nonSpeculativeUInt32ToNumber):
+ (JSC::DFG::JITCodeGenerator::nonSpeculativeKnownConstantArithOp):
+ (JSC::DFG::JITCodeGenerator::nonSpeculativeBasicArithOp):
+ (JSC::DFG::JITCodeGenerator::nonSpeculativeArithMod):
+ (JSC::DFG::JITCodeGenerator::nonSpeculativeCheckHasInstance):
+ (JSC::DFG::JITCodeGenerator::nonSpeculativeInstanceOf):
+ (JSC::DFG::JITCodeGenerator::cachedGetById):
+ (JSC::DFG::JITCodeGenerator::writeBarrier):
+ (JSC::DFG::JITCodeGenerator::cachedPutById):
+ (JSC::DFG::JITCodeGenerator::cachedGetMethod):
+ (JSC::DFG::JITCodeGenerator::nonSpeculativeNonPeepholeCompareNull):
+ (JSC::DFG::JITCodeGenerator::nonSpeculativePeepholeBranchNull):
+ (JSC::DFG::JITCodeGenerator::nonSpeculativeCompareNull):
+ (JSC::DFG::JITCodeGenerator::nonSpeculativePeepholeBranch):
+ (JSC::DFG::JITCodeGenerator::nonSpeculativeNonPeepholeCompare):
+ (JSC::DFG::JITCodeGenerator::nonSpeculativeCompare):
+ (JSC::DFG::JITCodeGenerator::nonSpeculativePeepholeStrictEq):
+ (JSC::DFG::JITCodeGenerator::nonSpeculativeNonPeepholeStrictEq):
+ (JSC::DFG::JITCodeGenerator::nonSpeculativeStrictEq):
+ (JSC::DFG::JITCodeGenerator::emitBranch):
+ (JSC::DFG::JITCodeGenerator::nonSpeculativeLogicalNot):
+ (JSC::DFG::JITCodeGenerator::emitCall):
+ (JSC::DFG::JITCodeGenerator::speculationCheck):
+ (JSC::DFG::dataFormatString):
+ (JSC::DFG::JITCodeGenerator::dump):
+ (JSC::DFG::JITCodeGenerator::checkConsistency):
+ (JSC::DFG::GPRTemporary::GPRTemporary):
+ (JSC::DFG::FPRTemporary::FPRTemporary):
+ * dfg/DFGJITCompiler.cpp:
+ * dfg/DFGJITCompiler.h:
+ (JSC::DFG::JITCompiler::tagForGlobalVar):
+ (JSC::DFG::JITCompiler::payloadForGlobalVar):
+ (JSC::DFG::JITCompiler::appendCallWithExceptionCheck):
+ (JSC::DFG::JITCompiler::addressOfDoubleConstant):
+ (JSC::DFG::JITCompiler::boxDouble):
+ (JSC::DFG::JITCompiler::unboxDouble):
+ (JSC::DFG::JITCompiler::addPropertyAccess):
+ (JSC::DFG::JITCompiler::PropertyAccessRecord::PropertyAccessRecord):
+ * dfg/DFGJITCompiler32_64.cpp: Added.
+ (JSC::DFG::JITCompiler::fillNumericToDouble):
+ (JSC::DFG::JITCompiler::fillInt32ToInteger):
+ (JSC::DFG::JITCompiler::fillToJS):
+ (JSC::DFG::JITCompiler::exitSpeculativeWithOSR):
+ (JSC::DFG::JITCompiler::linkOSRExits):
+ (JSC::DFG::JITCompiler::compileEntry):
+ (JSC::DFG::JITCompiler::compileBody):
+ (JSC::DFG::JITCompiler::link):
+ (JSC::DFG::JITCompiler::compile):
+ (JSC::DFG::JITCompiler::compileFunction):
+ (JSC::DFG::JITCompiler::jitAssertIsInt32):
+ (JSC::DFG::JITCompiler::jitAssertIsJSInt32):
+ (JSC::DFG::JITCompiler::jitAssertIsJSNumber):
+ (JSC::DFG::JITCompiler::jitAssertIsJSDouble):
+ (JSC::DFG::JITCompiler::jitAssertIsCell):
+ (JSC::DFG::JITCompiler::emitCount):
+ (JSC::DFG::JITCompiler::setSamplingFlag):
+ (JSC::DFG::JITCompiler::clearSamplingFlag):
+ * dfg/DFGJITCompilerInlineMethods.h: Added.
+ (JSC::DFG::JITCompiler::emitLoadTag):
+ (JSC::DFG::JITCompiler::emitLoadPayload):
+ (JSC::DFG::JITCompiler::emitLoad):
+ (JSC::DFG::JITCompiler::emitLoad2):
+ (JSC::DFG::JITCompiler::emitLoadDouble):
+ (JSC::DFG::JITCompiler::emitLoadInt32ToDouble):
+ (JSC::DFG::JITCompiler::emitStore):
+ (JSC::DFG::JITCompiler::emitStoreInt32):
+ (JSC::DFG::JITCompiler::emitStoreCell):
+ (JSC::DFG::JITCompiler::emitStoreBool):
+ (JSC::DFG::JITCompiler::emitStoreDouble):
+ * dfg/DFGNode.h:
+ * dfg/DFGOperations.cpp:
+ * dfg/DFGRepatch.cpp:
+ (JSC::DFG::generateProtoChainAccessStub):
+ (JSC::DFG::tryCacheGetByID):
+ (JSC::DFG::tryBuildGetByIDList):
+ (JSC::DFG::tryCachePutByID):
+ * dfg/DFGSpeculativeJIT.cpp:
+ * dfg/DFGSpeculativeJIT.h:
+ (JSC::DFG::ValueRecovery::inGPR):
+ (JSC::DFG::ValueRecovery::inPair):
+ (JSC::DFG::ValueRecovery::tagGPR):
+ (JSC::DFG::ValueRecovery::payloadGPR):
+ * dfg/DFGSpeculativeJIT32_64.cpp: Added.
+ (JSC::DFG::SpeculativeJIT::fillSpeculateIntInternal):
+ (JSC::DFG::ValueSource::dump):
+ (JSC::DFG::ValueRecovery::dump):
+ (JSC::DFG::OSRExit::OSRExit):
+ (JSC::DFG::OSRExit::dump):
+ (JSC::DFG::SpeculativeJIT::fillSpeculateInt):
+ (JSC::DFG::SpeculativeJIT::fillSpeculateIntStrict):
+ (JSC::DFG::SpeculativeJIT::fillSpeculateDouble):
+ (JSC::DFG::SpeculativeJIT::fillSpeculateCell):
+ (JSC::DFG::SpeculativeJIT::fillSpeculateBoolean):
+ (JSC::DFG::SpeculativeJIT::compilePeepHoleIntegerBranch):
+ (JSC::DFG::SpeculativeJIT::convertToDouble):
+ (JSC::DFG::SpeculativeJIT::compilePeepHoleDoubleBranch):
+ (JSC::DFG::SpeculativeJIT::compilePeepHoleObjectEquality):
+ (JSC::DFG::SpeculativeJIT::compileObjectEquality):
+ (JSC::DFG::SpeculativeJIT::compare):
+ (JSC::DFG::SpeculativeJIT::compile):
+ (JSC::DFG::SpeculativeJIT::compileMovHint):
+ (JSC::DFG::SpeculativeJIT::checkArgumentTypes):
+ (JSC::DFG::SpeculativeJIT::initializeVariableTypes):
+ (JSC::DFG::SpeculativeJIT::computeValueRecoveryFor):
+ * runtime/JSValue.h:
+
2011-09-23 Filip Pizlo <fpizlo@apple.com>
wtf/BitVector.h has a variety of bugs which manifest when the
using MacroAssemblerX86Common::call;
using MacroAssemblerX86Common::addDouble;
using MacroAssemblerX86Common::loadDouble;
+ using MacroAssemblerX86Common::storeDouble;
using MacroAssemblerX86Common::convertInt32ToDouble;
void add32(TrustedImm32 imm, RegisterID src, RegisterID dest)
m_assembler.movsd_mr(address, dest);
}
+ void storeDouble(FPRegisterID src, const void* address)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.movsd_rm(src, address);
+ }
+
void convertInt32ToDouble(AbsoluteAddress src, FPRegisterID dest)
{
m_assembler.cvtsi2sd_mr(src.m_ptr, dest);
m_formatter.prefix(PRE_SSE_F2);
m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, (RegisterID)dst, address);
}
+ void movsd_rm(XMMRegisterID src, const void* address)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_MOVSD_WsdVsd, (RegisterID)src, address);
+ }
#endif
void mulsd_rr(XMMRegisterID src, XMMRegisterID dst)
#if ENABLE(DFG_JIT)
int8_t baseGPR;
+#if USE(JSVALUE32_64)
+ int8_t valueTagGPR;
+#endif
int8_t valueGPR;
int8_t scratchGPR;
int16_t deltaCallToDone;
case op_end:
addToGraph(Return, get(currentInstruction[1].u.operand));
LAST_OPCODE(op_end);
-
+
case op_throw:
addToGraph(Throw, get(currentInstruction[1].u.operand));
LAST_OPCODE(op_throw);
addToGraph(ThrowReferenceError);
LAST_OPCODE(op_throw_reference_error);
+#if USE(JSVALUE64)
case op_call: {
NodeIndex callTarget = get(currentInstruction[1].u.operand);
if (m_graph.isFunctionConstant(m_codeBlock, callTarget)) {
addCall(interpreter, currentInstruction, Construct);
NEXT_OPCODE(op_construct);
}
+#endif
case op_call_put_result:
NEXT_OPCODE(op_call_put_result);
case op_loop_if_greatereq:
case op_ret:
case op_end:
+#if USE(JSVALUE64)
case op_call:
case op_construct:
+#endif
case op_call_put_result:
case op_resolve:
case op_resolve_base:
static const char* debugName(FPRReg reg)
{
ASSERT(reg != InvalidFPRReg);
+#if CPU(X86_64)
ASSERT(reg < 16);
static const char* nameForRegister[16] = {
"xmm0", "xmm1", "xmm2", "xmm3",
"xmm8", "xmm9", "xmm10", "xmm11",
"xmm12", "xmm13", "xmm14", "xmm15"
};
+#elif CPU(X86)
+ ASSERT(reg < 8);
+ static const char* nameForRegister[8] = {
+ "xmm0", "xmm1", "xmm2", "xmm3",
+ "xmm4", "xmm5", "xmm6", "xmm7"
+ };
+#endif
return nameForRegister[reg];
}
#endif
class GPRInfo {
public:
typedef GPRReg RegisterType;
- static const unsigned numberOfRegisters = 4;
+ static const unsigned numberOfRegisters = 5;
// These registers match the baseline JIT.
static const GPRReg cachedResultRegister = X86Registers::eax;
- static const GPRReg timeoutCheckRegister = X86Registers::esi;
+ static const GPRReg cachedResultRegister2 = X86Registers::edx;
static const GPRReg callFrameRegister = X86Registers::edi;
// Temporary registers.
static const GPRReg regT0 = X86Registers::eax;
static const GPRReg regT1 = X86Registers::edx;
static const GPRReg regT2 = X86Registers::ecx;
static const GPRReg regT3 = X86Registers::ebx;
+ static const GPRReg regT4 = X86Registers::esi;
// These constants provide the names for the general purpose argument & return value registers.
static const GPRReg argumentGPR0 = X86Registers::ecx; // regT2
static const GPRReg argumentGPR1 = X86Registers::edx; // regT1
static GPRReg toRegister(unsigned index)
{
ASSERT(index < numberOfRegisters);
- static const GPRReg registerForIndex[numberOfRegisters] = { regT0, regT1, regT2, regT3, };
+ static const GPRReg registerForIndex[numberOfRegisters] = { regT0, regT1, regT2, regT3, regT4 };
return registerForIndex[index];
}
{
ASSERT(reg != InvalidGPRReg);
ASSERT(reg < 8);
- static const unsigned indexForRegister[8] = { 0, 2, 1, 3, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex };
+ static const unsigned indexForRegister[8] = { 0, 2, 1, 3, InvalidIndex, InvalidIndex, 4, InvalidIndex };
unsigned result = indexForRegister[reg];
ASSERT(result != InvalidIndex);
return result;
ASSERT(reg != InvalidGPRReg);
ASSERT(reg < 8);
static const char* nameForRegister[8] = {
- "rax", "rcx", "rdx", "rbx",
- "rsp", "rbp", "rsi", "rdi",
+ "eax", "ecx", "edx", "ebx",
+ "esp", "ebp", "esi", "edi",
};
return nameForRegister[reg];
}
}
#endif
+#if USE(JSVALUE64)
inline bool needDataFormatConversion(DataFormat from, DataFormat to)
{
ASSERT(from != DataFormatNone);
return true;
}
+#elif USE(JSVALUE32_64)
+inline bool needDataFormatConversion(DataFormat from, DataFormat to)
+{
+ ASSERT(from != DataFormatNone);
+ ASSERT(to != DataFormatNone);
+ switch (from) {
+ case DataFormatInteger:
+ case DataFormatCell:
+ return to != DataFormatInteger && to != DataFormatCell;
+ case DataFormatDouble:
+ case DataFormatJSDouble:
+ switch (to) {
+ case DataFormatDouble:
+ case DataFormatJS:
+ case DataFormatJSDouble:
+ return false;
+ default:
+ return true;
+ }
+ case DataFormatJS:
+ return !(to & DataFormatJS);
+ case DataFormatJSInteger:
+ case DataFormatJSCell:
+ case DataFormatJSBoolean:
+ return to != DataFormatJS && to != from;
+ case DataFormatStorage:
+ ASSERT(to == DataFormatStorage);
+ return false;
+ default:
+ ASSERT_NOT_REACHED();
+ }
+ return true;
+}
+#endif
+
inline bool isJSFormat(DataFormat format, DataFormat expectedFormat)
{
ASSERT(expectedFormat & DataFormatJS);
m_canFill = false;
u.gpr = gpr;
}
+#if USE(JSVALUE64)
void initJSValue(NodeIndex nodeIndex, uint32_t useCount, GPRReg gpr, DataFormat format = DataFormatJS)
{
ASSERT(format & DataFormatJS);
m_canFill = false;
u.gpr = gpr;
}
+#elif USE(JSVALUE32_64)
+ void initJSValue(NodeIndex nodeIndex, uint32_t useCount, GPRReg tagGPR, GPRReg payloadGPR, DataFormat format = DataFormatJS)
+ {
+ ASSERT(format & DataFormatJS);
+
+ m_nodeIndex = nodeIndex;
+ m_useCount = useCount;
+ m_registerFormat = format;
+ m_spillFormat = DataFormatNone;
+ m_canFill = false;
+ u.v.tagGPR = tagGPR;
+ u.v.payloadGPR = payloadGPR;
+ }
+#endif
void initCell(NodeIndex nodeIndex, uint32_t useCount, GPRReg gpr)
{
m_nodeIndex = nodeIndex;
}
void initDouble(NodeIndex nodeIndex, uint32_t useCount, FPRReg fpr)
{
+ ASSERT(fpr != InvalidFPRReg);
m_nodeIndex = nodeIndex;
m_useCount = useCount;
m_registerFormat = DataFormatDouble;
}
// Get the machine resister currently holding the value.
+#if USE(JSVALUE64)
GPRReg gpr() { ASSERT(m_registerFormat && m_registerFormat != DataFormatDouble); return u.gpr; }
FPRReg fpr() { ASSERT(m_registerFormat == DataFormatDouble); return u.fpr; }
+#elif USE(JSVALUE32_64)
+ GPRReg gpr() { ASSERT(m_registerFormat == DataFormatInteger || m_registerFormat == DataFormatCell || m_registerFormat == DataFormatStorage); return u.gpr; }
+ GPRReg tagGPR() { ASSERT(m_registerFormat & DataFormatJS); return u.v.tagGPR; }
+ GPRReg payloadGPR() { ASSERT(m_registerFormat & DataFormatJS); return u.v.payloadGPR; }
+ FPRReg fpr() { ASSERT(m_registerFormat == DataFormatDouble || m_registerFormat == DataFormatJSDouble); return u.fpr; }
+#endif
// Check whether a value needs spilling in order to free up any associated machine registers.
bool needsSpill()
// Record that this value is filled into machine registers,
// tracking which registers, and what format the value has.
+#if USE(JSVALUE64)
void fillJSValue(GPRReg gpr, DataFormat format = DataFormatJS)
{
ASSERT(format & DataFormatJS);
m_registerFormat = format;
u.gpr = gpr;
}
+#elif USE(JSVALUE32_64)
+ void fillJSValue(GPRReg tagGPR, GPRReg payloadGPR, DataFormat format = DataFormatJS)
+ {
+ ASSERT(format & DataFormatJS);
+ m_registerFormat = format;
+ u.v.tagGPR = tagGPR; // FIXME: for JSValues with known type (boolean, integer, cell etc.) no tagGPR is needed?
+ u.v.payloadGPR = payloadGPR;
+ }
+ void fillCell(GPRReg gpr)
+ {
+ m_registerFormat = DataFormatCell;
+ u.gpr = gpr;
+ }
+#endif
void fillInteger(GPRReg gpr)
{
m_registerFormat = DataFormatInteger;
}
void fillDouble(FPRReg fpr)
{
+ ASSERT(fpr != InvalidFPRReg);
m_registerFormat = DataFormatDouble;
u.fpr = fpr;
}
union {
GPRReg gpr;
FPRReg fpr;
+#if USE(JSVALUE32_64)
+ struct {
+ GPRReg tagGPR;
+ GPRReg payloadGPR;
+ } v;
+#endif
} u;
};
#include "DFGJITCodeGenerator.h"
#if ENABLE(DFG_JIT)
+#if USE(JSVALUE64)
#include "DFGSpeculativeJIT.h"
#include "LinkBuffer.h"
} } // namespace JSC::DFG
#endif
+#endif
public:
GPRReg fillInteger(NodeIndex, DataFormat& returnFormat);
FPRReg fillDouble(NodeIndex);
+#if USE(JSVALUE64)
GPRReg fillJSValue(NodeIndex);
+#elif USE(JSVALUE32_64)
+ bool fillJSValue(NodeIndex, GPRReg&, GPRReg&, FPRReg&);
+#endif
GPRReg fillStorage(NodeIndex);
// lock and unlock GPR & FPR registers.
{
VirtualRegister spillMe;
GPRReg gpr = m_gprs.allocate(spillMe);
- if (spillMe != InvalidVirtualRegister)
+ if (spillMe != InvalidVirtualRegister) {
+#if USE(JSVALUE32_64)
+ GenerationInfo& info = m_generationInfo[spillMe];
+ ASSERT(info.registerFormat() != DataFormatJSDouble);
+ if ((info.registerFormat() & DataFormatJS))
+ m_gprs.release(info.tagGPR() == gpr ? info.payloadGPR() : info.tagGPR());
+#endif
spill(spillMe);
+ }
return gpr;
}
GPRReg allocate(GPRReg specific)
{
VirtualRegister spillMe = m_gprs.allocateSpecific(specific);
- if (spillMe != InvalidVirtualRegister)
+ if (spillMe != InvalidVirtualRegister) {
+#if USE(JSVALUE32_64)
+ GenerationInfo& info = m_generationInfo[spillMe];
+ ASSERT(info.registerFormat() != DataFormatJSDouble);
+ if ((info.registerFormat() & DataFormatJS))
+ m_gprs.release(info.tagGPR() == specific ? info.payloadGPR() : info.tagGPR());
+#endif
spill(spillMe);
+ }
return specific;
}
GPRReg tryAllocate()
// Release the associated machine registers.
DataFormat registerFormat = info.registerFormat();
+#if USE(JSVALUE64)
if (registerFormat == DataFormatDouble)
m_fprs.release(info.fpr());
else if (registerFormat != DataFormatNone)
m_gprs.release(info.gpr());
+#elif USE(JSVALUE32_64)
+ if (registerFormat == DataFormatDouble || registerFormat == DataFormatJSDouble)
+ m_fprs.release(info.fpr());
+ else if (registerFormat == DataFormatInteger || registerFormat == DataFormatCell || registerFormat == DataFormatStorage)
+ m_gprs.release(info.gpr());
+ else if (registerFormat & DataFormatJS) {
+ m_gprs.release(info.tagGPR());
+ m_gprs.release(info.payloadGPR());
+ }
+#endif
}
static void markCellCard(MacroAssembler&, GPRReg ownerGPR, GPRReg scratchGPR1, GPRReg scratchGPR2);
void clearGenerationInfo();
+#if USE(JSVALUE32_64)
+ bool registersMatched(GenerationInfo& info, GPRReg exclude, GPRReg exclude2)
+ {
+ ASSERT(info.registerFormat() != DataFormatNone);
+ ASSERT(info.registerFormat() != DataFormatDouble);
+ ASSERT(info.registerFormat() != DataFormatJSDouble);
+ return !(info.registerFormat() & DataFormatJS) ? (info.gpr() == exclude || info.gpr() == exclude2) : (info.tagGPR() == exclude || info.tagGPR() == exclude2 || info.payloadGPR() == exclude || info.payloadGPR() == exclude2);
+ }
+#endif
+
// These methods are used when generating 'unexpected'
// calls out from JIT code to C++ helper routines -
// they spill all live values to the appropriate
// slots in the RegisterFile without changing any state
// in the GenerationInfo.
- void silentSpillGPR(VirtualRegister spillMe, GPRReg exclude = InvalidGPRReg)
+ void silentSpillGPR(VirtualRegister spillMe, GPRReg exclude = InvalidGPRReg, GPRReg exclude2 = InvalidGPRReg)
{
GenerationInfo& info = m_generationInfo[spillMe];
ASSERT(info.registerFormat() != DataFormatNone);
ASSERT(info.registerFormat() != DataFormatDouble);
+#if USE(JSVALUE64)
+ UNUSED_PARAM(exclude2);
if (!info.needsSpill() || (info.gpr() == exclude))
+#elif USE(JSVALUE32_64)
+ if (!info.needsSpill() || registersMatched(info, exclude, exclude2))
+#endif
return;
DataFormat registerFormat = info.registerFormat();
+#if USE(JSVALUE64)
if (registerFormat == DataFormatInteger)
m_jit.store32(info.gpr(), JITCompiler::addressFor(spillMe));
else {
ASSERT(registerFormat & DataFormatJS || registerFormat == DataFormatCell || registerFormat == DataFormatStorage);
m_jit.storePtr(info.gpr(), JITCompiler::addressFor(spillMe));
}
+#elif USE(JSVALUE32_64)
+ if (registerFormat == DataFormatInteger)
+ m_jit.store32(info.gpr(), JITCompiler::payloadFor(spillMe));
+ else if (registerFormat == DataFormatCell)
+ m_jit.storePtr(info.gpr(), JITCompiler::payloadFor(spillMe));
+ else if (registerFormat == DataFormatStorage)
+ m_jit.storePtr(info.gpr(), JITCompiler::addressFor(spillMe));
+ else {
+ ASSERT(registerFormat & DataFormatJS);
+ m_jit.store32(info.tagGPR(), JITCompiler::tagFor(spillMe));
+ m_jit.store32(info.payloadGPR(), JITCompiler::payloadFor(spillMe));
+ }
+#endif
}
void silentSpillFPR(VirtualRegister spillMe, FPRReg exclude = InvalidFPRReg)
{
m_jit.storeDouble(info.fpr(), JITCompiler::addressFor(spillMe));
}
- void silentFillGPR(VirtualRegister spillMe, GPRReg exclude = InvalidGPRReg)
+ void silentFillGPR(VirtualRegister spillMe, GPRReg exclude = InvalidGPRReg, GPRReg exclude2 = InvalidGPRReg)
{
GenerationInfo& info = m_generationInfo[spillMe];
+#if USE(JSVALUE64)
+ UNUSED_PARAM(exclude2);
if (info.gpr() == exclude)
+#elif USE(JSVALUE32_64)
+ if (registersMatched(info, exclude, exclude2))
+#endif
return;
NodeIndex nodeIndex = info.nodeIndex();
ASSERT(info.registerFormat() != DataFormatDouble);
DataFormat registerFormat = info.registerFormat();
+#if USE(JSVALUE64)
if (registerFormat == DataFormatInteger) {
if (node.hasConstant()) {
ASSERT(isInt32Constant(nodeIndex));
ASSERT(registerFormat & DataFormatJS || registerFormat == DataFormatCell || registerFormat == DataFormatStorage);
m_jit.loadPtr(JITCompiler::addressFor(spillMe), info.gpr());
}
+#elif USE(JSVALUE32_64)
+ if (registerFormat == DataFormatInteger || registerFormat == DataFormatCell) {
+ if (node.isConstant())
+ m_jit.move(Imm32(valueOfInt32Constant(nodeIndex)), info.gpr());
+ else
+ m_jit.load32(JITCompiler::payloadFor(spillMe), info.gpr());
+ } else if (registerFormat == DataFormatStorage)
+ m_jit.load32(JITCompiler::addressFor(spillMe), info.gpr());
+ else
+ m_jit.emitLoad(nodeIndex, info.tagGPR(), info.payloadGPR());
+#endif
}
+
void silentFillFPR(VirtualRegister spillMe, GPRReg canTrample, FPRReg exclude = InvalidFPRReg)
{
GenerationInfo& info = m_generationInfo[spillMe];
return;
NodeIndex nodeIndex = info.nodeIndex();
+#if USE(JSVALUE64)
Node& node = m_jit.graph()[nodeIndex];
ASSERT(info.registerFormat() == DataFormatDouble);
}
m_jit.loadDouble(JITCompiler::addressFor(spillMe), info.fpr());
+#elif USE(JSVALUE32_64)
+ ASSERT(info.registerFormat() == DataFormatDouble || info.registerFormat() == DataFormatJSDouble);
+ m_jit.emitLoadDouble(nodeIndex, info.fpr());
+#endif
}
-
- void silentSpillAllRegisters(GPRReg exclude)
+
+ void silentSpillAllRegisters(GPRReg exclude, GPRReg exclude2 = InvalidGPRReg)
{
for (gpr_iterator iter = m_gprs.begin(); iter != m_gprs.end(); ++iter) {
if (iter.name() != InvalidVirtualRegister)
- silentSpillGPR(iter.name(), exclude);
+ silentSpillGPR(iter.name(), exclude, exclude2);
}
for (fpr_iterator iter = m_fprs.begin(); iter != m_fprs.end(); ++iter) {
if (iter.name() != InvalidVirtualRegister)
silentSpillFPR(iter.name(), exclude);
}
}
- void silentFillAllRegisters(GPRReg exclude)
+
+ void silentFillAllRegisters(GPRReg exclude, GPRReg exclude2 = InvalidGPRReg)
{
GPRReg canTrample = GPRInfo::regT0;
if (exclude == GPRInfo::regT0)
}
for (gpr_iterator iter = m_gprs.begin(); iter != m_gprs.end(); ++iter) {
if (iter.name() != InvalidVirtualRegister)
- silentFillGPR(iter.name(), exclude);
+ silentFillGPR(iter.name(), exclude, exclude2);
}
}
void silentFillAllRegisters(FPRReg exclude)
}
// These methods convert between doubles, and doubles boxed and JSValues.
+#if USE(JSVALUE64)
GPRReg boxDouble(FPRReg fpr, GPRReg gpr)
{
return m_jit.boxDouble(fpr, gpr);
{
return boxDouble(fpr, allocate());
}
+#elif USE(JSVALUE32_64)
+ void boxDouble(FPRReg fpr, GPRReg tagGPR, GPRReg payloadGPR, VirtualRegister virtualRegister)
+ {
+ m_jit.boxDouble(fpr, tagGPR, payloadGPR, virtualRegister);
+ }
+ void unboxDouble(GPRReg tagGPR, GPRReg payloadGPR, FPRReg fpr, VirtualRegister virtualRegister)
+ {
+ m_jit.unboxDouble(tagGPR, payloadGPR, fpr, virtualRegister);
+ }
+#endif
// Spill a VirtualRegister to the RegisterFile.
void spill(VirtualRegister spillMe)
{
GenerationInfo& info = m_generationInfo[spillMe];
+#if USE(JSVALUE32_64)
+ if (info.registerFormat() == DataFormatNone) // it has been spilled. JS values which have two GPRs can reach here
+ return;
+#endif
// Check the GenerationInfo to see if this value need writing
// to the RegisterFile - if not, mark it as spilled & return.
if (!info.needsSpill()) {
DataFormat spillFormat = info.registerFormat();
switch (spillFormat) {
+ case DataFormatStorage: {
+ // This is special, since it's not a JS value - as in it's not visible to JS
+ // code.
+ m_jit.storePtr(info.gpr(), JITCompiler::addressFor(spillMe));
+ info.spill(DataFormatStorage);
+ return;
+ }
+
+#if USE(JSVALUE64)
case DataFormatDouble: {
// All values are spilled as JSValues, so box the double via a temporary gpr.
GPRReg gpr = boxDouble(info.fpr());
info.spill(DataFormatJSDouble);
return;
}
-
- case DataFormatStorage: {
- // This is special, since it's not a JS value - as in it's not visible to JS
- // code.
- m_jit.storePtr(info.gpr(), JITCompiler::addressFor(spillMe));
- info.spill(DataFormatStorage);
- return;
- }
default:
// The following code handles JSValues, int32s, and cells.
info.spill((DataFormat)(spillFormat | DataFormatJS));
return;
}
+#elif USE(JSVALUE32_64)
+ case DataFormatDouble:
+ case DataFormatJSDouble: {
+ // On JSVALUE32_64 boxing a double is a no-op.
+ m_jit.storeDouble(info.fpr(), JITCompiler::addressFor(spillMe));
+ info.spill(DataFormatJSDouble);
+ return;
+ }
+ default:
+ // The following code handles JSValues, int32s, and cells.
+ ASSERT(spillFormat == DataFormatInteger || spillFormat == DataFormatCell || spillFormat & DataFormatJS);
+
+ if (spillFormat == DataFormatInteger || spillFormat == DataFormatCell) {
+ GPRReg reg = info.gpr();
+ m_jit.store32(reg, JITCompiler::payloadFor(spillMe));
+ // We need to box int32 and cell values ...
+ if (spillFormat == DataFormatInteger)
+ m_jit.store32(TrustedImm32(JSValue::Int32Tag), JITCompiler::tagFor(spillMe));
+ else // cells
+ m_jit.store32(TrustedImm32(JSValue::CellTag), JITCompiler::tagFor(spillMe));
+ } else { // JSValue
+ m_jit.store32(info.tagGPR(), JITCompiler::tagFor(spillMe));
+ m_jit.store32(info.payloadGPR(), JITCompiler::payloadFor(spillMe));
+ }
+ info.spill((DataFormat)(spillFormat | DataFormatJS));
+ return;
+ }
+#endif
}
bool isStrictInt32(NodeIndex);
bool isFunctionConstant(NodeIndex nodeIndex) { return m_jit.isFunctionConstant(nodeIndex); }
int32_t valueOfInt32Constant(NodeIndex nodeIndex) { return m_jit.valueOfInt32Constant(nodeIndex); }
double valueOfNumberConstant(NodeIndex nodeIndex) { return m_jit.valueOfNumberConstant(nodeIndex); }
+#if USE(JSVALUE32_64)
+ void* addressOfDoubleConstant(NodeIndex nodeIndex) { return m_jit.addressOfDoubleConstant(nodeIndex); }
+#endif
JSValue valueOfJSConstant(NodeIndex nodeIndex) { return m_jit.valueOfJSConstant(nodeIndex); }
bool valueOfBooleanConstant(NodeIndex nodeIndex) { return m_jit.valueOfBooleanConstant(nodeIndex); }
JSFunction* valueOfFunctionConstant(NodeIndex nodeIndex) { return m_jit.valueOfFunctionConstant(nodeIndex); }
}
#endif
+#if USE(JSVALUE64)
MacroAssembler::ImmPtr valueOfJSConstantAsImmPtr(NodeIndex nodeIndex)
{
return MacroAssembler::ImmPtr(JSValue::encode(valueOfJSConstant(nodeIndex)));
}
+#endif
// Helper functions to enable code sharing in implementations of bit/shift ops.
void bitOp(NodeType op, int32_t imm, GPRReg op1, GPRReg result)
void nonSpeculativeCheckHasInstance(Node&);
void nonSpeculativeInstanceOf(Node&);
+#if USE(JSVALUE64)
JITCompiler::Call cachedGetById(GPRReg baseGPR, GPRReg resultGPR, GPRReg scratchGPR, unsigned identifierNumber, JITCompiler::Jump slowPathTarget = JITCompiler::Jump(), NodeType = GetById);
void cachedPutById(GPRReg base, GPRReg value, NodeIndex valueIndex, GPRReg scratchGPR, unsigned identifierNumber, PutKind, JITCompiler::Jump slowPathTarget = JITCompiler::Jump());
void cachedGetMethod(GPRReg baseGPR, GPRReg resultGPR, GPRReg scratchGPR, unsigned identifierNumber, JITCompiler::Jump slowPathTarget = JITCompiler::Jump());
-
+#elif USE(JSVALUE32_64)
+ JITCompiler::Call cachedGetById(GPRReg basePayloadGPR, GPRReg resultTagGPR, GPRReg resultPayloadGPR, GPRReg scratchGPR, unsigned identifierNumber, JITCompiler::Jump slowPathTarget = JITCompiler::Jump(), NodeType = GetById);
+ void cachedPutById(GPRReg basePayloadGPR, GPRReg valueTagGPR, GPRReg valuePayloadGPR, NodeIndex valueIndex, GPRReg scratchGPR, unsigned identifierNumber, PutKind, JITCompiler::Jump slowPathTarget = JITCompiler::Jump());
+ void cachedGetMethod(GPRReg basePayloadGPR, GPRReg resultTagGPR, GPRReg resultPayloadGPR, GPRReg scratchGPR, unsigned identifierNumber, JITCompiler::Jump slowPathTarget = JITCompiler::Jump());
+#endif
+
void nonSpeculativeNonPeepholeCompareNull(NodeIndex operand, bool invert = false);
void nonSpeculativePeepholeBranchNull(NodeIndex operand, NodeIndex branchNodeIndex, bool invert = false);
bool nonSpeculativeCompareNull(Node&, NodeIndex operand, bool invert = false);
m_gprs.retain(reg, virtualRegister, SpillOrderInteger);
info.initInteger(nodeIndex, node.refCount(), reg);
} else {
+#if USE(JSVALUE64)
ASSERT(format == DataFormatJSInteger);
m_jit.jitAssertIsJSInt32(reg);
m_gprs.retain(reg, virtualRegister, SpillOrderJS);
info.initJSValue(nodeIndex, node.refCount(), reg, format);
+#elif USE(JSVALUE32_64)
+ ASSERT_NOT_REACHED();
+#endif
}
}
void integerResult(GPRReg reg, NodeIndex nodeIndex, UseChildrenMode mode)
GenerationInfo& info = m_generationInfo[virtualRegister];
info.initCell(nodeIndex, node.refCount(), reg);
}
+#if USE(JSVALUE64)
void jsValueResult(GPRReg reg, NodeIndex nodeIndex, DataFormat format = DataFormatJS, UseChildrenMode mode = CallUseChildren)
{
if (format == DataFormatJSInteger)
{
jsValueResult(reg, nodeIndex, DataFormatJS, mode);
}
+#elif USE(JSVALUE32_64)
+ void jsValueResult(GPRReg tag, GPRReg payload, NodeIndex nodeIndex, DataFormat format = DataFormatJS, UseChildrenMode mode = CallUseChildren)
+ {
+ Node& node = m_jit.graph()[nodeIndex];
+ if (mode == CallUseChildren)
+ useChildren(node);
+
+ VirtualRegister virtualRegister = node.virtualRegister();
+ m_gprs.retain(tag, virtualRegister, SpillOrderJS);
+ m_gprs.retain(payload, virtualRegister, SpillOrderJS);
+ GenerationInfo& info = m_generationInfo[virtualRegister];
+ info.initJSValue(nodeIndex, node.refCount(), tag, payload, format);
+ }
+ void jsValueResult(GPRReg tag, GPRReg payload, NodeIndex nodeIndex, UseChildrenMode mode)
+ {
+ jsValueResult(tag, payload, nodeIndex, DataFormatJS, mode);
+ }
+#endif
void storageResult(GPRReg reg, NodeIndex nodeIndex, UseChildrenMode mode = CallUseChildren)
{
Node& node = m_jit.graph()[nodeIndex];
} else
m_jit.swap(destA, destB);
}
+#if CPU(X86_64)
template<FPRReg destA, FPRReg destB>
void setupTwoStubArgs(FPRReg srcA, FPRReg srcB)
{
m_jit.moveDouble(FPRInfo::returnValueFPR, result);
}
+#elif CPU(X86)
+
+ // Route a boxed JSValue produced by a stub call from the return
+ // registers into the requested (tag, payload) pair, delegating to
+ // setupTwoStubArgs which copes with overlapping/swapped registers.
+ // NOTE(review): assumes payload arrives in returnValueGPR and tag in
+ // returnValueGPR2 -- confirm against the X86 GPRInfo definition.
+ void setupResults(GPRReg tag, GPRReg payload)
+ {
+ setupTwoStubArgs<GPRInfo::returnValueGPR, GPRInfo::returnValueGPR2>(payload, tag);
+ }
+
+ // These methods add calls to C++ helper functions.
+ // On X86 these follow the cdecl convention: arguments are pushed
+ // right-to-left, with the call frame (ExecState*) pushed last so it is
+ // the first argument. An EncodedJSValue argument occupies two stack
+ // slots, payload at the lower address (pushed after the tag).
+ // All callers must have flushed live registers (ASSERT(isFlushed())).
+ // NOTE(review): the pushed arguments are not popped here -- confirm the
+ // stack is rebalanced after the call elsewhere.
+ // JSValue op(ExecState*, void*)
+ void callOperation(J_DFGOperation_EP operation, GPRReg resultTag, GPRReg resultPayload, void* pointer)
+ {
+ ASSERT(isFlushed());
+
+ m_jit.push(JITCompiler::TrustedImm32(reinterpret_cast<int>(pointer)));
+ m_jit.push(GPRInfo::callFrameRegister);
+
+ appendCallWithExceptionCheck(operation);
+ setupResults(resultTag, resultPayload);
+ }
+ // JSValue op(ExecState*, Identifier*) -- forwarded to the EP overload.
+ void callOperation(J_DFGOperation_EI operation, GPRReg resultTag, GPRReg resultPayload, Identifier* identifier)
+ {
+ callOperation((J_DFGOperation_EP)operation, resultTag, resultPayload, identifier);
+ }
+ // JSValue op(ExecState*, void*, size_t)
+ void callOperation(J_DFGOperation_EPS operation, GPRReg resultTag, GPRReg resultPayload, void* pointer, size_t size)
+ {
+ ASSERT(isFlushed());
+
+ m_jit.push(JITCompiler::TrustedImm32(size));
+ m_jit.push(JITCompiler::TrustedImm32(reinterpret_cast<int>(pointer)));
+ m_jit.push(GPRInfo::callFrameRegister);
+
+ appendCallWithExceptionCheck(operation);
+ setupResults(resultTag, resultPayload);
+ }
+ // JSValue op(ExecState*, EncodedJSValue, void*)
+ void callOperation(J_DFGOperation_EJP operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1Tag, GPRReg arg1Payload, void* pointer)
+ {
+ ASSERT(isFlushed());
+
+ m_jit.push(JITCompiler::TrustedImm32(reinterpret_cast<int>(pointer)));
+ m_jit.push(arg1Tag);
+ m_jit.push(arg1Payload);
+ m_jit.push(GPRInfo::callFrameRegister);
+
+ appendCallWithExceptionCheck(operation);
+ setupResults(resultTag, resultPayload);
+ }
+ // JSValue op(ExecState*, EncodedJSValue, Identifier*) -- forwarded to EJP.
+ void callOperation(J_DFGOperation_EJI operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1Tag, GPRReg arg1Payload, Identifier* identifier)
+ {
+ callOperation((J_DFGOperation_EJP)operation, resultTag, resultPayload, arg1Tag, arg1Payload, identifier);
+ }
+ // JSValue op(ExecState*, EncodedJSValue)
+ void callOperation(J_DFGOperation_EJ operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1Tag, GPRReg arg1Payload)
+ {
+ ASSERT(isFlushed());
+
+ m_jit.push(arg1Tag);
+ m_jit.push(arg1Payload);
+ m_jit.push(GPRInfo::callFrameRegister);
+
+ appendCallWithExceptionCheck(operation);
+ setupResults(resultTag, resultPayload);
+ }
+ // int op(ExecState*, EncodedJSValue) -- integer result in returnValueGPR.
+ void callOperation(Z_DFGOperation_EJ operation, GPRReg result, GPRReg arg1Tag, GPRReg arg1Payload)
+ {
+ ASSERT(isFlushed());
+
+ m_jit.push(arg1Tag);
+ m_jit.push(arg1Payload);
+ m_jit.push(GPRInfo::callFrameRegister);
+
+ appendCallWithExceptionCheck(operation);
+ m_jit.move(GPRInfo::returnValueGPR, result);
+ }
+ // int op(ExecState*, EncodedJSValue, EncodedJSValue)
+ void callOperation(Z_DFGOperation_EJJ operation, GPRReg result, GPRReg arg1Tag, GPRReg arg1Payload, GPRReg arg2Tag, GPRReg arg2Payload)
+ {
+ ASSERT(isFlushed());
+
+ m_jit.push(arg2Tag);
+ m_jit.push(arg2Payload);
+ m_jit.push(arg1Tag);
+ m_jit.push(arg1Payload);
+ m_jit.push(GPRInfo::callFrameRegister);
+
+ appendCallWithExceptionCheck(operation);
+ m_jit.move(GPRInfo::returnValueGPR, result);
+ }
+ // JSValue op(ExecState*, EncodedJSValue, EncodedJSValue)
+ void callOperation(J_DFGOperation_EJJ operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1Tag, GPRReg arg1Payload, GPRReg arg2Tag, GPRReg arg2Payload)
+ {
+ ASSERT(isFlushed());
+
+ m_jit.push(arg2Tag);
+ m_jit.push(arg2Payload);
+ m_jit.push(arg1Tag);
+ m_jit.push(arg1Payload);
+ m_jit.push(GPRInfo::callFrameRegister);
+
+ appendCallWithExceptionCheck(operation);
+ setupResults(resultTag, resultPayload);
+ }
+ // void op(ExecState*, EncodedJSValue, EncodedJSValue, void*) -- no result.
+ void callOperation(V_DFGOperation_EJJP operation, GPRReg arg1Tag, GPRReg arg1Payload, GPRReg arg2Tag, GPRReg arg2Payload, void* pointer)
+ {
+ ASSERT(isFlushed());
+
+ m_jit.push(JITCompiler::TrustedImm32(reinterpret_cast<int>(pointer)));
+ m_jit.push(arg2Tag);
+ m_jit.push(arg2Payload);
+ m_jit.push(arg1Tag);
+ m_jit.push(arg1Payload);
+ m_jit.push(GPRInfo::callFrameRegister);
+
+ appendCallWithExceptionCheck(operation);
+ }
+ // void op(ExecState*, EncodedJSValue, EncodedJSValue, Identifier*) -- forwarded to EJJP.
+ void callOperation(V_DFGOperation_EJJI operation, GPRReg arg1Tag, GPRReg arg1Payload, GPRReg arg2Tag, GPRReg arg2Payload, Identifier* identifier)
+ {
+ callOperation((V_DFGOperation_EJJP)operation, arg1Tag, arg1Payload, arg2Tag, arg2Payload, identifier);
+ }
+ // void op(ExecState*, EncodedJSValue, EncodedJSValue, EncodedJSValue) -- no result.
+ void callOperation(V_DFGOperation_EJJJ operation, GPRReg arg1Tag, GPRReg arg1Payload, GPRReg arg2Tag, GPRReg arg2Payload, GPRReg arg3Tag, GPRReg arg3Payload)
+ {
+ ASSERT(isFlushed());
+
+ m_jit.push(arg3Tag);
+ m_jit.push(arg3Payload);
+ m_jit.push(arg2Tag);
+ m_jit.push(arg2Payload);
+ m_jit.push(arg1Tag);
+ m_jit.push(arg1Payload);
+ m_jit.push(GPRInfo::callFrameRegister);
+
+ appendCallWithExceptionCheck(operation);
+ }
+ // double op(double, double) -- not yet implemented on X86.
+ void callOperation(D_DFGOperation_DD operation, FPRReg result, FPRReg arg1, FPRReg arg2)
+ {
+ ASSERT(isFlushed());
+
+ // FIXME: need a way to pass double arguments on X86; unimplemented.
+ ASSERT_NOT_REACHED();
+
+ m_jit.appendCall(operation);
+ m_jit.moveDouble(FPRInfo::returnValueFPR, result);
+ }
+#endif
+
JITCompiler::Call appendCallWithExceptionCheck(const FunctionPtr& function)
{
return m_jit.appendCallWithExceptionCheck(function, m_jit.graph()[m_compileIndex].codeOrigin);
+ // Constructor: records the node; if the code generator has already
+ // filled this node, eagerly acquires its register(s) so they are
+ // locked for the operand's lifetime.
explicit JSValueOperand(JITCodeGenerator* jit, NodeIndex index)
: m_jit(jit)
, m_index(index)
+#if USE(JSVALUE64)
, m_gprOrInvalid(InvalidGPRReg)
+#elif USE(JSVALUE32_64)
+ , m_isDouble(false)
+#endif
{
ASSERT(m_jit);
+#if USE(JSVALUE64)
if (jit->isFilled(index))
gpr();
+#elif USE(JSVALUE32_64)
+ // The pair and the fpr share a union; initialize the pair to the
+ // sentinel so fill() can tell whether registers were acquired yet.
+ m_register.pair.tagGPR = InvalidGPRReg;
+ m_register.pair.payloadGPR = InvalidGPRReg;
+ if (jit->isFilled(index))
+ fill();
+#endif
}
+ // Destructor: releases whichever register(s) the operand holds --
+ // either the double FPR or the (tag, payload) GPR pair.
~JSValueOperand()
{
+#if USE(JSVALUE64)
ASSERT(m_gprOrInvalid != InvalidGPRReg);
m_jit->unlock(m_gprOrInvalid);
+#elif USE(JSVALUE32_64)
+ if (m_isDouble) {
+ ASSERT(m_register.fpr != InvalidFPRReg);
+ m_jit->unlock(m_register.fpr);
+ } else {
+ ASSERT(m_register.pair.tagGPR != InvalidGPRReg && m_register.pair.payloadGPR != InvalidGPRReg);
+ m_jit->unlock(m_register.pair.tagGPR);
+ m_jit->unlock(m_register.pair.payloadGPR);
+ }
+#endif
}
NodeIndex index() const
return m_index;
}
+#if USE(JSVALUE64)
// Lazily fills the value into a single GPR (JSVALUE64 encoding).
GPRReg gpr()
{
if (m_gprOrInvalid == InvalidGPRReg)
m_gprOrInvalid = m_jit->fillJSValue(index());
return m_gprOrInvalid;
}
-
+#elif USE(JSVALUE32_64)
+ // True when the value was filled as an unboxed double in an FPR
+ // rather than a (tag, payload) GPR pair. Only meaningful after fill().
+ bool isDouble() { return m_isDouble; }
+
+ // Lazily fills the value; fillJSValue returns false when it chose to
+ // fill an FPR (double), in which case m_register.fpr is valid instead
+ // of the pair.
+ void fill()
+ {
+ if (m_register.pair.tagGPR == InvalidGPRReg && m_register.pair.payloadGPR == InvalidGPRReg)
+ m_isDouble = !m_jit->fillJSValue(index(), m_register.pair.tagGPR, m_register.pair.payloadGPR, m_register.fpr);
+ }
+
+ // Tag half of the pair; must not be filled as a double.
+ GPRReg tagGPR()
+ {
+ fill();
+ ASSERT(!m_isDouble);
+ return m_register.pair.tagGPR;
+ }
+
+ // Payload half of the pair; must not be filled as a double.
+ GPRReg payloadGPR()
+ {
+ fill();
+ ASSERT(!m_isDouble);
+ return m_register.pair.payloadGPR;
+ }
+
+ // The unboxed double register; only valid when isDouble().
+ FPRReg fpr()
+ {
+ fill();
+ ASSERT(m_isDouble);
+ return m_register.fpr;
+ }
+#endif
+#endif
+
void use()
{
m_jit->use(m_index);
private:
JITCodeGenerator* m_jit;
NodeIndex m_index;
+#if USE(JSVALUE64)
GPRReg m_gprOrInvalid;
+#elif USE(JSVALUE32_64)
+ union {
+ struct {
+ GPRReg tagGPR;
+ GPRReg payloadGPR;
+ } pair;
+ FPRReg fpr;
+ } m_register;
+ bool m_isDouble;
+#endif
};
class StorageOperand {
GPRTemporary(JITCodeGenerator*, IntegerOperand&, IntegerOperand&);
GPRTemporary(JITCodeGenerator*, SpeculateCellOperand&);
GPRTemporary(JITCodeGenerator*, SpeculateBooleanOperand&);
+#if USE(JSVALUE64)
GPRTemporary(JITCodeGenerator*, JSValueOperand&);
+#elif USE(JSVALUE32_64)
+ GPRTemporary(JITCodeGenerator*, JSValueOperand&, bool tag = true);
+#endif
GPRTemporary(JITCodeGenerator*, StorageOperand&);
void adopt(GPRTemporary&);
+ // Only unlock if a register was actually (lazily) allocated; with the
+ // new lazy-allocation scheme m_gpr may still be the invalid sentinel.
~GPRTemporary()
{
- if (m_jit)
+ if (m_jit && m_gpr != InvalidGPRReg)
m_jit->unlock(gpr())
}
GPRReg gpr()
{
- ASSERT(m_gpr != InvalidGPRReg);
+ // In some cases we have lazy allocation: the register is acquired
+ // on first use rather than in the constructor.
+ if (m_jit && m_gpr == InvalidGPRReg)
+ m_gpr = m_jit->allocate();
return m_gpr;
}
FPRTemporary(JITCodeGenerator*, DoubleOperand&, DoubleOperand&);
FPRTemporary(JITCodeGenerator*, SpeculateDoubleOperand&);
FPRTemporary(JITCodeGenerator*, SpeculateDoubleOperand&, SpeculateDoubleOperand&);
+#if USE(JSVALUE32_64)
+ FPRTemporary(JITCodeGenerator*, JSValueOperand&);
+#endif
~FPRTemporary()
{
}
};
+#if USE(JSVALUE32_64)
+// A temporary pinned to the second return-value register
+// (returnValueGPR2); used as the tag half of a two-register JSValue
+// result, complementing GPRResult (returnValueGPR) for the payload.
+class GPRResult2 : public GPRTemporary {
+public:
+    GPRResult2(JITCodeGenerator* jit)
+        : GPRTemporary(jit, GPRInfo::returnValueGPR2)
+    {
+    }
+};
+#endif
+
class FPRResult : public FPRTemporary {
public:
FPRResult(JITCodeGenerator* jit)
--- /dev/null
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "DFGJITCodeGenerator.h"
+
+#if ENABLE(DFG_JIT)
+#if USE(JSVALUE32_64)
+
+#include "DFGJITCompilerInlineMethods.h"
+#include "DFGSpeculativeJIT.h"
+#include "LinkBuffer.h"
+
+namespace JSC { namespace DFG {
+
+const double JITCodeGenerator::twoToThe32 = (double)0x100000000ull;
+
+// Resets all per-virtual-register generation info and empties both
+// register banks, discarding any record of values held in registers.
+void JITCodeGenerator::clearGenerationInfo()
+{
+ for (unsigned i = 0; i < m_generationInfo.size(); ++i)
+ m_generationInfo[i] = GenerationInfo();
+ m_gprs = RegisterBank<GPRInfo>();
+ m_fprs = RegisterBank<FPRInfo>();
+}
+
+// Materializes the node's value as a strict (unboxed) int32 in a GPR.
+// Handles: not-yet-filled values (constants and spilled slots) and
+// values already live as DataFormatInteger. All other register formats
+// are unreachable -- callers must already know the operand is integer.
+GPRReg JITCodeGenerator::fillInteger(NodeIndex nodeIndex, DataFormat& returnFormat)
+{
+ Node& node = m_jit.graph()[nodeIndex];
+ VirtualRegister virtualRegister = node.virtualRegister();
+ GenerationInfo& info = m_generationInfo[virtualRegister];
+
+ if (info.registerFormat() == DataFormatNone) {
+ GPRReg gpr = allocate();
+
+ if (node.hasConstant()) {
+ m_gprs.retain(gpr, virtualRegister, SpillOrderConstant);
+ if (isInt32Constant(nodeIndex))
+ m_jit.move(MacroAssembler::Imm32(valueOfInt32Constant(nodeIndex)), gpr);
+ else if (isNumberConstant(nodeIndex))
+ // A non-int32 number constant can never be a strict int32.
+ ASSERT_NOT_REACHED();
+ else {
+ // Other JS constants: load only the payload word.
+ ASSERT(isJSConstant(nodeIndex));
+ JSValue jsValue = valueOfJSConstant(nodeIndex);
+ m_jit.move(MacroAssembler::Imm32(jsValue.payload()), gpr);
+ }
+ } else {
+ // Spilled: reload just the payload word from the stack slot.
+ ASSERT(info.spillFormat() == DataFormatJS || info.spillFormat() == DataFormatJSInteger);
+ m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
+ m_jit.load32(JITCompiler::payloadFor(virtualRegister), gpr);
+ }
+
+ info.fillInteger(gpr);
+ returnFormat = DataFormatInteger;
+ return gpr;
+ }
+
+ switch (info.registerFormat()) {
+ case DataFormatNone:
+ // Should have filled, above.
+ case DataFormatJSDouble:
+ case DataFormatDouble:
+ case DataFormatJS:
+ case DataFormatCell:
+ case DataFormatJSCell:
+ case DataFormatJSInteger:
+ case DataFormatBoolean:
+ case DataFormatJSBoolean:
+ case DataFormatStorage:
+ // Should only be calling this function if we know this operand to be integer.
+ ASSERT_NOT_REACHED();
+
+ case DataFormatInteger: {
+ // Already live as a strict int32; just lock it.
+ GPRReg gpr = info.gpr();
+ m_gprs.lock(gpr);
+ m_jit.jitAssertIsInt32(gpr);
+ returnFormat = DataFormatInteger;
+ return gpr;
+ }
+ }
+
+ ASSERT_NOT_REACHED();
+ return InvalidGPRReg;
+}
+
+// Materializes the node's value as an unboxed double in an FPR,
+// converting from int32 or unboxing from a (tag, payload) JSValue pair
+// as needed. Non-numeric formats are unreachable.
+FPRReg JITCodeGenerator::fillDouble(NodeIndex nodeIndex)
+{
+ Node& node = m_jit.graph()[nodeIndex];
+ VirtualRegister virtualRegister = node.virtualRegister();
+ GenerationInfo& info = m_generationInfo[virtualRegister];
+
+ if (info.registerFormat() == DataFormatNone) {
+
+ if (node.hasConstant()) {
+ if (isInt32Constant(nodeIndex)) {
+ // FIXME: should not be reachable?
+ GPRReg gpr = allocate();
+ m_jit.move(MacroAssembler::Imm32(valueOfInt32Constant(nodeIndex)), gpr);
+ m_gprs.retain(gpr, virtualRegister, SpillOrderConstant);
+ info.fillInteger(gpr);
+ unlock(gpr);
+ } else if (isNumberConstant(nodeIndex)) {
+ // Double constant: load directly from the constant pool.
+ FPRReg fpr = fprAllocate();
+ m_jit.loadDouble(addressOfDoubleConstant(nodeIndex), fpr);
+ m_fprs.retain(fpr, virtualRegister, SpillOrderDouble);
+ info.fillDouble(fpr);
+ return fpr;
+ } else {
+ // FIXME: should not be reachable?
+ ASSERT_NOT_REACHED();
+ }
+ } else {
+ DataFormat spillFormat = info.spillFormat();
+ ASSERT(spillFormat & DataFormatJS);
+ if (spillFormat == DataFormatJSDouble) {
+ // Spilled as a boxed double: reload all 64 bits into an FPR.
+ FPRReg fpr = fprAllocate();
+ m_jit.loadDouble(JITCompiler::addressFor(virtualRegister), fpr);
+ m_fprs.retain(fpr, virtualRegister, SpillOrderSpilled);
+ info.fillDouble(fpr);
+ return fpr;
+ }
+ // Otherwise reload as a (tag, payload) pair and fall through to
+ // the conversion logic below.
+ GPRReg tag = allocate();
+ GPRReg payload = allocate();
+ m_jit.emitLoad(nodeIndex, tag, payload);
+ m_gprs.retain(tag, virtualRegister, SpillOrderSpilled);
+ m_gprs.retain(payload, virtualRegister, SpillOrderSpilled);
+ info.fillJSValue(tag, payload, m_isSpeculative ? spillFormat : DataFormatJS);
+ unlock(tag);
+ unlock(payload);
+ }
+ }
+
+ switch (info.registerFormat()) {
+ case DataFormatNone:
+ // Should have filled, above.
+ case DataFormatCell:
+ case DataFormatJSCell:
+ case DataFormatBoolean:
+ case DataFormatJSBoolean:
+ case DataFormatStorage:
+ // Should only be calling this function if we know this operand to be numeric.
+ ASSERT_NOT_REACHED();
+
+ case DataFormatJSInteger:
+ case DataFormatJS: {
+ // Value lives in a (tag, payload) pair: unbox a double, or convert
+ // an int32 payload; then move ownership from the GPR pair to the FPR.
+ GPRReg tagGPR = info.tagGPR();
+ GPRReg payloadGPR = info.payloadGPR();
+ FPRReg fpr = fprAllocate();
+ m_gprs.lock(tagGPR);
+ m_gprs.lock(payloadGPR);
+
+ JITCompiler::Jump hasUnboxedDouble;
+
+ if (info.registerFormat() != DataFormatJSInteger) {
+ JITCompiler::Jump isInteger = m_jit.branch32(MacroAssembler::NotEqual, tagGPR, TrustedImm32(JSValue::Int32Tag));
+ m_jit.jitAssertIsJSDouble(tagGPR);
+ unboxDouble(tagGPR, payloadGPR, fpr, virtualRegister);
+ hasUnboxedDouble = m_jit.jump();
+ isInteger.link(&m_jit);
+ }
+
+ m_jit.convertInt32ToDouble(payloadGPR, fpr);
+
+ if (info.registerFormat() != DataFormatJSInteger)
+ hasUnboxedDouble.link(&m_jit);
+
+ m_gprs.release(tagGPR);
+ m_gprs.release(payloadGPR);
+ m_gprs.unlock(tagGPR);
+ m_gprs.unlock(payloadGPR);
+ m_fprs.retain(fpr, virtualRegister, SpillOrderDouble);
+ info.fillDouble(fpr);
+ info.killSpilled();
+ return fpr;
+ }
+
+ case DataFormatInteger: {
+ // NOTE(review): unlike the other paths, the result FPR here is not
+ // retained in m_fprs and info is not updated to a double format --
+ // confirm this transient conversion is intended.
+ FPRReg fpr = fprAllocate();
+ GPRReg gpr = info.gpr();
+ m_gprs.lock(gpr);
+ m_jit.convertInt32ToDouble(gpr, fpr);
+ m_gprs.unlock(gpr);
+ return fpr;
+ }
+
+ case DataFormatJSDouble:
+ case DataFormatDouble: {
+ // Already in an FPR; just lock it.
+ FPRReg fpr = info.fpr();
+ m_fprs.lock(fpr);
+ return fpr;
+ }
+ }
+
+ ASSERT_NOT_REACHED();
+ return InvalidFPRReg;
+}
+
+// Materializes the node's value as a boxed JSValue in a (tagGPR,
+// payloadGPR) pair. Returns true when the value is in the GPR pair;
+// as written, every path returns true (doubles are boxed into a pair
+// rather than filled into the fpr out-parameter -- see FIXME).
+bool JITCodeGenerator::fillJSValue(NodeIndex nodeIndex, GPRReg& tagGPR, GPRReg& payloadGPR, FPRReg& fpr)
+{
+ // FIXME: For double we could fill with a FPR.
+ Node& node = m_jit.graph()[nodeIndex];
+ VirtualRegister virtualRegister = node.virtualRegister();
+ GenerationInfo& info = m_generationInfo[virtualRegister];
+
+ switch (info.registerFormat()) {
+ case DataFormatNone: {
+
+ if (node.hasConstant()) {
+ // Load the constant's tag and payload; remember whether it is
+ // known to be an int32 for a more precise register format.
+ if (isInt32Constant(nodeIndex)) {
+ tagGPR = allocate();
+ payloadGPR = allocate();
+ m_jit.emitLoad(nodeIndex, tagGPR, payloadGPR);
+ info.fillJSValue(tagGPR, payloadGPR, DataFormatJSInteger);
+ } else {
+ tagGPR = allocate();
+ payloadGPR = allocate();
+ m_jit.emitLoad(nodeIndex, tagGPR, payloadGPR);
+ info.fillJSValue(tagGPR, payloadGPR, DataFormatJS);
+ }
+
+ m_gprs.retain(tagGPR, virtualRegister, SpillOrderConstant);
+ m_gprs.retain(payloadGPR, virtualRegister, SpillOrderConstant);
+ } else {
+ // Reload a spilled value as a pair.
+ DataFormat spillFormat = info.spillFormat();
+ ASSERT(spillFormat & DataFormatJS);
+ tagGPR = allocate();
+ payloadGPR = allocate();
+ m_jit.emitLoad(nodeIndex, tagGPR, payloadGPR);
+ m_gprs.retain(tagGPR, virtualRegister, SpillOrderSpilled);
+ m_gprs.retain(payloadGPR, virtualRegister, SpillOrderSpilled);
+ info.fillJSValue(tagGPR, payloadGPR, m_isSpeculative ? spillFormat : DataFormatJS);
+ }
+
+ return true;
+ }
+
+ case DataFormatCell:
+ case DataFormatInteger: {
+ GPRReg gpr = info.gpr();
+ // If the register has already been locked we need to take a copy.
+ // If not, we'll zero extend in place, so mark on the info that this is now type DataFormatInteger, not DataFormatJSInteger.
+ tagGPR = allocate();
+ if (m_gprs.isLocked(gpr)) {
+ payloadGPR = allocate();
+ m_jit.move(gpr, payloadGPR);
+ } else {
+ payloadGPR = gpr;
+ m_gprs.lock(gpr);
+ }
+ // Synthesize the matching tag for the unboxed payload.
+ m_jit.move(info.registerFormat() == DataFormatInteger ? JITCompiler::TrustedImm32(JSValue::Int32Tag) : JITCompiler::TrustedImm32(JSValue::CellTag), tagGPR);
+ m_gprs.release(gpr);
+ m_gprs.retain(tagGPR, virtualRegister, SpillOrderJS);
+ m_gprs.retain(payloadGPR, virtualRegister, SpillOrderJS);
+ info.fillJSValue(tagGPR, payloadGPR, info.registerFormat() == DataFormatCell ? DataFormatJSCell : DataFormatJSInteger);
+ return true;
+ }
+
+ case DataFormatJSDouble:
+ case DataFormatDouble: {
+ // Box the double out of its FPR into a fresh (tag, payload) pair
+ // and hand ownership of the virtual register to the GPRs.
+ FPRReg oldFPR = info.fpr();
+ m_fprs.lock(oldFPR);
+ tagGPR = allocate();
+ payloadGPR = allocate();
+ boxDouble(oldFPR, tagGPR, payloadGPR, virtualRegister);
+ m_fprs.unlock(oldFPR);
+ m_fprs.release(oldFPR);
+ m_gprs.retain(tagGPR, virtualRegister, SpillOrderJS);
+ m_gprs.retain(payloadGPR, virtualRegister, SpillOrderJS);
+ info.fillJSValue(tagGPR, payloadGPR, DataFormatJS);
+ return true;
+ }
+
+ case DataFormatJS:
+ case DataFormatJSInteger:
+ case DataFormatJSCell:
+ case DataFormatJSBoolean: {
+ // Already a pair; just lock both halves.
+ tagGPR = info.tagGPR();
+ payloadGPR = info.payloadGPR();
+ m_gprs.lock(tagGPR);
+ m_gprs.lock(payloadGPR);
+ return true;
+ }
+
+ case DataFormatBoolean:
+ case DataFormatStorage:
+ // this type currently never occurs
+ ASSERT_NOT_REACHED();
+ }
+
+ ASSERT_NOT_REACHED();
+ return true;
+}
+
+// Materializes a storage pointer (DataFormatStorage) into a GPR,
+// reloading from the spill slot if necessary. Any other format is
+// a caller error.
+GPRReg JITCodeGenerator::fillStorage(NodeIndex nodeIndex)
+{
+ Node& node = m_jit.graph()[nodeIndex];
+ VirtualRegister virtualRegister = node.virtualRegister();
+ GenerationInfo& info = m_generationInfo[virtualRegister];
+
+ switch (info.registerFormat()) {
+ case DataFormatNone: {
+ // Spilled: reload the pointer from the stack slot.
+ GPRReg gpr = allocate();
+ ASSERT(info.spillFormat() == DataFormatStorage);
+ m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
+ m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr);
+ info.fillStorage(gpr);
+ return gpr;
+ }
+
+ case DataFormatStorage: {
+ // Already in a register; just lock it.
+ GPRReg gpr = info.gpr();
+ m_gprs.lock(gpr);
+ return gpr;
+ }
+
+ default:
+ ASSERT_NOT_REACHED();
+ }
+
+ return InvalidGPRReg;
+}
+
+// Marks all of a node's children as used (decrementing their use
+// counts). Var-args nodes enumerate the graph's child list; fixed-arity
+// nodes stop at the first NoNode slot, since children are packed.
+void JITCodeGenerator::useChildren(Node& node)
+{
+ if (node.op & NodeHasVarArgs) {
+ for (unsigned childIdx = node.firstChild(); childIdx < node.firstChild() + node.numChildren(); childIdx++)
+ use(m_jit.graph().m_varArgChildren[childIdx]);
+ } else {
+ NodeIndex child1 = node.child1();
+ if (child1 == NoNode) {
+ ASSERT(node.child2() == NoNode && node.child3() == NoNode);
+ return;
+ }
+ use(child1);
+
+ NodeIndex child2 = node.child2();
+ if (child2 == NoNode) {
+ ASSERT(node.child3() == NoNode);
+ return;
+ }
+ use(child2);
+
+ NodeIndex child3 = node.child3();
+ if (child3 == NoNode)
+ return;
+ use(child3);
+ }
+}
+
+// True if the node is an int32 constant or is currently held as a
+// strict (unboxed) int32 in a register.
+bool JITCodeGenerator::isStrictInt32(NodeIndex nodeIndex)
+{
+ if (isInt32Constant(nodeIndex))
+ return true;
+
+ Node& node = m_jit.graph()[nodeIndex];
+ GenerationInfo& info = m_generationInfo[node.virtualRegister()];
+
+ return info.registerFormat() == DataFormatInteger;
+}
+
+// True if the node is statically or dynamically known to be an int32:
+// an int32 constant, a node whose result type is int32, or a value
+// currently tracked as a JS integer.
+bool JITCodeGenerator::isKnownInteger(NodeIndex nodeIndex)
+{
+ if (isInt32Constant(nodeIndex))
+ return true;
+
+ Node& node = m_jit.graph()[nodeIndex];
+
+ if (node.hasInt32Result())
+ return true;
+
+ GenerationInfo& info = m_generationInfo[node.virtualRegister()];
+
+ return info.isJSInteger();
+}
+
+// True if the node is known to be numeric (integer or double).
+bool JITCodeGenerator::isKnownNumeric(NodeIndex nodeIndex)
+{
+ if (isInt32Constant(nodeIndex) || isNumberConstant(nodeIndex))
+ return true;
+
+ Node& node = m_jit.graph()[nodeIndex];
+
+ if (node.hasNumberResult())
+ return true;
+
+ GenerationInfo& info = m_generationInfo[node.virtualRegister()];
+
+ return info.isJSInteger() || info.isJSDouble();
+}
+
+// True if the value is currently tracked as a cell. Note: purely
+// dynamic -- constants and result types are not consulted here.
+bool JITCodeGenerator::isKnownCell(NodeIndex nodeIndex)
+{
+ return m_generationInfo[m_jit.graph()[nodeIndex].virtualRegister()].isJSCell();
+}
+
+// True if the value is known NOT to be a cell: either a non-cell
+// constant, or its tracked format excludes cell and unknown-JS.
+bool JITCodeGenerator::isKnownNotCell(NodeIndex nodeIndex)
+{
+ Node& node = m_jit.graph()[nodeIndex];
+ VirtualRegister virtualRegister = node.virtualRegister();
+ GenerationInfo& info = m_generationInfo[virtualRegister];
+ if (node.hasConstant() && !valueOfJSConstant(nodeIndex).isCell())
+ return true;
+ return !(info.isJSCell() || info.isUnknownJS());
+}
+
+// True if the value is known NOT to be an int32 (tracked as double,
+// cell, or boolean, or is a non-int32 constant).
+bool JITCodeGenerator::isKnownNotInteger(NodeIndex nodeIndex)
+{
+ Node& node = m_jit.graph()[nodeIndex];
+ VirtualRegister virtualRegister = node.virtualRegister();
+ GenerationInfo& info = m_generationInfo[virtualRegister];
+
+ return info.isJSDouble() || info.isJSCell() || info.isJSBoolean()
+ || (node.hasConstant() && !valueOfJSConstant(nodeIndex).isInt32());
+}
+
+// True if the value is known NOT to be a number (neither double nor
+// integer nor unknown-JS, or a non-number constant).
+bool JITCodeGenerator::isKnownNotNumber(NodeIndex nodeIndex)
+{
+ Node& node = m_jit.graph()[nodeIndex];
+ VirtualRegister virtualRegister = node.virtualRegister();
+ GenerationInfo& info = m_generationInfo[virtualRegister];
+
+ return (!info.isJSDouble() && !info.isJSInteger() && !info.isUnknownJS())
+ || (node.hasConstant() && !isNumberConstant(nodeIndex));
+}
+
+// True if the node is known to be a boolean: boolean result type,
+// boolean constant, or tracked as a JS boolean.
+bool JITCodeGenerator::isKnownBoolean(NodeIndex nodeIndex)
+{
+ Node& node = m_jit.graph()[nodeIndex];
+ if (node.hasBooleanResult())
+ return true;
+
+ if (isBooleanConstant(nodeIndex))
+ return true;
+
+ VirtualRegister virtualRegister = node.virtualRegister();
+ GenerationInfo& info = m_generationInfo[virtualRegister];
+
+ return info.isJSBoolean();
+}
+
+// Non-speculative ValueToNumber: if the operand is already known
+// numeric, just copy it through; otherwise emit a three-way dispatch on
+// the tag (int32 / boxed double / non-numeric) with a slow-path call to
+// dfgConvertJSValueToNumber for the non-numeric case.
+void JITCodeGenerator::nonSpeculativeValueToNumber(Node& node)
+{
+ if (isKnownNumeric(node.child1())) {
+ JSValueOperand op1(this, node.child1());
+ op1.fill();
+ if (op1.isDouble()) {
+ // Already an unboxed double: move through an FPR.
+ FPRTemporary result(this, op1);
+ m_jit.moveDouble(op1.fpr(), result.fpr());
+ doubleResult(result.fpr(), m_compileIndex);
+ } else {
+ // Already a boxed number: copy the (tag, payload) pair.
+ GPRTemporary resultTag(this, op1);
+ GPRTemporary resultPayload(this, op1, false);
+ m_jit.move(op1.tagGPR(), resultTag.gpr());
+ m_jit.move(op1.payloadGPR(), resultPayload.gpr());
+ jsValueResult(resultTag.gpr(), resultPayload.gpr(), m_compileIndex);
+ }
+ return;
+ }
+
+ JSValueOperand op1(this, node.child1());
+ GPRTemporary resultTag(this, op1);
+ GPRTemporary resultPayload(this, op1, false);
+
+ ASSERT(!isInt32Constant(node.child1()));
+ ASSERT(!isNumberConstant(node.child1()));
+
+ GPRReg tagGPR = op1.tagGPR();
+ GPRReg payloadGPR = op1.payloadGPR();
+ GPRReg resultTagGPR = resultTag.gpr();
+ GPRReg resultPayloadGPR = resultPayload.gpr();
+ op1.use();
+
+ // Tags below LowestTag are the two halves of a boxed double.
+ JITCompiler::Jump isInteger = m_jit.branch32(MacroAssembler::Equal, tagGPR, TrustedImm32(JSValue::Int32Tag));
+ JITCompiler::Jump nonNumeric = m_jit.branch32(MacroAssembler::AboveOrEqual, tagGPR, TrustedImm32(JSValue::LowestTag));
+
+ // First, if we get here we have a double encoded as a JSValue
+ JITCompiler::Jump hasUnboxedDouble = m_jit.jump();
+
+ // Next handle cells (& other JS immediates)
+ nonNumeric.link(&m_jit);
+ silentSpillAllRegisters(resultTagGPR, resultPayloadGPR);
+ // cdecl call: push (value.tag, value.payload), then the ExecState.
+ m_jit.push(tagGPR);
+ m_jit.push(payloadGPR);
+ m_jit.push(GPRInfo::callFrameRegister);
+ appendCallWithExceptionCheck(dfgConvertJSValueToNumber);
+ // The helper returns a double; box it back into the result pair.
+ boxDouble(FPRInfo::returnValueFPR, resultTagGPR, resultPayloadGPR, m_jit.graph()[m_compileIndex].virtualRegister());
+ silentFillAllRegisters(resultTagGPR, resultPayloadGPR);
+ JITCompiler::Jump hasCalledToNumber = m_jit.jump();
+
+ // Finally, handle integers.
+ isInteger.link(&m_jit);
+ hasUnboxedDouble.link(&m_jit);
+ m_jit.move(tagGPR, resultTagGPR);
+ m_jit.move(payloadGPR, resultPayloadGPR);
+ hasCalledToNumber.link(&m_jit);
+ jsValueResult(resultTagGPR, resultPayloadGPR, m_compileIndex, UseChildrenCalledExplicitly);
+}
+
+// Non-speculative ValueToInt32: fast paths for known integers and for
+// doubles that truncate cleanly; otherwise a slow-path call to
+// dfgConvertJSValueToInt32 (or toInt32 for the double case).
+void JITCodeGenerator::nonSpeculativeValueToInt32(Node& node)
+{
+ ASSERT(!isInt32Constant(node.child1()));
+
+ if (isKnownInteger(node.child1())) {
+ // Already an int32: plain register-to-register copy.
+ IntegerOperand op1(this, node.child1());
+ GPRTemporary result(this, op1);
+ m_jit.move(op1.gpr(), result.gpr());
+ integerResult(result.gpr(), m_compileIndex);
+ return;
+ }
+
+ GenerationInfo& childInfo = m_generationInfo[m_jit.graph()[node.child1()].virtualRegister()];
+ if (isJSDouble(childInfo.registerFormat())) {
+ // Double in an FPR: try a direct truncation; fall back to the
+ // toInt32 helper when truncation fails.
+ DoubleOperand op1(this, node.child1());
+ GPRTemporary result(this);
+ FPRReg fpr = op1.fpr();
+ GPRReg gpr = result.gpr();
+ op1.use();
+ JITCompiler::Jump truncatedToInteger = m_jit.branchTruncateDoubleToInt32(fpr, gpr, JITCompiler::BranchIfTruncateSuccessful);
+
+ silentSpillAllRegisters(gpr);
+
+ // NOTE(review): this passes the double in argumentFPR0, whereas the
+ // other X86 call sites push arguments on the stack (cdecl) -- confirm
+ // this matches the convention toInt32 expects on this port.
+ m_jit.moveDouble(fpr, FPRInfo::argumentFPR0);
+ appendCallWithExceptionCheck(toInt32);
+ m_jit.move(GPRInfo::returnValueGPR, gpr);
+
+ silentFillAllRegisters(gpr);
+
+ truncatedToInteger.link(&m_jit);
+ integerResult(gpr, m_compileIndex, UseChildrenCalledExplicitly);
+ return;
+ }
+
+ JSValueOperand op1(this, node.child1());
+ GPRTemporary result(this);
+ GPRReg tagGPR = op1.tagGPR();
+ GPRReg payloadGPR = op1.payloadGPR();
+ GPRReg resultGPR = result.gpr();
+ op1.use();
+
+ JITCompiler::Jump isInteger = m_jit.branch32(MacroAssembler::Equal, tagGPR, TrustedImm32(JSValue::Int32Tag));
+
+ // First handle non-integers
+ silentSpillAllRegisters(resultGPR);
+ // cdecl call: push (value.tag, value.payload), then the ExecState.
+ m_jit.push(tagGPR);
+ m_jit.push(payloadGPR);
+ m_jit.push(GPRInfo::callFrameRegister);
+ appendCallWithExceptionCheck(dfgConvertJSValueToInt32);
+ m_jit.move(GPRInfo::returnValueGPR, resultGPR);
+ silentFillAllRegisters(resultGPR);
+ JITCompiler::Jump hasCalledToInt32 = m_jit.jump();
+
+ // Then handle integers.
+ isInteger.link(&m_jit);
+ m_jit.move(payloadGPR, resultGPR);
+ hasCalledToInt32.link(&m_jit);
+ integerResult(resultGPR, m_compileIndex, UseChildrenCalledExplicitly);
+}
+
+// Non-speculative UInt32ToNumber: a non-negative value is re-tagged as
+// an int32 directly; a negative value (i.e. a uint32 >= 2^31) is
+// converted to double and corrected by adding 2^32, then boxed.
+void JITCodeGenerator::nonSpeculativeUInt32ToNumber(Node& node)
+{
+ IntegerOperand op1(this, node.child1());
+ FPRTemporary boxer(this);
+ GPRTemporary resultTag(this, op1);
+ GPRTemporary resultPayload(this);
+
+ JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, op1.gpr(), TrustedImm32(0));
+
+ // Negative as signed => top bit set: convert then add 2^32 to recover
+ // the unsigned value.
+ m_jit.convertInt32ToDouble(op1.gpr(), boxer.fpr());
+ m_jit.move(JITCompiler::TrustedImmPtr(&twoToThe32), resultPayload.gpr()); // reuse resultPayload register here.
+ m_jit.addDouble(JITCompiler::Address(resultPayload.gpr(), 0), boxer.fpr());
+
+ boxDouble(boxer.fpr(), resultTag.gpr(), resultPayload.gpr(), m_jit.graph()[m_compileIndex].virtualRegister());
+
+ JITCompiler::Jump done = m_jit.jump();
+
+ positive.link(&m_jit);
+
+ // Fits in int32: just attach the Int32 tag.
+ m_jit.move(TrustedImm32(JSValue::Int32Tag), resultTag.gpr());
+ m_jit.move(op1.gpr(), resultPayload.gpr());
+
+ done.link(&m_jit);
+
+ jsValueResult(resultTag.gpr(), resultPayload.gpr(), m_compileIndex);
+}
+
+// Non-speculative add/sub where one operand (immChild) is an int32
+// constant. Paths:
+//   - register operand already an unboxed double: convert the immediate
+//     and do the double op;
+//   - otherwise try int32 arithmetic with an overflow check, falling
+//     back to double arithmetic (and, for ValueAdd, re-converting the
+//     double to int32 when possible);
+//   - if the register operand may be non-numeric (ValueAdd only), call
+//     operationValueAddNotNumber. 'commute' controls argument order for
+//     that call.
+void JITCodeGenerator::nonSpeculativeKnownConstantArithOp(NodeType op, NodeIndex regChild, NodeIndex immChild, bool commute)
+{
+ JSValueOperand regArg(this, regChild);
+ regArg.fill();
+
+ if (regArg.isDouble()) {
+ // Double fast path: materialize the immediate as a double and
+ // perform the operation entirely in FPRs.
+ FPRReg regArgFPR = regArg.fpr();
+ FPRTemporary imm(this);
+ FPRTemporary result(this, regArg);
+ GPRTemporary scratch(this);
+ FPRReg immFPR = imm.fpr();
+ FPRReg resultFPR = result.fpr();
+ GPRReg scratchGPR = scratch.gpr();
+ use(regChild);
+ use(immChild);
+
+ int32_t imm32 = valueOfInt32Constant(immChild);
+ m_jit.move(TrustedImm32(imm32), scratchGPR);
+ m_jit.convertInt32ToDouble(scratchGPR, immFPR);
+
+ switch (op) {
+ case ValueAdd:
+ case ArithAdd:
+ m_jit.addDouble(regArgFPR, immFPR, resultFPR);
+ break;
+
+ case ArithSub:
+ m_jit.subDouble(regArgFPR, immFPR, resultFPR);
+ break;
+
+ default:
+ ASSERT_NOT_REACHED();
+ }
+
+ doubleResult(resultFPR, m_compileIndex, UseChildrenCalledExplicitly);
+ return;
+ }
+
+ GPRReg regArgTagGPR = regArg.tagGPR();
+ GPRReg regArgPayloadGPR = regArg.payloadGPR();
+ GPRTemporary resultTag(this, regArg);
+ GPRTemporary resultPayload(this, regArg, false);
+ GPRReg resultTagGPR = resultTag.gpr();
+ GPRReg resultPayloadGPR = resultPayload.gpr();
+ FPRTemporary tmp1(this);
+ FPRTemporary tmp2(this);
+ FPRReg tmp1FPR = tmp1.fpr();
+ FPRReg tmp2FPR = tmp2.fpr();
+ use(regChild);
+ use(immChild);
+
+ JITCompiler::Jump notInt;
+ int32_t imm = valueOfInt32Constant(immChild);
+
+ // Only guard the tag when the operand isn't already known numeric.
+ if (!isKnownNumeric(regChild))
+ notInt = m_jit.branch32(MacroAssembler::NotEqual, regArgTagGPR, TrustedImm32(JSValue::Int32Tag));
+
+ JITCompiler::Jump overflow;
+
+ switch (op) {
+ case ValueAdd:
+ case ArithAdd:
+ overflow = m_jit.branchAdd32(MacroAssembler::Overflow, regArgPayloadGPR, Imm32(imm), resultPayloadGPR);
+ break;
+
+ case ArithSub:
+ overflow = m_jit.branchSub32(MacroAssembler::Overflow, regArgPayloadGPR, Imm32(imm), resultPayloadGPR);
+ break;
+
+ default:
+ ASSERT_NOT_REACHED();
+ }
+
+ // Int32 result: attach the Int32 tag.
+ m_jit.move(TrustedImm32(JSValue::Int32Tag), resultTagGPR);
+ JITCompiler::Jump done = m_jit.jump();
+
+ overflow.link(&m_jit);
+ // first deal with overflow case: redo the arithmetic in doubles.
+ m_jit.convertInt32ToDouble(regArgPayloadGPR, tmp2FPR);
+ m_jit.move(TrustedImm32(imm), resultPayloadGPR);
+ m_jit.convertInt32ToDouble(resultPayloadGPR, tmp1FPR);
+ switch (op) {
+ case ValueAdd:
+ case ArithAdd:
+ m_jit.addDouble(tmp1FPR, tmp2FPR);
+ break;
+
+ case ArithSub:
+ m_jit.subDouble(tmp1FPR, tmp2FPR);
+ break;
+
+ default:
+ ASSERT_NOT_REACHED();
+ }
+
+ JITCompiler::Jump doneCaseConvertedToInt;
+
+ if (op == ValueAdd) {
+ // For ValueAdd, try to fold the double result back to an int32.
+ JITCompiler::JumpList failureCases;
+ m_jit.branchConvertDoubleToInt32(tmp2FPR, resultPayloadGPR, failureCases, tmp1FPR);
+ m_jit.move(TrustedImm32(JSValue::Int32Tag), resultTagGPR);
+ doneCaseConvertedToInt = m_jit.jump();
+
+ failureCases.link(&m_jit);
+ }
+
+ boxDouble(tmp2FPR, resultTagGPR, resultPayloadGPR, m_jit.graph()[m_compileIndex].virtualRegister());
+
+ if (!isKnownNumeric(regChild)) {
+ // Non-numeric operand: only ValueAdd can reach here (string
+ // concatenation etc.); call out to the not-number helper.
+ ASSERT(notInt.isSet());
+ ASSERT(op == ValueAdd);
+
+ JITCompiler::Jump doneCaseWasNumber = m_jit.jump();
+
+ notInt.link(&m_jit);
+
+ silentSpillAllRegisters(resultTagGPR, resultPayloadGPR);
+ // cdecl: push right-to-left; 'commute' selects which operand is
+ // the immediate in the call's argument order.
+ if (commute) {
+ m_jit.push(regArgTagGPR);
+ m_jit.push(regArgPayloadGPR);
+ m_jit.push(MacroAssembler::Imm32(imm));
+ } else {
+ m_jit.push(MacroAssembler::Imm32(imm));
+ m_jit.push(regArgTagGPR);
+ m_jit.push(regArgPayloadGPR);
+ }
+ m_jit.push(GPRInfo::callFrameRegister);
+ appendCallWithExceptionCheck(operationValueAddNotNumber);
+ setupResults(resultTagGPR, resultPayloadGPR);
+ silentFillAllRegisters(resultTagGPR, resultPayloadGPR);
+
+ doneCaseWasNumber.link(&m_jit);
+ }
+
+ done.link(&m_jit);
+ if (doneCaseConvertedToInt.isSet())
+ doneCaseConvertedToInt.link(&m_jit);
+
+ jsValueResult(resultTagGPR, resultPayloadGPR, m_compileIndex, UseChildrenCalledExplicitly);
+}
+
+void JITCodeGenerator::nonSpeculativeBasicArithOp(NodeType op, Node &node)
+{
+ JSValueOperand arg1(this, node.child1());
+ JSValueOperand arg2(this, node.child2());
+ arg1.fill();
+ arg2.fill();
+
+ if (arg1.isDouble() && arg2.isDouble()) {
+ FPRReg arg1FPR = arg1.fpr();
+ FPRReg arg2FPR = arg2.fpr();
+ FPRTemporary result(this, arg1);
+ arg1.use();
+ arg2.use();
+ switch (op) {
+ case ValueAdd:
+ case ArithAdd:
+ m_jit.addDouble(arg1FPR, arg2FPR, result.fpr());
+ break;
+
+ case ArithSub:
+ m_jit.subDouble(arg1FPR, arg2FPR, result.fpr());
+ break;
+
+ case ArithMul:
+ m_jit.mulDouble(arg1FPR, arg2FPR, result.fpr());
+ break;
+
+ default:
+ ASSERT_NOT_REACHED();
+ }
+
+ doubleResult(result.fpr(), m_compileIndex, UseChildrenCalledExplicitly);
+ return;
+ }
+
+ FPRTemporary tmp1(this);
+ FPRTemporary tmp2(this);
+ FPRReg tmp1FPR = tmp1.fpr();
+ FPRReg tmp2FPR = tmp2.fpr();
+
+ GPRTemporary resultTag(this, arg1.isDouble() ? arg2 : arg1);
+ GPRTemporary resultPayload(this, arg1.isDouble() ? arg2 : arg1, false);
+ GPRReg resultTagGPR = resultTag.gpr();
+ GPRReg resultPayloadGPR = resultPayload.gpr();
+
+ GPRReg arg1TagGPR = InvalidGPRReg;
+ GPRReg arg1PayloadGPR = InvalidGPRReg;
+ GPRReg arg2TagGPR = InvalidGPRReg;
+ GPRReg arg2PayloadGPR = InvalidGPRReg;
+ GPRTemporary tmpTag(this);
+ GPRTemporary tmpPayload(this);
+
+ if (arg1.isDouble()) {
+ arg1TagGPR = tmpTag.gpr();
+ arg1PayloadGPR = tmpPayload.gpr();
+ boxDouble(arg1.fpr(), arg1TagGPR, arg1PayloadGPR, m_jit.graph()[arg1.index()].virtualRegister());
+ arg2TagGPR = arg2.tagGPR();
+ arg2PayloadGPR = arg2.payloadGPR();
+ } else if (arg2.isDouble()) {
+ arg1TagGPR = arg1.tagGPR();
+ arg1PayloadGPR = arg1.payloadGPR();
+ arg2TagGPR = tmpTag.gpr();
+ arg2PayloadGPR = tmpPayload.gpr();
+ boxDouble(arg2.fpr(), arg2TagGPR, arg2PayloadGPR, m_jit.graph()[arg2.index()].virtualRegister());
+ } else {
+ arg1TagGPR = arg1.tagGPR();
+ arg1PayloadGPR = arg1.payloadGPR();
+ arg2TagGPR = arg2.tagGPR();
+ arg2PayloadGPR = arg2.payloadGPR();
+ }
+
+ arg1.use();
+ arg2.use();
+
+ JITCompiler::Jump child1NotInt;
+ JITCompiler::Jump child2NotInt;
+ JITCompiler::JumpList overflow;
+
+ if (!isKnownInteger(node.child1()))
+ child1NotInt = m_jit.branch32(MacroAssembler::NotEqual, arg1TagGPR, TrustedImm32(JSValue::Int32Tag));
+
+ if (!isKnownInteger(node.child2()))
+ child2NotInt = m_jit.branch32(MacroAssembler::NotEqual, arg2TagGPR, TrustedImm32(JSValue::Int32Tag));
+
+ switch (op) {
+ case ValueAdd:
+ case ArithAdd: {
+ overflow.append(m_jit.branchAdd32(MacroAssembler::Overflow, arg1PayloadGPR, arg2PayloadGPR, resultPayloadGPR));
+ break;
+ }
+
+ case ArithSub: {
+ overflow.append(m_jit.branchSub32(MacroAssembler::Overflow, arg1PayloadGPR, arg2PayloadGPR, resultPayloadGPR));
+ break;
+ }
+
+ case ArithMul: {
+ overflow.append(m_jit.branchMul32(MacroAssembler::Overflow, arg1PayloadGPR, arg2PayloadGPR, resultPayloadGPR));
+ overflow.append(m_jit.branchTest32(MacroAssembler::Zero, resultPayloadGPR));
+ break;
+ }
+
+ default:
+ ASSERT_NOT_REACHED();
+ }
+
+ m_jit.move(TrustedImm32(JSValue::Int32Tag), resultTagGPR);
+
+ JITCompiler::Jump done = m_jit.jump();
+
+ JITCompiler::JumpList haveFPRArguments;
+
+ overflow.link(&m_jit);
+
+ // both arguments are integers
+ m_jit.convertInt32ToDouble(arg1PayloadGPR, tmp1FPR);
+ m_jit.convertInt32ToDouble(arg2PayloadGPR, tmp2FPR);
+
+ haveFPRArguments.append(m_jit.jump());
+
+ JITCompiler::JumpList notNumbers;
+
+ JITCompiler::Jump child2NotInt2;
+
+ if (!isKnownInteger(node.child1())) {
+ child1NotInt.link(&m_jit);
+
+ if (!isKnownNumeric(node.child1())) {
+ ASSERT(op == ValueAdd);
+ notNumbers.append(m_jit.branch32(MacroAssembler::AboveOrEqual, arg1TagGPR, TrustedImm32(JSValue::LowestTag)));
+ }
+
+ if (arg1.isDouble())
+ m_jit.moveDouble(arg1.fpr(), tmp1FPR);
+ else
+ unboxDouble(arg1TagGPR, arg1PayloadGPR, tmp1FPR, m_jit.graph()[arg1.index()].virtualRegister());
+
+ // child1 is converted to a double; child2 may either be an int or
+ // a boxed double
+
+ if (!isKnownInteger(node.child2())) {
+ if (isKnownNumeric(node.child2()))
+ child2NotInt2 = m_jit.branch32(MacroAssembler::NotEqual, arg2TagGPR, TrustedImm32(JSValue::Int32Tag));
+ else {
+ ASSERT(op == ValueAdd);
+ JITCompiler::Jump child2IsInt = m_jit.branch32(MacroAssembler::Equal, arg2TagGPR, TrustedImm32(JSValue::Int32Tag));
+ notNumbers.append(m_jit.branch32(MacroAssembler::AboveOrEqual, arg2TagGPR, TrustedImm32(JSValue::LowestTag)));
+ child2NotInt2 = m_jit.jump();
+ child2IsInt.link(&m_jit);
+ }
+ }
+
+ // child 2 is definitely an integer
+ m_jit.convertInt32ToDouble(arg2PayloadGPR, tmp2FPR);
+
+ haveFPRArguments.append(m_jit.jump());
+ }
+
+ if (!isKnownInteger(node.child2())) {
+ child2NotInt.link(&m_jit);
+
+ if (!isKnownNumeric(node.child2())) {
+ ASSERT(op == ValueAdd);
+ notNumbers.append(m_jit.branch32(MacroAssembler::AboveOrEqual, arg2TagGPR, TrustedImm32(JSValue::LowestTag)));
+ }
+
+ // child1 is definitely an integer, and child 2 is definitely not
+ m_jit.convertInt32ToDouble(arg1PayloadGPR, tmp1FPR);
+
+ if (child2NotInt2.isSet())
+ child2NotInt2.link(&m_jit);
+
+ if (arg2.isDouble())
+ m_jit.moveDouble(arg2.fpr(), tmp2FPR);
+ else
+ unboxDouble(arg2TagGPR, arg2PayloadGPR, tmp2FPR, m_jit.graph()[arg2.index()].virtualRegister());
+ }
+
+ haveFPRArguments.link(&m_jit);
+
+ switch (op) {
+ case ValueAdd:
+ case ArithAdd:
+ m_jit.addDouble(tmp2FPR, tmp1FPR);
+ break;
+
+ case ArithSub:
+ m_jit.subDouble(tmp2FPR, tmp1FPR);
+ break;
+
+ case ArithMul:
+ m_jit.mulDouble(tmp2FPR, tmp1FPR);
+ break;
+
+ default:
+ ASSERT_NOT_REACHED();
+ }
+
+ JITCompiler::Jump doneCaseConvertedToInt;
+
+ if (op == ValueAdd) {
+ JITCompiler::JumpList failureCases;
+ m_jit.branchConvertDoubleToInt32(tmp1FPR, resultPayloadGPR, failureCases, tmp2FPR);
+ m_jit.move(TrustedImm32(JSValue::Int32Tag), resultTagGPR);
+
+ doneCaseConvertedToInt = m_jit.jump();
+
+ failureCases.link(&m_jit);
+ }
+
+ boxDouble(tmp1FPR, resultTagGPR, resultPayloadGPR, m_jit.graph()[m_compileIndex].virtualRegister());
+
+ if (!notNumbers.empty()) {
+ ASSERT(op == ValueAdd);
+
+ JITCompiler::Jump doneCaseWasNumber = m_jit.jump();
+
+ notNumbers.link(&m_jit);
+
+ silentSpillAllRegisters(resultTagGPR, resultPayloadGPR);
+ m_jit.push(arg2TagGPR);
+ m_jit.push(arg2PayloadGPR);
+ m_jit.push(arg1TagGPR);
+ m_jit.push(arg1PayloadGPR);
+ m_jit.push(GPRInfo::callFrameRegister);
+ appendCallWithExceptionCheck(operationValueAddNotNumber);
+ setupResults(resultTagGPR, resultPayloadGPR);
+ silentFillAllRegisters(resultTagGPR, resultPayloadGPR);
+
+ doneCaseWasNumber.link(&m_jit);
+ }
+
+ done.link(&m_jit);
+ if (doneCaseConvertedToInt.isSet())
+ doneCaseConvertedToInt.link(&m_jit);
+
+ jsValueResult(resultTagGPR, resultPayloadGPR, m_compileIndex, UseChildrenCalledExplicitly);
+}
+
+void JITCodeGenerator::nonSpeculativeArithMod(Node& node)
+{
+ JSValueOperand op1(this, node.child1());
+ JSValueOperand op2(this, node.child2());
+ GPRTemporary eax(this, X86Registers::eax);
+ GPRTemporary edx(this, X86Registers::edx);
+
+ FPRTemporary op1Double(this);
+ FPRTemporary op2Double(this);
+
+ GPRReg op1TagGPR = op1.tagGPR();
+ GPRReg op1PayloadGPR = op1.payloadGPR();
+ GPRReg op2TagGPR = op2.tagGPR();
+ GPRReg op2PayloadGPR = op2.payloadGPR();
+
+ op1.use();
+ op2.use();
+
+ GPRReg temp2 = InvalidGPRReg;
+ if (op2PayloadGPR == X86Registers::eax || op2PayloadGPR == X86Registers::edx) {
+ temp2 = allocate();
+ m_jit.move(op2PayloadGPR, temp2);
+ op2PayloadGPR = temp2;
+ }
+
+ JITCompiler::JumpList done;
+ JITCompiler::JumpList slow;
+ JITCompiler::Jump modByZero;
+
+ if (!isKnownInteger(node.child1()))
+ slow.append(m_jit.branch32(MacroAssembler::NotEqual, op1TagGPR, TrustedImm32(JSValue::Int32Tag)));
+ if (!isKnownInteger(node.child2()))
+ slow.append(m_jit.branch32(MacroAssembler::NotEqual, op2TagGPR, TrustedImm32(JSValue::Int32Tag)));
+
+ modByZero = m_jit.branchTest32(MacroAssembler::Zero, op2PayloadGPR);
+
+ m_jit.move(op1PayloadGPR, eax.gpr());
+ m_jit.assembler().cdq();
+ m_jit.assembler().idivl_r(op2PayloadGPR);
+
+ m_jit.move(TrustedImm32(JSValue::Int32Tag), X86Registers::eax);
+ done.append(m_jit.jump());
+
+ modByZero.link(&m_jit);
+ m_jit.move(MacroAssembler::TrustedImm32(jsNumber(std::numeric_limits<double>::quiet_NaN()).tag()), X86Registers::eax);
+ m_jit.move(MacroAssembler::TrustedImm32(jsNumber(std::numeric_limits<double>::quiet_NaN()).payload()), X86Registers::edx);
+ done.append(m_jit.jump());
+
+ if (!isKnownInteger(node.child1()) || !isKnownInteger(node.child2())) {
+ slow.link(&m_jit);
+ silentSpillAllRegisters(X86Registers::eax, X86Registers::edx);
+ m_jit.push(op2TagGPR);
+ m_jit.push(op2PayloadGPR);
+ m_jit.push(op1TagGPR);
+ m_jit.push(op1PayloadGPR);
+ m_jit.push(GPRInfo::callFrameRegister);
+ appendCallWithExceptionCheck(operationArithMod);
+ setupResults(X86Registers::eax, X86Registers::edx);
+ silentFillAllRegisters(X86Registers::eax, X86Registers::edx);
+ }
+
+ done.link(&m_jit);
+
+ if (temp2 != InvalidGPRReg)
+ unlock(temp2);
+
+ jsValueResult(X86Registers::eax, X86Registers::edx, m_compileIndex, UseChildrenCalledExplicitly);
+}
+
+void JITCodeGenerator::nonSpeculativeCheckHasInstance(Node& node)
+{
+ JSValueOperand base(this, node.child1());
+ GPRTemporary structure(this);
+ GPRReg baseTagReg = base.tagGPR();
+ GPRReg basePayloadReg = base.payloadGPR();
+ GPRReg structureReg = structure.gpr();
+
+ // Check that base is a cell.
+ MacroAssembler::Jump baseNotCell = m_jit.branch32(MacroAssembler::NotEqual, baseTagReg, TrustedImm32(JSValue::CellTag));
+
+ // Check that base 'ImplementsHasInstance'.
+ m_jit.loadPtr(MacroAssembler::Address(basePayloadReg, JSCell::structureOffset()), structureReg);
+ MacroAssembler::Jump implementsHasInstance = m_jit.branchTest8(MacroAssembler::NonZero, MacroAssembler::Address(structureReg, Structure::typeInfoFlagsOffset()), MacroAssembler::TrustedImm32(ImplementsHasInstance));
+
+ // At this point we always throw, so no need to preserve registers.
+ baseNotCell.link(&m_jit);
+ m_jit.push(baseTagReg); // tag
+ m_jit.push(basePayloadReg); // payload
+ m_jit.push(GPRInfo::callFrameRegister);
+    // At some point we could optimize this to plant a direct jump, rather than checking
+ // for an exception (operationThrowHasInstanceError always throws). Probably not worth
+ // adding the extra interface to do this now, but we may also want this for op_throw.
+ appendCallWithExceptionCheck(operationThrowHasInstanceError);
+
+ implementsHasInstance.link(&m_jit);
+ noResult(m_compileIndex);
+}
+
+void JITCodeGenerator::nonSpeculativeInstanceOf(Node& node)
+{
+ // FIXME: Currently we flush all registers as the number of available registers
+ // does not meet our requirement.
+ flushRegisters();
+ GPRTemporary value(this);
+ GPRTemporary base(this);
+ GPRTemporary prototype(this);
+ GPRTemporary scratch(this);
+
+ GPRReg valueReg = value.gpr();
+ GPRReg baseReg = base.gpr();
+ GPRReg prototypeReg = prototype.gpr();
+ GPRReg scratchReg = scratch.gpr();
+
+ use(node.child3());
+ use(node.child1());
+ use(node.child2());
+
+ // Check that operands are cells (base is checked by CheckHasInstance, so we can just assert).
+ m_jit.emitLoadTag(node.child3(), valueReg);
+ MacroAssembler::Jump valueNotCell = m_jit.branch32(MacroAssembler::NotEqual, valueReg, TrustedImm32(JSValue::CellTag));
+ m_jit.emitLoadTag(node.child1(), baseReg);
+ m_jit.jitAssertIsCell(baseReg);
+ m_jit.emitLoadTag(node.child2(), prototypeReg);
+ MacroAssembler::Jump prototypeNotCell = m_jit.branch32(MacroAssembler::NotEqual, prototypeReg, TrustedImm32(JSValue::CellTag));
+
+ // Check that baseVal 'ImplementsDefaultHasInstance'.
+ m_jit.emitLoadPayload(node.child1(), baseReg);
+ m_jit.loadPtr(MacroAssembler::Address(baseReg, JSCell::structureOffset()), scratchReg);
+ MacroAssembler::Jump notDefaultHasInstance = m_jit.branchTest8(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, Structure::typeInfoFlagsOffset()), TrustedImm32(ImplementsDefaultHasInstance));
+
+ // Check that prototype is an object
+ m_jit.emitLoadPayload(node.child2(), prototypeReg);
+ m_jit.loadPtr(MacroAssembler::Address(prototypeReg, JSCell::structureOffset()), scratchReg);
+ // MacroAssembler::Jump protoNotObject = m_jit.branch8(MacroAssembler::NotEqual, MacroAssembler::Address(scratchReg, Structure::typeInfoTypeOffset()), MacroAssembler::TrustedImm32(ObjectType));
+ MacroAssembler::Jump protoNotObject = m_jit.branchIfNotObject(scratchReg);
+
+ // Initialize scratchReg with the value being checked.
+ m_jit.emitLoadPayload(node.child3(), valueReg);
+ m_jit.move(valueReg, scratchReg);
+
+ // Walk up the prototype chain of the value (in scratchReg), comparing to prototypeReg.
+ MacroAssembler::Label loop(&m_jit);
+ m_jit.loadPtr(MacroAssembler::Address(scratchReg, JSCell::structureOffset()), scratchReg);
+ m_jit.load32(MacroAssembler::Address(scratchReg, Structure::prototypeOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), scratchReg);
+ MacroAssembler::Jump isInstance = m_jit.branch32(MacroAssembler::Equal, scratchReg, prototypeReg);
+ m_jit.branchTest32(MacroAssembler::NonZero, scratchReg).linkTo(loop, &m_jit);
+
+ // No match - result is false.
+ m_jit.move(TrustedImm32(0), GPRInfo::returnValueGPR);
+ MacroAssembler::Jump wasNotInstance = m_jit.jump();
+
+ // Link to here if any checks fail that require us to try calling out to an operation to help,
+ // e.g. for an API overridden HasInstance.
+ valueNotCell.link(&m_jit);
+ prototypeNotCell.link(&m_jit);
+ notDefaultHasInstance.link(&m_jit);
+ protoNotObject.link(&m_jit);
+
+ // FIXME: ld/st should be reduced if carefully arranged.
+ m_jit.emitLoadTag(node.child2(), prototypeReg);
+ m_jit.push(prototypeReg);
+ m_jit.emitLoadPayload(node.child2(), prototypeReg);
+ m_jit.push(prototypeReg);
+ m_jit.emitLoadTag(node.child1(), baseReg);
+ m_jit.push(baseReg);
+ m_jit.emitLoadPayload(node.child1(), baseReg);
+ m_jit.push(baseReg);
+ m_jit.emitLoadTag(node.child3(), valueReg);
+ m_jit.push(valueReg);
+ m_jit.emitLoadPayload(node.child3(), valueReg);
+ m_jit.push(valueReg);
+ m_jit.push(GPRInfo::callFrameRegister);
+ appendCallWithExceptionCheck(operationInstanceOf);
+ MacroAssembler::Jump wasNotDefaultHasInstance = m_jit.jump();
+
+ isInstance.link(&m_jit);
+ m_jit.move(TrustedImm32(1), GPRInfo::returnValueGPR);
+
+ wasNotInstance.link(&m_jit);
+ m_jit.move(TrustedImm32(JSValue::BooleanTag), GPRInfo::returnValueGPR2);
+
+ wasNotDefaultHasInstance.link(&m_jit);
+ jsValueResult(GPRInfo::returnValueGPR2, GPRInfo::returnValueGPR, m_compileIndex, UseChildrenCalledExplicitly);
+}
+
+JITCompiler::Call JITCodeGenerator::cachedGetById(GPRReg basePayloadGPR, GPRReg resultTagGPR, GPRReg resultPayloadGPR, GPRReg scratchGPR, unsigned identifierNumber, JITCompiler::Jump slowPathTarget, NodeType nodeType)
+{
+ JITCompiler::DataLabelPtr structureToCompare;
+ JITCompiler::Jump structureCheck = m_jit.branchPtrWithPatch(JITCompiler::NotEqual, JITCompiler::Address(basePayloadGPR, JSCell::structureOffset()), structureToCompare, JITCompiler::TrustedImmPtr(reinterpret_cast<void*>(-1)));
+
+ m_jit.loadPtr(JITCompiler::Address(basePayloadGPR, JSObject::offsetOfPropertyStorage()), resultPayloadGPR);
+ JITCompiler::DataLabelCompact loadWithPatch = m_jit.loadPtrWithCompactAddressOffsetPatch(JITCompiler::Address(resultPayloadGPR, 0), resultPayloadGPR);
+ m_jit.move(TrustedImm32(JSValue::CellTag), resultTagGPR);
+
+ JITCompiler::Jump done = m_jit.jump();
+
+ structureCheck.link(&m_jit);
+
+ if (slowPathTarget.isSet())
+ slowPathTarget.link(&m_jit);
+
+ JITCompiler::Label slowCase = m_jit.label();
+
+ silentSpillAllRegisters(resultTagGPR, resultPayloadGPR);
+ m_jit.push(JITCompiler::TrustedImm32(reinterpret_cast<int>(identifier(identifierNumber))));
+ m_jit.push(TrustedImm32(JSValue::CellTag));
+ m_jit.push(basePayloadGPR);
+ m_jit.push(GPRInfo::callFrameRegister);
+ JITCompiler::Call functionCall;
+ switch (nodeType) {
+ case GetById:
+ functionCall = appendCallWithExceptionCheck(operationGetByIdOptimize);
+ break;
+
+ case GetMethod:
+ functionCall = appendCallWithExceptionCheck(operationGetMethodOptimize);
+ break;
+
+ default:
+ ASSERT_NOT_REACHED();
+ return JITCompiler::Call();
+ }
+ setupResults(resultTagGPR, resultPayloadGPR);
+ silentFillAllRegisters(resultTagGPR, resultPayloadGPR);
+
+ done.link(&m_jit);
+
+ JITCompiler::Label doneLabel = m_jit.label();
+
+ int16_t checkImmToCall = safeCast<int16_t>(m_jit.differenceBetween(structureToCompare, functionCall));
+ int16_t callToCheck = safeCast<int16_t>(m_jit.differenceBetween(functionCall, structureCheck));
+ int16_t callToLoad = safeCast<int16_t>(m_jit.differenceBetween(functionCall, loadWithPatch));
+ int16_t callToSlowCase = safeCast<int16_t>(m_jit.differenceBetween(functionCall, slowCase));
+ int16_t callToDone = safeCast<int16_t>(m_jit.differenceBetween(functionCall, doneLabel));
+
+ m_jit.addPropertyAccess(functionCall, checkImmToCall, callToCheck, callToLoad, callToSlowCase, callToDone, safeCast<int8_t>(basePayloadGPR), safeCast<int8_t>(resultTagGPR), safeCast<int8_t>(resultPayloadGPR), safeCast<int8_t>(scratchGPR));
+
+ return functionCall;
+}
+
+void JITCodeGenerator::writeBarrier(MacroAssembler& jit, GPRReg owner, GPRReg scratch1, GPRReg scratch2, WriteBarrierUseKind useKind)
+{
+ UNUSED_PARAM(jit);
+ UNUSED_PARAM(owner);
+ UNUSED_PARAM(scratch1);
+ UNUSED_PARAM(scratch2);
+ UNUSED_PARAM(useKind);
+ ASSERT(owner != scratch1);
+ ASSERT(owner != scratch2);
+ ASSERT(scratch1 != scratch2);
+
+#if ENABLE(WRITE_BARRIER_PROFILING)
+ JITCompiler::emitCount(jit, WriteBarrierCounters::jitCounterFor(useKind));
+#endif
+ markCellCard(jit, owner, scratch1, scratch2);
+}
+
+void JITCodeGenerator::markCellCard(MacroAssembler& jit, GPRReg owner, GPRReg scratch1, GPRReg scratch2)
+{
+ UNUSED_PARAM(jit);
+ UNUSED_PARAM(owner);
+ UNUSED_PARAM(scratch1);
+ UNUSED_PARAM(scratch2);
+
+#if ENABLE(GGC)
+ jit.move(owner, scratch1);
+ jit.andPtr(TrustedImm32(static_cast<int32_t>(MarkedBlock::blockMask)), scratch1);
+ jit.move(owner, scratch2);
+ jit.andPtr(TrustedImm32(static_cast<int32_t>(~MarkedBlock::blockMask)), scratch2);
+ jit.rshift32(TrustedImm32(MarkedBlock::log2CardSize), scratch2);
+ jit.store8(TrustedImm32(1), MacroAssembler::BaseIndex(scratch1, scratch2, MacroAssembler::TimesOne, MarkedBlock::offsetOfCards()));
+#endif
+}
+
+void JITCodeGenerator::writeBarrier(GPRReg ownerGPR, GPRReg valueGPR, NodeIndex valueIndex, WriteBarrierUseKind useKind, GPRReg scratch1, GPRReg scratch2)
+{
+ UNUSED_PARAM(ownerGPR);
+ UNUSED_PARAM(valueGPR);
+ UNUSED_PARAM(scratch1);
+ UNUSED_PARAM(scratch2);
+ UNUSED_PARAM(useKind);
+
+ if (isKnownNotCell(valueIndex))
+ return;
+
+#if ENABLE(WRITE_BARRIER_PROFILING)
+    JITCompiler::emitCount(m_jit, WriteBarrierCounters::jitCounterFor(useKind));
+#endif
+
+#if ENABLE(GGC)
+ JITCompiler::Jump rhsNotCell;
+ bool hadCellCheck = false;
+ if (!isKnownCell(valueIndex) && !isCellPrediction(m_jit.graph().getPrediction(m_jit.graph()[valueIndex]))) {
+ hadCellCheck = true;
+ rhsNotCell = m_jit.branch32(MacroAssembler::NotEqual, valueGPR, TrustedImm32(JSValue::CellTag));
+ }
+
+ GPRTemporary temp1;
+ GPRTemporary temp2;
+ if (scratch1 == InvalidGPRReg) {
+ GPRTemporary scratchGPR(this);
+ temp1.adopt(scratchGPR);
+ scratch1 = temp1.gpr();
+ }
+ if (scratch2 == InvalidGPRReg) {
+ GPRTemporary scratchGPR(this);
+ temp2.adopt(scratchGPR);
+ scratch2 = temp2.gpr();
+ }
+
+ markCellCard(m_jit, ownerGPR, scratch1, scratch2);
+ if (hadCellCheck)
+ rhsNotCell.link(&m_jit);
+#endif
+}
+
+void JITCodeGenerator::writeBarrier(JSCell* owner, GPRReg valueGPR, NodeIndex valueIndex, WriteBarrierUseKind useKind, GPRReg scratch)
+{
+ UNUSED_PARAM(owner);
+ UNUSED_PARAM(valueGPR);
+ UNUSED_PARAM(scratch);
+ UNUSED_PARAM(useKind);
+
+ if (isKnownNotCell(valueIndex))
+ return;
+
+#if ENABLE(WRITE_BARRIER_PROFILING)
+    JITCompiler::emitCount(m_jit, WriteBarrierCounters::jitCounterFor(useKind));
+#endif
+
+#if ENABLE(GGC)
+ JITCompiler::Jump rhsNotCell;
+ bool hadCellCheck = false;
+ if (!isKnownCell(valueIndex) && !isCellPrediction(m_jit.graph().getPrediction(m_jit.graph()[valueIndex]))) {
+ hadCellCheck = true;
+ rhsNotCell = m_jit.branch32(MacroAssembler::NotEqual, valueGPR, TrustedImm32(JSValue::CellTag));
+ }
+
+ GPRTemporary temp;
+ if (scratch == InvalidGPRReg) {
+ GPRTemporary scratchGPR(this);
+ temp.adopt(scratchGPR);
+ scratch = temp.gpr();
+ }
+
+ uint8_t* cardAddress = Heap::addressOfCardFor(owner);
+ m_jit.move(JITCompiler::TrustedImmPtr(cardAddress), scratch);
+ m_jit.store8(JITCompiler::TrustedImm32(1), JITCompiler::Address(scratch));
+
+ if (hadCellCheck)
+ rhsNotCell.link(&m_jit);
+#endif
+}
+
+void JITCodeGenerator::cachedPutById(GPRReg basePayloadGPR, GPRReg valueTagGPR, GPRReg valuePayloadGPR, NodeIndex valueIndex, GPRReg scratchGPR, unsigned identifierNumber, PutKind putKind, JITCompiler::Jump slowPathTarget)
+{
+ JITCompiler::DataLabelPtr structureToCompare;
+ JITCompiler::Jump structureCheck = m_jit.branchPtrWithPatch(JITCompiler::NotEqual, JITCompiler::Address(basePayloadGPR, JSCell::structureOffset()), structureToCompare, JITCompiler::TrustedImmPtr(reinterpret_cast<void*>(-1)));
+
+ writeBarrier(basePayloadGPR, valueTagGPR, valueIndex, WriteBarrierForPropertyAccess, scratchGPR);
+
+ m_jit.loadPtr(JITCompiler::Address(basePayloadGPR, JSObject::offsetOfPropertyStorage()), scratchGPR);
+ JITCompiler::DataLabel32 storeWithPatch = m_jit.storePtrWithAddressOffsetPatch(valuePayloadGPR, JITCompiler::Address(scratchGPR, 0));
+
+ JITCompiler::Jump done = m_jit.jump();
+
+ structureCheck.link(&m_jit);
+
+ if (slowPathTarget.isSet())
+ slowPathTarget.link(&m_jit);
+
+ JITCompiler::Label slowCase = m_jit.label();
+
+ silentSpillAllRegisters(InvalidGPRReg);
+ m_jit.push(JITCompiler::TrustedImm32(reinterpret_cast<int>(identifier(identifierNumber))));
+ m_jit.push(TrustedImm32(JSValue::CellTag));
+ m_jit.push(basePayloadGPR);
+ m_jit.push(valueTagGPR);
+ m_jit.push(valuePayloadGPR);
+ m_jit.push(GPRInfo::callFrameRegister);
+ V_DFGOperation_EJJI optimizedCall;
+ if (m_jit.codeBlock()->isStrictMode()) {
+ if (putKind == Direct)
+ optimizedCall = operationPutByIdDirectStrictOptimize;
+ else
+ optimizedCall = operationPutByIdStrictOptimize;
+ } else {
+ if (putKind == Direct)
+ optimizedCall = operationPutByIdDirectNonStrictOptimize;
+ else
+ optimizedCall = operationPutByIdNonStrictOptimize;
+ }
+ JITCompiler::Call functionCall = appendCallWithExceptionCheck(optimizedCall);
+ silentFillAllRegisters(InvalidGPRReg);
+
+ done.link(&m_jit);
+ JITCompiler::Label doneLabel = m_jit.label();
+
+ int16_t checkImmToCall = safeCast<int16_t>(m_jit.differenceBetween(structureToCompare, functionCall));
+ int16_t callToCheck = safeCast<int16_t>(m_jit.differenceBetween(functionCall, structureCheck));
+ int16_t callToStore = safeCast<int16_t>(m_jit.differenceBetween(functionCall, storeWithPatch));
+ int16_t callToSlowCase = safeCast<int16_t>(m_jit.differenceBetween(functionCall, slowCase));
+ int16_t callToDone = safeCast<int16_t>(m_jit.differenceBetween(functionCall, doneLabel));
+
+ m_jit.addPropertyAccess(functionCall, checkImmToCall, callToCheck, callToStore, callToSlowCase, callToDone, safeCast<int8_t>(basePayloadGPR), safeCast<int8_t>(valueTagGPR), safeCast<int8_t>(valuePayloadGPR), safeCast<int8_t>(scratchGPR));
+}
+
+void JITCodeGenerator::cachedGetMethod(GPRReg basePayloadGPR, GPRReg resultTagGPR, GPRReg resultPayloadGPR, GPRReg scratchGPR, unsigned identifierNumber, JITCompiler::Jump slowPathTarget)
+{
+ JITCompiler::Call slowCall;
+ JITCompiler::DataLabelPtr structToCompare, protoObj, protoStructToCompare, putFunction;
+
+ // m_jit.emitLoadPayload(baseIndex, scratchGPR);
+ JITCompiler::Jump wrongStructure = m_jit.branchPtrWithPatch(JITCompiler::NotEqual, JITCompiler::Address(basePayloadGPR, JSCell::structureOffset()), structToCompare, JITCompiler::TrustedImmPtr(reinterpret_cast<void*>(-1)));
+ protoObj = m_jit.moveWithPatch(JITCompiler::TrustedImmPtr(0), resultPayloadGPR);
+ JITCompiler::Jump wrongProtoStructure = m_jit.branchPtrWithPatch(JITCompiler::NotEqual, JITCompiler::Address(resultPayloadGPR, JSCell::structureOffset()), protoStructToCompare, JITCompiler::TrustedImmPtr(reinterpret_cast<void*>(-1)));
+
+ putFunction = m_jit.moveWithPatch(JITCompiler::TrustedImmPtr(0), resultPayloadGPR);
+
+ JITCompiler::Jump done = m_jit.jump();
+
+ wrongStructure.link(&m_jit);
+ wrongProtoStructure.link(&m_jit);
+
+ slowCall = cachedGetById(basePayloadGPR, resultTagGPR, resultPayloadGPR, scratchGPR, identifierNumber, slowPathTarget, GetMethod);
+
+ done.link(&m_jit);
+
+ m_jit.addMethodGet(slowCall, structToCompare, protoObj, protoStructToCompare, putFunction);
+}
+
+void JITCodeGenerator::nonSpeculativeNonPeepholeCompareNull(NodeIndex operand, bool invert)
+{
+ JSValueOperand arg(this, operand);
+ GPRReg argTagGPR = arg.tagGPR();
+ GPRReg argPayloadGPR = arg.payloadGPR();
+
+ GPRTemporary resultTag(this, arg);
+ GPRTemporary resultPayload(this, arg, false);
+ GPRReg resultTagGPR = resultTag.gpr();
+ GPRReg resultPayloadGPR = resultPayload.gpr();
+
+ JITCompiler::Jump notCell;
+ if (!isKnownCell(operand))
+ notCell = m_jit.branch32(MacroAssembler::NotEqual, argTagGPR, TrustedImm32(JSValue::CellTag));
+
+ m_jit.loadPtr(JITCompiler::Address(argPayloadGPR, JSCell::structureOffset()), resultPayloadGPR);
+ m_jit.test8(invert ? JITCompiler::Zero : JITCompiler::NonZero, JITCompiler::Address(resultPayloadGPR, Structure::typeInfoFlagsOffset()), JITCompiler::TrustedImm32(MasqueradesAsUndefined), resultPayloadGPR);
+
+ if (!isKnownCell(operand)) {
+ JITCompiler::JumpList done;
+ done.append(m_jit.jump());
+
+ notCell.link(&m_jit);
+ // null or undefined?
+ JITCompiler::Jump checkUndefined = m_jit.branch32(invert ? JITCompiler::Equal: JITCompiler::NotEqual, argTagGPR, JITCompiler::TrustedImm32(JSValue::NullTag));
+ m_jit.move(JITCompiler::TrustedImm32(1), resultPayloadGPR);
+ done.append(m_jit.jump());
+
+ checkUndefined.link(&m_jit);
+ m_jit.compare32(invert ? JITCompiler::NotEqual: JITCompiler::Equal, argTagGPR, JITCompiler::TrustedImm32(JSValue::UndefinedTag), resultPayloadGPR);
+
+ done.link(&m_jit);
+ }
+
+ m_jit.move(TrustedImm32(JSValue::BooleanTag), resultTagGPR);
+ jsValueResult(resultTagGPR, resultPayloadGPR, m_compileIndex, DataFormatJSBoolean);
+}
+
+void JITCodeGenerator::nonSpeculativePeepholeBranchNull(NodeIndex operand, NodeIndex branchNodeIndex, bool invert)
+{
+ Node& branchNode = m_jit.graph()[branchNodeIndex];
+ BlockIndex taken = m_jit.graph().blockIndexForBytecodeOffset(branchNode.takenBytecodeOffset());
+ BlockIndex notTaken = m_jit.graph().blockIndexForBytecodeOffset(branchNode.notTakenBytecodeOffset());
+
+ if (taken == (m_block + 1)) {
+ invert = !invert;
+ BlockIndex tmp = taken;
+ taken = notTaken;
+ notTaken = tmp;
+ }
+
+ JSValueOperand arg(this, operand);
+ GPRReg argTagGPR = arg.tagGPR();
+ GPRReg argPayloadGPR = arg.payloadGPR();
+
+ GPRTemporary result(this, arg);
+ GPRReg resultGPR = result.gpr();
+
+ JITCompiler::Jump notCell;
+
+ if (!isKnownCell(operand))
+ notCell = m_jit.branch32(MacroAssembler::NotEqual, argTagGPR, TrustedImm32(JSValue::CellTag));
+
+ m_jit.loadPtr(JITCompiler::Address(argPayloadGPR, JSCell::structureOffset()), resultGPR);
+ addBranch(m_jit.branchTest8(invert ? JITCompiler::Zero : JITCompiler::NonZero, JITCompiler::Address(resultGPR, Structure::typeInfoFlagsOffset()), JITCompiler::TrustedImm32(MasqueradesAsUndefined)), taken);
+
+ if (!isKnownCell(operand)) {
+ addBranch(m_jit.jump(), notTaken);
+
+ notCell.link(&m_jit);
+ // null or undefined?
+ addBranch(m_jit.branch32(invert ? JITCompiler::NotEqual: JITCompiler::Equal, argTagGPR, JITCompiler::TrustedImm32(JSValue::NullTag)), taken);
+ addBranch(m_jit.branch32(invert ? JITCompiler::NotEqual: JITCompiler::Equal, argTagGPR, JITCompiler::TrustedImm32(JSValue::UndefinedTag)), taken);
+ }
+
+ if (notTaken != (m_block + 1))
+ addBranch(m_jit.jump(), notTaken);
+}
+
+bool JITCodeGenerator::nonSpeculativeCompareNull(Node& node, NodeIndex operand, bool invert)
+{
+ NodeIndex branchNodeIndex = detectPeepHoleBranch();
+ if (branchNodeIndex != NoNode) {
+ ASSERT(node.adjustedRefCount() == 1);
+
+ nonSpeculativePeepholeBranchNull(operand, branchNodeIndex, invert);
+
+ use(node.child1());
+ use(node.child2());
+ m_compileIndex = branchNodeIndex;
+
+ return true;
+ }
+
+ nonSpeculativeNonPeepholeCompareNull(operand, invert);
+
+ return false;
+}
+
+void JITCodeGenerator::nonSpeculativePeepholeBranch(Node& node, NodeIndex branchNodeIndex, MacroAssembler::RelationalCondition cond, Z_DFGOperation_EJJ helperFunction)
+{
+ Node& branchNode = m_jit.graph()[branchNodeIndex];
+ BlockIndex taken = m_jit.graph().blockIndexForBytecodeOffset(branchNode.takenBytecodeOffset());
+ BlockIndex notTaken = m_jit.graph().blockIndexForBytecodeOffset(branchNode.notTakenBytecodeOffset());
+
+ JITCompiler::ResultCondition callResultCondition = JITCompiler::NonZero;
+
+ // The branch instruction will branch to the taken block.
+ // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
+ if (taken == (m_block + 1)) {
+ cond = JITCompiler::invert(cond);
+ callResultCondition = JITCompiler::Zero;
+ BlockIndex tmp = taken;
+ taken = notTaken;
+ notTaken = tmp;
+ }
+
+ JSValueOperand arg1(this, node.child1());
+ JSValueOperand arg2(this, node.child2());
+ GPRReg arg1TagGPR = arg1.tagGPR();
+ GPRReg arg1PayloadGPR = arg1.payloadGPR();
+ GPRReg arg2TagGPR = arg2.tagGPR();
+ GPRReg arg2PayloadGPR = arg2.payloadGPR();
+
+ JITCompiler::JumpList slowPath;
+
+ if (isKnownNotInteger(node.child1()) || isKnownNotInteger(node.child2())) {
+ GPRResult result(this);
+ GPRReg resultGPR = result.gpr();
+
+ arg1.use();
+ arg2.use();
+
+ flushRegisters();
+
+ callOperation(helperFunction, resultGPR, arg1TagGPR, arg1PayloadGPR, arg2TagGPR, arg2PayloadGPR);
+ addBranch(m_jit.branchTest8(callResultCondition, resultGPR), taken);
+ } else {
+ GPRTemporary result(this);
+ GPRReg resultGPR = result.gpr();
+
+ arg1.use();
+ arg2.use();
+
+ if (!isKnownInteger(node.child1()))
+ slowPath.append(m_jit.branch32(MacroAssembler::NotEqual, arg1TagGPR, JITCompiler::TrustedImm32(JSValue::Int32Tag)));
+ if (!isKnownInteger(node.child2()))
+ slowPath.append(m_jit.branch32(MacroAssembler::NotEqual, arg2TagGPR, JITCompiler::TrustedImm32(JSValue::Int32Tag)));
+
+ addBranch(m_jit.branch32(cond, arg1PayloadGPR, arg2PayloadGPR), taken);
+
+ if (!isKnownInteger(node.child1()) || !isKnownInteger(node.child2())) {
+ addBranch(m_jit.jump(), notTaken);
+
+ slowPath.link(&m_jit);
+
+ silentSpillAllRegisters(resultGPR);
+ m_jit.push(arg2TagGPR);
+ m_jit.push(arg2PayloadGPR);
+ m_jit.push(arg1TagGPR);
+ m_jit.push(arg1PayloadGPR);
+ m_jit.push(GPRInfo::callFrameRegister);
+ appendCallWithExceptionCheck(helperFunction);
+ m_jit.move(GPRInfo::returnValueGPR, resultGPR);
+ silentFillAllRegisters(resultGPR);
+
+ addBranch(m_jit.branchTest8(callResultCondition, resultGPR), taken);
+ }
+ }
+
+ if (notTaken != (m_block + 1))
+ addBranch(m_jit.jump(), notTaken);
+}
+
+// Compile a compare node that materializes a boolean JSValue (no branch
+// fusion). If both operands may be int32, emit an inline compare32 with a
+// slow call to helperFunction for non-int32 tags; if either operand is known
+// not to be an int32, always call the helper.
+void JITCodeGenerator::nonSpeculativeNonPeepholeCompare(Node& node, MacroAssembler::RelationalCondition cond, Z_DFGOperation_EJJ helperFunction)
+{
+    JSValueOperand arg1(this, node.child1());
+    JSValueOperand arg2(this, node.child2());
+    GPRReg arg1TagGPR = arg1.tagGPR();
+    GPRReg arg1PayloadGPR = arg1.payloadGPR();
+    GPRReg arg2TagGPR = arg2.tagGPR();
+    GPRReg arg2PayloadGPR = arg2.payloadGPR();
+
+    JITCompiler::JumpList slowPath;
+
+    if (isKnownNotInteger(node.child1()) || isKnownNotInteger(node.child2())) {
+        GPRResult result(this);
+        GPRResult2 result2(this);
+        GPRReg resultPayloadGPR = result.gpr();
+        GPRReg resultTagGPR = result2.gpr();
+
+        arg1.use();
+        arg2.use();
+
+        flushRegisters();
+
+        callOperation(helperFunction, resultPayloadGPR, arg1TagGPR, arg1PayloadGPR, arg2TagGPR, arg2PayloadGPR);
+
+        m_jit.move(TrustedImm32(JSValue::BooleanTag), resultTagGPR);
+        jsValueResult(resultTagGPR, resultPayloadGPR, m_compileIndex, DataFormatJSBoolean, UseChildrenCalledExplicitly);
+    } else {
+        GPRTemporary resultTag(this, arg1);
+        GPRTemporary resultPayload(this, arg1, false);
+        // FIX: the tag register must come from the resultTag temporary.
+        // Previously this read resultPayload.gpr(), aliasing resultTagGPR and
+        // resultPayloadGPR, so storing the BooleanTag below clobbered the
+        // comparison result (and the two-register silentSpill/Fill calls were
+        // handed a duplicate register).
+        GPRReg resultTagGPR = resultTag.gpr();
+        GPRReg resultPayloadGPR = resultPayload.gpr();
+
+        arg1.use();
+        arg2.use();
+
+        // Any operand whose tag is not Int32Tag diverts to the slow path.
+        if (!isKnownInteger(node.child1()))
+            slowPath.append(m_jit.branch32(MacroAssembler::NotEqual, arg1TagGPR, JITCompiler::TrustedImm32(JSValue::Int32Tag)));
+        if (!isKnownInteger(node.child2()))
+            slowPath.append(m_jit.branch32(MacroAssembler::NotEqual, arg2TagGPR, JITCompiler::TrustedImm32(JSValue::Int32Tag)));
+
+        m_jit.compare32(cond, arg1PayloadGPR, arg2PayloadGPR, resultPayloadGPR);
+
+        if (!isKnownInteger(node.child1()) || !isKnownInteger(node.child2())) {
+            JITCompiler::Jump haveResult = m_jit.jump();
+
+            slowPath.link(&m_jit);
+
+            // Call the C++ helper: arguments are pushed cdecl-style (right to
+            // left), tag before payload for each JSValue, ExecState last.
+            silentSpillAllRegisters(resultTagGPR, resultPayloadGPR);
+            m_jit.push(arg2TagGPR);
+            m_jit.push(arg2PayloadGPR);
+            m_jit.push(arg1TagGPR);
+            m_jit.push(arg1PayloadGPR);
+            m_jit.push(GPRInfo::callFrameRegister);
+            appendCallWithExceptionCheck(helperFunction);
+            m_jit.move(GPRInfo::returnValueGPR, resultPayloadGPR);
+            silentFillAllRegisters(resultTagGPR, resultPayloadGPR);
+
+            // Normalize the helper's return value to a 0/1 boolean payload.
+            m_jit.andPtr(TrustedImm32(1), resultPayloadGPR);
+
+            haveResult.link(&m_jit);
+        }
+
+        m_jit.move(TrustedImm32(JSValue::BooleanTag), resultTagGPR);
+        jsValueResult(resultTagGPR, resultPayloadGPR, m_compileIndex, DataFormatJSBoolean, UseChildrenCalledExplicitly);
+    }
+}
+
+// Compile a compare node. When the compare's sole consumer is the immediately
+// following branch, fuse the two (peephole) and advance m_compileIndex past
+// the branch; the return value reports whether that fusion happened.
+bool JITCodeGenerator::nonSpeculativeCompare(Node& node, MacroAssembler::RelationalCondition cond, Z_DFGOperation_EJJ helperFunction)
+{
+    const NodeIndex branchIndex = detectPeepHoleBranch();
+    if (branchIndex == NoNode) {
+        // No fusable branch follows: materialize a boolean result instead.
+        nonSpeculativeNonPeepholeCompare(node, cond, helperFunction);
+        return false;
+    }
+
+    // Fusion is only legal when the branch is the compare's only use.
+    ASSERT(node.adjustedRefCount() == 1);
+    nonSpeculativePeepholeBranch(node, branchIndex, cond, helperFunction);
+    m_compileIndex = branchIndex;
+    return true;
+}
+
+// Compile a StrictEqual (===) node fused with the branch at branchNodeIndex;
+// 'invert' encodes the not-strict-equal form (branch taken when NOT equal).
+void JITCodeGenerator::nonSpeculativePeepholeStrictEq(Node& node, NodeIndex branchNodeIndex, bool invert)
+{
+    Node& branchNode = m_jit.graph()[branchNodeIndex];
+    BlockIndex taken = m_jit.graph().blockIndexForBytecodeOffset(branchNode.takenBytecodeOffset());
+    BlockIndex notTaken = m_jit.graph().blockIndexForBytecodeOffset(branchNode.notTakenBytecodeOffset());
+
+    // The branch instruction will branch to the taken block.
+    // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
+    if (taken == (m_block + 1)) {
+        invert = !invert;
+        BlockIndex tmp = taken;
+        taken = notTaken;
+        notTaken = tmp;
+    }
+
+    JSValueOperand arg1(this, node.child1());
+    JSValueOperand arg2(this, node.child2());
+    GPRReg arg1TagGPR = arg1.tagGPR();
+    GPRReg arg1PayloadGPR = arg1.payloadGPR();
+    GPRReg arg2TagGPR = arg2.tagGPR();
+    GPRReg arg2PayloadGPR = arg2.payloadGPR();
+
+    GPRTemporary resultPayload(this, arg1, false);
+    GPRReg resultPayloadGPR = resultPayload.gpr();
+
+    arg1.use();
+    arg2.use();
+
+    if (isKnownCell(node.child1()) && isKnownCell(node.child2())) {
+        // see if we get lucky: if the arguments are cells and they reference the same
+        // cell, then they must be strictly equal.
+        addBranch(m_jit.branchPtr(JITCompiler::Equal, arg1PayloadGPR, arg2PayloadGPR), invert ? notTaken : taken);
+
+        // Different cells: ask the C++ helper (arguments pushed cdecl-style,
+        // tag before payload for each JSValue, ExecState last).
+        silentSpillAllRegisters(resultPayloadGPR);
+        m_jit.push(arg2TagGPR);
+        m_jit.push(arg2PayloadGPR);
+        m_jit.push(arg1TagGPR);
+        m_jit.push(arg1PayloadGPR);
+        m_jit.push(GPRInfo::callFrameRegister);
+        appendCallWithExceptionCheck(operationCompareStrictEqCell);
+        m_jit.move(GPRInfo::returnValueGPR, resultPayloadGPR);
+        silentFillAllRegisters(resultPayloadGPR);
+
+        // FIX: branch to 'taken' on NonZero for stricteq and Zero for
+        // nstricteq, matching the generic path below. The previous condition
+        // (invert ? NonZero : Zero) used the opposite sense for the same style
+        // of helper return value, inverting the branch.
+        addBranch(m_jit.branchTest8(invert ? JITCompiler::Zero : JITCompiler::NonZero, resultPayloadGPR), taken);
+    } else {
+        // FIXME: Add fast paths for twoCells, number etc.
+
+        silentSpillAllRegisters(resultPayloadGPR);
+        m_jit.push(arg2TagGPR);
+        m_jit.push(arg2PayloadGPR);
+        m_jit.push(arg1TagGPR);
+        m_jit.push(arg1PayloadGPR);
+        m_jit.push(GPRInfo::callFrameRegister);
+        appendCallWithExceptionCheck(operationCompareStrictEq);
+        m_jit.move(GPRInfo::returnValueGPR, resultPayloadGPR);
+        silentFillAllRegisters(resultPayloadGPR);
+
+        addBranch(m_jit.branchTest8(invert ? JITCompiler::Zero : JITCompiler::NonZero, resultPayloadGPR), taken);
+    }
+
+    if (notTaken != (m_block + 1))
+        addBranch(m_jit.jump(), notTaken);
+}
+
+// Compile a StrictEqual (===) node that materializes a boolean JSValue
+// (no branch fusion); 'invert' encodes the not-strict-equal form.
+void JITCodeGenerator::nonSpeculativeNonPeepholeStrictEq(Node& node, bool invert)
+{
+ JSValueOperand arg1(this, node.child1());
+ JSValueOperand arg2(this, node.child2());
+ GPRReg arg1TagGPR = arg1.tagGPR();
+ GPRReg arg1PayloadGPR = arg1.payloadGPR();
+ GPRReg arg2TagGPR = arg2.tagGPR();
+ GPRReg arg2PayloadGPR = arg2.payloadGPR();
+
+ GPRTemporary resultTag(this, arg1);
+ GPRTemporary resultPayload(this, arg1, false);
+ GPRReg resultTagGPR = resultTag.gpr();
+ GPRReg resultPayloadGPR = resultPayload.gpr();
+
+ arg1.use();
+ arg2.use();
+
+ if (isKnownCell(node.child1()) && isKnownCell(node.child2())) {
+ // see if we get lucky: if the arguments are cells and they reference the same
+ // cell, then they must be strictly equal.
+ JITCompiler::Jump notEqualCase = m_jit.branchPtr(JITCompiler::NotEqual, arg1PayloadGPR, arg2PayloadGPR);
+
+ // Identical cells: the answer is immediate (true for ===, false for !==).
+ m_jit.move(JITCompiler::TrustedImm32(!invert), resultPayloadGPR);
+ JITCompiler::Jump done = m_jit.jump();
+
+ notEqualCase.link(&m_jit);
+
+ // Different cells: ask the C++ helper (arguments pushed cdecl-style,
+ // tag before payload for each JSValue, ExecState last).
+ silentSpillAllRegisters(resultTagGPR, resultPayloadGPR);
+ m_jit.push(arg2TagGPR);
+ m_jit.push(arg2PayloadGPR);
+ m_jit.push(arg1TagGPR);
+ m_jit.push(arg1PayloadGPR);
+ m_jit.push(GPRInfo::callFrameRegister);
+ appendCallWithExceptionCheck(operationCompareStrictEqCell);
+ m_jit.move(GPRInfo::returnValueGPR, resultPayloadGPR);
+ silentFillAllRegisters(resultTagGPR, resultPayloadGPR);
+
+ // NOTE(review): on both slow paths the helper result is masked to bit 0
+ // but never inverted when 'invert' is set, unlike the fast path above --
+ // confirm against the JSVALUE64 implementation.
+ m_jit.andPtr(JITCompiler::TrustedImm32(1), resultPayloadGPR);
+
+ done.link(&m_jit);
+ } else {
+ // FIXME: Add fast paths.
+
+ silentSpillAllRegisters(resultTagGPR, resultPayloadGPR);
+ m_jit.push(arg2TagGPR);
+ m_jit.push(arg2PayloadGPR);
+ m_jit.push(arg1TagGPR);
+ m_jit.push(arg1PayloadGPR);
+ m_jit.push(GPRInfo::callFrameRegister);
+ appendCallWithExceptionCheck(operationCompareStrictEq);
+ m_jit.move(GPRInfo::returnValueGPR, resultPayloadGPR);
+ silentFillAllRegisters(resultTagGPR, resultPayloadGPR);
+
+ m_jit.andPtr(JITCompiler::TrustedImm32(1), resultPayloadGPR);
+ }
+
+ // Tag the 0/1 payload as a boolean JSValue.
+ m_jit.move(TrustedImm32(JSValue::BooleanTag), resultTagGPR);
+ jsValueResult(resultTagGPR, resultPayloadGPR, m_compileIndex, DataFormatJSBoolean, UseChildrenCalledExplicitly);
+}
+
+// Compile a StrictEqual node. If one side is known numeric, === degenerates
+// to the generic compare path; otherwise try to fuse with a following branch
+// exactly as nonSpeculativeCompare does. Returns true when fusion occurred.
+bool JITCodeGenerator::nonSpeculativeStrictEq(Node& node, bool invert)
+{
+    if (!invert && (isKnownNumeric(node.child1()) || isKnownNumeric(node.child2())))
+        return nonSpeculativeCompare(node, MacroAssembler::Equal, operationCompareStrictEq);
+
+    const NodeIndex branchIndex = detectPeepHoleBranch();
+    if (branchIndex == NoNode) {
+        nonSpeculativeNonPeepholeStrictEq(node, invert);
+        return false;
+    }
+
+    // Fusion is only legal when the branch is the compare's only use.
+    ASSERT(node.adjustedRefCount() == 1);
+    nonSpeculativePeepholeStrictEq(node, branchIndex, invert);
+    m_compileIndex = branchIndex;
+    return true;
+}
+
+// Emit code for a Branch node conditioned on an arbitrary JSValue.
+void JITCodeGenerator::emitBranch(Node& node)
+{
+ // FIXME: Add fast cases for known Boolean!
+ JSValueOperand value(this, node.child1());
+ value.fill();
+ GPRReg valueTagGPR = value.tagGPR();
+ GPRReg valuePayloadGPR = value.payloadGPR();
+
+ GPRTemporary result(this);
+ GPRReg resultGPR = result.gpr();
+
+ use(node.child1());
+
+ BlockIndex taken = m_jit.graph().blockIndexForBytecodeOffset(node.takenBytecodeOffset());
+ BlockIndex notTaken = m_jit.graph().blockIndexForBytecodeOffset(node.notTakenBytecodeOffset());
+
+ // Int32 and Boolean values can be tested via the payload alone: jump to the
+ // fast path for Int32Tag, fall through to it for BooleanTag, and divert to
+ // the slow path (a C++ ToBoolean conversion) for every other tag.
+ JITCompiler::Jump fastPath = m_jit.branch32(JITCompiler::Equal, valueTagGPR, JITCompiler::TrustedImm32(JSValue::Int32Tag));
+ JITCompiler::Jump slowPath = m_jit.branch32(JITCompiler::NotEqual, valueTagGPR, JITCompiler::TrustedImm32(JSValue::BooleanTag));
+
+ fastPath.link(&m_jit);
+ // Zero payload (int 0 / false) is falsy; anything else takes the branch.
+ addBranch(m_jit.branchTest32(JITCompiler::Zero, valuePayloadGPR), notTaken);
+ addBranch(m_jit.jump(), taken);
+
+ slowPath.link(&m_jit);
+ // Arguments pushed cdecl-style: tag, payload, then the ExecState.
+ silentSpillAllRegisters(resultGPR);
+ m_jit.push(valueTagGPR);
+ m_jit.push(valuePayloadGPR);
+ m_jit.push(GPRInfo::callFrameRegister);
+ appendCallWithExceptionCheck(dfgConvertJSValueToBoolean);
+ m_jit.move(GPRInfo::returnValueGPR, resultGPR);
+ silentFillAllRegisters(resultGPR);
+
+ addBranch(m_jit.branchTest8(JITCompiler::NonZero, resultGPR), taken);
+ // Fall through when the not-taken block is laid out next.
+ if (notTaken != (m_block + 1))
+ addBranch(m_jit.jump(), notTaken);
+
+ noResult(m_compileIndex, UseChildrenCalledExplicitly);
+}
+
+// Compile LogicalNot: coerce the operand to a boolean, then flip bit 0.
+void JITCodeGenerator::nonSpeculativeLogicalNot(Node& node)
+{
+ JSValueOperand arg1(this, node.child1());
+ GPRTemporary resultTag(this, arg1);
+ GPRTemporary resultPayload(this, arg1, false);
+ GPRReg arg1TagGPR = arg1.tagGPR();
+ GPRReg arg1PayloadGPR = arg1.payloadGPR();
+ GPRReg resultTagGPR = resultTag.gpr();
+ GPRReg resultPayloadGPR = resultPayload.gpr();
+
+ arg1.use();
+
+ // Already a boolean? Its 0/1 payload can be used directly.
+ JITCompiler::Jump fastCase = m_jit.branch32(JITCompiler::Equal, arg1TagGPR, TrustedImm32(JSValue::BooleanTag));
+
+ // Slow path: C++ ToBoolean conversion (tag/payload pushed cdecl-style,
+ // ExecState last).
+ silentSpillAllRegisters(resultTagGPR, resultPayloadGPR);
+ m_jit.push(arg1TagGPR);
+ m_jit.push(arg1PayloadGPR);
+ m_jit.push(GPRInfo::callFrameRegister);
+ appendCallWithExceptionCheck(dfgConvertJSValueToBoolean);
+ m_jit.move(GPRInfo::returnValueGPR, resultPayloadGPR);
+ silentFillAllRegisters(resultTagGPR, resultPayloadGPR);
+ JITCompiler::Jump doNot = m_jit.jump();
+
+ fastCase.link(&m_jit);
+ m_jit.move(arg1PayloadGPR, resultPayloadGPR);
+
+ doNot.link(&m_jit);
+ // Invert the 0/1 payload and tag the result as a boolean JSValue.
+ m_jit.xor32(TrustedImm32(1), resultPayloadGPR);
+ m_jit.move(TrustedImm32(JSValue::BooleanTag), resultTagGPR);
+ jsValueResult(resultTagGPR, resultPayloadGPR, m_compileIndex, DataFormatJSBoolean, UseChildrenCalledExplicitly);
+}
+
+void JITCodeGenerator::emitCall(Node& node)
+{
+ // FIXME: It's not supported yet!
+ // op_call/op_construct support is a known gap in the JSVALUE32_64 DFG (see
+ // the ChangeLog); presumably DFGCapabilities rejects code containing calls
+ // before compilation can reach this point -- TODO confirm.
+ ASSERT_NOT_REACHED();
+}
+
+// Record a jump that bails out of speculatively-compiled code.
+void JITCodeGenerator::speculationCheck(MacroAssembler::Jump jumpToFail)
+{
+ // Only valid when this generator really is the SpeculativeJIT subclass;
+ // m_isSpeculative guards the static downcast below.
+ ASSERT(m_isSpeculative);
+ static_cast<SpeculativeJIT*>(this)->speculationCheck(jumpToFail);
+}
+
+#ifndef NDEBUG
+// Map a DataFormat enum value to a fixed-width mnemonic for dump().
+static const char* dataFormatString(DataFormat format)
+{
+ // These values correspond to the DataFormat enum.
+ const char* strings[] = {
+ "[ ]",
+ "[ i]",
+ "[ d]",
+ "[ c]",
+ "Err!",
+ "Err!",
+ "Err!",
+ "Err!",
+ "[J ]",
+ "[Ji]",
+ "[Jd]",
+ "[Jc]",
+ "Err!",
+ "Err!",
+ "Err!",
+ "Err!",
+ };
+ // NOTE(review): indexes the 16-entry table directly; assumes every
+ // DataFormat value passed here lies in [0, 15] -- confirm.
+ return strings[format];
+}
+
+// Debug dump of the register allocation state: both register banks plus the
+// register/spill format of every live virtual register.
+void JITCodeGenerator::dump(const char* label)
+{
+ if (label)
+ fprintf(stderr, "<%s>\n", label);
+
+ fprintf(stderr, "  gprs:\n");
+ m_gprs.dump();
+ fprintf(stderr, "  fprs:\n");
+ m_fprs.dump();
+ fprintf(stderr, "  VirtualRegisters:\n");
+ for (unsigned i = 0; i < m_generationInfo.size(); ++i) {
+ GenerationInfo& info = m_generationInfo[i];
+ if (info.alive())
+ fprintf(stderr, "    % 3d:%s%s", i, dataFormatString(info.registerFormat()), dataFormatString(info.spillFormat()));
+ else
+ fprintf(stderr, "    % 3d:[__][__]", i);
+ // Show which machine register currently holds the value, if any.
+ if (info.registerFormat() == DataFormatDouble)
+ fprintf(stderr, ":fpr%d\n", info.fpr());
+ else if (info.registerFormat() != DataFormatNone && !(info.registerFormat() & DataFormatJS)) {
+ ASSERT(info.gpr() != InvalidGPRReg);
+ fprintf(stderr, ":%s\n", GPRInfo::debugName(info.gpr()));
+ } else
+ fprintf(stderr, "\n");
+ }
+ if (label)
+ fprintf(stderr, "</%s>\n", label);
+}
+#endif
+
+
+#if ENABLE(DFG_CONSISTENCY_CHECK)
+// Debug-only validation that the allocator's bookkeeping is self-consistent:
+// no register remains locked between nodes, and the GenerationInfo records
+// agree with the register banks' name mappings in both directions.
+void JITCodeGenerator::checkConsistency()
+{
+ bool failed = false;
+
+ // No GPR or FPR should still be locked at a consistency-check point.
+ for (gpr_iterator iter = m_gprs.begin(); iter != m_gprs.end(); ++iter) {
+ if (iter.isLocked()) {
+ fprintf(stderr, "DFG_CONSISTENCY_CHECK failed: gpr %s is locked.\n", iter.debugName());
+ failed = true;
+ }
+ }
+ for (fpr_iterator iter = m_fprs.begin(); iter != m_fprs.end(); ++iter) {
+ if (iter.isLocked()) {
+ fprintf(stderr, "DFG_CONSISTENCY_CHECK failed: fpr %s is locked.\n", iter.debugName());
+ failed = true;
+ }
+ }
+
+ // Forward check: each live value's recorded register must name it back.
+ for (unsigned i = 0; i < m_generationInfo.size(); ++i) {
+ VirtualRegister virtualRegister = (VirtualRegister)i;
+ GenerationInfo& info = m_generationInfo[virtualRegister];
+ if (!info.alive())
+ continue;
+ switch (info.registerFormat()) {
+ case DataFormatNone:
+ case DataFormatJS:
+ case DataFormatJSInteger:
+ case DataFormatJSDouble:
+ case DataFormatJSCell:
+ break;
+ case DataFormatInteger:
+ case DataFormatCell: {
+ GPRReg gpr = info.gpr();
+ ASSERT(gpr != InvalidGPRReg);
+ if (m_gprs.name(gpr) != virtualRegister) {
+ fprintf(stderr, "DFG_CONSISTENCY_CHECK failed: name mismatch for virtual register %d (gpr %s).\n", virtualRegister, GPRInfo::debugName(gpr));
+ failed = true;
+ }
+ break;
+ }
+ case DataFormatDouble: {
+ FPRReg fpr = info.fpr();
+ ASSERT(fpr != InvalidFPRReg);
+ if (m_fprs.name(fpr) != virtualRegister) {
+ fprintf(stderr, "DFG_CONSISTENCY_CHECK failed: name mismatch for virtual register %d (fpr %s).\n", virtualRegister, FPRInfo::debugName(fpr));
+ failed = true;
+ }
+ break;
+ }
+ }
+ }
+
+ // Backward check: every named GPR must be recorded in the value's info.
+ // A boxed JSValue occupies a tag/payload register pair, so either of the
+ // two registers may legitimately carry the virtual register's name.
+ for (gpr_iterator iter = m_gprs.begin(); iter != m_gprs.end(); ++iter) {
+ VirtualRegister virtualRegister = iter.name();
+ if (virtualRegister == InvalidVirtualRegister)
+ continue;
+
+ GenerationInfo& info = m_generationInfo[virtualRegister];
+ if (!(info.registerFormat() & DataFormatJS)) {
+ if (iter.regID() != info.gpr()) {
+ fprintf(stderr, "DFG_CONSISTENCY_CHECK failed: name mismatch for gpr %s (virtual register %d).\n", iter.debugName(), virtualRegister);
+ failed = true;
+ }
+ } else {
+ if (iter.regID() != info.tagGPR() && iter.regID() != info.payloadGPR()) {
+ fprintf(stderr, "DFG_CONSISTENCY_CHECK failed: name mismatch for gpr %s (virtual register %d).\n", iter.debugName(), virtualRegister);
+ failed = true;
+ }
+ }
+ }
+
+ // Backward check for FPRs.
+ for (fpr_iterator iter = m_fprs.begin(); iter != m_fprs.end(); ++iter) {
+ VirtualRegister virtualRegister = iter.name();
+ if (virtualRegister == InvalidVirtualRegister)
+ continue;
+
+ GenerationInfo& info = m_generationInfo[virtualRegister];
+ if (iter.regID() != info.fpr()) {
+ fprintf(stderr, "DFG_CONSISTENCY_CHECK failed: name mismatch for fpr %s (virtual register %d).\n", iter.debugName(), virtualRegister);
+ failed = true;
+ }
+ }
+
+ if (failed) {
+ dump();
+ CRASH();
+ }
+}
+#endif
+
+// Create an empty temporary; a register can be installed later via adopt().
+GPRTemporary::GPRTemporary()
+ : m_jit(0)
+ , m_gpr(InvalidGPRReg)
+{
+}
+
+GPRTemporary::GPRTemporary(JITCodeGenerator* jit)
+ : m_jit(jit)
+ , m_gpr(InvalidGPRReg)
+{
+ // we currently lazily allocate the reg, as the number of regs on X86 is limited.
+}
+
+// Claim one specific register (e.g. to satisfy a fixed-register constraint).
+GPRTemporary::GPRTemporary(JITCodeGenerator* jit, GPRReg specific)
+    : m_jit(jit)
+    , m_gpr(jit->allocate(specific))
+{
+}
+
+// Result temporary for an op reading op1: recycle op1's register when this is
+// its final use, otherwise take a fresh one.
+GPRTemporary::GPRTemporary(JITCodeGenerator* jit, SpeculateIntegerOperand& op1)
+    : m_jit(jit)
+    , m_gpr(InvalidGPRReg)
+{
+    m_gpr = m_jit->canReuse(op1.index()) ? m_jit->reuse(op1.gpr()) : m_jit->allocate();
+}
+
+// Binary-op result temporary: recycle whichever operand register dies here.
+GPRTemporary::GPRTemporary(JITCodeGenerator* jit, SpeculateIntegerOperand& op1, SpeculateIntegerOperand& op2)
+    : m_jit(jit)
+    , m_gpr(InvalidGPRReg)
+{
+    if (m_jit->canReuse(op1.index()))
+        m_gpr = m_jit->reuse(op1.gpr());
+    else
+        m_gpr = m_jit->canReuse(op2.index()) ? m_jit->reuse(op2.gpr()) : m_jit->allocate();
+}
+
+// Result temporary for an op reading op1: reuse its register on last use.
+GPRTemporary::GPRTemporary(JITCodeGenerator* jit, SpeculateStrictInt32Operand& op1)
+    : m_jit(jit)
+    , m_gpr(InvalidGPRReg)
+{
+    m_gpr = m_jit->canReuse(op1.index()) ? m_jit->reuse(op1.gpr()) : m_jit->allocate();
+}
+
+// Result temporary for an op reading op1: reuse its register on last use.
+GPRTemporary::GPRTemporary(JITCodeGenerator* jit, IntegerOperand& op1)
+    : m_jit(jit)
+    , m_gpr(InvalidGPRReg)
+{
+    m_gpr = m_jit->canReuse(op1.index()) ? m_jit->reuse(op1.gpr()) : m_jit->allocate();
+}
+
+// Binary-op result temporary: recycle whichever operand register dies here.
+GPRTemporary::GPRTemporary(JITCodeGenerator* jit, IntegerOperand& op1, IntegerOperand& op2)
+    : m_jit(jit)
+    , m_gpr(InvalidGPRReg)
+{
+    if (m_jit->canReuse(op1.index()))
+        m_gpr = m_jit->reuse(op1.gpr());
+    else
+        m_gpr = m_jit->canReuse(op2.index()) ? m_jit->reuse(op2.gpr()) : m_jit->allocate();
+}
+
+// Result temporary for an op reading op1: reuse its register on last use.
+GPRTemporary::GPRTemporary(JITCodeGenerator* jit, SpeculateCellOperand& op1)
+    : m_jit(jit)
+    , m_gpr(InvalidGPRReg)
+{
+    m_gpr = m_jit->canReuse(op1.index()) ? m_jit->reuse(op1.gpr()) : m_jit->allocate();
+}
+
+// Result temporary for an op reading op1: reuse its register on last use.
+GPRTemporary::GPRTemporary(JITCodeGenerator* jit, SpeculateBooleanOperand& op1)
+    : m_jit(jit)
+    , m_gpr(InvalidGPRReg)
+{
+    m_gpr = m_jit->canReuse(op1.index()) ? m_jit->reuse(op1.gpr()) : m_jit->allocate();
+}
+
+// Result temporary for an op reading the JSValue op1. On last use, recycle
+// either its tag or payload register (chosen by 'tag'); a double-format
+// operand lives in an FPR and so has no GPR to recycle.
+GPRTemporary::GPRTemporary(JITCodeGenerator* jit, JSValueOperand& op1, bool tag)
+    : m_jit(jit)
+    , m_gpr(InvalidGPRReg)
+{
+    m_gpr = (!op1.isDouble() && m_jit->canReuse(op1.index()))
+        ? m_jit->reuse(tag ? op1.tagGPR() : op1.payloadGPR())
+        : m_jit->allocate();
+}
+
+// Result temporary for an op reading op1: reuse its register on last use.
+GPRTemporary::GPRTemporary(JITCodeGenerator* jit, StorageOperand& op1)
+    : m_jit(jit)
+    , m_gpr(InvalidGPRReg)
+{
+    m_gpr = m_jit->canReuse(op1.index()) ? m_jit->reuse(op1.gpr()) : m_jit->allocate();
+}
+
+// Take over ownership of another temporary's register; 'other' is left empty.
+// This temporary must currently be empty (default-constructed).
+void GPRTemporary::adopt(GPRTemporary& other)
+{
+ ASSERT(!m_jit);
+ ASSERT(m_gpr == InvalidGPRReg);
+ ASSERT(other.m_jit);
+ ASSERT(other.m_gpr != InvalidGPRReg);
+ m_jit = other.m_jit;
+ m_gpr = other.m_gpr;
+ other.m_jit = 0;
+ other.m_gpr = InvalidGPRReg;
+}
+
+// Allocate an FPR immediately (unlike GPRTemporary, there is no lazy path).
+FPRTemporary::FPRTemporary(JITCodeGenerator* jit)
+    : m_jit(jit)
+    , m_fpr(jit->fprAllocate())
+{
+}
+
+// Result temporary for an op reading op1: reuse its FPR on last use.
+FPRTemporary::FPRTemporary(JITCodeGenerator* jit, DoubleOperand& op1)
+    : m_jit(jit)
+    , m_fpr(InvalidFPRReg)
+{
+    m_fpr = m_jit->canReuse(op1.index()) ? m_jit->reuse(op1.fpr()) : m_jit->fprAllocate();
+}
+
+// Binary-op result temporary: recycle whichever operand FPR dies here.
+FPRTemporary::FPRTemporary(JITCodeGenerator* jit, DoubleOperand& op1, DoubleOperand& op2)
+    : m_jit(jit)
+    , m_fpr(InvalidFPRReg)
+{
+    if (m_jit->canReuse(op1.index()))
+        m_fpr = m_jit->reuse(op1.fpr());
+    else
+        m_fpr = m_jit->canReuse(op2.index()) ? m_jit->reuse(op2.fpr()) : m_jit->fprAllocate();
+}
+
+// Result temporary for an op reading op1: reuse its FPR on last use.
+FPRTemporary::FPRTemporary(JITCodeGenerator* jit, SpeculateDoubleOperand& op1)
+    : m_jit(jit)
+    , m_fpr(InvalidFPRReg)
+{
+    m_fpr = m_jit->canReuse(op1.index()) ? m_jit->reuse(op1.fpr()) : m_jit->fprAllocate();
+}
+
+// Binary-op result temporary: recycle whichever operand FPR dies here.
+FPRTemporary::FPRTemporary(JITCodeGenerator* jit, SpeculateDoubleOperand& op1, SpeculateDoubleOperand& op2)
+    : m_jit(jit)
+    , m_fpr(InvalidFPRReg)
+{
+    if (m_jit->canReuse(op1.index()))
+        m_fpr = m_jit->reuse(op1.fpr());
+    else
+        m_fpr = m_jit->canReuse(op2.index()) ? m_jit->reuse(op2.fpr()) : m_jit->fprAllocate();
+}
+
+// Result temporary for an op reading the JSValue op1: its FPR can only be
+// recycled when the operand is actually held in double format.
+FPRTemporary::FPRTemporary(JITCodeGenerator* jit, JSValueOperand& op1)
+    : m_jit(jit)
+    , m_fpr(InvalidFPRReg)
+{
+    m_fpr = (op1.isDouble() && m_jit->canReuse(op1.index()))
+        ? m_jit->reuse(op1.fpr())
+        : m_jit->fprAllocate();
+}
+
+} } // namespace JSC::DFG
+
+#endif
+#endif
#include "DFGJITCompiler.h"
#if ENABLE(DFG_JIT)
+#if USE(JSVALUE64)
#include "CodeBlock.h"
#include "DFGJITCodeGenerator.h"
} } // namespace JSC::DFG
#endif
+#endif
JSGlobalData* globalData() { return m_globalData; }
AssemblerType_T& assembler() { return m_assembler; }
-#if CPU(X86_64)
+#if CPU(X86_64) || CPU(X86)
void preserveReturnAddressAfterCall(GPRReg reg)
{
pop(reg);
return Address(global, varNumber * sizeof(Register));
}
+ // Address of the tag word of a global-variable slot (JSVALUE32_64 layout).
+ static Address tagForGlobalVar(GPRReg global, int32_t varNumber)
+ {
+ return Address(global, varNumber * sizeof(Register) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag));
+ }
+
+ // Address of the payload word of a global-variable slot (JSVALUE32_64 layout).
+ static Address payloadForGlobalVar(GPRReg global, int32_t varNumber)
+ {
+ return Address(global, varNumber * sizeof(Register) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
+ }
+
static Address addressFor(VirtualRegister virtualRegister)
{
return Address(GPRInfo::callFrameRegister, virtualRegister * sizeof(Register));
Call appendCallWithExceptionCheck(const FunctionPtr& function, CodeOrigin codeOrigin)
{
Call functionCall = call();
+#if USE(JSVALUE64)
Jump exceptionCheck = branchTestPtr(NonZero, AbsoluteAddress(&globalData()->exception));
+#elif USE(JSVALUE32_64)
+ // On JSVALUE32_64 an empty JSValue is identified by its tag word alone, so
+ // test the exception slot's tag against EmptyValueTag instead of testing
+ // the full 64-bit value.
+ Jump exceptionCheck = branch32(NotEqual, AbsoluteAddress(reinterpret_cast<char*>(&globalData()->exception) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), TrustedImm32(JSValue::EmptyValueTag));
+#endif
m_calls.append(CallRecord(functionCall, function, exceptionCheck, codeOrigin));
return functionCall;
}
bool valueOfBooleanConstant(NodeIndex nodeIndex) { return graph().valueOfBooleanConstant(codeBlock(), nodeIndex); }
JSFunction* valueOfFunctionConstant(NodeIndex nodeIndex) { return graph().valueOfFunctionConstant(codeBlock(), nodeIndex); }
+#if USE(JSVALUE32_64)
+ // Return the address of a numeric constant's slot in the CodeBlock's
+ // constant pool so it can be loaded with loadDouble (a double cannot fit
+ // in a single 32-bit GPR).
+ void* addressOfDoubleConstant(NodeIndex nodeIndex)
+ {
+ ASSERT(isNumberConstant(nodeIndex));
+ unsigned constantIndex = graph()[nodeIndex].constantNumber();
+ return &(codeBlock()->constantRegister(FirstConstantRegisterIndex + constantIndex));
+ }
+
+ void emitLoadTag(NodeIndex, GPRReg tag);
+ void emitLoadPayload(NodeIndex, GPRReg payload);
+
+ void emitLoad(const JSValue&, GPRReg tag, GPRReg payload);
+ void emitLoad(NodeIndex, GPRReg tag, GPRReg payload);
+ void emitLoad2(NodeIndex index1, GPRReg tag1, GPRReg payload1, NodeIndex index2, GPRReg tag2, GPRReg payload2);
+
+ void emitLoadDouble(NodeIndex, FPRReg value);
+ void emitLoadInt32ToDouble(NodeIndex, FPRReg value);
+
+ void emitStore(NodeIndex, GPRReg tag, GPRReg payload);
+ void emitStore(NodeIndex, const JSValue constant);
+ void emitStoreInt32(NodeIndex, GPRReg payload, bool indexIsInt32 = false);
+ void emitStoreInt32(NodeIndex, TrustedImm32 payload, bool indexIsInt32 = false);
+ void emitStoreCell(NodeIndex, GPRReg payload, bool indexIsCell = false);
+ void emitStoreBool(NodeIndex, GPRReg payload, bool indexIsBool = false);
+ void emitStoreDouble(NodeIndex, FPRReg value);
+#endif
+
// These methods JIT generate dynamic, debug-only checks - akin to ASSERTs.
#if ENABLE(DFG_JIT_ASSERT)
void jitAssertIsInt32(GPRReg);
#endif
// These methods convert between doubles, and doubles boxed and JSValues.
+#if USE(JSVALUE64)
GPRReg boxDouble(FPRReg fpr, GPRReg gpr)
{
moveDoubleToPtr(fpr, gpr);
movePtrToDouble(gpr, fpr);
return fpr;
}
+#elif USE(JSVALUE32_64)
+ // FIXME: The box/unbox of doubles could be improved without exchanging data through memory,
+ // for example on x86 some SSE instructions can help do this.
+ // Box fpr into the (tagGPR, payloadGPR) pair by spilling the double to the
+ // stack slot of virtualRegister and reloading its two 32-bit halves.
+ void boxDouble(FPRReg fpr, GPRReg tagGPR, GPRReg payloadGPR, VirtualRegister virtualRegister)
+ {
+ storeDouble(fpr, addressFor(virtualRegister));
+ load32(tagFor(virtualRegister), tagGPR);
+ load32(payloadFor(virtualRegister), payloadGPR);
+ }
+ // Unbox the (tagGPR, payloadGPR) pair into fpr by storing both halves to
+ // the stack slot of virtualRegister and reloading them as one double.
+ // Debug builds assert that the tag really encodes a double.
+ void unboxDouble(GPRReg tagGPR, GPRReg payloadGPR, FPRReg fpr, VirtualRegister virtualRegister)
+ {
+ jitAssertIsJSDouble(tagGPR);
+ store32(tagGPR, tagFor(virtualRegister));
+ store32(payloadGPR, payloadFor(virtualRegister));
+ loadDouble(addressFor(virtualRegister), fpr);
+ }
+#endif
#if ENABLE(SAMPLING_COUNTERS)
// Debug profiling tool.
void clearSamplingFlag(int32_t flag);
#endif
+#if USE(JSVALUE64)
void addPropertyAccess(JITCompiler::Call functionCall, int16_t deltaCheckImmToCall, int16_t deltaCallToStructCheck, int16_t deltaCallToLoadOrStore, int16_t deltaCallToSlowCase, int16_t deltaCallToDone, int8_t baseGPR, int8_t valueGPR, int8_t scratchGPR)
{
m_propertyAccesses.append(PropertyAccessRecord(functionCall, deltaCheckImmToCall, deltaCallToStructCheck, deltaCallToLoadOrStore, deltaCallToSlowCase, deltaCallToDone, baseGPR, valueGPR, scratchGPR));
}
-
+#elif USE(JSVALUE32_64)
+ // JSVALUE32_64 variant: additionally records the register holding the
+ // value's tag alongside the payload register.
+ void addPropertyAccess(JITCompiler::Call functionCall, int16_t deltaCheckImmToCall, int16_t deltaCallToStructCheck, int16_t deltaCallToLoadOrStore, int16_t deltaCallToSlowCase, int16_t deltaCallToDone, int8_t baseGPR, int8_t valueTagGPR, int8_t valueGPR, int8_t scratchGPR)
+ {
+ m_propertyAccesses.append(PropertyAccessRecord(functionCall, deltaCheckImmToCall, deltaCallToStructCheck, deltaCallToLoadOrStore, deltaCallToSlowCase, deltaCallToDone, baseGPR, valueTagGPR, valueGPR, scratchGPR));
+ }
+#endif
+
void addMethodGet(Call slowCall, DataLabelPtr structToCompare, DataLabelPtr protoObj, DataLabelPtr protoStructToCompare, DataLabelPtr putFunction)
{
m_methodGets.append(MethodGetRecord(slowCall, structToCompare, protoObj, protoStructToCompare, putFunction));
Label m_startOfCode;
struct PropertyAccessRecord {
+#if USE(JSVALUE64)
PropertyAccessRecord(Call functionCall, int16_t deltaCheckImmToCall, int16_t deltaCallToStructCheck, int16_t deltaCallToLoadOrStore, int16_t deltaCallToSlowCase, int16_t deltaCallToDone, int8_t baseGPR, int8_t valueGPR, int8_t scratchGPR)
+#elif USE(JSVALUE32_64)
+ PropertyAccessRecord(Call functionCall, int16_t deltaCheckImmToCall, int16_t deltaCallToStructCheck, int16_t deltaCallToLoadOrStore, int16_t deltaCallToSlowCase, int16_t deltaCallToDone, int8_t baseGPR, int8_t valueTagGPR, int8_t valueGPR, int8_t scratchGPR)
+#endif
: m_functionCall(functionCall)
, m_deltaCheckImmToCall(deltaCheckImmToCall)
, m_deltaCallToStructCheck(deltaCallToStructCheck)
, m_deltaCallToSlowCase(deltaCallToSlowCase)
, m_deltaCallToDone(deltaCallToDone)
, m_baseGPR(baseGPR)
+#if USE(JSVALUE32_64)
+ , m_valueTagGPR(valueTagGPR)
+#endif
, m_valueGPR(valueGPR)
, m_scratchGPR(scratchGPR)
{
int16_t m_deltaCallToSlowCase;
int16_t m_deltaCallToDone;
int8_t m_baseGPR;
+#if USE(JSVALUE32_64)
+ int8_t m_valueTagGPR;
+#endif
int8_t m_valueGPR;
int8_t m_scratchGPR;
};
--- /dev/null
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "DFGJITCompiler.h"
+
+#if ENABLE(DFG_JIT)
+#if USE(JSVALUE32_64)
+
+#include "CodeBlock.h"
+#include "DFGJITCodeGenerator.h"
+#include "DFGJITCompilerInlineMethods.h"
+#include "DFGOperations.h"
+#include "DFGRegisterBank.h"
+#include "DFGSpeculativeJIT.h"
+#include "JSGlobalData.h"
+#include "LinkBuffer.h"
+
+namespace JSC { namespace DFG {
+
+// This method used to fill a numeric value to a FPR when linking speculative -> non-speculative.
+void JITCompiler::fillNumericToDouble(NodeIndex nodeIndex, FPRReg fpr, GPRReg temporary)
+{
+ Node& node = graph()[nodeIndex];
+
+ if (node.hasConstant()) {
+ ASSERT(isNumberConstant(nodeIndex));
+ // Constants are loaded straight from the CodeBlock's constant pool.
+ loadDouble(addressOfDoubleConstant(nodeIndex), fpr);
+ } else {
+ // Inspect the tag in the register file: Int32Tag means an int32 payload
+ // that must be converted; anything else is loaded directly as a double.
+ load32(tagFor(node.virtualRegister()), temporary);
+ Jump isInteger = branch32(MacroAssembler::Equal, temporary, TrustedImm32(JSValue::Int32Tag));
+ loadDouble(addressFor(node.virtualRegister()), fpr);
+ Jump hasUnboxedDouble = jump();
+ isInteger.link(this);
+ load32(payloadFor(node.virtualRegister()), temporary);
+ convertInt32ToDouble(temporary, fpr);
+ hasUnboxedDouble.link(this);
+ }
+}
+
+// This method used to fill an integer value to a GPR when linking speculative -> non-speculative.
+void JITCompiler::fillInt32ToInteger(NodeIndex nodeIndex, GPRReg gpr)
+{
+ Node& node = graph()[nodeIndex];
+
+ if (node.hasConstant()) {
+ ASSERT(isInt32Constant(nodeIndex));
+ move(MacroAssembler::Imm32(valueOfInt32Constant(nodeIndex)), gpr);
+ } else {
+#if ENABLE(DFG_JIT_ASSERT)
+ // Redundant load, just so we can check the tag!
+ load32(tagFor(node.virtualRegister()), gpr);
+ jitAssertIsJSInt32(gpr);
+#endif
+ // Only the 32-bit payload is needed for an unboxed int32.
+ load32(payloadFor(node.virtualRegister()), gpr);
+ }
+}
+
+// This method used to fill a JSValue to a GPR when linking speculative -> non-speculative.
+// On JSVALUE32_64 a JSValue occupies a tag/payload register pair, so filling a
+// whole JSValue into a single GPR cannot happen; this path must never be taken.
+// The parameters are intentionally unnamed: naming them would trigger
+// unused-parameter warnings once ASSERT_NOT_REACHED() compiles away in
+// release builds.
+void JITCompiler::fillToJS(NodeIndex, GPRReg)
+{
+    ASSERT_NOT_REACHED();
+}
+
+void JITCompiler::exitSpeculativeWithOSR(const OSRExit& exit, SpeculationRecovery* recovery, Vector<BytecodeAndMachineOffset>& decodedCodeMap)
+{
+ // 1) Pro-forma stuff.
+ exit.m_check.link(this);
+
+#if ENABLE(DFG_DEBUG_VERBOSE)
+ fprintf(stderr, "OSR exit for Node @%d (bc#%u) at JIT offset 0x%x ", (int)exit.m_nodeIndex, exit.m_bytecodeIndex, debugOffset());
+ exit.dump(stderr);
+#endif
+#if ENABLE(DFG_JIT_BREAK_ON_SPECULATION_FAILURE)
+ breakpoint();
+#endif
+
+#if ENABLE(DFG_VERBOSE_SPECULATION_FAILURE)
+ SpeculationFailureDebugInfo* debugInfo = new SpeculationFailureDebugInfo;
+ debugInfo->codeBlock = m_codeBlock;
+ debugInfo->debugOffset = debugOffset();
+
+ debugCall(debugOperationPrintSpeculationFailure, debugInfo);
+#endif
+
+#if ENABLE(DFG_SUCCESS_STATS)
+ static SamplingCounter counter("SpeculationFailure");
+ emitCount(counter);
+#endif
+
+ // 2) Perform speculation recovery. This only comes into play when an operation
+ // starts mutating state before verifying the speculation it has already made.
+
+ if (recovery) {
+ switch (recovery->type()) {
+ case SpeculativeAdd:
+ sub32(recovery->src(), recovery->dest());
+ break;
+
+ case BooleanSpeculationCheck:
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ // 3) Figure out how many scratch slots we'll need. We need one for every GPR/FPR
+ // whose destination is now occupied by a DFG virtual register, and we need
+ // one for every displaced virtual register if there are more than
+ // GPRInfo::numberOfRegisters of them. Also see if there are any constants,
+ // any undefined slots, any FPR slots, and any unboxed ints.
+
+ Vector<bool> poisonedVirtualRegisters(exit.m_variables.size());
+ for (unsigned i = 0; i < poisonedVirtualRegisters.size(); ++i)
+ poisonedVirtualRegisters[i] = false;
+
+ unsigned numberOfPoisonedVirtualRegisters = 0;
+ unsigned numberOfDisplacedVirtualRegisters = 0;
+
+ // Booleans for fast checks. We expect that most OSR exits do not have to rebox
+ // Int32s, have no FPRs, and have no constants. If there are constants, we
+ // expect most of them to be jsUndefined(); if that's true then we handle that
+ // specially to minimize code size and execution time.
+ bool haveUnboxedInt32s = false;
+ bool haveFPRs = false;
+ bool haveConstants = false;
+ bool haveUndefined = false;
+
+ for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
+ const ValueRecovery& recovery = exit.valueRecovery(index);
+ switch (recovery.technique()) {
+ case DisplacedInRegisterFile:
+ numberOfDisplacedVirtualRegisters++;
+ ASSERT((int)recovery.virtualRegister() >= 0);
+
+ // See if we might like to store to this virtual register before doing
+ // virtual register shuffling. If so, we say that the virtual register
+ // is poisoned: it cannot be stored to until after displaced virtual
+ // registers are handled. We track poisoned virtual register carefully
+ // to ensure this happens efficiently. Note that we expect this case
+ // to be rare, so the handling of it is optimized for the cases in
+ // which it does not happen.
+ if (recovery.virtualRegister() < (int)exit.m_variables.size()) {
+ switch (exit.m_variables[recovery.virtualRegister()].technique()) {
+ case InGPR:
+ case UnboxedInt32InGPR:
+ case InPair:
+ case InFPR:
+ if (!poisonedVirtualRegisters[recovery.virtualRegister()]) {
+ poisonedVirtualRegisters[recovery.virtualRegister()] = true;
+ numberOfPoisonedVirtualRegisters++;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ break;
+
+ case UnboxedInt32InGPR:
+ haveUnboxedInt32s = true;
+ break;
+
+ case InFPR:
+ haveFPRs = true;
+ break;
+
+ case Constant:
+ haveConstants = true;
+ if (recovery.constant().isUndefined())
+ haveUndefined = true;
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ EncodedJSValue* scratchBuffer = static_cast<EncodedJSValue*>(globalData()->scratchBufferForSize(sizeof(EncodedJSValue) * (numberOfPoisonedVirtualRegisters + ((numberOfDisplacedVirtualRegisters * 2) <= GPRInfo::numberOfRegisters ? 0 : numberOfDisplacedVirtualRegisters))));
+
+ // From here on, the code assumes that it is profitable to maximize the distance
+ // between when something is computed and when it is stored.
+
+ // 4) Perform all reboxing of integers.
+ // Currently we don't rebox for JSValue32_64.
+
+ // 5) Dump all non-poisoned GPRs. For poisoned GPRs, save them into the scratch storage.
+ // Note that GPRs do not have a fast change (like haveFPRs) because we expect that
+ // most OSR failure points will have at least one GPR that needs to be dumped.
+
+ unsigned scratchIndex = 0;
+ for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
+ const ValueRecovery& recovery = exit.valueRecovery(index);
+ int operand = exit.operandForIndex(index);
+ switch (recovery.technique()) {
+ case InGPR:
+ if (exit.isVariable(index) && poisonedVirtualRegisters[exit.variableForIndex(index)]) {
+ store32(TrustedImm32(JSValue::CellTag), reinterpret_cast<char*>(scratchBuffer + scratchIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag));
+ store32(recovery.gpr(), reinterpret_cast<char*>(scratchBuffer + scratchIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
+ scratchIndex++;
+ } else {
+ store32(TrustedImm32(JSValue::CellTag), tagFor((VirtualRegister)operand));
+ store32(recovery.gpr(), payloadFor((VirtualRegister)operand));
+ }
+ break;
+ case UnboxedInt32InGPR:
+ if (exit.isVariable(index) && poisonedVirtualRegisters[exit.variableForIndex(index)]) {
+ store32(TrustedImm32(JSValue::Int32Tag), reinterpret_cast<char*>(scratchBuffer + scratchIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag));
+ store32(recovery.gpr(), reinterpret_cast<char*>(scratchBuffer + scratchIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
+ scratchIndex++;
+ } else {
+ store32(TrustedImm32(JSValue::Int32Tag), tagFor((VirtualRegister)operand));
+ store32(recovery.gpr(), payloadFor((VirtualRegister)operand));
+ }
+ break;
+ case InPair:
+ if (exit.isVariable(index) && poisonedVirtualRegisters[exit.variableForIndex(index)]) {
+ store32(recovery.tagGPR(), reinterpret_cast<char*>(scratchBuffer + scratchIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag));
+ store32(recovery.payloadGPR(), reinterpret_cast<char*>(scratchBuffer + scratchIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
+ scratchIndex++;
+ } else {
+ store32(recovery.tagGPR(), tagFor((VirtualRegister)operand));
+ store32(recovery.payloadGPR(), payloadFor((VirtualRegister)operand));
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ // At this point all GPRs are available for scratch use.
+
+ if (haveFPRs) {
+ // 6) Box all doubles (relies on there being more GPRs than FPRs)
+ // For JSValue32_64, no need to box doubles.
+
+ // 7) Dump all doubles into the register file, or to the scratch storage if
+ // the destination virtual register is poisoned.
+
+ for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
+ const ValueRecovery& recovery = exit.valueRecovery(index);
+ if (recovery.technique() != InFPR)
+ continue;
+ if (exit.isVariable(index) && poisonedVirtualRegisters[exit.variableForIndex(index)])
+ storeDouble(recovery.fpr(), scratchBuffer + scratchIndex++);
+ else
+ storeDouble(recovery.fpr(), addressFor((VirtualRegister)exit.operandForIndex(index)));
+ }
+ }
+
+ ASSERT(scratchIndex == numberOfPoisonedVirtualRegisters);
+
+ // 8) Reshuffle displaced virtual registers. Optimize for the case that
+ // the number of displaced virtual registers is not more than the number
+ // of available physical registers.
+
+ if (numberOfDisplacedVirtualRegisters) {
+ if (numberOfDisplacedVirtualRegisters * 2 <= GPRInfo::numberOfRegisters) {
+ // So far this appears to be the case that triggers all the time, but
+ // that is far from guaranteed.
+
+ unsigned displacementIndex = 0;
+ for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
+ const ValueRecovery& recovery = exit.valueRecovery(index);
+ if (recovery.technique() != DisplacedInRegisterFile)
+ continue;
+ load32(payloadFor(recovery.virtualRegister()), GPRInfo::toRegister(displacementIndex++));
+ load32(tagFor(recovery.virtualRegister()), GPRInfo::toRegister(displacementIndex++));
+ }
+
+ displacementIndex = 0;
+ for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
+ const ValueRecovery& recovery = exit.valueRecovery(index);
+ if (recovery.technique() != DisplacedInRegisterFile)
+ continue;
+ store32(GPRInfo::toRegister(displacementIndex++), payloadFor((VirtualRegister)exit.operandForIndex(index)));
+ store32(GPRInfo::toRegister(displacementIndex++), tagFor((VirtualRegister)exit.operandForIndex(index)));
+ }
+ } else {
+ // FIXME: This should use the shuffling algorithm that we use
+ // for speculative->non-speculative jumps, if we ever discover that
+ // some hot code with lots of live values that get displaced and
+ // spilled really enjoys frequently failing speculation.
+
+ // For now this code is engineered to be correct but probably not
+ // super. In particular, it correctly handles cases where for example
+ // the displacements are a permutation of the destination values, like
+ //
+ // 1 -> 2
+ // 2 -> 1
+ //
+ // It accomplishes this by simply lifting all of the virtual registers
+ // from their old (DFG JIT) locations and dropping them in a scratch
+ // location in memory, and then transferring from that scratch location
+ // to their new (old JIT) locations.
+
+ for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
+ const ValueRecovery& recovery = exit.valueRecovery(index);
+ if (recovery.technique() != DisplacedInRegisterFile)
+ continue;
+ load32(payloadFor(recovery.virtualRegister()), GPRInfo::regT0);
+ load32(tagFor(recovery.virtualRegister()), GPRInfo::regT1);
+ store32(GPRInfo::regT0, reinterpret_cast<char*>(scratchBuffer + scratchIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
+ store32(GPRInfo::regT1, reinterpret_cast<char*>(scratchBuffer + scratchIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag));
+ scratchIndex++;
+ }
+
+ scratchIndex = numberOfPoisonedVirtualRegisters;
+ for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
+ const ValueRecovery& recovery = exit.valueRecovery(index);
+ if (recovery.technique() != DisplacedInRegisterFile)
+ continue;
+ load32(reinterpret_cast<char*>(scratchBuffer + scratchIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload), GPRInfo::regT0);
+ load32(reinterpret_cast<char*>(scratchBuffer + scratchIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag), GPRInfo::regT1);
+ store32(GPRInfo::regT0, payloadFor((VirtualRegister)exit.operandForIndex(index)));
+ store32(GPRInfo::regT1, tagFor((VirtualRegister)exit.operandForIndex(index)));
+ scratchIndex++;
+ }
+
+ ASSERT(scratchIndex == numberOfPoisonedVirtualRegisters + numberOfDisplacedVirtualRegisters);
+ }
+ }
+
+ // 9) Dump all poisoned virtual registers.
+
+ scratchIndex = 0;
+ if (numberOfPoisonedVirtualRegisters) {
+ for (int virtualRegister = 0; virtualRegister < (int)exit.m_variables.size(); ++virtualRegister) {
+ if (!poisonedVirtualRegisters[virtualRegister])
+ continue;
+
+ const ValueRecovery& recovery = exit.m_variables[virtualRegister];
+ switch (recovery.technique()) {
+ case InGPR:
+ case UnboxedInt32InGPR:
+ case InFPR:
+ case InPair:
+ load32(reinterpret_cast<char*>(scratchBuffer + scratchIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload), GPRInfo::regT0);
+ load32(reinterpret_cast<char*>(scratchBuffer + scratchIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag), GPRInfo::regT1);
+ store32(GPRInfo::regT0, payloadFor((VirtualRegister)virtualRegister));
+ store32(GPRInfo::regT1, tagFor((VirtualRegister)virtualRegister));
+ scratchIndex++;
+ break;
+
+ default:
+ break;
+ }
+ }
+ }
+ ASSERT(scratchIndex == numberOfPoisonedVirtualRegisters);
+
+ // 10) Dump all constants. Optimize for Undefined, since that's a constant we see
+ // often.
+
+ if (haveConstants) {
+ if (haveUndefined) {
+ move(TrustedImm32(jsUndefined().payload()), GPRInfo::regT0);
+ move(TrustedImm32(jsUndefined().tag()), GPRInfo::regT1);
+ }
+
+ for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
+ const ValueRecovery& recovery = exit.valueRecovery(index);
+ if (recovery.technique() != Constant)
+ continue;
+ if (recovery.constant().isUndefined()) {
+ store32(GPRInfo::regT0, payloadFor((VirtualRegister)exit.operandForIndex(index)));
+ store32(GPRInfo::regT1, tagFor((VirtualRegister)exit.operandForIndex(index)));
+ } else {
+ store32(TrustedImm32(recovery.constant().payload()), payloadFor((VirtualRegister)exit.operandForIndex(index)));
+ store32(TrustedImm32(recovery.constant().tag()), tagFor((VirtualRegister)exit.operandForIndex(index)));
+ }
+ }
+ }
+
+ // 11) Adjust the old JIT's execute counter. Since we are exiting OSR, we know
+ // that all new calls into this code will go to the new JIT, so the execute
+ // counter only affects call frames that performed OSR exit and call frames
+ // that were still executing the old JIT at the time of another call frame's
+ // OSR exit. We want to ensure that the following is true:
+ //
+ // (a) Code the performs an OSR exit gets a chance to reenter optimized
+ // code eventually, since optimized code is faster. But we don't
+ // want to do such reentery too aggressively (see (c) below).
+ //
+ // (b) If there is code on the call stack that is still running the old
+ // JIT's code and has never OSR'd, then it should get a chance to
+ // perform OSR entry despite the fact that we've exited.
+ //
+ // (c) Code the performs an OSR exit should not immediately retry OSR
+ // entry, since both forms of OSR are expensive. OSR entry is
+ // particularly expensive.
+ //
+ // (d) Frequent OSR failures, even those that do not result in the code
+ // running in a hot loop, result in recompilation getting triggered.
+ //
+ // To ensure (c), we'd like to set the execute counter to
+ // counterValueForOptimizeAfterWarmUp(). This seems like it would endanger
+ // (a) and (b), since then every OSR exit would delay the opportunity for
+ // every call frame to perform OSR entry. Essentially, if OSR exit happens
+ // frequently and the function has few loops, then the counter will never
+ // become non-negative and OSR entry will never be triggered. OSR entry
+ // will only happen if a loop gets hot in the old JIT, which does a pretty
+ // good job of ensuring (a) and (b). But that doesn't take care of (d),
+ // since each speculation failure would reset the execute counter.
+ // So we check here if the number of speculation failures is significantly
+ // larger than the number of successes (we want 90% success rate), and if
+ // there have been a large enough number of failures. If so, we set the
+ // counter to 0; otherwise we set the counter to
+ // counterValueForOptimizeAfterWarmUp().
+
+ move(TrustedImmPtr(codeBlock()), GPRInfo::regT0);
+
+ load32(Address(GPRInfo::regT0, CodeBlock::offsetOfSpeculativeFailCounter()), GPRInfo::regT2);
+ load32(Address(GPRInfo::regT0, CodeBlock::offsetOfSpeculativeSuccessCounter()), GPRInfo::regT1);
+ add32(Imm32(1), GPRInfo::regT2);
+ add32(Imm32(-1), GPRInfo::regT1);
+ store32(GPRInfo::regT2, Address(GPRInfo::regT0, CodeBlock::offsetOfSpeculativeFailCounter()));
+ store32(GPRInfo::regT1, Address(GPRInfo::regT0, CodeBlock::offsetOfSpeculativeSuccessCounter()));
+
+ move(TrustedImmPtr(codeBlock()->alternative()), GPRInfo::regT0);
+
+ Jump fewFails = branch32(BelowOrEqual, GPRInfo::regT2, Imm32(codeBlock()->largeFailCountThreshold()));
+ mul32(Imm32(codeBlock()->desiredSuccessFailRatio()), GPRInfo::regT2, GPRInfo::regT2);
+
+ Jump lowFailRate = branch32(BelowOrEqual, GPRInfo::regT2, GPRInfo::regT1);
+
+ // Reoptimize as soon as possible.
+ store32(Imm32(CodeBlock::counterValueForOptimizeNextInvocation()), Address(GPRInfo::regT0, CodeBlock::offsetOfExecuteCounter()));
+ Jump doneAdjusting = jump();
+
+ fewFails.link(this);
+ lowFailRate.link(this);
+
+ store32(Imm32(codeBlock()->alternative()->counterValueForOptimizeAfterLongWarmUp()), Address(GPRInfo::regT0, CodeBlock::offsetOfExecuteCounter()));
+
+ doneAdjusting.link(this);
+
+ // 12) Load the result of the last bytecode operation into regT0.
+
+ if (exit.m_lastSetOperand != std::numeric_limits<int>::max()) {
+ load32(payloadFor((VirtualRegister)exit.m_lastSetOperand), GPRInfo::cachedResultRegister);
+ load32(tagFor((VirtualRegister)exit.m_lastSetOperand), GPRInfo::cachedResultRegister2);
+ }
+
+ // 13) Fix call frame.
+
+ ASSERT(codeBlock()->alternative()->getJITType() == JITCode::BaselineJIT);
+ storePtr(TrustedImmPtr(codeBlock()->alternative()), addressFor((VirtualRegister)RegisterFile::CodeBlock));
+
+ // 14) Jump into the corresponding baseline JIT code.
+
+ BytecodeAndMachineOffset* mapping = binarySearch<BytecodeAndMachineOffset, unsigned, BytecodeAndMachineOffset::getBytecodeIndex>(decodedCodeMap.begin(), decodedCodeMap.size(), exit.m_bytecodeIndex);
+
+ ASSERT(mapping);
+ ASSERT(mapping->m_bytecodeIndex == exit.m_bytecodeIndex);
+
+ void* jumpTarget = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(codeBlock()->alternative()->getJITCode().start()) + mapping->m_machineCodeOffset);
+
+ ASSERT(GPRInfo::regT2 != GPRInfo::cachedResultRegister && GPRInfo::regT2 != GPRInfo::cachedResultRegister2);
+
+ move(TrustedImmPtr(jumpTarget), GPRInfo::regT2);
+ jump(GPRInfo::regT2);
+
+#if ENABLE(DFG_DEBUG_VERBOSE)
+ fprintf(stderr, " -> %p\n", jumpTarget);
+#endif
+}
+
+// Emit an OSR exit stub for every speculation failure recorded by the
+// speculative compiler. The baseline JIT's bytecode->machine-code map is
+// decoded once up front and shared by all exits.
+void JITCompiler::linkOSRExits(SpeculativeJIT& speculative)
+{
+    Vector<BytecodeAndMachineOffset> decodedCodeMap;
+    ASSERT(codeBlock()->alternative());
+    ASSERT(codeBlock()->alternative()->getJITType() == JITCode::BaselineJIT);
+    ASSERT(codeBlock()->alternative()->jitCodeMap());
+    codeBlock()->alternative()->jitCodeMap()->decode(decodedCodeMap);
+
+    OSRExitVector::Iterator end = speculative.osrExits().end();
+    for (OSRExitVector::Iterator iter = speculative.osrExits().begin(); iter != end; ++iter) {
+        const OSRExit& exit = *iter;
+        exitSpeculativeWithOSR(exit, speculative.speculationRecovery(exit.m_recoveryIndex), decodedCodeMap);
+    }
+}
+
+// Emits the common function prologue: spill the return address into the call
+// frame header, and credit one speculative success to the counters that the
+// OSR exit path later debits (see the exit stub, which subtracts one again).
+void JITCompiler::compileEntry()
+{
+    m_startOfCode = label();
+
+    // This code currently matches the old JIT. In the function header we need to
+    // pop the return address (since we do not allow any recursion on the machine
+    // stack), and perform a fast register file check.
+    // FIXME: https://bugs.webkit.org/show_bug.cgi?id=56292
+    // We'll need to convert the remaining cti_ style calls (specifically the register file
+    // check) which will be dependent on stack layout. (We'd need to account for this in
+    // both normal return code and when jumping to an exception handler).
+    preserveReturnAddressAfterCall(GPRInfo::regT2);
+    emitPutToCallFrameHeader(GPRInfo::regT2, RegisterFile::ReturnPC);
+
+    // Optimistically assume this invocation succeeds; an OSR exit decrements
+    // the success counter to compensate.
+    addPtr(Imm32(1), AbsoluteAddress(codeBlock()->addressOfSpeculativeSuccessCounter()));
+}
+
+// Generates the main body: runs the speculative (DFG) compiler over the
+// graph, emits all OSR exit stubs, and then emits the shared exception
+// trampoline that every exception-checked call jumps to on failure.
+void JITCompiler::compileBody()
+{
+
+#if ENABLE(DFG_JIT_BREAK_ON_EVERY_FUNCTION)
+    // Handy debug tool!
+    breakpoint();
+#endif
+
+    // NOTE(review): speculativePathBegin appears unused within this function —
+    // confirm it is still required.
+    Label speculativePathBegin = label();
+    SpeculativeJIT speculative(*this);
+    bool compiledSpeculative = speculative.compile();
+    ASSERT_UNUSED(compiledSpeculative, compiledSpeculative);
+
+    linkOSRExits(speculative);
+
+    // Iterate over the m_calls vector, checking for exception checks,
+    // and linking them to here.
+    for (unsigned i = 0; i < m_calls.size(); ++i) {
+        Jump& exceptionCheck = m_calls[i].m_exceptionCheck;
+        if (exceptionCheck.isSet()) {
+            exceptionCheck.link(this);
+            ++m_exceptionCheckCount;
+        }
+    }
+    // If any exception checks were linked, generate code to lookup a handler.
+    if (m_exceptionCheckCount) {
+        // lookupExceptionHandler is passed two arguments, exec (the CallFrame*), and
+        // an identifier for the operation that threw the exception, which we can use
+        // to look up handler information. The identifier we use is the return address
+        // of the call out from JIT code that threw the exception; this is still
+        // available on the stack, just below the stack pointer!
+        peek(GPRInfo::argumentGPR1, -1);
+        // Arguments are pushed right-to-left (X86 cdecl convention).
+        push(GPRInfo::argumentGPR1);
+        push(GPRInfo::callFrameRegister);
+        m_calls.append(CallRecord(call(), lookupExceptionHandler));
+        // lookupExceptionHandler leaves the handler CallFrame* in the returnValueGPR,
+        // and the address of the handler in returnValueGPR2.
+        jump(GPRInfo::returnValueGPR2);
+    }
+}
+
+// Final link step: bind call targets in the generated code and transfer the
+// recorded call-site, property-access, JS-call, and method-check metadata
+// into the CodeBlock so the runtime can unwind and repatch.
+void JITCompiler::link(LinkBuffer& linkBuffer)
+{
+    // Link the code, populate data in CodeBlock data structures.
+#if ENABLE(DFG_DEBUG_VERBOSE)
+    fprintf(stderr, "JIT code for %p start at [%p, %p)\n", m_codeBlock, linkBuffer.debugAddress(), static_cast<char*>(linkBuffer.debugAddress()) + linkBuffer.debugSize());
+#endif
+
+    // Link all calls out from the JIT code to their respective functions.
+    for (unsigned i = 0; i < m_calls.size(); ++i) {
+        if (m_calls[i].m_function.value())
+            linkBuffer.link(m_calls[i].m_call, m_calls[i].m_function);
+    }
+
+    // For each call that can throw, record the mapping from the machine
+    // return address back to the bytecode index, so exception handling can
+    // locate the right handler.
+    if (m_codeBlock->needsCallReturnIndices()) {
+        m_codeBlock->callReturnIndexVector().reserveCapacity(m_exceptionCheckCount);
+        for (unsigned i = 0; i < m_calls.size(); ++i) {
+            if (m_calls[i].m_handlesExceptions) {
+                unsigned returnAddressOffset = linkBuffer.returnAddressOffset(m_calls[i].m_call);
+                unsigned exceptionInfo = m_calls[i].m_codeOrigin.bytecodeIndex();
+                m_codeBlock->callReturnIndexVector().append(CallReturnOffsetToBytecodeOffset(returnAddressOffset, exceptionInfo));
+            }
+        }
+    }
+
+    // Transfer recorded property-access sites into structure stub info
+    // (locations and code-layout deltas used when repatching).
+    m_codeBlock->setNumberOfStructureStubInfos(m_propertyAccesses.size());
+    for (unsigned i = 0; i < m_propertyAccesses.size(); ++i) {
+        StructureStubInfo& info = m_codeBlock->structureStubInfo(i);
+        info.callReturnLocation = linkBuffer.locationOf(m_propertyAccesses[i].m_functionCall);
+        info.u.unset.deltaCheckImmToCall = m_propertyAccesses[i].m_deltaCheckImmToCall;
+        info.deltaCallToStructCheck = m_propertyAccesses[i].m_deltaCallToStructCheck;
+        info.u.unset.deltaCallToLoadOrStore = m_propertyAccesses[i].m_deltaCallToLoadOrStore;
+        info.deltaCallToSlowCase = m_propertyAccesses[i].m_deltaCallToSlowCase;
+        info.deltaCallToDone = m_propertyAccesses[i].m_deltaCallToDone;
+        info.baseGPR = m_propertyAccesses[i].m_baseGPR;
+        info.valueTagGPR = m_propertyAccesses[i].m_valueTagGPR;
+        info.valueGPR = m_propertyAccesses[i].m_valueGPR;
+        info.scratchGPR = m_propertyAccesses[i].m_scratchGPR;
+    }
+
+    // Transfer recorded JS call sites into call link info for lazy linking.
+    m_codeBlock->setNumberOfCallLinkInfos(m_jsCalls.size());
+    for (unsigned i = 0; i < m_jsCalls.size(); ++i) {
+        CallLinkInfo& info = m_codeBlock->callLinkInfo(i);
+        info.isCall = m_jsCalls[i].m_isCall;
+        info.isDFG = true;
+        info.callReturnLocation = CodeLocationLabel(linkBuffer.locationOf(m_jsCalls[i].m_slowCall));
+        info.hotPathBegin = linkBuffer.locationOf(m_jsCalls[i].m_targetToCheck);
+        info.hotPathOther = linkBuffer.locationOfNearCall(m_jsCalls[i].m_fastCall);
+    }
+
+    // Transfer recorded method-check sites into method call link info.
+    m_codeBlock->addMethodCallLinkInfos(m_methodGets.size());
+    for (unsigned i = 0; i < m_methodGets.size(); ++i) {
+        MethodCallLinkInfo& info = m_codeBlock->methodCallLinkInfo(i);
+        info.cachedStructure.setLocation(linkBuffer.locationOf(m_methodGets[i].m_structToCompare));
+        info.cachedPrototypeStructure.setLocation(linkBuffer.locationOf(m_methodGets[i].m_protoStructToCompare));
+        info.cachedFunction.setLocation(linkBuffer.locationOf(m_methodGets[i].m_putFunction));
+        info.cachedPrototype.setLocation(linkBuffer.locationOf(m_methodGets[i].m_protoObj));
+        info.callReturnLocation = linkBuffer.locationOf(m_methodGets[i].m_slowCall);
+    }
+}
+
+// Compiles program (global) code: prologue, body, then link and package the
+// result as DFG JITCode. Unlike compileFunction(), no arity check is needed.
+void JITCompiler::compile(JITCode& entry)
+{
+    // Preserve the return address to the callframe.
+    compileEntry();
+    // Generate the body of the program.
+    compileBody();
+    // Link
+    LinkBuffer linkBuffer(*m_globalData, this);
+    link(linkBuffer);
+    entry = JITCode(linkBuffer.finalizeCode(), JITCode::DFGJIT);
+}
+
+// Compiles function code. Produces two entry points: |entry| (fast entry, no
+// arity check) and |entryWithArityCheck| (entered when the caller could not
+// statically prove the argument count). Also emits the slow path for the
+// register-file overflow check.
+void JITCompiler::compileFunction(JITCode& entry, MacroAssemblerCodePtr& entryWithArityCheck)
+{
+    compileEntry();
+
+    // === Function header code generation ===
+    // This is the main entry point, without performing an arity check.
+    // If we needed to perform an arity check we will already have moved the return address,
+    // so enter after this.
+    Label fromArityCheck(this);
+    // Setup a pointer to the codeblock in the CallFrameHeader.
+    emitPutImmediateToCallFrameHeader(m_codeBlock, RegisterFile::CodeBlock);
+    // Plant a check that sufficient space is available in the RegisterFile.
+    // FIXME: https://bugs.webkit.org/show_bug.cgi?id=56291
+    addPtr(Imm32(m_codeBlock->m_numCalleeRegisters * sizeof(Register)), GPRInfo::callFrameRegister, GPRInfo::regT1);
+    Jump registerFileCheck = branchPtr(Below, AbsoluteAddress(m_globalData->interpreter->registerFile().addressOfEnd()), GPRInfo::regT1);
+    // Return here after register file check.
+    Label fromRegisterFileCheck = label();
+
+
+    // === Function body code generation ===
+    compileBody();
+
+    // === Function footer code generation ===
+    //
+    // Generate code to perform the slow register file check (if the fast one in
+    // the function header fails), and generate the entry point with arity check.
+    //
+    // Generate the register file check; if the fast check in the function head fails,
+    // we need to call out to a helper function to check whether more space is available.
+    // FIXME: change this from a cti call to a DFG style operation (normal C calling conventions).
+    registerFileCheck.link(this);
+    move(stackPointerRegister, GPRInfo::argumentGPR0);
+    poke(GPRInfo::callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
+    Call callRegisterFileCheck = call();
+    jump(fromRegisterFileCheck);
+
+    // The fast entry point into a function does not check the correct number of arguments
+    // have been passed to the call (we only use the fast entry point where we can statically
+    // determine the correct number of arguments have been passed, or have already checked).
+    // In cases where an arity check is necessary, we enter here.
+    // FIXME: change this from a cti call to a DFG style operation (normal C calling conventions).
+    Label arityCheck = label();
+    preserveReturnAddressAfterCall(GPRInfo::regT2);
+    emitPutToCallFrameHeader(GPRInfo::regT2, RegisterFile::ReturnPC);
+    // NOTE(review): assumes regT1 holds the incoming argument count at this
+    // point — confirm against the JS calling convention.
+    branch32(Equal, GPRInfo::regT1, Imm32(m_codeBlock->m_numParameters)).linkTo(fromArityCheck, this);
+    move(stackPointerRegister, GPRInfo::argumentGPR0);
+    poke(GPRInfo::callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
+    Call callArityCheck = call();
+    move(GPRInfo::regT0, GPRInfo::callFrameRegister);
+    jump(fromArityCheck);
+
+
+    // === Link ===
+    LinkBuffer linkBuffer(*m_globalData, this);
+    link(linkBuffer);
+
+    // FIXME: switch the register file check & arity check over to DFGOpertaion style calls, not JIT stubs.
+    linkBuffer.link(callRegisterFileCheck, FunctionPtr(cti_register_file_check));
+    linkBuffer.link(callArityCheck, m_codeBlock->m_isConstructor ? FunctionPtr(cti_op_construct_arityCheck) : FunctionPtr(cti_op_call_arityCheck));
+
+    entryWithArityCheck = linkBuffer.locationOf(arityCheck);
+    entry = JITCode(linkBuffer.finalizeCode(), JITCode::DFGJIT);
+}
+
+#if ENABLE(DFG_JIT_ASSERT)
+// Intentionally a no-op under JSVALUE32_64: an int32 lives unboxed in a
+// payload register, so there is no encoding to validate here (presumably the
+// JSVALUE64 build checks the boxed representation instead).
+void JITCompiler::jitAssertIsInt32(GPRReg gpr)
+{
+    UNUSED_PARAM(gpr);
+}
+
+// Debug assertion: |gpr| holds a JSValue tag word; trap unless it is the
+// Int32 tag.
+void JITCompiler::jitAssertIsJSInt32(GPRReg gpr)
+{
+    Jump tagIsInt32 = branch32(Equal, gpr, TrustedImm32(JSValue::Int32Tag));
+    breakpoint(); // Reached only if the tag check above fails.
+    tagIsInt32.link(this);
+}
+
+// Debug assertion: |gpr| holds a JSValue tag word; trap unless it denotes a
+// number — either the Int32 tag, or a tag below LowestTag (a double).
+void JITCompiler::jitAssertIsJSNumber(GPRReg gpr)
+{
+    Jump tagIsInt32 = branch32(Equal, gpr, TrustedImm32(JSValue::Int32Tag));
+    Jump tagIsDouble = branch32(Below, gpr, TrustedImm32(JSValue::LowestTag));
+    breakpoint(); // Reached only if neither number check passes.
+    tagIsInt32.link(this);
+    tagIsDouble.link(this);
+}
+
+// Debug assertion: |gpr| holds a JSValue tag word; trap unless it is below
+// LowestTag, i.e. part of a double encoding.
+void JITCompiler::jitAssertIsJSDouble(GPRReg gpr)
+{
+    Jump tagIsDouble = branch32(Below, gpr, TrustedImm32(JSValue::LowestTag));
+    breakpoint(); // Reached only if the tag is not a double tag.
+    tagIsDouble.link(this);
+}
+
+// Debug assertion: |gpr| holds a JSValue tag word; trap unless it is the
+// cell tag.
+void JITCompiler::jitAssertIsCell(GPRReg gpr)
+{
+    Jump tagIsCell = branch32(Equal, gpr, TrustedImm32(JSValue::CellTag));
+    breakpoint(); // Reached only if the tag check above fails.
+    tagIsCell.link(this);
+}
+#endif
+
+#if ENABLE(SAMPLING_COUNTERS) && CPU(X86) // Or any other little-endian 32-bit platform!
+// Increments a 64-bit sampling counter on a 32-bit little-endian target:
+// add |increment| to the low word, then propagate the carry into the high
+// word. Fix: the parameter type was misspelled "MacroAsembler", which names
+// no class and would not compile when SAMPLING_COUNTERS is enabled.
+void JITCompiler::emitCount(MacroAssembler& jit, AbstractSamplingCounter& counter, uint32_t increment)
+{
+    // The high word sits one 32-bit word past the counter address (little endian).
+    intptr_t hiWord = reinterpret_cast<intptr_t>(counter.addressOfCounter()) + sizeof(int32_t);
+    jit.add32(TrustedImm32(increment), AbsoluteAddress(counter.addressOfCounter()));
+    jit.addWithCarry32(TrustedImm32(0), AbsoluteAddress(reinterpret_cast<void*>(hiWord)));
+}
+#endif
+
+#if ENABLE(SAMPLING_FLAGS)
+// Sets sampling flag number |flag| (1..32) by OR-ing its bit into the global
+// flags word.
+void JITCompiler::setSamplingFlag(int32_t flag)
+{
+    ASSERT(flag >= 1);
+    ASSERT(flag <= 32);
+    uint32_t flagBit = 1u << (flag - 1);
+    or32(TrustedImm32(flagBit), AbsoluteAddress(SamplingFlags::addressOfFlags()));
+}
+
+// Clears sampling flag number |flag| (1..32) by AND-ing out its bit from the
+// global flags word.
+void JITCompiler::clearSamplingFlag(int32_t flag)
+{
+    ASSERT(flag >= 1);
+    ASSERT(flag <= 32);
+    uint32_t flagBit = 1u << (flag - 1);
+    and32(TrustedImm32(~flagBit), AbsoluteAddress(SamplingFlags::addressOfFlags()));
+}
+#endif
+
+} } // namespace JSC::DFG
+
+#endif
+#endif
--- /dev/null
+
+#ifndef DFGJITCompilerInlineMethods_h
+#define DFGJITCompilerInlineMethods_h
+
+
+#if ENABLE(DFG_JIT)
+#if USE(JSVALUE32_64)
+
+namespace JSC { namespace DFG {
+
+// Materialize the tag word of node |index| into |tag|. Constants have no
+// register-file slot, so their tag is synthesized as an immediate.
+inline void JITCompiler::emitLoadTag(NodeIndex index, GPRReg tag)
+{
+    if (isConstant(index)) {
+        move(Imm32(valueOfJSConstant(index).tag()), tag);
+    } else {
+        VirtualRegister vreg = graph()[index].virtualRegister();
+        load32(tagFor(vreg), tag);
+    }
+}
+
+// Materialize the payload word of node |index| into |payload|. Constants have
+// no register-file slot, so their payload is synthesized as an immediate.
+inline void JITCompiler::emitLoadPayload(NodeIndex index, GPRReg payload)
+{
+    if (isConstant(index)) {
+        move(Imm32(valueOfJSConstant(index).payload()), payload);
+    } else {
+        VirtualRegister vreg = graph()[index].virtualRegister();
+        load32(payloadFor(vreg), payload);
+    }
+}
+
+// Materialize an immediate JSValue into a tag/payload register pair
+// (payload is written first, then the tag).
+inline void JITCompiler::emitLoad(const JSValue& v, GPRReg tag, GPRReg payload)
+{
+    move(Imm32(v.payload()), payload);
+    move(Imm32(v.tag()), tag);
+}
+
+// Load the full JSValue of node |index| into a tag/payload register pair.
+// The two destination registers must be distinct. The payload is loaded
+// first, then the tag.
+// Fix: removed a redundant trailing "return;" in this void function.
+inline void JITCompiler::emitLoad(NodeIndex index, GPRReg tag, GPRReg payload)
+{
+    ASSERT(tag != payload);
+    emitLoadPayload(index, payload);
+    emitLoadTag(index, tag);
+}
+
+// Load two full values into two tag/payload register pairs. index2 is loaded
+// before index1. NOTE(review): presumably the order matters when the
+// destination pairs overlap with source slots — confirm callers rely on it.
+inline void JITCompiler::emitLoad2(NodeIndex index1, GPRReg tag1, GPRReg payload1, NodeIndex index2, GPRReg tag2, GPRReg payload2)
+{
+    emitLoad(index2, tag2, payload2);
+    emitLoad(index1, tag1, payload1);
+}
+
+// Load the double value of node |index| into FPR |value|. Double constants
+// come from the constant pool; everything else is read from the node's
+// 64-bit register-file slot.
+inline void JITCompiler::emitLoadDouble(NodeIndex index, FPRReg value)
+{
+    if (isConstant(index)) {
+        loadDouble(addressOfDoubleConstant(index), value);
+        return;
+    }
+    VirtualRegister vreg = graph()[index].virtualRegister();
+    loadDouble(addressFor(vreg), value);
+}
+
+// Convert the int32 payload of node |index| to a double in FPR |value|.
+inline void JITCompiler::emitLoadInt32ToDouble(NodeIndex index, FPRReg value)
+{
+    if (isConstant(index)) {
+        // Convert straight from the payload word of the in-memory constant.
+        // NOTE(review): this addresses the constant via addressOfDoubleConstant
+        // — confirm integer constants are materialized in that table as boxed
+        // JSValues so the payload offset is valid.
+        char* bytePointer = reinterpret_cast<char*>(addressOfDoubleConstant(index));
+        convertInt32ToDouble(AbsoluteAddress(bytePointer + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), value);
+    } else {
+        Node& node = graph()[index];
+        VirtualRegister virtualRegister = node.virtualRegister();
+        convertInt32ToDouble(payloadFor(virtualRegister), value);
+    }
+}
+
+// Flush a tag/payload register pair into node |index|'s register-file slot,
+// payload word first.
+inline void JITCompiler::emitStore(NodeIndex index, GPRReg tag, GPRReg payload)
+{
+    VirtualRegister vreg = graph()[index].virtualRegister();
+    store32(payload, payloadFor(vreg));
+    store32(tag, tagFor(vreg));
+}
+
+// Store an int32 payload register into node |index|'s slot. The tag write is
+// elided when |indexIsInt32| says the slot already carries the Int32 tag.
+inline void JITCompiler::emitStoreInt32(NodeIndex index, GPRReg payload, bool indexIsInt32)
+{
+    VirtualRegister vreg = graph()[index].virtualRegister();
+    store32(payload, payloadFor(vreg));
+    if (!indexIsInt32)
+        store32(TrustedImm32(JSValue::Int32Tag), tagFor(vreg));
+}
+
+// Store an immediate int32 payload into node |index|'s slot. The tag write is
+// elided when |indexIsInt32| says the slot already carries the Int32 tag.
+inline void JITCompiler::emitStoreInt32(NodeIndex index, TrustedImm32 payload, bool indexIsInt32)
+{
+    VirtualRegister vreg = graph()[index].virtualRegister();
+    store32(payload, payloadFor(vreg));
+    if (!indexIsInt32)
+        store32(TrustedImm32(JSValue::Int32Tag), tagFor(vreg));
+}
+
+// Store a cell payload register into node |index|'s slot. The tag write is
+// elided when |indexIsCell| says the slot already carries the cell tag.
+inline void JITCompiler::emitStoreCell(NodeIndex index, GPRReg payload, bool indexIsCell)
+{
+    VirtualRegister vreg = graph()[index].virtualRegister();
+    store32(payload, payloadFor(vreg));
+    if (!indexIsCell)
+        store32(TrustedImm32(JSValue::CellTag), tagFor(vreg));
+}
+
+// Store a boolean payload register into node |index|'s slot. The tag write is
+// elided when |indexIsBool| says the slot already carries the boolean tag.
+inline void JITCompiler::emitStoreBool(NodeIndex index, GPRReg payload, bool indexIsBool)
+{
+    VirtualRegister vreg = graph()[index].virtualRegister();
+    store32(payload, payloadFor(vreg));
+    if (!indexIsBool)
+        store32(TrustedImm32(JSValue::BooleanTag), tagFor(vreg));
+}
+
+// Spill FPR |value| into node |index|'s register-file slot as a raw 64-bit
+// double, covering both the payload and tag words.
+inline void JITCompiler::emitStoreDouble(NodeIndex index, FPRReg value)
+{
+    VirtualRegister vreg = graph()[index].virtualRegister();
+    storeDouble(value, addressFor(vreg));
+}
+
+// Store an immediate JSValue into node |index|'s register-file slot, payload
+// word first, then the tag word.
+inline void JITCompiler::emitStore(NodeIndex index, const JSValue constant)
+{
+    VirtualRegister vreg = graph()[index].virtualRegister();
+    store32(Imm32(constant.payload()), payloadFor(vreg));
+    store32(Imm32(constant.tag()), tagFor(vreg));
+}
+
+} } // namespace JSC::DFG
+
+#endif // USE(JSVALUE32_64)
+#endif // ENABLE_DFG_JIT
+
+#endif
// a constant index, argument, or identifier) from a NodeIndex.
struct OpInfo {
explicit OpInfo(int value) : m_value(value) { }
+#if USE(JSVALUE64)
explicit OpInfo(unsigned value) : m_value(value) { }
+#endif
explicit OpInfo(uintptr_t value) : m_value(value) { }
explicit OpInfo(void* value) : m_value(reinterpret_cast<uintptr_t>(value)) { }
uintptr_t m_value;
#include "JSGlobalData.h"
#include "Operations.h"
+#if CPU(X86_64)
#define FUNCTION_WRAPPER_WITH_RETURN_ADDRESS(function, register) \
asm( \
".globl _" STRINGIZE(function) "\n" \
"mov (%rsp), %" STRINGIZE(register) "\n" \
"jmp _" STRINGIZE(function) "WithReturnAddress" "\n" \
);
+#elif CPU(X86)
+#define FUNCTION_WRAPPER_WITH_RETURN_ADDRESS(function, register) \
+ asm( \
+ ".globl " STRINGIZE(function) "\n" \
+ STRINGIZE(function) ":" "\n" \
+ "push (%esp)\n" \
+ "jmp " STRINGIZE(function) "WithReturnAddress" "\n" \
+ );
+#endif
#define FUNCTION_WRAPPER_WITH_ARG2_RETURN_ADDRESS(function) FUNCTION_WRAPPER_WITH_RETURN_ADDRESS(function, rsi)
#define FUNCTION_WRAPPER_WITH_ARG4_RETURN_ADDRESS(function) FUNCTION_WRAPPER_WITH_RETURN_ADDRESS(function, rcx)
#define FUNCTION_WRAPPER_WITH_ARG5_RETURN_ADDRESS(function) FUNCTION_WRAPPER_WITH_RETURN_ADDRESS(function, r8)
return JSValue::encode(baseValue.get(exec, *propertyName, slot));
}
+#if CPU(X86_64)
EncodedJSValue operationGetMethodOptimizeWithReturnAddress(ExecState*, EncodedJSValue, Identifier*, ReturnAddressPtr);
FUNCTION_WRAPPER_WITH_ARG4_RETURN_ADDRESS(operationGetMethodOptimize);
EncodedJSValue operationGetMethodOptimizeWithReturnAddress(ExecState* exec, EncodedJSValue encodedBase, Identifier* propertyName, ReturnAddressPtr returnAddress)
+#elif CPU(X86)
+FUNCTION_WRAPPER_WITH_ARG4_RETURN_ADDRESS(operationGetMethodOptimize);
+EncodedJSValue operationGetMethodOptimizeWithReturnAddress(ReturnAddressPtr returnAddress, ExecState* exec, EncodedJSValue encodedBase, Identifier* propertyName)
+#endif
{
JSValue baseValue = JSValue::decode(encodedBase);
PropertySlot slot(baseValue);
return JSValue::encode(result);
}
+#if CPU(X86_64)
EncodedJSValue operationGetByIdBuildListWithReturnAddress(ExecState*, EncodedJSValue, Identifier*, ReturnAddressPtr);
FUNCTION_WRAPPER_WITH_ARG4_RETURN_ADDRESS(operationGetByIdBuildList);
EncodedJSValue operationGetByIdBuildListWithReturnAddress(ExecState* exec, EncodedJSValue encodedBase, Identifier* propertyName, ReturnAddressPtr returnAddress)
+#elif CPU(X86)
+FUNCTION_WRAPPER_WITH_ARG4_RETURN_ADDRESS(operationGetByIdBuildList);
+EncodedJSValue operationGetByIdBuildListWithReturnAddress(ReturnAddressPtr returnAddress, ExecState* exec, EncodedJSValue encodedBase, Identifier* propertyName)
+#endif
{
JSValue baseValue = JSValue::decode(encodedBase);
PropertySlot slot(baseValue);
return JSValue::encode(result);
}
+#if CPU(X86_64)
EncodedJSValue operationGetByIdProtoBuildListWithReturnAddress(ExecState*, EncodedJSValue, Identifier*, ReturnAddressPtr);
FUNCTION_WRAPPER_WITH_ARG4_RETURN_ADDRESS(operationGetByIdProtoBuildList);
EncodedJSValue operationGetByIdProtoBuildListWithReturnAddress(ExecState* exec, EncodedJSValue encodedBase, Identifier* propertyName, ReturnAddressPtr returnAddress)
+#elif CPU(X86)
+FUNCTION_WRAPPER_WITH_ARG4_RETURN_ADDRESS(operationGetByIdProtoBuildList);
+EncodedJSValue operationGetByIdProtoBuildListWithReturnAddress(ReturnAddressPtr returnAddress, ExecState* exec, EncodedJSValue encodedBase, Identifier* propertyName)
+#endif
{
JSValue baseValue = JSValue::decode(encodedBase);
PropertySlot slot(baseValue);
return JSValue::encode(result);
}
+#if CPU(X86_64)
EncodedJSValue operationGetByIdOptimizeWithReturnAddress(ExecState*, EncodedJSValue, Identifier*, ReturnAddressPtr);
FUNCTION_WRAPPER_WITH_ARG4_RETURN_ADDRESS(operationGetByIdOptimize);
EncodedJSValue operationGetByIdOptimizeWithReturnAddress(ExecState* exec, EncodedJSValue encodedBase, Identifier* propertyName, ReturnAddressPtr returnAddress)
+#elif CPU(X86)
+FUNCTION_WRAPPER_WITH_ARG4_RETURN_ADDRESS(operationGetByIdOptimize);
+EncodedJSValue operationGetByIdOptimizeWithReturnAddress(ReturnAddressPtr returnAddress, ExecState* exec, EncodedJSValue encodedBase, Identifier* propertyName)
+#endif
{
JSValue baseValue = JSValue::decode(encodedBase);
PropertySlot slot(baseValue);
JSValue::decode(encodedBase).putDirect(exec, *propertyName, JSValue::decode(encodedValue), slot);
}
-void operationPutByIdStrictOptimizeWithReturnAddress(ExecState*, EncodedJSValue encodedValue, EncodedJSValue encodedBase, Identifier* propertyName, ReturnAddressPtr);
+#if CPU(X86_64)
+void operationPutByIdStrictOptimizeWithReturnAddress(ExecState*, EncodedJSValue, EncodedJSValue, Identifier*, ReturnAddressPtr);
FUNCTION_WRAPPER_WITH_ARG5_RETURN_ADDRESS(operationPutByIdStrictOptimize);
void operationPutByIdStrictOptimizeWithReturnAddress(ExecState* exec, EncodedJSValue encodedValue, EncodedJSValue encodedBase, Identifier* propertyName, ReturnAddressPtr returnAddress)
+#elif CPU(X86)
+FUNCTION_WRAPPER_WITH_ARG5_RETURN_ADDRESS(operationPutByIdStrictOptimize);
+void operationPutByIdStrictOptimizeWithReturnAddress(ReturnAddressPtr returnAddress, ExecState* exec, EncodedJSValue encodedValue, EncodedJSValue encodedBase, Identifier* propertyName)
+#endif
{
JSValue value = JSValue::decode(encodedValue);
JSValue base = JSValue::decode(encodedBase);
stubInfo.seen = true;
}
-void operationPutByIdNonStrictOptimizeWithReturnAddress(ExecState*, EncodedJSValue encodedValue, EncodedJSValue encodedBase, Identifier* propertyName, ReturnAddressPtr);
+#if CPU(X86_64)
+void operationPutByIdNonStrictOptimizeWithReturnAddress(ExecState*, EncodedJSValue, EncodedJSValue, Identifier*, ReturnAddressPtr);
FUNCTION_WRAPPER_WITH_ARG5_RETURN_ADDRESS(operationPutByIdNonStrictOptimize);
void operationPutByIdNonStrictOptimizeWithReturnAddress(ExecState* exec, EncodedJSValue encodedValue, EncodedJSValue encodedBase, Identifier* propertyName, ReturnAddressPtr returnAddress)
+#elif CPU(X86)
+FUNCTION_WRAPPER_WITH_ARG5_RETURN_ADDRESS(operationPutByIdNonStrictOptimize);
+void operationPutByIdNonStrictOptimizeWithReturnAddress(ReturnAddressPtr returnAddress, ExecState* exec, EncodedJSValue encodedValue, EncodedJSValue encodedBase, Identifier* propertyName)
+#endif
{
JSValue value = JSValue::decode(encodedValue);
JSValue base = JSValue::decode(encodedBase);
stubInfo.seen = true;
}
-void operationPutByIdDirectStrictOptimizeWithReturnAddress(ExecState*, EncodedJSValue encodedValue, EncodedJSValue encodedBase, Identifier* propertyName, ReturnAddressPtr);
+#if CPU(X86_64)
+void operationPutByIdDirectStrictOptimizeWithReturnAddress(ExecState*, EncodedJSValue, EncodedJSValue, Identifier*, ReturnAddressPtr);
FUNCTION_WRAPPER_WITH_ARG5_RETURN_ADDRESS(operationPutByIdDirectStrictOptimize);
void operationPutByIdDirectStrictOptimizeWithReturnAddress(ExecState* exec, EncodedJSValue encodedValue, EncodedJSValue encodedBase, Identifier* propertyName, ReturnAddressPtr returnAddress)
+#elif CPU(X86)
+FUNCTION_WRAPPER_WITH_ARG5_RETURN_ADDRESS(operationPutByIdDirectStrictOptimize);
+void operationPutByIdDirectStrictOptimizeWithReturnAddress(ReturnAddressPtr returnAddress, ExecState* exec, EncodedJSValue encodedValue, EncodedJSValue encodedBase, Identifier* propertyName)
+#endif
{
JSValue value = JSValue::decode(encodedValue);
JSValue base = JSValue::decode(encodedBase);
stubInfo.seen = true;
}
-void operationPutByIdDirectNonStrictOptimizeWithReturnAddress(ExecState*, EncodedJSValue encodedValue, EncodedJSValue encodedBase, Identifier* propertyName, ReturnAddressPtr);
+#if CPU(X86_64)
+void operationPutByIdDirectNonStrictOptimizeWithReturnAddress(ExecState*, EncodedJSValue, EncodedJSValue, Identifier*, ReturnAddressPtr);
FUNCTION_WRAPPER_WITH_ARG5_RETURN_ADDRESS(operationPutByIdDirectNonStrictOptimize);
void operationPutByIdDirectNonStrictOptimizeWithReturnAddress(ExecState* exec, EncodedJSValue encodedValue, EncodedJSValue encodedBase, Identifier* propertyName, ReturnAddressPtr returnAddress)
+#elif CPU(X86)
+FUNCTION_WRAPPER_WITH_ARG5_RETURN_ADDRESS(operationPutByIdDirectNonStrictOptimize);
+void operationPutByIdDirectNonStrictOptimizeWithReturnAddress(ReturnAddressPtr returnAddress, ExecState* exec, EncodedJSValue encodedValue, EncodedJSValue encodedBase, Identifier* propertyName)
+#endif
{
JSValue value = JSValue::decode(encodedValue);
JSValue base = JSValue::decode(encodedBase);
EncodedJSValue getHostCallReturnValue();
EncodedJSValue getHostCallReturnValueWithExecState(ExecState*);
+#if CPU(X86_64)
asm (
".globl _" STRINGIZE(getHostCallReturnValue) "\n"
"_" STRINGIZE(getHostCallReturnValue) ":" "\n"
"mov %r13, %rdi\n"
"jmp _" STRINGIZE(getHostCallReturnValueWithExecState) "\n"
);
+#elif CPU(X86)
+asm (
+".globl " STRINGIZE(getHostCallReturnValue) "\n"
+STRINGIZE(getHostCallReturnValue) ":" "\n"
+ "mov -40(%edi), %edi\n"
+ "push %edi\n"
+ "jmp " STRINGIZE(getHostCallReturnValueWithExecState) "\n"
+);
+#endif
EncodedJSValue getHostCallReturnValueWithExecState(ExecState* exec)
{
return codePtr.executableAddress();
}
+#if CPU(X86_64)
void* operationLinkCallWithReturnAddress(ExecState*, ReturnAddressPtr);
FUNCTION_WRAPPER_WITH_ARG2_RETURN_ADDRESS(operationLinkCall);
void* operationLinkCallWithReturnAddress(ExecState* execCallee, ReturnAddressPtr returnAddress)
+#elif CPU(X86)
+FUNCTION_WRAPPER_WITH_ARG2_RETURN_ADDRESS(operationLinkCall);
+void* operationLinkCallWithReturnAddress(ReturnAddressPtr returnAddress, ExecState* execCallee)
+#endif
{
return linkFor(execCallee, returnAddress, CodeForCall);
}
+#if CPU(X86_64)
void* operationLinkConstructWithReturnAddress(ExecState*, ReturnAddressPtr);
FUNCTION_WRAPPER_WITH_ARG2_RETURN_ADDRESS(operationLinkConstruct);
void* operationLinkConstructWithReturnAddress(ExecState* execCallee, ReturnAddressPtr returnAddress)
+#elif CPU(X86)
+FUNCTION_WRAPPER_WITH_ARG2_RETURN_ADDRESS(operationLinkConstruct);
+void* operationLinkConstructWithReturnAddress(ReturnAddressPtr returnAddress, ExecState* execCallee)
+#endif
{
return linkFor(execCallee, returnAddress, CodeForConstruct);
}
MacroAssembler stubJit;
GPRReg baseGPR = static_cast<GPRReg>(stubInfo.baseGPR);
+#if USE(JSVALUE32_64)
+ GPRReg resultTagGPR = static_cast<GPRReg>(stubInfo.valueTagGPR);
+#endif
GPRReg resultGPR = static_cast<GPRReg>(stubInfo.valueGPR);
GPRReg scratchGPR = static_cast<GPRReg>(stubInfo.scratchGPR);
bool needToRestoreScratch = false;
}
stubJit.loadPtr(protoObject->addressOfPropertyStorage(), resultGPR);
+#if USE(JSVALUE64)
stubJit.loadPtr(MacroAssembler::Address(resultGPR, offset * sizeof(WriteBarrier<Unknown>)), resultGPR);
+#elif USE(JSVALUE32_64)
+ stubJit.load32(MacroAssembler::Address(resultGPR, offset * sizeof(WriteBarrier<Unknown>) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), resultTagGPR);
+ stubJit.load32(MacroAssembler::Address(resultGPR, offset * sizeof(WriteBarrier<Unknown>) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), resultGPR);
+#endif
MacroAssembler::Jump success, fail;
if (isJSArray(globalData, baseValue) && propertyName == exec->propertyNames().length) {
GPRReg baseGPR = static_cast<GPRReg>(stubInfo.baseGPR);
+#if USE(JSVALUE32_64)
+ GPRReg resultTagGPR = static_cast<GPRReg>(stubInfo.valueTagGPR);
+#endif
GPRReg resultGPR = static_cast<GPRReg>(stubInfo.valueGPR);
GPRReg scratchGPR = static_cast<GPRReg>(stubInfo.scratchGPR);
bool needToRestoreScratch = false;
stubJit.loadPtr(MacroAssembler::Address(baseGPR, JSArray::storageOffset()), scratchGPR);
stubJit.load32(MacroAssembler::Address(scratchGPR, OBJECT_OFFSETOF(ArrayStorage, m_length)), scratchGPR);
failureCases.append(stubJit.branch32(MacroAssembler::LessThan, scratchGPR, MacroAssembler::TrustedImm32(0)));
-
+
+#if USE(JSVALUE64)
stubJit.orPtr(GPRInfo::tagTypeNumberRegister, scratchGPR, resultGPR);
+#elif USE(JSVALUE32_64)
+ stubJit.move(scratchGPR, resultGPR);
+ stubJit.move(JITCompiler::TrustedImm32(0xffffffff), resultTagGPR); // JSValue::Int32Tag
+#endif
MacroAssembler::Jump success, fail;
stubInfo.u.getByIdSelfList.listSize++;
GPRReg baseGPR = static_cast<GPRReg>(stubInfo.baseGPR);
+#if USE(JSVALUE32_64)
+ GPRReg resultTagGPR = static_cast<GPRReg>(stubInfo.valueTagGPR);
+#endif
GPRReg resultGPR = static_cast<GPRReg>(stubInfo.valueGPR);
MacroAssembler stubJit;
MacroAssembler::Jump wrongStruct = stubJit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(baseGPR, JSCell::structureOffset()), MacroAssembler::TrustedImmPtr(structure));
stubJit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::offsetOfPropertyStorage()), resultGPR);
+#if USE(JSVALUE64)
stubJit.loadPtr(MacroAssembler::Address(resultGPR, slot.cachedOffset() * sizeof(JSValue)), resultGPR);
+#elif USE(JSVALUE32_64)
+ stubJit.load32(MacroAssembler::Address(resultGPR, slot.cachedOffset() * sizeof(JSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), resultTagGPR);
+ stubJit.load32(MacroAssembler::Address(resultGPR, slot.cachedOffset() * sizeof(JSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), resultGPR);
+#endif
MacroAssembler::Jump success = stubJit.jump();
StructureChain* prototypeChain = structure->prototypeChain(exec);
GPRReg baseGPR = static_cast<GPRReg>(stubInfo.baseGPR);
+#if USE(JSVALUE32_64)
+ GPRReg valueTagGPR = static_cast<GPRReg>(stubInfo.valueTagGPR);
+#endif
GPRReg valueGPR = static_cast<GPRReg>(stubInfo.valueGPR);
GPRReg scratchGPR = static_cast<GPRReg>(stubInfo.scratchGPR);
bool needToRestoreScratch = false;
#endif
stubJit.storePtr(MacroAssembler::TrustedImmPtr(structure), MacroAssembler::Address(baseGPR, JSCell::structureOffset()));
+#if USE(JSVALUE64)
if (structure->isUsingInlineStorage())
stubJit.storePtr(valueGPR, MacroAssembler::Address(baseGPR, JSObject::offsetOfInlineStorage() + slot.cachedOffset() * sizeof(JSValue)));
else {
stubJit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::offsetOfPropertyStorage()), scratchGPR);
stubJit.storePtr(valueGPR, MacroAssembler::Address(scratchGPR, slot.cachedOffset() * sizeof(JSValue)));
}
+#elif USE(JSVALUE32_64)
+ if (structure->isUsingInlineStorage()) {
+ stubJit.store32(valueGPR, MacroAssembler::Address(baseGPR, JSObject::offsetOfInlineStorage() + slot.cachedOffset() * sizeof(JSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));
+ stubJit.store32(valueTagGPR, MacroAssembler::Address(baseGPR, JSObject::offsetOfInlineStorage() + slot.cachedOffset() * sizeof(JSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)));
+ } else {
+ stubJit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::offsetOfPropertyStorage()), scratchGPR);
+ stubJit.store32(valueGPR, MacroAssembler::Address(scratchGPR, slot.cachedOffset() * sizeof(JSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));
+ stubJit.store32(valueTagGPR, MacroAssembler::Address(scratchGPR, slot.cachedOffset() * sizeof(JSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)));
+ }
+#endif
MacroAssembler::Jump success;
MacroAssembler::Jump failure;
#include "DFGSpeculativeJIT.h"
#if ENABLE(DFG_JIT)
+#if USE(JSVALUE64)
namespace JSC { namespace DFG {
} } // namespace JSC::DFG
#endif
+#endif
// It's in a register.
InGPR,
UnboxedInt32InGPR,
+#if USE(JSVALUE32_64)
+ InPair,
+#endif
InFPR,
// It's in the register file, but at a different location.
DisplacedInRegisterFile,
static ValueRecovery inGPR(GPRReg gpr, DataFormat dataFormat)
{
ASSERT(dataFormat != DataFormatNone);
+#if USE(JSVALUE32_64)
+ ASSERT(dataFormat == DataFormatInteger || dataFormat == DataFormatCell);
+#endif
ValueRecovery result;
if (dataFormat == DataFormatInteger)
result.m_technique = UnboxedInt32InGPR;
return result;
}
+#if USE(JSVALUE32_64)
+ static ValueRecovery inPair(GPRReg tagGPR, GPRReg payloadGPR)
+ {
+ ValueRecovery result;
+ result.m_technique = InPair;
+ result.m_source.pair.tagGPR = tagGPR;
+ result.m_source.pair.payloadGPR = payloadGPR;
+ return result;
+ }
+#endif
+
static ValueRecovery inFPR(FPRReg fpr)
{
ValueRecovery result;
return m_source.gpr;
}
+#if USE(JSVALUE32_64)
+ GPRReg tagGPR() const
+ {
+ ASSERT(m_technique == InPair);
+ return m_source.pair.tagGPR;
+ }
+
+ GPRReg payloadGPR() const
+ {
+ ASSERT(m_technique == InPair);
+ return m_source.pair.payloadGPR;
+ }
+#endif
+
FPRReg fpr() const
{
ASSERT(m_technique == InFPR);
union {
GPRReg gpr;
FPRReg fpr;
+#if USE(JSVALUE32_64)
+ struct {
+ GPRReg tagGPR;
+ GPRReg payloadGPR;
+ } pair;
+#endif
VirtualRegister virtualReg;
EncodedJSValue constant;
} m_source;
void compilePeepHoleDoubleBranch(Node&, NodeIndex branchNodeIndex, JITCompiler::DoubleCondition);
void compilePeepHoleObjectEquality(Node&, NodeIndex branchNodeIndex, void* vptr);
void compileObjectEquality(Node&, void* vptr);
-
+
+#if USE(JSVALUE64)
JITCompiler::Jump convertToDouble(GPRReg value, FPRReg result, GPRReg tmp);
+#elif USE(JSVALUE32_64)
+ JITCompiler::Jump convertToDouble(JSValueOperand&, FPRReg result);
+#endif
// Add a speculation check without additional recovery.
void speculationCheck(MacroAssembler::Jump jumpToFail)
--- /dev/null
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "DFGSpeculativeJIT.h"
+
+#if ENABLE(DFG_JIT)
+#if USE(JSVALUE32_64)
+#include "DFGJITCompilerInlineMethods.h"
+
+namespace JSC { namespace DFG {
+
+// Fill the value for nodeIndex into a GPR as an int32, speculating that it is
+// an integer; non-integer formats terminate speculative execution. Returns the
+// payload GPR and reports the resulting format via 'returnFormat'.
+// NOTE(review): the 'strict' template parameter currently has no effect — both
+// instantiations always produce DataFormatInteger (see the FIXME below about
+// assuming strict integers); confirm intended behavior before relying on it.
+template<bool strict>
+GPRReg SpeculativeJIT::fillSpeculateIntInternal(NodeIndex nodeIndex, DataFormat& returnFormat)
+{
+#if ENABLE(DFG_DEBUG_VERBOSE)
+ fprintf(stderr, "SpecInt@%d ", nodeIndex);
+#endif
+ Node& node = m_jit.graph()[nodeIndex];
+ VirtualRegister virtualRegister = node.virtualRegister();
+ GenerationInfo& info = m_generationInfo[virtualRegister];
+
+ switch (info.registerFormat()) {
+ case DataFormatNone: {
+ // Value is not live in a register: materialize a constant or reload
+ // the spilled payload from the register file.
+ GPRReg gpr = allocate();
+
+ if (node.hasConstant()) {
+ m_gprs.retain(gpr, virtualRegister, SpillOrderConstant);
+ if (isInt32Constant(nodeIndex)) {
+ m_jit.move(MacroAssembler::Imm32(valueOfInt32Constant(nodeIndex)), gpr);
+ info.fillInteger(gpr);
+ returnFormat = DataFormatInteger;
+ return gpr;
+ }
+ // Non-int32 constant: the speculation has failed statically.
+ terminateSpeculativeExecution();
+ returnFormat = DataFormatInteger;
+ return allocate();
+ }
+
+ ASSERT(info.spillFormat() & DataFormatJS);
+
+ m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
+
+ // If we know this was spilled as an integer we can fill without checking.
+ // FIXME: Currently we always assume strict integers.
+ m_jit.load32(JITCompiler::payloadFor(virtualRegister), gpr);
+ info.fillInteger(gpr);
+ returnFormat = DataFormatInteger;
+ return gpr;
+ }
+
+ case DataFormatJSInteger:
+ case DataFormatJS: {
+ // Check the value is an integer.
+ GPRReg tagGPR = info.tagGPR();
+ GPRReg payloadGPR = info.payloadGPR();
+ m_gprs.lock(tagGPR);
+ m_gprs.lock(payloadGPR);
+ if (info.registerFormat() != DataFormatJSInteger)
+ speculationCheck(m_jit.branch32(MacroAssembler::NotEqual, tagGPR, TrustedImm32(JSValue::Int32Tag)));
+ // The tag register is no longer needed once the check has passed.
+ m_gprs.unlock(tagGPR);
+ m_gprs.release(tagGPR);
+ info.fillInteger(payloadGPR);
+ // If !strict we're done, return.
+ returnFormat = DataFormatInteger;
+ return payloadGPR;
+ }
+
+ case DataFormatInteger: {
+ // Already unboxed in a GPR; just lock it for the caller.
+ GPRReg gpr = info.gpr();
+ m_gprs.lock(gpr);
+ returnFormat = DataFormatInteger;
+ return gpr;
+ }
+
+ case DataFormatDouble:
+ case DataFormatCell:
+ case DataFormatBoolean:
+ case DataFormatJSDouble:
+ case DataFormatJSCell:
+ case DataFormatJSBoolean: {
+ // Definitely not an integer: bail out of speculative code.
+ terminateSpeculativeExecution();
+ returnFormat = DataFormatInteger;
+ return allocate();
+ }
+
+ case DataFormatStorage:
+ ASSERT_NOT_REACHED();
+ }
+
+ ASSERT_NOT_REACHED();
+ return InvalidGPRReg;
+}
+
+#ifndef NDEBUG
+// Debug-only: print this value source as its node index.
+void ValueSource::dump(FILE* out) const
+{
+ fprintf(out, "Node(%d)", m_nodeIndex);
+}
+
+// Debug-only: print a compact description of how this value can be recovered
+// at OSR exit (register, register pair, FPR, register-file slot, or constant).
+void ValueRecovery::dump(FILE* out) const
+{
+ switch (technique()) {
+ case AlreadyInRegisterFile:
+ fprintf(out, "-");
+ break;
+ case InGPR:
+ fprintf(out, "%%%s", GPRInfo::debugName(gpr()));
+ break;
+ case UnboxedInt32InGPR:
+ fprintf(out, "int32(%%%s)", GPRInfo::debugName(gpr()));
+ break;
+ case InFPR:
+ fprintf(out, "%%%s", FPRInfo::debugName(fpr()));
+ break;
+#if USE(JSVALUE32_64)
+ // JSVALUE32_64 only: value split across a tag/payload register pair.
+ case InPair:
+ fprintf(out, "pair(%%%s, %%%s)", GPRInfo::debugName(tagGPR()), GPRInfo::debugName(payloadGPR()));
+ break;
+#endif
+ case DisplacedInRegisterFile:
+ fprintf(out, "*%d", virtualRegister());
+ break;
+ case Constant:
+ fprintf(out, "[%s]", constant().description());
+ break;
+ case DontKnow:
+ fprintf(out, "!");
+ break;
+ default:
+ fprintf(out, "?%d", technique());
+ break;
+ }
+}
+#endif
+
+// Record everything needed to exit speculative code back to the baseline JIT:
+// the failing branch, the node/bytecode position, the index of any additional
+// recovery, and a ValueRecovery for every argument and local live at this
+// point (computed eagerly from the current register-allocation state).
+OSRExit::OSRExit(MacroAssembler::Jump check, SpeculativeJIT* jit, unsigned recoveryIndex)
+ : m_check(check)
+ , m_nodeIndex(jit->m_compileIndex)
+ , m_bytecodeIndex(jit->m_bytecodeIndexForOSR)
+ , m_recoveryIndex(recoveryIndex)
+ , m_arguments(jit->m_arguments.size())
+ , m_variables(jit->m_variables.size())
+ , m_lastSetOperand(jit->m_lastSetOperand)
+{
+ ASSERT(m_bytecodeIndex != std::numeric_limits<uint32_t>::max());
+ for (unsigned argument = 0; argument < m_arguments.size(); ++argument)
+ m_arguments[argument] = jit->computeValueRecoveryFor(jit->m_arguments[argument]);
+ for (unsigned variable = 0; variable < m_variables.size(); ++variable)
+ m_variables[variable] = jit->computeValueRecoveryFor(jit->m_variables[variable]);
+}
+
+#ifndef NDEBUG
+// Debug-only: print the recoveries for all arguments, then " : ", then all
+// local variables.
+void OSRExit::dump(FILE* out) const
+{
+ for (unsigned argument = 0; argument < m_arguments.size(); ++argument)
+ m_arguments[argument].dump(out);
+ fprintf(out, " : ");
+ for (unsigned variable = 0; variable < m_variables.size(); ++variable)
+ m_variables[variable].dump(out);
+}
+#endif
+
+// Non-strict integer fill: thin wrapper around fillSpeculateIntInternal<false>.
+GPRReg SpeculativeJIT::fillSpeculateInt(NodeIndex nodeIndex, DataFormat& returnFormat)
+{
+ return fillSpeculateIntInternal<false>(nodeIndex, returnFormat);
+}
+
+// Strict integer fill: must always yield an unboxed DataFormatInteger
+// (asserted), so no DataFormat out-parameter is exposed to the caller.
+GPRReg SpeculativeJIT::fillSpeculateIntStrict(NodeIndex nodeIndex)
+{
+ DataFormat mustBeDataFormatInteger;
+ GPRReg result = fillSpeculateIntInternal<true>(nodeIndex, mustBeDataFormatInteger);
+ ASSERT(mustBeDataFormatInteger == DataFormatInteger);
+ return result;
+}
+
+// Fill the value for nodeIndex into an FPR as a double, speculating that it is
+// numeric. Integers are converted with convertInt32ToDouble; boxed doubles are
+// unboxed after a tag-range speculation check; non-numeric formats terminate
+// speculative execution.
+FPRReg SpeculativeJIT::fillSpeculateDouble(NodeIndex nodeIndex)
+{
+#if ENABLE(DFG_DEBUG_VERBOSE)
+ fprintf(stderr, "SpecDouble@%d ", nodeIndex);
+#endif
+ Node& node = m_jit.graph()[nodeIndex];
+ VirtualRegister virtualRegister = node.virtualRegister();
+ GenerationInfo& info = m_generationInfo[virtualRegister];
+
+ if (info.registerFormat() == DataFormatNone) {
+ // Not live in registers: materialize a constant, or reload from the
+ // register file, then fall through to the switch below.
+
+ if (node.hasConstant()) {
+ if (isInt32Constant(nodeIndex)) {
+ // Fill as an integer; the switch below converts it to double.
+ GPRReg gpr = allocate();
+ m_jit.move(MacroAssembler::Imm32(valueOfInt32Constant(nodeIndex)), gpr);
+ m_gprs.retain(gpr, virtualRegister, SpillOrderConstant);
+ info.fillInteger(gpr);
+ unlock(gpr);
+ } else if (isNumberConstant(nodeIndex)) {
+ FPRReg fpr = fprAllocate();
+ m_jit.loadDouble(addressOfDoubleConstant(nodeIndex), fpr);
+ m_fprs.retain(fpr, virtualRegister, SpillOrderDouble);
+ info.fillDouble(fpr);
+ return fpr;
+ } else {
+ // Non-numeric constant: speculation has failed statically.
+ terminateSpeculativeExecution();
+ return fprAllocate();
+ }
+ } else {
+ DataFormat spillFormat = info.spillFormat();
+ ASSERT(spillFormat & DataFormatJS);
+ if (spillFormat == DataFormatJSDouble) {
+ // Known-double spill: reload directly as a double.
+ FPRReg fpr = fprAllocate();
+ m_jit.loadDouble(JITCompiler::addressFor(virtualRegister), fpr);
+ m_fprs.retain(fpr, virtualRegister, SpillOrderSpilled);
+ info.fillDouble(fpr);
+ return fpr;
+ }
+ // Otherwise reload as a tag/payload pair and let the switch below
+ // perform the checked unboxing.
+ GPRReg tag = allocate();
+ GPRReg payload = allocate();
+ m_jit.emitLoad(nodeIndex, tag, payload);
+ m_gprs.retain(tag, virtualRegister, SpillOrderSpilled);
+ m_gprs.retain(payload, virtualRegister, SpillOrderSpilled);
+ info.fillJSValue(tag, payload, spillFormat);
+ unlock(tag);
+ unlock(payload);
+ }
+ }
+
+ switch (info.registerFormat()) {
+ case DataFormatNone:
+ case DataFormatCell:
+ case DataFormatBoolean:
+ case DataFormatStorage:
+ // Should have filled, above.
+ ASSERT_NOT_REACHED();
+
+ case DataFormatJSCell:
+ case DataFormatJS:
+ case DataFormatJSInteger:
+ case DataFormatJSBoolean: {
+ GPRReg tagGPR = info.tagGPR();
+ GPRReg payloadGPR = info.payloadGPR();
+ FPRReg fpr = fprAllocate();
+
+ m_gprs.lock(tagGPR);
+ m_gprs.lock(payloadGPR);
+
+ JITCompiler::Jump hasUnboxedDouble;
+
+ if (info.registerFormat() != DataFormatJSInteger) {
+ // Tags below LowestTag encode boxed doubles; anything at or
+ // above that is neither int32 nor double -> speculation failure.
+ JITCompiler::Jump isInteger = m_jit.branch32(MacroAssembler::NotEqual, tagGPR, TrustedImm32(JSValue::Int32Tag));
+ speculationCheck(m_jit.branch32(MacroAssembler::AboveOrEqual, tagGPR, TrustedImm32(JSValue::LowestTag)));
+ unboxDouble(tagGPR, payloadGPR, fpr, virtualRegister);
+ hasUnboxedDouble = m_jit.jump();
+ isInteger.link(&m_jit);
+ }
+
+ m_jit.convertInt32ToDouble(payloadGPR, fpr);
+
+ if (info.registerFormat() != DataFormatJSInteger)
+ hasUnboxedDouble.link(&m_jit);
+
+ // The GPR pair is consumed; the value now lives in the FPR.
+ m_gprs.release(tagGPR);
+ m_gprs.release(payloadGPR);
+ m_gprs.unlock(tagGPR);
+ m_gprs.unlock(payloadGPR);
+ m_fprs.retain(fpr, virtualRegister, SpillOrderDouble);
+ info.fillDouble(fpr);
+ info.killSpilled();
+ return fpr;
+ }
+
+ case DataFormatInteger: {
+ // Unboxed int32: convert into a fresh (caller-owned) FPR.
+ FPRReg fpr = fprAllocate();
+ GPRReg gpr = info.gpr();
+ m_gprs.lock(gpr);
+ m_jit.convertInt32ToDouble(gpr, fpr);
+ m_gprs.unlock(gpr);
+ return fpr;
+ }
+
+ case DataFormatJSDouble:
+ case DataFormatDouble: {
+ FPRReg fpr = info.fpr();
+ m_fprs.lock(fpr);
+ return fpr;
+ }
+ }
+
+ ASSERT_NOT_REACHED();
+ return InvalidFPRReg;
+}
+
+// Fill the value for nodeIndex as a JSCell pointer, returning the payload GPR.
+// A speculation check on the tag is emitted unless the value is already known
+// to be a cell; definitely-non-cell formats terminate speculative execution.
+GPRReg SpeculativeJIT::fillSpeculateCell(NodeIndex nodeIndex)
+{
+#if ENABLE(DFG_DEBUG_VERBOSE)
+ fprintf(stderr, "SpecCell@%d ", nodeIndex);
+#endif
+ Node& node = m_jit.graph()[nodeIndex];
+ VirtualRegister virtualRegister = node.virtualRegister();
+ GenerationInfo& info = m_generationInfo[virtualRegister];
+
+ switch (info.registerFormat()) {
+ case DataFormatNone: {
+
+ GPRReg gpr = allocate();
+ if (node.hasConstant()) {
+ JSValue jsValue = valueOfJSConstant(nodeIndex);
+ if (jsValue.isCell()) {
+ m_gprs.retain(gpr, virtualRegister, SpillOrderConstant);
+ m_jit.move(MacroAssembler::TrustedImmPtr(jsValue.asCell()), gpr);
+ info.fillCell(gpr);
+ return gpr;
+ }
+ // Non-cell constant: speculation has failed statically.
+ terminateSpeculativeExecution();
+ return gpr;
+ }
+ ASSERT(info.spillFormat() & DataFormatJS);
+ // Reload from the register file: check the tag first (unless spilled
+ // as a known cell), then overwrite gpr with the payload.
+ m_jit.load32(JITCompiler::tagFor(virtualRegister), gpr);
+ if (info.spillFormat() != DataFormatJSCell)
+ speculationCheck(m_jit.branch32(MacroAssembler::NotEqual, gpr, TrustedImm32(JSValue::CellTag)));
+ m_jit.load32(JITCompiler::payloadFor(virtualRegister), gpr);
+ m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
+ info.fillCell(gpr);
+ return gpr;
+ }
+
+ case DataFormatCell: {
+ GPRReg gpr = info.gpr();
+ m_gprs.lock(gpr);
+ return gpr;
+ }
+
+ case DataFormatJSCell:
+ case DataFormatJS: {
+ GPRReg tagGPR = info.tagGPR();
+ GPRReg payloadGPR = info.payloadGPR();
+ m_gprs.lock(tagGPR);
+ m_gprs.lock(payloadGPR);
+ // BUGFIX: the value is live in registers here, so the *register*
+ // format (not the spill format) determines whether the tag check can
+ // be skipped; was info.spillFormat(), which could elide a required
+ // speculation check. Mirrors fillSpeculateIntInternal.
+ if (info.registerFormat() != DataFormatJSCell)
+ speculationCheck(m_jit.branch32(MacroAssembler::NotEqual, tagGPR, TrustedImm32(JSValue::CellTag)));
+ // Tag register is dead once the check passes; keep only the payload.
+ m_gprs.unlock(tagGPR);
+ m_gprs.release(tagGPR);
+ m_gprs.release(payloadGPR);
+ m_gprs.retain(payloadGPR, virtualRegister, SpillOrderCell);
+ info.fillCell(payloadGPR);
+ return payloadGPR;
+ }
+
+ case DataFormatJSInteger:
+ case DataFormatInteger:
+ case DataFormatJSDouble:
+ case DataFormatDouble:
+ case DataFormatJSBoolean:
+ case DataFormatBoolean: {
+ terminateSpeculativeExecution();
+ return allocate();
+ }
+
+ case DataFormatStorage:
+ ASSERT_NOT_REACHED();
+ }
+
+ ASSERT_NOT_REACHED();
+ return InvalidGPRReg;
+}
+
+// Boolean speculation is not implemented for JSVALUE32_64 yet (the changelog
+// lists "lack of JSBoolean speculations" as a known issue); this stub only
+// emits the debug trace and returns InvalidGPRReg.
+// NOTE(review): returning InvalidGPRReg silently is unsafe if any caller
+// reaches this path — confirm no boolean-speculated nodes are compiled yet.
+GPRReg SpeculativeJIT::fillSpeculateBoolean(NodeIndex nodeIndex)
+{
+#if ENABLE(DFG_DEBUG_VERBOSE)
+ fprintf(stderr, "SpecBool@%d ", nodeIndex);
+#endif
+ return InvalidGPRReg;
+}
+
+// Fuse an integer compare with the immediately following branch node, emitting
+// a single branch32. Constant operands (either side) are folded into immediate
+// forms of the comparison.
+void SpeculativeJIT::compilePeepHoleIntegerBranch(Node& node, NodeIndex branchNodeIndex, JITCompiler::RelationalCondition condition)
+{
+ Node& branchNode = m_jit.graph()[branchNodeIndex];
+ BlockIndex taken = m_jit.graph().blockIndexForBytecodeOffset(branchNode.takenBytecodeOffset());
+ BlockIndex notTaken = m_jit.graph().blockIndexForBytecodeOffset(branchNode.notTakenBytecodeOffset());
+
+ // The branch instruction will branch to the taken block.
+ // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
+ if (taken == (m_block + 1)) {
+ condition = JITCompiler::invert(condition);
+ BlockIndex tmp = taken;
+ taken = notTaken;
+ notTaken = tmp;
+ }
+
+ if (isInt32Constant(node.child1())) {
+ int32_t imm = valueOfInt32Constant(node.child1());
+ SpeculateIntegerOperand op2(this, node.child2());
+ addBranch(m_jit.branch32(condition, JITCompiler::Imm32(imm), op2.gpr()), taken);
+ } else if (isInt32Constant(node.child2())) {
+ SpeculateIntegerOperand op1(this, node.child1());
+ int32_t imm = valueOfInt32Constant(node.child2());
+ addBranch(m_jit.branch32(condition, op1.gpr(), JITCompiler::Imm32(imm)), taken);
+ } else {
+ SpeculateIntegerOperand op1(this, node.child1());
+ SpeculateIntegerOperand op2(this, node.child2());
+ addBranch(m_jit.branch32(condition, op1.gpr(), op2.gpr()), taken);
+ }
+
+ // Check for fall through, otherwise we need to jump.
+ if (notTaken != (m_block + 1))
+ addBranch(m_jit.jump(), notTaken);
+}
+
+// Convert the JSValue held in 'op' (tag/payload pair) to a double in 'result'.
+// Int32 values are converted; boxed doubles are unboxed. Returns the jump
+// taken when the value is not a number at all, which the caller must link to
+// its failure path.
+JITCompiler::Jump SpeculativeJIT::convertToDouble(JSValueOperand& op, FPRReg result)
+{
+ JITCompiler::Jump isInteger = m_jit.branch32(MacroAssembler::Equal, op.tagGPR(), TrustedImm32(JSValue::Int32Tag));
+ // BUGFIX: the number check must test the *tag* register, not the payload.
+ // Tags strictly below LowestTag encode boxed doubles; tags at or above it
+ // are non-numeric (cf. the identical check in fillSpeculateDouble).
+ JITCompiler::Jump notNumber = m_jit.branch32(MacroAssembler::AboveOrEqual, op.tagGPR(), TrustedImm32(JSValue::LowestTag));
+
+ unboxDouble(op.tagGPR(), op.payloadGPR(), result, m_jit.graph()[op.index()].virtualRegister());
+ JITCompiler::Jump done = m_jit.jump();
+
+ isInteger.link(&m_jit);
+ m_jit.convertInt32ToDouble(op.payloadGPR(), result);
+
+ done.link(&m_jit);
+
+ return notNumber;
+}
+
+// Fuse a double compare with the immediately following branch node, emitting a
+// single branchDouble. Note: unlike the integer peephole, the condition is not
+// inverted for fall-through on the taken block; only the notTaken jump is
+// elided when it is the next block.
+void SpeculativeJIT::compilePeepHoleDoubleBranch(Node& node, NodeIndex branchNodeIndex, JITCompiler::DoubleCondition condition)
+{
+ Node& branchNode = m_jit.graph()[branchNodeIndex];
+ BlockIndex taken = m_jit.graph().blockIndexForBytecodeOffset(branchNode.takenBytecodeOffset());
+ BlockIndex notTaken = m_jit.graph().blockIndexForBytecodeOffset(branchNode.notTakenBytecodeOffset());
+
+ SpeculateDoubleOperand op1(this, node.child1());
+ SpeculateDoubleOperand op2(this, node.child2());
+
+ addBranch(m_jit.branchDouble(condition, op1.fpr(), op2.fpr()), taken);
+
+ if (notTaken != (m_block + 1))
+ addBranch(m_jit.jump(), notTaken);
+}
+
+// Fuse an object-identity compare with the following branch node. Both
+// operands are speculated to be objects of the expected kind by checking their
+// vtable pointer against 'vptr'; equality is then a plain pointer compare.
+void SpeculativeJIT::compilePeepHoleObjectEquality(Node& node, NodeIndex branchNodeIndex, void* vptr)
+{
+ Node& branchNode = m_jit.graph()[branchNodeIndex];
+ BlockIndex taken = m_jit.graph().blockIndexForBytecodeOffset(branchNode.takenBytecodeOffset());
+ BlockIndex notTaken = m_jit.graph().blockIndexForBytecodeOffset(branchNode.notTakenBytecodeOffset());
+
+ MacroAssembler::RelationalCondition condition = MacroAssembler::Equal;
+
+ // If taken is the next block, invert the condition so we can fall through.
+ if (taken == (m_block + 1)) {
+ condition = MacroAssembler::NotEqual;
+ BlockIndex tmp = taken;
+ taken = notTaken;
+ notTaken = tmp;
+ }
+
+ SpeculateCellOperand op1(this, node.child1());
+ SpeculateCellOperand op2(this, node.child2());
+
+ GPRReg op1GPR = op1.gpr();
+ GPRReg op2GPR = op2.gpr();
+
+ // Speculate on the object kind via its vptr (first word of the cell).
+ speculationCheck(m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(op1GPR), MacroAssembler::TrustedImmPtr(vptr)));
+ speculationCheck(m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(op2GPR), MacroAssembler::TrustedImmPtr(vptr)));
+
+ addBranch(m_jit.branchPtr(condition, op1GPR, op2GPR), taken);
+ if (notTaken != (m_block + 1))
+ addBranch(m_jit.jump(), notTaken);
+}
+
+// Non-fused object-identity compare: speculate both operands are objects of
+// the kind identified by 'vptr', compare pointers, and produce a boxed boolean
+// result as a BooleanTag/payload register pair.
+void SpeculativeJIT::compileObjectEquality(Node& node, void* vptr)
+{
+ SpeculateCellOperand op1(this, node.child1());
+ SpeculateCellOperand op2(this, node.child2());
+ GPRTemporary resultTag(this, op1);
+ GPRTemporary resultPayload(this, op1);
+
+ GPRReg op1GPR = op1.gpr();
+ GPRReg op2GPR = op2.gpr();
+ GPRReg resultTagGPR = resultTag.gpr();
+ GPRReg resultPayloadGPR = resultPayload.gpr();
+
+ // Speculate on the object kind via its vptr (first word of the cell).
+ speculationCheck(m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(op1GPR), MacroAssembler::TrustedImmPtr(vptr)));
+ speculationCheck(m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(op2GPR), MacroAssembler::TrustedImmPtr(vptr)));
+
+ // Materialize 1 or 0 in the payload depending on pointer equality.
+ MacroAssembler::Jump falseCase = m_jit.branchPtr(MacroAssembler::NotEqual, op1GPR, op2GPR);
+ m_jit.move(Imm32(1), resultPayloadGPR);
+ MacroAssembler::Jump done = m_jit.jump();
+ falseCase.link(&m_jit);
+ m_jit.move(Imm32(0), resultPayloadGPR);
+ done.link(&m_jit);
+
+ m_jit.move(Imm32(JSValue::BooleanTag), resultTagGPR);
+ jsValueResult(resultTagGPR, resultPayloadGPR, m_compileIndex, DataFormatJSBoolean);
+}
+
+// Compiles a comparison node, dispatching on the speculated types of its
+// children. Returns true if the compare was fused with the immediately
+// following branch node (in which case m_compileIndex is advanced to that
+// branch), false if a boxed JSBoolean result was produced instead.
+//
+//   condition       - integer/pointer relational condition for this compare
+//   doubleCondition - the equivalent double-precision condition
+//   operation       - slow-path C function used when no speculation applies
+bool SpeculativeJIT::compare(Node& node, MacroAssembler::RelationalCondition condition, MacroAssembler::DoubleCondition doubleCondition, Z_DFGOperation_EJJ operation)
+{
+    // Fused compare & branch.
+    NodeIndex branchNodeIndex = detectPeepHoleBranch();
+    if (branchNodeIndex != NoNode) {
+        // detectPeepHoleBranch currently only permits the branch to be the very next node,
+        // so can be no intervening nodes to also reference the compare.
+        ASSERT(node.adjustedRefCount() == 1);
+
+        // Dispatch on speculation: integers, then doubles, then (for CompareEq
+        // only) final-object / array pointer identity, else the generic path.
+        if (shouldSpeculateInteger(node.child1(), node.child2())) {
+            compilePeepHoleIntegerBranch(node, branchNodeIndex, condition);
+            use(node.child1());
+            use(node.child2());
+        } else if (shouldSpeculateNumber(node.child1(), node.child2())) {
+            compilePeepHoleDoubleBranch(node, branchNodeIndex, doubleCondition);
+            use(node.child1());
+            use(node.child2());
+        } else if (node.op == CompareEq && shouldSpeculateFinalObject(node.child1(), node.child2())) {
+            compilePeepHoleObjectEquality(node, branchNodeIndex, m_jit.globalData()->jsFinalObjectVPtr);
+            use(node.child1());
+            use(node.child2());
+        } else if (node.op == CompareEq && shouldSpeculateArray(node.child1(), node.child2())) {
+            compilePeepHoleObjectEquality(node, branchNodeIndex, m_jit.globalData()->jsArrayVPtr);
+            use(node.child1());
+            use(node.child2());
+        } else
+            nonSpeculativePeepholeBranch(node, branchNodeIndex, condition, operation);
+
+        // We've consumed the branch node too; resume compilation after it.
+        m_compileIndex = branchNodeIndex;
+        return true;
+    }
+
+    if (shouldSpeculateFinalObject(node.child1(), node.child2()))
+        compileObjectEquality(node, m_jit.globalData()->jsFinalObjectVPtr);
+    else if (shouldSpeculateArray(node.child1(), node.child2()))
+        compileObjectEquality(node, m_jit.globalData()->jsArrayVPtr);
+    else if (!shouldSpeculateNumber(node.child1()) && !shouldSpeculateNumber(node.child2()))
+        nonSpeculativeNonPeepholeCompare(node, condition, operation);
+    else if ((shouldSpeculateNumber(node.child1()) || shouldSpeculateNumber(node.child2())) && !shouldSpeculateInteger(node.child1(), node.child2())) {
+        // Normal case, not fused to branch: double comparison producing a
+        // boxed JSBoolean. Note the branch-over-store idiom: payload is set
+        // to 1 first, then overwritten with 0 unless the condition held.
+        SpeculateDoubleOperand op1(this, node.child1());
+        SpeculateDoubleOperand op2(this, node.child2());
+        GPRTemporary resultTag(this);
+        GPRTemporary resultPayload(this);
+
+        m_jit.move(Imm32(1), resultPayload.gpr());
+        MacroAssembler::Jump trueCase = m_jit.branchDouble(doubleCondition, op1.fpr(), op2.fpr());
+        m_jit.move(Imm32(0), resultPayload.gpr());
+        trueCase.link(&m_jit);
+
+        m_jit.move(TrustedImm32(JSValue::BooleanTag), resultTag.gpr());
+        jsValueResult(resultTag.gpr(), resultPayload.gpr(), m_compileIndex, DataFormatJSBoolean);
+    } else {
+        // Normal case, not fused to branch: integer comparison.
+        SpeculateIntegerOperand op1(this, node.child1());
+        SpeculateIntegerOperand op2(this, node.child2());
+        GPRTemporary resultTag(this, op1, op2);
+        GPRTemporary resultPayload(this);
+
+        m_jit.compare32(condition, op1.gpr(), op2.gpr(), resultPayload.gpr());
+
+        // If we add a DataFormatBool, we should use it here.
+        m_jit.move(TrustedImm32(JSValue::BooleanTag), resultTag.gpr());
+        jsValueResult(resultTag.gpr(), resultPayload.gpr(), m_compileIndex, DataFormatJSBoolean);
+    }
+
+    return false;
+}
+
+void SpeculativeJIT::compile(Node& node)
+{
+ NodeType op = node.op;
+
+ switch (op) {
+ case JSConstant:
+ initConstantInfo(m_compileIndex);
+ break;
+
+ case GetLocal: {
+ GPRTemporary result(this);
+ PredictedType prediction = m_jit.graph().getPrediction(node.local());
+ VirtualRegister virtualRegister = node.virtualRegister();
+ m_jit.load32(JITCompiler::payloadFor(node.local()), result.gpr());
+ if (isInt32Prediction(prediction)) {
+ // Like integerResult, but don't useChildren - our children are phi nodes,
+ // and don't represent values within this dataflow with virtual registers.
+ m_gprs.retain(result.gpr(), virtualRegister, SpillOrderInteger);
+ m_generationInfo[virtualRegister].initInteger(m_compileIndex, node.refCount(), result.gpr());
+ } else {
+ // Like jsValueResult, but don't useChildren - our children are phi nodes,
+ // and don't represent values within this dataflow with virtual registers.
+ GPRTemporary tag(this);
+ m_jit.load32(JITCompiler::tagFor(node.local()), tag.gpr());
+ m_gprs.retain(result.gpr(), virtualRegister, SpillOrderJS);
+ m_gprs.retain(tag.gpr(), virtualRegister, SpillOrderJS);
+
+ DataFormat format;
+ if (isArrayPrediction(prediction))
+ format = DataFormatJSCell;
+ else if (isBooleanPrediction(prediction))
+ format = DataFormatJSBoolean;
+ else
+ format = DataFormatJS;
+
+ m_generationInfo[virtualRegister].initJSValue(m_compileIndex, node.refCount(), tag.gpr(), result.gpr(), format);
+ }
+ break;
+ }
+
+ case SetLocal: {
+ // SetLocal doubles as a hint as to where a node will be stored and
+ // as a speculation point. So before we speculate make sure that we
+ // know where the child of this node needs to go in the virtual
+ // register file.
+ compileMovHint(node);
+
+ // As far as OSR is concerned, we're on the bytecode index corresponding
+ // to the *next* instruction, since we've already "executed" the
+ // SetLocal and whatever other DFG Nodes are associated with the same
+ // bytecode index as the SetLocal.
+ ASSERT(m_bytecodeIndexForOSR == node.codeOrigin.bytecodeIndex());
+ Node& nextNode = m_jit.graph()[m_compileIndex+1];
+
+ // This assertion will fail if we ever emit multiple SetLocal's for
+ // a single bytecode instruction. That's unlikely to happen. But if
+ // it does, the solution is to to have this perform a search until
+ // it finds a Node with a different bytecode index from the one we've
+ // got, and to abstractly execute the SetLocal's along the way. Or,
+ // better yet, handle all of the SetLocal's at once: abstract interpret
+ // all of them, then emit code for all of them, with OSR exiting to
+ // the next non-SetLocal instruction. Note the special case for when
+ // both this SetLocal and the next op have a bytecode index of 0; this
+ // occurs for SetLocal's generated at the top of the code block to
+ // initialize locals to undefined. Ideally, we'd have a way of marking
+ // in the CodeOrigin that a SetLocal is synthetic. This will make the
+ // assertion more sensible-looking. We should then also assert that
+ // synthetic SetLocal's don't have speculation checks, since they
+ // should only be dropping values that we statically know we are
+ // allowed to drop into the variables. DFGPropagator will guarantee
+ // this, since it should have at least an approximation (if not
+ // exact knowledge) of the type of the SetLocal's child node, and
+ // should merge that information into the local that is being set.
+ ASSERT(m_bytecodeIndexForOSR != nextNode.codeOrigin.bytecodeIndex()
+ || (!m_bytecodeIndexForOSR && !nextNode.codeOrigin.bytecodeIndex()));
+ m_bytecodeIndexForOSR = nextNode.codeOrigin.bytecodeIndex();
+
+ PredictedType predictedType = m_jit.graph().getPrediction(node.local());
+ if (isInt32Prediction(predictedType)) {
+ SpeculateIntegerOperand value(this, node.child1());
+ m_jit.store32(value.gpr(), JITCompiler::payloadFor(node.local()));
+ noResult(m_compileIndex);
+ } else if (isArrayPrediction(predictedType)) {
+ SpeculateCellOperand cell(this, node.child1());
+ GPRReg cellGPR = cell.gpr();
+ speculationCheck(m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(cellGPR), MacroAssembler::TrustedImmPtr(m_jit.globalData()->jsArrayVPtr)));
+ m_jit.storePtr(cellGPR, JITCompiler::payloadFor(node.local()));
+ noResult(m_compileIndex);
+ } else { // FIXME: Add BooleanPrediction handling
+ JSValueOperand value(this, node.child1());
+ m_jit.store32(value.payloadGPR(), JITCompiler::payloadFor(node.local()));
+ m_jit.store32(value.tagGPR(), JITCompiler::tagFor(node.local()));
+ noResult(m_compileIndex);
+ }
+
+ // Indicate that it's no longer necessary to retrieve the value of
+ // this bytecode variable from registers or other locations in the register file.
+ valueSourceReferenceForOperand(node.local()) = ValueSource();
+ break;
+ }
+
+ case BitAnd:
+ case BitOr:
+ case BitXor:
+ if (isInt32Constant(node.child1())) {
+ SpeculateIntegerOperand op2(this, node.child2());
+ GPRTemporary result(this, op2);
+
+ bitOp(op, valueOfInt32Constant(node.child1()), op2.gpr(), result.gpr());
+
+ integerResult(result.gpr(), m_compileIndex);
+ } else if (isInt32Constant(node.child2())) {
+ SpeculateIntegerOperand op1(this, node.child1());
+ GPRTemporary result(this, op1);
+
+ bitOp(op, valueOfInt32Constant(node.child2()), op1.gpr(), result.gpr());
+
+ integerResult(result.gpr(), m_compileIndex);
+ } else {
+ SpeculateIntegerOperand op1(this, node.child1());
+ SpeculateIntegerOperand op2(this, node.child2());
+ GPRTemporary result(this, op1, op2);
+
+ GPRReg reg1 = op1.gpr();
+ GPRReg reg2 = op2.gpr();
+ bitOp(op, reg1, reg2, result.gpr());
+
+ integerResult(result.gpr(), m_compileIndex);
+ }
+ break;
+
+ case BitRShift:
+ case BitLShift:
+ case BitURShift:
+ if (isInt32Constant(node.child2())) {
+ SpeculateIntegerOperand op1(this, node.child1());
+ GPRTemporary result(this, op1);
+
+ shiftOp(op, op1.gpr(), valueOfInt32Constant(node.child2()) & 0x1f, result.gpr());
+
+ integerResult(result.gpr(), m_compileIndex);
+ } else {
+ // Do not allow shift amount to be used as the result, MacroAssembler does not permit this.
+ SpeculateIntegerOperand op1(this, node.child1());
+ SpeculateIntegerOperand op2(this, node.child2());
+ GPRTemporary result(this, op1);
+
+ GPRReg reg1 = op1.gpr();
+ GPRReg reg2 = op2.gpr();
+ shiftOp(op, reg1, reg2, result.gpr());
+
+ integerResult(result.gpr(), m_compileIndex);
+ }
+ break;
+
+ case UInt32ToNumber: {
+ if (!nodeCanSpeculateInteger(node.arithNodeFlags())) {
+ // We know that this sometimes produces doubles. So produce a double every
+ // time. This at least allows subsequent code to not have weird conditionals.
+
+ IntegerOperand op1(this, node.child1());
+ FPRTemporary result(this);
+ GPRTemporary address(this, op1);
+
+ GPRReg inputGPR = op1.gpr();
+ FPRReg outputFPR = result.fpr();
+ GPRReg addressGPR = address.gpr();
+
+ m_jit.convertInt32ToDouble(inputGPR, outputFPR);
+
+ JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, inputGPR, TrustedImm32(0));
+ m_jit.move(JITCompiler::TrustedImmPtr(&twoToThe32), addressGPR);
+ m_jit.addDouble(JITCompiler::Address(addressGPR, 0), outputFPR);
+ positive.link(&m_jit);
+
+ doubleResult(outputFPR, m_compileIndex);
+ }
+
+ IntegerOperand op1(this, node.child1());
+ GPRTemporary result(this, op1);
+
+ // Test the operand is positive.
+ speculationCheck(m_jit.branch32(MacroAssembler::LessThan, op1.gpr(), TrustedImm32(0)));
+
+ m_jit.move(op1.gpr(), result.gpr());
+ integerResult(result.gpr(), m_compileIndex, op1.format());
+ break;
+ }
+
+ case ValueToInt32: {
+ if (shouldNotSpeculateInteger(node.child1())) {
+ // Do it the safe way.
+ nonSpeculativeValueToInt32(node);
+ break;
+ }
+
+ if (shouldNotSpeculateInteger(node.child1())) {
+ // Do it the safe way.
+ nonSpeculativeValueToInt32(node);
+ break;
+ }
+
+ SpeculateIntegerOperand op1(this, node.child1());
+ GPRTemporary result(this, op1);
+ m_jit.move(op1.gpr(), result.gpr());
+ integerResult(result.gpr(), m_compileIndex, op1.format());
+ break;
+ }
+
+ case ValueToNumber: {
+ if (shouldNotSpeculateInteger(node.child1())) {
+ SpeculateDoubleOperand op1(this, node.child1());
+ FPRTemporary result(this, op1);
+ m_jit.moveDouble(op1.fpr(), result.fpr());
+ doubleResult(result.fpr(), m_compileIndex);
+ break;
+ }
+
+ SpeculateIntegerOperand op1(this, node.child1());
+ GPRTemporary result(this, op1);
+ m_jit.move(op1.gpr(), result.gpr());
+ integerResult(result.gpr(), m_compileIndex, op1.format());
+ break;
+ }
+
+ case ValueToDouble: {
+ SpeculateDoubleOperand op1(this, node.child1());
+ FPRTemporary result(this, op1);
+ m_jit.moveDouble(op1.fpr(), result.fpr());
+ doubleResult(result.fpr(), m_compileIndex);
+ break;
+ }
+
+ case ValueAdd:
+ case ArithAdd: {
+ if (shouldSpeculateInteger(node.child1(), node.child2()) && nodeCanSpeculateInteger(node.arithNodeFlags())) {
+ if (isInt32Constant(node.child1())) {
+ int32_t imm1 = valueOfInt32Constant(node.child1());
+ SpeculateIntegerOperand op2(this, node.child2());
+ GPRTemporary result(this);
+
+ if (nodeCanTruncateInteger(node.arithNodeFlags())) {
+ m_jit.move(op2.gpr(), result.gpr());
+ m_jit.add32(Imm32(imm1), result.gpr());
+ } else
+ speculationCheck(m_jit.branchAdd32(MacroAssembler::Overflow, op2.gpr(), Imm32(imm1), result.gpr()));
+
+ integerResult(result.gpr(), m_compileIndex);
+ break;
+ }
+
+ if (isInt32Constant(node.child2())) {
+ SpeculateIntegerOperand op1(this, node.child1());
+ int32_t imm2 = valueOfInt32Constant(node.child2());
+ GPRTemporary result(this);
+
+ if (nodeCanTruncateInteger(node.arithNodeFlags())) {
+ m_jit.move(op1.gpr(), result.gpr());
+ m_jit.add32(Imm32(imm2), result.gpr());
+ } else
+ speculationCheck(m_jit.branchAdd32(MacroAssembler::Overflow, op1.gpr(), Imm32(imm2), result.gpr()));
+
+ integerResult(result.gpr(), m_compileIndex);
+ break;
+ }
+
+ SpeculateIntegerOperand op1(this, node.child1());
+ SpeculateIntegerOperand op2(this, node.child2());
+ GPRTemporary result(this, op1, op2);
+
+ GPRReg gpr1 = op1.gpr();
+ GPRReg gpr2 = op2.gpr();
+ GPRReg gprResult = result.gpr();
+
+ if (nodeCanTruncateInteger(node.arithNodeFlags())) {
+ if (gpr1 == gprResult)
+ m_jit.add32(gpr2, gprResult);
+ else {
+ m_jit.move(gpr2, gprResult);
+ m_jit.add32(gpr1, gprResult);
+ }
+ } else {
+ MacroAssembler::Jump check = m_jit.branchAdd32(MacroAssembler::Overflow, gpr1, gpr2, gprResult);
+
+ if (gpr1 == gprResult)
+ speculationCheck(check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr2));
+ else if (gpr2 == gprResult)
+ speculationCheck(check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr1));
+ else
+ speculationCheck(check);
+ }
+
+ integerResult(gprResult, m_compileIndex);
+ break;
+ }
+
+ if (shouldSpeculateNumber(node.child1(), node.child2())) {
+ SpeculateDoubleOperand op1(this, node.child1());
+ SpeculateDoubleOperand op2(this, node.child2());
+ FPRTemporary result(this, op1, op2);
+
+ FPRReg reg1 = op1.fpr();
+ FPRReg reg2 = op2.fpr();
+ m_jit.addDouble(reg1, reg2, result.fpr());
+
+ doubleResult(result.fpr(), m_compileIndex);
+ break;
+ }
+
+ ASSERT(op == ValueAdd);
+
+ JSValueOperand op1(this, node.child1());
+ JSValueOperand op2(this, node.child2());
+
+ GPRReg op1TagGPR = op1.tagGPR();
+ GPRReg op1PayloadGPR = op1.payloadGPR();
+ GPRReg op2TagGPR = op2.tagGPR();
+ GPRReg op2PayloadGPR = op2.payloadGPR();
+
+ flushRegisters();
+
+ GPRResult2 resultTag(this);
+ GPRResult resultPayload(this);
+ if (isKnownNotNumber(node.child1()) || isKnownNotNumber(node.child2()))
+ callOperation(operationValueAddNotNumber, resultTag.gpr(), resultPayload.gpr(), op1TagGPR, op1PayloadGPR, op2TagGPR, op2PayloadGPR);
+ else
+ callOperation(operationValueAdd, resultTag.gpr(), resultPayload.gpr(), op1TagGPR, op1PayloadGPR, op2TagGPR, op2PayloadGPR);
+
+ jsValueResult(resultTag.gpr(), resultPayload.gpr(), m_compileIndex);
+ break;
+ }
+
+ case ArithSub: {
+ if (shouldSpeculateInteger(node.child1(), node.child2()) && nodeCanSpeculateInteger(node.arithNodeFlags())) {
+ if (isInt32Constant(node.child2())) {
+ SpeculateIntegerOperand op1(this, node.child1());
+ int32_t imm2 = valueOfInt32Constant(node.child2());
+ GPRTemporary result(this);
+
+ if (nodeCanTruncateInteger(node.arithNodeFlags())) {
+ m_jit.move(op1.gpr(), result.gpr());
+ m_jit.sub32(Imm32(imm2), result.gpr());
+ } else
+ speculationCheck(m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), Imm32(imm2), result.gpr()));
+
+ integerResult(result.gpr(), m_compileIndex);
+ break;
+ }
+
+ SpeculateIntegerOperand op1(this, node.child1());
+ SpeculateIntegerOperand op2(this, node.child2());
+ GPRTemporary result(this);
+
+ if (nodeCanTruncateInteger(node.arithNodeFlags())) {
+ m_jit.move(op1.gpr(), result.gpr());
+ m_jit.sub32(op2.gpr(), result.gpr());
+ } else
+ speculationCheck(m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), op2.gpr(), result.gpr()));
+
+ integerResult(result.gpr(), m_compileIndex);
+ break;
+ }
+
+ SpeculateDoubleOperand op1(this, node.child1());
+ SpeculateDoubleOperand op2(this, node.child2());
+ FPRTemporary result(this, op1);
+
+ FPRReg reg1 = op1.fpr();
+ FPRReg reg2 = op2.fpr();
+ m_jit.subDouble(reg1, reg2, result.fpr());
+
+ doubleResult(result.fpr(), m_compileIndex);
+ break;
+ }
+
+ case ArithMul: {
+ if (shouldSpeculateInteger(node.child1(), node.child2()) && nodeCanSpeculateInteger(node.arithNodeFlags())) {
+ SpeculateIntegerOperand op1(this, node.child1());
+ SpeculateIntegerOperand op2(this, node.child2());
+ GPRTemporary result(this);
+
+ GPRReg reg1 = op1.gpr();
+ GPRReg reg2 = op2.gpr();
+
+ // What is unfortunate is that we cannot take advantage of nodeCanTruncateInteger()
+ // here. A multiply on integers performed in the double domain and then truncated to
+ // an integer will give a different result than a multiply performed in the integer
+ // domain and then truncated, if the integer domain result would have resulted in
+ // something bigger than what a 32-bit integer can hold. JavaScript mandates that
+ // the semantics are always as if the multiply had been performed in the double
+ // domain.
+
+ speculationCheck(m_jit.branchMul32(MacroAssembler::Overflow, reg1, reg2, result.gpr()));
+
+ // Check for negative zero, if the users of this node care about such things.
+ if (!nodeCanIgnoreNegativeZero(node.arithNodeFlags())) {
+ MacroAssembler::Jump resultNonZero = m_jit.branchTest32(MacroAssembler::NonZero, result.gpr());
+ speculationCheck(m_jit.branch32(MacroAssembler::LessThan, reg1, TrustedImm32(0)));
+ speculationCheck(m_jit.branch32(MacroAssembler::LessThan, reg2, TrustedImm32(0)));
+ resultNonZero.link(&m_jit);
+ }
+
+ integerResult(result.gpr(), m_compileIndex);
+ break;
+ }
+
+ SpeculateDoubleOperand op1(this, node.child1());
+ SpeculateDoubleOperand op2(this, node.child2());
+ FPRTemporary result(this, op1, op2);
+
+ FPRReg reg1 = op1.fpr();
+ FPRReg reg2 = op2.fpr();
+
+ m_jit.mulDouble(reg1, reg2, result.fpr());
+
+ doubleResult(result.fpr(), m_compileIndex);
+ break;
+ }
+
+ case ArithDiv: {
+ if (shouldSpeculateInteger(node.child1(), node.child2()) && nodeCanSpeculateInteger(node.arithNodeFlags())) {
+ SpeculateIntegerOperand op1(this, node.child1());
+ SpeculateIntegerOperand op2(this, node.child2());
+ GPRTemporary eax(this, X86Registers::eax);
+ GPRTemporary edx(this, X86Registers::edx);
+ GPRReg op1GPR = op1.gpr();
+ GPRReg op2GPR = op2.gpr();
+
+ speculationCheck(m_jit.branchTest32(JITCompiler::Zero, op2GPR));
+
+ // If the user cares about negative zero, then speculate that we're not about
+ // to produce negative zero.
+ if (!nodeCanIgnoreNegativeZero(node.arithNodeFlags())) {
+ MacroAssembler::Jump numeratorNonZero = m_jit.branchTest32(MacroAssembler::NonZero, op1GPR);
+ speculationCheck(m_jit.branch32(MacroAssembler::LessThan, op2GPR, TrustedImm32(0)));
+ numeratorNonZero.link(&m_jit);
+ }
+
+ GPRReg temp2 = InvalidGPRReg;
+ if (op2GPR == X86Registers::eax || op2GPR == X86Registers::edx) {
+ temp2 = allocate();
+ m_jit.move(op2GPR, temp2);
+ op2GPR = temp2;
+ }
+
+ m_jit.move(op1GPR, eax.gpr());
+ m_jit.assembler().cdq();
+ m_jit.assembler().idivl_r(op2GPR);
+
+ // Check that there was no remainder. If there had been, then we'd be obligated to
+ // produce a double result instead.
+ speculationCheck(m_jit.branchTest32(JITCompiler::NonZero, edx.gpr()));
+
+ integerResult(eax.gpr(), m_compileIndex);
+ break;
+ }
+
+ SpeculateDoubleOperand op1(this, node.child1());
+ SpeculateDoubleOperand op2(this, node.child2());
+ FPRTemporary result(this, op1);
+
+ FPRReg reg1 = op1.fpr();
+ FPRReg reg2 = op2.fpr();
+ m_jit.divDouble(reg1, reg2, result.fpr());
+
+ doubleResult(result.fpr(), m_compileIndex);
+ break;
+ }
+
+ case ArithMod: {
+ if (shouldNotSpeculateInteger(node.child1()) || shouldNotSpeculateInteger(node.child2())
+ || !nodeCanSpeculateInteger(node.arithNodeFlags())) {
+ SpeculateDoubleOperand op1(this, node.child1());
+ SpeculateDoubleOperand op2(this, node.child2());
+
+ FPRReg op1FPR = op1.fpr();
+ FPRReg op2FPR = op2.fpr();
+
+ flushRegisters();
+
+ FPRResult result(this);
+
+ callOperation(fmod, result.fpr(), op1FPR, op2FPR);
+
+ doubleResult(result.fpr(), m_compileIndex);
+ break;
+ }
+
+ SpeculateIntegerOperand op1(this, node.child1());
+ SpeculateIntegerOperand op2(this, node.child2());
+ GPRTemporary eax(this, X86Registers::eax);
+ GPRTemporary edx(this, X86Registers::edx);
+ GPRReg op1Gpr = op1.gpr();
+ GPRReg op2Gpr = op2.gpr();
+
+ speculationCheck(m_jit.branchTest32(JITCompiler::Zero, op2Gpr));
+
+ GPRReg temp2 = InvalidGPRReg;
+ if (op2Gpr == X86Registers::eax || op2Gpr == X86Registers::edx) {
+ temp2 = allocate();
+ m_jit.move(op2Gpr, temp2);
+ op2Gpr = temp2;
+ }
+
+ m_jit.move(op1Gpr, eax.gpr());
+ m_jit.assembler().cdq();
+ m_jit.assembler().idivl_r(op2Gpr);
+
+ if (temp2 != InvalidGPRReg)
+ unlock(temp2);
+
+ integerResult(edx.gpr(), m_compileIndex);
+ break;
+ }
+
+ case ArithAbs: {
+ if (shouldSpeculateInteger(node.child1()) && nodeCanSpeculateInteger(node.arithNodeFlags())) {
+ SpeculateIntegerOperand op1(this, node.child1());
+ GPRTemporary result(this, op1);
+ GPRTemporary scratch(this);
+
+ m_jit.zeroExtend32ToPtr(op1.gpr(), result.gpr());
+ m_jit.rshift32(result.gpr(), MacroAssembler::TrustedImm32(31), scratch.gpr());
+ m_jit.add32(scratch.gpr(), result.gpr());
+ m_jit.xor32(scratch.gpr(), result.gpr());
+ speculationCheck(m_jit.branch32(MacroAssembler::Equal, result.gpr(), MacroAssembler::TrustedImm32(1 << 31)));
+ integerResult(result.gpr(), m_compileIndex);
+ break;
+ }
+
+ SpeculateDoubleOperand op1(this, node.child1());
+ FPRTemporary result(this);
+
+ static const double negativeZeroConstant = -0.0;
+
+ m_jit.loadDouble(&negativeZeroConstant, result.fpr());
+ m_jit.andnotDouble(op1.fpr(), result.fpr());
+ doubleResult(result.fpr(), m_compileIndex);
+ break;
+ }
+
+ case ArithMin:
+ case ArithMax: {
+ if (shouldSpeculateInteger(node.child1(), node.child2()) && nodeCanSpeculateInteger(node.arithNodeFlags())) {
+ SpeculateStrictInt32Operand op1(this, node.child1());
+ SpeculateStrictInt32Operand op2(this, node.child2());
+ GPRTemporary result(this, op1);
+
+ MacroAssembler::Jump op1Less = m_jit.branch32(op == ArithMin ? MacroAssembler::LessThan : MacroAssembler::GreaterThan, op1.gpr(), op2.gpr());
+ m_jit.move(op2.gpr(), result.gpr());
+ if (op1.gpr() != result.gpr()) {
+ MacroAssembler::Jump done = m_jit.jump();
+ op1Less.link(&m_jit);
+ m_jit.move(op1.gpr(), result.gpr());
+ done.link(&m_jit);
+ } else
+ op1Less.link(&m_jit);
+
+ integerResult(result.gpr(), m_compileIndex);
+ break;
+ }
+
+ SpeculateDoubleOperand op1(this, node.child1());
+ SpeculateDoubleOperand op2(this, node.child2());
+ FPRTemporary result(this, op1);
+
+ MacroAssembler::JumpList done;
+
+ MacroAssembler::Jump op1Less = m_jit.branchDouble(op == ArithMin ? MacroAssembler::DoubleLessThan : MacroAssembler::DoubleGreaterThan, op1.fpr(), op2.fpr());
+
+ // op2 is eather the lesser one or one of then is NaN
+ MacroAssembler::Jump op2Less = m_jit.branchDouble(op == ArithMin ? MacroAssembler::DoubleGreaterThan : MacroAssembler::DoubleLessThan, op1.fpr(), op2.fpr());
+
+ // Unordered case. We don't know which of op1, op2 is NaN. Manufacture NaN by adding
+ // op1 + op2 and putting it into result.
+ m_jit.addDouble(op1.fpr(), op2.fpr(), result.fpr());
+ done.append(m_jit.jump());
+
+ op2Less.link(&m_jit);
+ m_jit.moveDouble(op2.fpr(), result.fpr());
+
+ if (op1.fpr() != result.fpr()) {
+ done.append(m_jit.jump());
+
+ op1Less.link(&m_jit);
+ m_jit.moveDouble(op1.fpr(), result.fpr());
+ } else
+ op1Less.link(&m_jit);
+
+ done.link(&m_jit);
+
+ doubleResult(result.fpr(), m_compileIndex);
+ break;
+ }
+
+ case ArithSqrt: {
+ SpeculateDoubleOperand op1(this, node.child1());
+ FPRTemporary result(this, op1);
+
+ m_jit.sqrtDouble(op1.fpr(), result.fpr());
+
+ doubleResult(result.fpr(), m_compileIndex);
+ break;
+ }
+
+ case LogicalNot: {
+ // FIXME: Need to add fast paths for known booleans.
+ JSValueOperand value(this, node.child1());
+ GPRTemporary resultTag(this, value);
+ GPRTemporary resultPayload(this, value, false);
+ speculationCheck(m_jit.branch32(JITCompiler::NotEqual, value.tagGPR(), TrustedImm32(JSValue::BooleanTag)));
+ m_jit.move(value.payloadGPR(), resultPayload.gpr());
+ m_jit.xor32(TrustedImm32(1), resultPayload.gpr());
+ m_jit.move(TrustedImm32(JSValue::BooleanTag), resultTag.gpr());
+
+ // If we add a DataFormatBool, we should use it here.
+ jsValueResult(resultTag.gpr(), resultPayload.gpr(), m_compileIndex, DataFormatJSBoolean);
+ break;
+ }
+
+ case CompareLess:
+ if (compare(node, JITCompiler::LessThan, JITCompiler::DoubleLessThan, operationCompareLess))
+ return;
+ break;
+
+ case CompareLessEq:
+ if (compare(node, JITCompiler::LessThanOrEqual, JITCompiler::DoubleLessThanOrEqual, operationCompareLessEq))
+ return;
+ break;
+
+ case CompareGreater:
+ if (compare(node, JITCompiler::GreaterThan, JITCompiler::DoubleGreaterThan, operationCompareGreater))
+ return;
+ break;
+
+ case CompareGreaterEq:
+ if (compare(node, JITCompiler::GreaterThanOrEqual, JITCompiler::DoubleGreaterThanOrEqual, operationCompareGreaterEq))
+ return;
+ break;
+
+ case CompareEq:
+ if (isNullConstant(node.child1())) {
+ if (nonSpeculativeCompareNull(node, node.child2()))
+ return;
+ break;
+ }
+ if (isNullConstant(node.child2())) {
+ if (nonSpeculativeCompareNull(node, node.child1()))
+ return;
+ break;
+ }
+ if (compare(node, JITCompiler::Equal, JITCompiler::DoubleEqual, operationCompareEq))
+ return;
+ break;
+
+ case CompareStrictEq:
+ if (nonSpeculativeStrictEq(node))
+ return;
+ break;
+
+ case GetByVal: {
+ ASSERT(node.child3() == NoNode);
+ SpeculateCellOperand base(this, node.child1());
+ SpeculateStrictInt32Operand property(this, node.child2());
+ GPRTemporary storage(this);
+ GPRTemporary resultTag(this, base);
+
+ GPRReg baseReg = base.gpr();
+ GPRReg propertyReg = property.gpr();
+ GPRReg storageReg = storage.gpr();
+
+ if (!m_compileOkay)
+ return;
+
+ // Get the array storage. We haven't yet checked this is a JSArray, so this is only safe if
+ // an access with offset JSArray::storageOffset() is valid for all JSCells!
+ m_jit.loadPtr(MacroAssembler::Address(baseReg, JSArray::storageOffset()), storageReg);
+
+ // Check that base is an array, and that property is contained within m_vector (< m_vectorLength).
+ // If we have predicted the base to be type array, we can skip the check.
+ Node& baseNode = m_jit.graph()[node.child1()];
+ if (baseNode.op != GetLocal || !isArrayPrediction(m_jit.graph().getPrediction(baseNode.local())))
+ speculationCheck(m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(baseReg), MacroAssembler::TrustedImmPtr(m_jit.globalData()->jsArrayVPtr)));
+ speculationCheck(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(baseReg, JSArray::vectorLengthOffset())));
+
+ // FIXME: In cases where there are subsequent by_val accesses to the same base it might help to cache
+ // the storage pointer - especially if there happens to be another register free right now. If we do so,
+ // then we'll need to allocate a new temporary for result.
+ m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), resultTag.gpr());
+ speculationCheck(m_jit.branch32(MacroAssembler::Equal, resultTag.gpr(), TrustedImm32(JSValue::EmptyValueTag)));
+ m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), storageReg);
+
+ jsValueResult(resultTag.gpr(), storageReg, m_compileIndex);
+ break;
+ }
+
+ case PutByVal: {
+ SpeculateCellOperand base(this, node.child1());
+ SpeculateStrictInt32Operand property(this, node.child2());
+ JSValueOperand value(this, node.child3());
+ GPRTemporary scratch(this, base);
+
+ // Map base, property & value into registers, allocate a scratch register.
+ GPRReg baseReg = base.gpr();
+ GPRReg propertyReg = property.gpr();
+ GPRReg valueTagReg = value.tagGPR();
+ GPRReg valuePayloadReg = value.payloadGPR();
+ GPRReg scratchReg = scratch.gpr();
+
+ if (!m_compileOkay)
+ return;
+
+ writeBarrier(baseReg, valueTagReg, node.child3(), WriteBarrierForPropertyAccess, scratchReg);
+
+ // Check that base is an array, and that property is contained within m_vector (< m_vectorLength).
+ // If we have predicted the base to be type array, we can skip the check.
+ Node& baseNode = m_jit.graph()[node.child1()];
+ if (baseNode.op != GetLocal || !isArrayPrediction(m_jit.graph().getPrediction(baseNode.local())))
+ speculationCheck(m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(baseReg), MacroAssembler::TrustedImmPtr(m_jit.globalData()->jsArrayVPtr)));
+
+ base.use();
+ property.use();
+ value.use();
+
+ MacroAssembler::Jump withinArrayBounds = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(baseReg, JSArray::vectorLengthOffset()));
+
+ // Code to handle put beyond array bounds.
+ silentSpillAllRegisters(scratchReg);
+ m_jit.push(valueTagReg);
+ m_jit.push(valuePayloadReg);
+ m_jit.push(propertyReg);
+ m_jit.push(baseReg);
+ m_jit.push(GPRInfo::callFrameRegister);
+ JITCompiler::Call functionCall = appendCallWithExceptionCheck(operationPutByValBeyondArrayBounds);
+ silentFillAllRegisters(scratchReg);
+ JITCompiler::Jump wasBeyondArrayBounds = m_jit.jump();
+
+ withinArrayBounds.link(&m_jit);
+
+ // Get the array storage.
+ GPRReg storageReg = scratchReg;
+ m_jit.loadPtr(MacroAssembler::Address(baseReg, JSArray::storageOffset()), storageReg);
+
+ // Check if we're writing to a hole; if so increment m_numValuesInVector.
+ MacroAssembler::Jump notHoleValue = m_jit.branch32(MacroAssembler::NotEqual, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), TrustedImm32(JSValue::EmptyValueTag));
+ m_jit.add32(TrustedImm32(1), MacroAssembler::Address(storageReg, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector)));
+
+ // If we're writing to a hole we might be growing the array;
+ MacroAssembler::Jump lengthDoesNotNeedUpdate = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, OBJECT_OFFSETOF(ArrayStorage, m_length)));
+ m_jit.add32(TrustedImm32(1), propertyReg);
+ m_jit.store32(propertyReg, MacroAssembler::Address(storageReg, OBJECT_OFFSETOF(ArrayStorage, m_length)));
+ m_jit.sub32(TrustedImm32(1), propertyReg);
+
+ lengthDoesNotNeedUpdate.link(&m_jit);
+ notHoleValue.link(&m_jit);
+
+ // Store the value to the array.
+ m_jit.store32(valueTagReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
+ m_jit.store32(valuePayloadReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
+
+ wasBeyondArrayBounds.link(&m_jit);
+
+ noResult(m_compileIndex, UseChildrenCalledExplicitly);
+ break;
+ }
+
+ case PutByValAlias: {
+ SpeculateCellOperand base(this, node.child1());
+ SpeculateStrictInt32Operand property(this, node.child2());
+ JSValueOperand value(this, node.child3());
+ GPRTemporary scratch(this, base);
+
+ GPRReg baseReg = base.gpr();
+ GPRReg scratchReg = scratch.gpr();
+
+ writeBarrier(baseReg, value.tagGPR(), node.child3(), WriteBarrierForPropertyAccess, scratchReg);
+
+ // Get the array storage.
+ GPRReg storageReg = scratchReg;
+ m_jit.loadPtr(MacroAssembler::Address(baseReg, JSArray::storageOffset()), storageReg);
+
+ // Store the value to the array.
+ GPRReg propertyReg = property.gpr();
+ m_jit.store32(value.tagGPR(), MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
+ m_jit.store32(value.payloadGPR(), MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
+
+ noResult(m_compileIndex);
+ break;
+ }
+
+ case DFG::Jump: {
+ BlockIndex taken = m_jit.graph().blockIndexForBytecodeOffset(node.takenBytecodeOffset());
+ if (taken != (m_block + 1))
+ addBranch(m_jit.jump(), taken);
+ noResult(m_compileIndex);
+ break;
+ }
+
+ case Branch:
+ if (isStrictInt32(node.child1()) || shouldSpeculateInteger(node.child1())) {
+ SpeculateStrictInt32Operand op(this, node.child1());
+
+ BlockIndex taken = m_jit.graph().blockIndexForBytecodeOffset(node.takenBytecodeOffset());
+ BlockIndex notTaken = m_jit.graph().blockIndexForBytecodeOffset(node.notTakenBytecodeOffset());
+
+ MacroAssembler::ResultCondition condition = MacroAssembler::NonZero;
+
+ if (taken == (m_block + 1)) {
+ condition = MacroAssembler::Zero;
+ BlockIndex tmp = taken;
+ taken = notTaken;
+ notTaken = tmp;
+ }
+
+ addBranch(m_jit.branchTest32(condition, op.gpr()), taken);
+ if (notTaken != (m_block + 1))
+ addBranch(m_jit.jump(), notTaken);
+
+ noResult(m_compileIndex);
+ break;
+ }
+ emitBranch(node);
+ break;
+
+ case Return: {
+ ASSERT(GPRInfo::callFrameRegister != GPRInfo::regT2);
+ ASSERT(GPRInfo::regT1 != GPRInfo::returnValueGPR);
+ ASSERT(GPRInfo::returnValueGPR != GPRInfo::callFrameRegister);
+
+#if ENABLE(DFG_SUCCESS_STATS)
+ static SamplingCounter counter("SpeculativeJIT");
+ m_jit.emitCount(counter);
+#endif
+
+ // Return the result in returnValueGPR.
+ JSValueOperand op1(this, node.child1());
+ op1.fill();
+ if (op1.isDouble())
+ boxDouble(op1.fpr(), GPRInfo::returnValueGPR2, GPRInfo::returnValueGPR, m_jit.graph()[op1.index()].virtualRegister());
+ else {
+ if (op1.payloadGPR() == GPRInfo::returnValueGPR2 && op1.tagGPR() == GPRInfo::returnValueGPR)
+ m_jit.swap(GPRInfo::returnValueGPR, GPRInfo::returnValueGPR2);
+ else if (op1.payloadGPR() == GPRInfo::returnValueGPR2) {
+ m_jit.move(op1.payloadGPR(), GPRInfo::returnValueGPR);
+ m_jit.move(op1.tagGPR(), GPRInfo::returnValueGPR2);
+ } else {
+ m_jit.move(op1.tagGPR(), GPRInfo::returnValueGPR2);
+ m_jit.move(op1.payloadGPR(), GPRInfo::returnValueGPR);
+ }
+ }
+
+ // Grab the return address.
+ m_jit.emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, GPRInfo::regT2);
+ // Restore our caller's "r".
+ m_jit.emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, GPRInfo::callFrameRegister);
+ // Return.
+ m_jit.restoreReturnAddressBeforeReturn(GPRInfo::regT2);
+ m_jit.ret();
+
+ noResult(m_compileIndex);
+ break;
+ }
+
+ case Throw:
+ case ThrowReferenceError: {
+ // We expect that throw statements are rare and are intended to exit the code block
+ // anyway, so we just OSR back to the old JIT for now.
+ terminateSpeculativeExecution();
+ break;
+ }
+
+ case ToPrimitive: {
+ if (shouldSpeculateInteger(node.child1())) {
+ // It's really profitable to speculate integer, since it's really cheap,
+ // it means we don't have to do any real work, and we emit a lot less code.
+
+ SpeculateIntegerOperand op1(this, node.child1());
+ GPRTemporary result(this, op1);
+
+ ASSERT(op1.format() == DataFormatInteger);
+ m_jit.move(op1.gpr(), result.gpr());
+
+ integerResult(result.gpr(), m_compileIndex);
+ break;
+ }
+
+ // FIXME: Add string speculation here.
+
+ bool wasPrimitive = isKnownNumeric(node.child1()) || isKnownBoolean(node.child1());
+
+ JSValueOperand op1(this, node.child1());
+ GPRTemporary resultTag(this, op1);
+ GPRTemporary resultPayload(this, op1, false);
+
+ GPRReg op1TagGPR = op1.tagGPR();
+ GPRReg op1PayloadGPR = op1.payloadGPR();
+ GPRReg resultTagGPR = resultTag.gpr();
+ GPRReg resultPayloadGPR = resultPayload.gpr();
+
+ op1.use();
+
+ if (wasPrimitive) {
+ m_jit.move(op1TagGPR, resultTagGPR);
+ m_jit.move(op1PayloadGPR, resultPayloadGPR);
+ } else {
+ MacroAssembler::JumpList alreadyPrimitive;
+
+ alreadyPrimitive.append(m_jit.branch32(MacroAssembler::NotEqual, op1TagGPR, TrustedImm32(JSValue::CellTag)));
+ alreadyPrimitive.append(m_jit.branchPtr(MacroAssembler::Equal, MacroAssembler::Address(op1PayloadGPR), MacroAssembler::TrustedImmPtr(m_jit.globalData()->jsStringVPtr)));
+
+ silentSpillAllRegisters(resultTagGPR, resultPayloadGPR);
+ m_jit.push(op1TagGPR);
+ m_jit.push(op1PayloadGPR);
+ m_jit.push(GPRInfo::callFrameRegister);
+ appendCallWithExceptionCheck(operationToPrimitive);
+ setupResults(resultTagGPR, resultPayloadGPR);
+ silentFillAllRegisters(resultTagGPR, resultPayloadGPR);
+
+ MacroAssembler::Jump done = m_jit.jump();
+
+ alreadyPrimitive.link(&m_jit);
+ m_jit.move(op1TagGPR, resultTagGPR);
+ m_jit.move(op1PayloadGPR, resultPayloadGPR);
+
+ done.link(&m_jit);
+ }
+
+ jsValueResult(resultTagGPR, resultPayloadGPR, m_compileIndex, UseChildrenCalledExplicitly);
+ break;
+ }
+
+ case StrCat: {
+ // We really don't want to grow the register file just to do a StrCat. Say we
+ // have 50 functions on the stack that all have a StrCat in them that has
+ // upwards of 10 operands. In the DFG this would mean that each one gets
+ // some random virtual register, and then to do the StrCat we'd need a second
+ // span of 10 operands just to have somewhere to copy the 10 operands to, where
+ // they'd be contiguous and we could easily tell the C code how to find them.
+ // Ugly! So instead we use the scratchBuffer infrastructure in JSGlobalData. That
+ // way, those 50 functions will share the same scratchBuffer for offloading their
+ // StrCat operands. It's about as good as we can do, unless we start doing
+ // virtual register coalescing to ensure that operands to StrCat get spilled
+ // in exactly the place where StrCat wants them, or else have the StrCat
+ // refer to those operands' SetLocal instructions to force them to spill in
+ // the right place. Basically, any way you cut it, the current approach
+ // probably has the best balance of performance and sensibility in the sense
+ // that it does not increase the complexity of the DFG JIT just to make StrCat
+ // fast and pretty.
+
+ EncodedJSValue* buffer = static_cast<EncodedJSValue*>(m_jit.globalData()->scratchBufferForSize(sizeof(EncodedJSValue) * node.numChildren()));
+
+ for (unsigned operandIdx = 0; operandIdx < node.numChildren(); ++operandIdx) {
+ JSValueOperand operand(this, m_jit.graph().m_varArgChildren[node.firstChild() + operandIdx]);
+ GPRReg opTagGPR = operand.tagGPR();
+ GPRReg opPayloadGPR = operand.payloadGPR();
+ operand.use();
+
+ m_jit.store32(opTagGPR, reinterpret_cast<char*>(buffer + operandIdx) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag));
+ m_jit.store32(opPayloadGPR, reinterpret_cast<char*>(buffer + operandIdx) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
+ }
+
+ flushRegisters();
+
+ GPRResult resultPayload(this);
+ GPRResult2 resultTag(this);
+
+ callOperation(operationStrCat, resultTag.gpr(), resultPayload.gpr(), buffer, node.numChildren());
+
+ jsValueResult(resultTag.gpr(), resultPayload.gpr(), m_compileIndex, UseChildrenCalledExplicitly);
+ break;
+ }
+
+ case ConvertThis: {
+ SpeculateCellOperand thisValue(this, node.child1());
+
+ speculationCheck(m_jit.branchPtr(JITCompiler::Equal, JITCompiler::Address(thisValue.gpr()), JITCompiler::TrustedImmPtr(m_jit.globalData()->jsStringVPtr)));
+
+ cellResult(thisValue.gpr(), m_compileIndex);
+ break;
+ }
+
+ case CreateThis: {
+ // Note that there is not so much profit to speculate here. The only things we
+ // speculate on are (1) that it's a cell, since that eliminates cell checks
+ // later if the proto is reused, and (2) if we have a FinalObject prediction
+ // then we speculate because we want to get recompiled if it isn't (since
+ // otherwise we'd start taking slow path a lot).
+
+ SpeculateCellOperand proto(this, node.child1());
+ GPRTemporary result(this);
+ GPRTemporary scratch(this);
+
+ GPRReg protoGPR = proto.gpr();
+ GPRReg resultGPR = result.gpr();
+ GPRReg scratchGPR = scratch.gpr();
+
+ proto.use();
+
+ MacroAssembler::JumpList slowPath;
+
+ // Need to verify that the prototype is an object. If we have reason to believe
+ // that it's a FinalObject then we speculate on that directly. Otherwise we
+ // do the slow (structure-based) check.
+ if (shouldSpeculateFinalObject(node.child1()))
+ speculationCheck(m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(protoGPR), MacroAssembler::TrustedImmPtr(m_jit.globalData()->jsFinalObjectVPtr)));
+ else {
+ m_jit.loadPtr(MacroAssembler::Address(protoGPR, JSCell::structureOffset()), scratchGPR);
+ slowPath.append(m_jit.branch8(MacroAssembler::Below, MacroAssembler::Address(scratchGPR, Structure::typeInfoTypeOffset()), MacroAssembler::TrustedImm32(ObjectType)));
+ }
+
+ // Load the inheritorID (the Structure that objects who have protoGPR as the prototype
+ // use to refer to that prototype). If the inheritorID is not set, go to slow path.
+ m_jit.loadPtr(MacroAssembler::Address(protoGPR, JSObject::offsetOfInheritorID()), scratchGPR);
+ slowPath.append(m_jit.branchTestPtr(MacroAssembler::Zero, scratchGPR));
+
+ MarkedSpace::SizeClass* sizeClass = &m_jit.globalData()->heap.sizeClassForObject(sizeof(JSFinalObject));
+
+ m_jit.loadPtr(&sizeClass->firstFreeCell, resultGPR);
+ slowPath.append(m_jit.branchTestPtr(MacroAssembler::Zero, resultGPR));
+
+ // The object is half-allocated: we have what we know is a fresh object, but
+ // it's still on the GC's free list.
+
+ // Ditch the inheritorID by placing it into the structure, so that we can reuse
+ // scratchGPR.
+ m_jit.storePtr(scratchGPR, MacroAssembler::Address(resultGPR, JSObject::structureOffset()));
+
+ // Now that we have scratchGPR back, remove the object from the free list
+ m_jit.loadPtr(MacroAssembler::Address(resultGPR), scratchGPR);
+ m_jit.storePtr(scratchGPR, &sizeClass->firstFreeCell);
+
+ // Initialize the object's vtable
+ m_jit.storePtr(MacroAssembler::TrustedImmPtr(m_jit.globalData()->jsFinalObjectVPtr), MacroAssembler::Address(resultGPR));
+
+ // Initialize the object's inheritorID.
+ m_jit.storePtr(MacroAssembler::TrustedImmPtr(0), MacroAssembler::Address(resultGPR, JSObject::offsetOfInheritorID()));
+
+ // Initialize the object's property storage pointer.
+ m_jit.addPtr(MacroAssembler::TrustedImm32(sizeof(JSObject)), resultGPR, scratchGPR);
+ m_jit.storePtr(scratchGPR, MacroAssembler::Address(resultGPR, JSFinalObject::offsetOfPropertyStorage()));
+
+ MacroAssembler::Jump done = m_jit.jump();
+
+ slowPath.link(&m_jit);
+
+ silentSpillAllRegisters(resultGPR);
+ m_jit.push(TrustedImm32(JSValue::CellTag));
+ m_jit.push(protoGPR);
+ m_jit.push(GPRInfo::callFrameRegister);
+ appendCallWithExceptionCheck(operationCreateThis);
+ m_jit.move(GPRInfo::returnValueGPR, resultGPR);
+ silentFillAllRegisters(resultGPR);
+
+ done.link(&m_jit);
+
+ cellResult(resultGPR, m_compileIndex, UseChildrenCalledExplicitly);
+ break;
+ }
+
+ case GetCallee: {
+ GPRTemporary result(this);
+ m_jit.loadPtr(JITCompiler::addressFor(static_cast<VirtualRegister>(RegisterFile::Callee)), result.gpr());
+ cellResult(result.gpr(), m_compileIndex);
+ break;
+ }
+
+ case GetScopeChain: {
+ GPRTemporary result(this);
+ GPRReg resultGPR = result.gpr();
+
+ m_jit.loadPtr(JITCompiler::addressFor(static_cast<VirtualRegister>(RegisterFile::ScopeChain)), resultGPR);
+ bool checkTopLevel = m_jit.codeBlock()->codeType() == FunctionCode && m_jit.codeBlock()->needsFullScopeChain();
+ int skip = node.scopeChainDepth();
+ ASSERT(skip || !checkTopLevel);
+ if (checkTopLevel && skip--) {
+ JITCompiler::Jump activationNotCreated;
+ if (checkTopLevel)
+ activationNotCreated = m_jit.branchTestPtr(JITCompiler::Zero, JITCompiler::addressFor(static_cast<VirtualRegister>(m_jit.codeBlock()->activationRegister())));
+ m_jit.loadPtr(JITCompiler::Address(resultGPR, OBJECT_OFFSETOF(ScopeChainNode, next)), resultGPR);
+ activationNotCreated.link(&m_jit);
+ }
+ while (skip--)
+ m_jit.loadPtr(JITCompiler::Address(resultGPR, OBJECT_OFFSETOF(ScopeChainNode, next)), resultGPR);
+
+ m_jit.loadPtr(JITCompiler::Address(resultGPR, OBJECT_OFFSETOF(ScopeChainNode, object)), resultGPR);
+
+ cellResult(resultGPR, m_compileIndex);
+ break;
+ }
+ case GetScopedVar: {
+ SpeculateCellOperand scopeChain(this, node.child1());
+ GPRTemporary resultTag(this);
+ GPRTemporary resultPayload(this);
+ GPRReg resultTagGPR = resultTag.gpr();
+ GPRReg resultPayloadGPR = resultPayload.gpr();
+ m_jit.loadPtr(JITCompiler::Address(scopeChain.gpr(), JSVariableObject::offsetOfRegisters()), resultPayloadGPR);
+ m_jit.load32(JITCompiler::Address(resultPayloadGPR, node.varNumber() * sizeof(Register) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), resultTagGPR);
+ m_jit.load32(JITCompiler::Address(resultPayloadGPR, node.varNumber() * sizeof(Register) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), resultPayloadGPR);
+ jsValueResult(resultTagGPR, resultPayloadGPR, m_compileIndex);
+ break;
+ }
+ case PutScopedVar: {
+ SpeculateCellOperand scopeChain(this, node.child1());
+ GPRTemporary scratchRegister(this);
+ GPRReg scratchGPR = scratchRegister.gpr();
+ m_jit.loadPtr(JITCompiler::Address(scopeChain.gpr(), JSVariableObject::offsetOfRegisters()), scratchGPR);
+ JSValueOperand value(this, node.child2());
+ m_jit.store32(value.tagGPR(), JITCompiler::Address(scratchGPR, node.varNumber() * sizeof(Register) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)));
+ m_jit.store32(value.payloadGPR(), JITCompiler::Address(scratchGPR, node.varNumber() * sizeof(Register) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));
+ writeBarrier(scopeChain.gpr(), value.tagGPR(), node.child2(), WriteBarrierForVariableAccess, scratchGPR);
+ noResult(m_compileIndex);
+ break;
+ }
+
+ case GetById: {
+ SpeculateCellOperand base(this, node.child1());
+ GPRTemporary resultTag(this, base);
+ GPRTemporary resultPayload(this);
+
+ GPRReg baseGPR = base.gpr();
+ GPRReg resultTagGPR = resultTag.gpr();
+ GPRReg resultPayloadGPR = resultPayload.gpr();
+ GPRReg scratchGPR;
+
+ if (resultTagGPR == baseGPR)
+ scratchGPR = resultPayloadGPR;
+ else
+ scratchGPR = resultTagGPR;
+
+ base.use();
+
+ cachedGetById(baseGPR, resultTagGPR, resultPayloadGPR, scratchGPR, node.identifierNumber());
+
+ jsValueResult(resultTagGPR, resultPayloadGPR, m_compileIndex, UseChildrenCalledExplicitly);
+ break;
+ }
+
+ case CheckStructure: {
+ SpeculateCellOperand base(this, node.child1());
+ GPRTemporary result(this, base);
+
+ GPRReg baseGPR = base.gpr();
+ GPRReg resultGPR = result.gpr();
+
+ speculationCheck(m_jit.branchPtr(JITCompiler::NotEqual, JITCompiler::Address(baseGPR, JSCell::structureOffset()), JITCompiler::TrustedImmPtr(node.structure())));
+
+ m_jit.loadPtr(JITCompiler::Address(baseGPR, JSObject::offsetOfPropertyStorage()), resultGPR);
+
+ storageResult(resultGPR, m_compileIndex);
+ break;
+ }
+
+ case GetByOffset: {
+ StorageOperand storage(this, node.child1());
+ GPRTemporary resultTag(this, storage);
+ GPRTemporary resultPayload(this);
+
+ GPRReg storageGPR = storage.gpr();
+ GPRReg resultTagGPR = resultTag.gpr();
+ GPRReg resultPayloadGPR = resultPayload.gpr();
+
+ storage.use();
+ StorageAccessData& storageAccessData = m_jit.graph().m_storageAccessData[node.storageAccessDataIndex()];
+
+ m_jit.load32(JITCompiler::Address(storageGPR, storageAccessData.offset * sizeof(EncodedJSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), resultTagGPR);
+ m_jit.load32(JITCompiler::Address(storageGPR, storageAccessData.offset * sizeof(EncodedJSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), resultPayloadGPR);
+
+ jsValueResult(resultTagGPR, resultPayloadGPR, m_compileIndex);
+ break;
+ }
+
+ case GetMethod: {
+ SpeculateCellOperand base(this, node.child1());
+ GPRTemporary resultTag(this, base);
+ GPRTemporary resultPayload(this);
+
+ GPRReg baseGPR = base.gpr();
+ GPRReg resultTagGPR = resultTag.gpr();
+ GPRReg resultPayloadGPR = resultPayload.gpr();
+ GPRReg scratchGPR;
+
+ if (resultTagGPR == baseGPR)
+ scratchGPR = resultPayloadGPR;
+ else
+ scratchGPR = resultTagGPR;
+
+ base.use();
+
+ cachedGetMethod(baseGPR, resultTagGPR, resultPayloadGPR, scratchGPR, node.identifierNumber());
+
+ jsValueResult(resultTagGPR, resultPayloadGPR, m_compileIndex, UseChildrenCalledExplicitly);
+ break;
+ }
+
+ case CheckMethod: {
+ MethodCheckData& methodCheckData = m_jit.graph().m_methodCheckData[node.methodCheckDataIndex()];
+
+ SpeculateCellOperand base(this, node.child1());
+ GPRTemporary scratch(this); // this needs to be a separate register, unfortunately.
+ GPRReg baseGPR = base.gpr();
+ GPRReg scratchGPR = scratch.gpr();
+
+ speculationCheck(m_jit.branchPtr(JITCompiler::NotEqual, JITCompiler::Address(baseGPR, JSCell::structureOffset()), JITCompiler::TrustedImmPtr(methodCheckData.structure)));
+ if (methodCheckData.prototype != m_jit.codeBlock()->globalObject()->methodCallDummy()) {
+ m_jit.move(JITCompiler::TrustedImmPtr(methodCheckData.prototype->structureAddress()), scratchGPR);
+ speculationCheck(m_jit.branchPtr(JITCompiler::NotEqual, JITCompiler::Address(scratchGPR), JITCompiler::TrustedImmPtr(methodCheckData.prototypeStructure)));
+ }
+
+ useChildren(node);
+ initConstantInfo(m_compileIndex);
+ break;
+ }
+
+ case PutById: {
+ SpeculateCellOperand base(this, node.child1());
+ JSValueOperand value(this, node.child2());
+ GPRTemporary scratch(this);
+
+ GPRReg baseGPR = base.gpr();
+ GPRReg valueTagGPR = value.tagGPR();
+ GPRReg valuePayloadGPR = value.payloadGPR();
+ GPRReg scratchGPR = scratch.gpr();
+
+ base.use();
+ value.use();
+
+ cachedPutById(baseGPR, valueTagGPR, valuePayloadGPR, node.child2(), scratchGPR, node.identifierNumber(), NotDirect);
+
+ noResult(m_compileIndex, UseChildrenCalledExplicitly);
+ break;
+ }
+
+ case PutByIdDirect: {
+ SpeculateCellOperand base(this, node.child1());
+ JSValueOperand value(this, node.child2());
+ GPRTemporary scratch(this);
+
+ GPRReg baseGPR = base.gpr();
+ GPRReg valueTagGPR = value.tagGPR();
+ GPRReg valuePayloadGPR = value.payloadGPR();
+ GPRReg scratchGPR = scratch.gpr();
+
+ base.use();
+ value.use();
+
+ cachedPutById(baseGPR, valueTagGPR, valuePayloadGPR, node.child2(), scratchGPR, node.identifierNumber(), Direct);
+
+ noResult(m_compileIndex, UseChildrenCalledExplicitly);
+ break;
+ }
+
+ case GetGlobalVar: {
+ GPRTemporary result(this);
+ GPRTemporary scratch(this);
+
+ JSVariableObject* globalObject = m_jit.codeBlock()->globalObject();
+ m_jit.loadPtr(const_cast<WriteBarrier<Unknown>**>(globalObject->addressOfRegisters()), result.gpr());
+ m_jit.load32(JITCompiler::tagForGlobalVar(result.gpr(), node.varNumber()), scratch.gpr());
+ m_jit.load32(JITCompiler::payloadForGlobalVar(result.gpr(), node.varNumber()), result.gpr());
+
+ jsValueResult(scratch.gpr(), result.gpr(), m_compileIndex);
+ break;
+ }
+
+ case PutGlobalVar: {
+ JSValueOperand value(this, node.child1());
+ GPRTemporary globalObject(this);
+ GPRTemporary scratch(this);
+
+ GPRReg globalObjectReg = globalObject.gpr();
+ GPRReg scratchReg = scratch.gpr();
+
+ m_jit.move(MacroAssembler::TrustedImmPtr(m_jit.codeBlock()->globalObject()), globalObjectReg);
+
+ writeBarrier(m_jit.codeBlock()->globalObject(), value.tagGPR(), node.child1(), WriteBarrierForVariableAccess, scratchReg);
+
+ m_jit.loadPtr(MacroAssembler::Address(globalObjectReg, JSVariableObject::offsetOfRegisters()), scratchReg);
+ m_jit.store32(value.tagGPR(), JITCompiler::tagForGlobalVar(scratchReg, node.varNumber()));
+ m_jit.store32(value.payloadGPR(), JITCompiler::payloadForGlobalVar(scratchReg, node.varNumber()));
+
+ noResult(m_compileIndex);
+ break;
+ }
+
+ case CheckHasInstance: {
+ SpeculateCellOperand base(this, node.child1());
+ GPRTemporary structure(this);
+
+ // Speculate that base 'ImplementsDefaultHasInstance'.
+ m_jit.loadPtr(MacroAssembler::Address(base.gpr(), JSCell::structureOffset()), structure.gpr());
+ speculationCheck(m_jit.branchTest8(MacroAssembler::Zero, MacroAssembler::Address(structure.gpr(), Structure::typeInfoFlagsOffset()), MacroAssembler::TrustedImm32(ImplementsDefaultHasInstance)));
+
+ noResult(m_compileIndex);
+ break;
+ }
+
+ case InstanceOf: {
+ SpeculateCellOperand value(this, node.child1());
+ // Base unused since we speculate default InstanceOf behaviour in CheckHasInstance.
+ SpeculateCellOperand prototype(this, node.child3());
+
+ GPRTemporary scratch(this);
+ GPRTemporary booleanTag(this, value);
+
+ GPRReg valueReg = value.gpr();
+ GPRReg prototypeReg = prototype.gpr();
+ GPRReg scratchReg = scratch.gpr();
+
+ // Check that prototype is an object.
+ m_jit.loadPtr(MacroAssembler::Address(prototypeReg, JSCell::structureOffset()), scratchReg);
+ speculationCheck(m_jit.branch8(MacroAssembler::NotEqual, MacroAssembler::Address(scratchReg, Structure::typeInfoTypeOffset()), MacroAssembler::TrustedImm32(ObjectType)));
+
+ // Initialize scratchReg with the value being checked.
+ m_jit.move(valueReg, scratchReg);
+
+ // Walk up the prototype chain of the value (in scratchReg), comparing to prototypeReg.
+ MacroAssembler::Label loop(&m_jit);
+ m_jit.loadPtr(MacroAssembler::Address(scratchReg, JSCell::structureOffset()), scratchReg);
+ m_jit.load32(MacroAssembler::Address(scratchReg, Structure::prototypeOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), scratchReg);
+ MacroAssembler::Jump isInstance = m_jit.branch32(MacroAssembler::Equal, scratchReg, prototypeReg);
+ m_jit.branchTest32(MacroAssembler::NonZero, scratchReg).linkTo(loop, &m_jit);
+
+ // No match - result is false.
+ m_jit.move(MacroAssembler::TrustedImm32(0), scratchReg);
+ MacroAssembler::Jump putResult = m_jit.jump();
+
+ isInstance.link(&m_jit);
+ m_jit.move(MacroAssembler::TrustedImm32(1), scratchReg);
+
+ putResult.link(&m_jit);
+ m_jit.move(TrustedImm32(JSValue::BooleanTag), booleanTag.gpr());
+ jsValueResult(booleanTag.gpr(), scratchReg, m_compileIndex, DataFormatJSBoolean);
+ break;
+ }
+
+ case Phi:
+ ASSERT_NOT_REACHED();
+
+ case Breakpoint:
+#if ENABLE(DEBUG_WITH_BREAKPOINT)
+ m_jit.breakpoint();
+#else
+ ASSERT_NOT_REACHED();
+#endif
+ break;
+
+ case Call:
+ case Construct:
+ emitCall(node);
+ break;
+
+ case Resolve: {
+ flushRegisters();
+ GPRResult resultPayload(this);
+ GPRResult2 resultTag(this);
+ callOperation(operationResolve, resultTag.gpr(), resultPayload.gpr(), identifier(node.identifierNumber()));
+ jsValueResult(resultTag.gpr(), resultPayload.gpr(), m_compileIndex);
+ break;
+ }
+
+ case ResolveBase: {
+ flushRegisters();
+ GPRResult resultPayload(this);
+ GPRResult2 resultTag(this);
+ callOperation(operationResolveBase, resultTag.gpr(), resultPayload.gpr(), identifier(node.identifierNumber()));
+ jsValueResult(resultTag.gpr(), resultPayload.gpr(), m_compileIndex);
+ break;
+ }
+
+ case ResolveBaseStrictPut: {
+ flushRegisters();
+ GPRResult resultPayload(this);
+ GPRResult2 resultTag(this);
+ callOperation(operationResolveBaseStrictPut, resultTag.gpr(), resultPayload.gpr(), identifier(node.identifierNumber()));
+ jsValueResult(resultTag.gpr(), resultPayload.gpr(), m_compileIndex);
+ break;
+ }
+
+ case ResolveGlobal: {
+ GPRTemporary globalObject(this);
+ GPRTemporary resolveInfo(this);
+ GPRTemporary resultTag(this);
+ GPRTemporary resultPayload(this);
+
+ GPRReg globalObjectGPR = globalObject.gpr();
+ GPRReg resolveInfoGPR = resolveInfo.gpr();
+ GPRReg resultTagGPR = resultTag.gpr();
+ GPRReg resultPayloadGPR = resultPayload.gpr();
+
+ ResolveGlobalData& data = m_jit.graph().m_resolveGlobalData[node.resolveGlobalDataIndex()];
+ GlobalResolveInfo* resolveInfoAddress = &(m_jit.codeBlock()->globalResolveInfo(data.resolveInfoIndex));
+
+ // Check Structure of global object
+ m_jit.move(JITCompiler::TrustedImmPtr(m_jit.codeBlock()->globalObject()), globalObjectGPR);
+ m_jit.move(JITCompiler::TrustedImmPtr(resolveInfoAddress), resolveInfoGPR);
+ m_jit.load32(JITCompiler::Address(resolveInfoGPR, OBJECT_OFFSETOF(GlobalResolveInfo, structure) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), resultTagGPR);
+ m_jit.load32(JITCompiler::Address(resolveInfoGPR, OBJECT_OFFSETOF(GlobalResolveInfo, structure) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), resultPayloadGPR);
+
+ JITCompiler::JumpList structuresNotMatch;
+ structuresNotMatch.append(m_jit.branch32(JITCompiler::NotEqual, resultTagGPR, JITCompiler::Address(globalObjectGPR, JSCell::structureOffset() + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag))));
+ structuresNotMatch.append(m_jit.branch32(JITCompiler::NotEqual, resultPayloadGPR, JITCompiler::Address(globalObjectGPR, JSCell::structureOffset() + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload))));
+
+ // Fast case
+ m_jit.load32(JITCompiler::Address(globalObjectGPR, JSObject::offsetOfPropertyStorage()), resultPayloadGPR);
+ m_jit.load32(JITCompiler::Address(resolveInfoGPR, OBJECT_OFFSETOF(GlobalResolveInfo, offset)), resolveInfoGPR);
+ m_jit.load32(JITCompiler::BaseIndex(resultPayloadGPR, resolveInfoGPR, JITCompiler::TimesEight, OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), resultTagGPR);
+ m_jit.load32(JITCompiler::BaseIndex(resultPayloadGPR, resolveInfoGPR, JITCompiler::TimesEight, OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), resultPayloadGPR);
+
+ JITCompiler::Jump wasFast = m_jit.jump();
+
+ structuresNotMatch.link(&m_jit);
+ silentSpillAllRegisters(resultTagGPR, resultPayloadGPR);
+ m_jit.push(JITCompiler::TrustedImm32(reinterpret_cast<int>(&m_jit.codeBlock()->identifier(data.identifierNumber))));
+ m_jit.push(resolveInfoGPR);
+ m_jit.push(GPRInfo::callFrameRegister);
+ JITCompiler::Call functionCall = appendCallWithExceptionCheck(operationResolveGlobal);
+ setupResults(resultTagGPR, resultPayloadGPR);
+ silentFillAllRegisters(resultTagGPR, resultPayloadGPR);
+
+ wasFast.link(&m_jit);
+
+ jsValueResult(resultTagGPR, resultPayloadGPR, m_compileIndex);
+ break;
+ }
+
+ case Phantom:
+ // This is a no-op.
+ noResult(m_compileIndex);
+ break;
+ }
+
+ if (node.hasResult() && node.mustGenerate())
+ use(m_compileIndex);
+}
+
+// Handle a SetLocal node that was not selected for code generation: emit no
+// code, but record where the local's value now logically lives so it can be
+// reconstructed later (used by the skipped-node path in compile(BasicBlock&)).
+void SpeculativeJIT::compileMovHint(Node& node)
+{
+ ASSERT(node.op == SetLocal);
+
+ // Map the operand to the node that produces its value, and remember this
+ // as the most recently set operand.
+ setNodeIndexForOperand(node.child1(), node.local());
+ m_lastSetOperand = node.local();
+}
+
+// Generate speculative code for one basic block: bind the block's entry label,
+// reset per-block value-source tracking, then compile each node in order.
+// If any node terminates speculative execution, the rest of the block is
+// abandoned (see the !m_compileOkay branch below).
+void SpeculativeJIT::compile(BasicBlock& block)
+{
+ ASSERT(m_compileOkay);
+ ASSERT(m_compileIndex == block.begin);
+
+ // Let the JIT compiler note blocks that on-stack-replacement can enter.
+ if (block.isOSRTarget)
+ m_jit.noticeOSREntry(block);
+
+ // Record this block's entry point so branches to it can be linked later.
+ m_blockHeads[m_block] = m_jit.label();
+#if ENABLE(DFG_JIT_BREAK_ON_EVERY_BLOCK)
+ m_jit.breakpoint();
+#endif
+
+ // Forget all per-block knowledge of where argument/variable values live;
+ // each block starts with a clean value-source map.
+ for (size_t i = 0; i < m_arguments.size(); ++i)
+ m_arguments[i] = ValueSource();
+ for (size_t i = 0; i < m_variables.size(); ++i)
+ m_variables[i] = ValueSource();
+ // Sentinel values meaning "nothing set yet in this block".
+ m_lastSetOperand = std::numeric_limits<int>::max();
+ m_bytecodeIndexForOSR = std::numeric_limits<uint32_t>::max();
+
+ for (; m_compileIndex < block.end; ++m_compileIndex) {
+ Node& node = m_jit.graph()[m_compileIndex];
+ // Track the bytecode index for OSR exit purposes.
+ m_bytecodeIndexForOSR = node.codeOrigin.bytecodeIndex();
+ if (!node.shouldGenerate()) {
+#if ENABLE(DFG_DEBUG_VERBOSE)
+ fprintf(stderr, "SpeculativeJIT skipping Node @%d (bc#%u) at JIT offset 0x%x ", (int)m_compileIndex, node.codeOrigin.bytecodeIndex(), m_jit.debugOffset());
+#endif
+ // Even dead SetLocals must update the value-source map.
+ if (node.op == SetLocal)
+ compileMovHint(node);
+ } else {
+
+#if ENABLE(DFG_DEBUG_VERBOSE)
+ fprintf(stderr, "SpeculativeJIT generating Node @%d (bc#%u) at JIT offset 0x%x ", (int)m_compileIndex, node.codeOrigin.bytecodeIndex(), m_jit.debugOffset());
+#endif
+#if ENABLE(DFG_JIT_BREAK_ON_EVERY_NODE)
+ m_jit.breakpoint();
+#endif
+ checkConsistency();
+ compile(node);
+ // A node signalled terminal speculation failure: reset the flag so
+ // later blocks can still compile, skip the rest of this block, and
+ // drop all register-allocation state.
+ if (!m_compileOkay) {
+ m_compileOkay = true;
+ m_compileIndex = block.end;
+ clearGenerationInfo();
+ return;
+ }
+
+#if ENABLE(DFG_DEBUG_VERBOSE)
+ // Dump where the node's result ended up (FPR, GPR, or tag/payload
+ // GPR pair for a boxed JSValue on JSVALUE32_64).
+ if (node.hasResult()) {
+ GenerationInfo& info = m_generationInfo[node.virtualRegister()];
+ fprintf(stderr, "-> %s, vr#%d", dataFormatToString(info.registerFormat()), (int)node.virtualRegister());
+ if (info.registerFormat() != DataFormatNone) {
+ if (info.registerFormat() == DataFormatDouble)
+ fprintf(stderr, ", %s", FPRInfo::debugName(info.fpr()));
+ else if (!(info.registerFormat() & DataFormatJS))
+ fprintf(stderr, ", %s", GPRInfo::debugName(info.gpr()));
+ else
+ fprintf(stderr, ", %s %s", GPRInfo::debugName(info.tagGPR()), GPRInfo::debugName(info.payloadGPR()));
+ }
+ fprintf(stderr, " ");
+ } else
+ fprintf(stderr, " ");
+#endif
+ }
+
+#if ENABLE(DFG_VERBOSE_VALUE_RECOVERIES)
+ // Dump value recoveries for all arguments, then all local variables.
+ for (int operand = -m_arguments.size() - RegisterFile::CallFrameHeaderSize; operand < -RegisterFile::CallFrameHeaderSize; ++operand)
+ computeValueRecoveryFor(operand).dump(stderr);
+
+ fprintf(stderr, " : ");
+
+ for (int operand = 0; operand < (int)m_variables.size(); ++operand)
+ computeValueRecoveryFor(operand).dump(stderr);
+#endif
+
+#if ENABLE(DFG_DEBUG_VERBOSE)
+ fprintf(stderr, "\n");
+#endif
+
+ if (node.shouldGenerate())
+ checkConsistency();
+ }
+}
+
+// If we are making type predictions about our arguments then
+// we need to check that they are correct on function entry.
+// Emit entry-time speculation checks for each parameter whose type was
+// predicted. A failed check triggers the speculationCheck OSR-exit path.
+void SpeculativeJIT::checkArgumentTypes()
+{
+ ASSERT(!m_compileIndex);
+ m_bytecodeIndexForOSR = 0;
+ for (int i = 0; i < m_jit.codeBlock()->m_numParameters; ++i) {
+ // Parameters occupy consecutive virtual registers starting at 'this'.
+ VirtualRegister virtualRegister = (VirtualRegister)(m_jit.codeBlock()->thisRegister() + i);
+ PredictedType predictedType = m_jit.graph().getPrediction(virtualRegister);
+ if (isInt32Prediction(predictedType))
+ // JSVALUE32_64: an int32 is identified purely by its tag word.
+ speculationCheck(m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::Int32Tag)));
+ else if (isArrayPrediction(predictedType)) {
+ // Must be a cell (tag check) whose vtable pointer is JSArray's.
+ GPRTemporary temp(this);
+ m_jit.load32(JITCompiler::tagFor(virtualRegister), temp.gpr());
+ speculationCheck(m_jit.branch32(MacroAssembler::NotEqual, temp.gpr(), TrustedImm32(JSValue::CellTag)));
+ m_jit.load32(JITCompiler::payloadFor(virtualRegister), temp.gpr());
+ speculationCheck(m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(temp.gpr()), MacroAssembler::TrustedImmPtr(m_jit.globalData()->jsArrayVPtr)));
+ }
+ }
+}
+
+// For any vars that we will be treating as numeric, write 0 to
+// the var on entry. Throughout the block we will only read/write
+// to the payload, by writing the tag now we prevent the GC from
+// misinterpreting values as pointers.
+void SpeculativeJIT::initializeVariableTypes()
+{
+ ASSERT(!m_compileIndex);
+ for (int var = 0; var < (int)m_jit.graph().predictions().numberOfVariables(); ++var) {
+ // Writing the Int32 tag up front means subsequent code can touch only
+ // the payload word without the GC misreading the slot as a pointer
+ // (see the block comment above this function).
+ if (isInt32Prediction(m_jit.graph().getPrediction(var)))
+ m_jit.store32(TrustedImm32(JSValue::Int32Tag), JITCompiler::tagFor((VirtualRegister)var));
+ }
+}
+
+// Top-level driver: emit argument type checks and variable tag initialization,
+// compile every basic block in order, then link cross-block branches.
+// Always returns true; per-block speculation failures are handled inside
+// compile(BasicBlock&) by bailing out of the affected block.
+bool SpeculativeJIT::compile()
+{
+ checkArgumentTypes();
+ initializeVariableTypes();
+
+ ASSERT(!m_compileIndex);
+ for (m_block = 0; m_block < m_jit.graph().m_blocks.size(); ++m_block)
+ compile(*m_jit.graph().m_blocks[m_block]);
+ linkBranches();
+ return true;
+}
+
+// Compute how the OSR exit machinery can reconstruct the JSValue described by
+// the given value source: already in the register file, a constant, live in
+// registers (an FPR, a GPR, or a tag/payload GPR pair on JSVALUE32_64),
+// spilled, or recoverable via a still-live converted alias of the value.
+ValueRecovery SpeculativeJIT::computeValueRecoveryFor(const ValueSource& valueSource)
+{
+ if (!valueSource.isSet())
+ return ValueRecovery::alreadyInRegisterFile();
+
+ if (m_jit.isConstant(valueSource.nodeIndex()))
+ return ValueRecovery::constant(m_jit.valueOfJSConstant(valueSource.nodeIndex()));
+
+ Node* nodePtr = &m_jit.graph()[valueSource.nodeIndex()];
+ if (!nodePtr->shouldGenerate()) {
+ // It's legitimately dead. As in, nobody will ever use this node, or operand,
+ // ever. Set it to Undefined to make the GC happy after the OSR.
+ return ValueRecovery::constant(jsUndefined());
+ }
+
+ GenerationInfo* infoPtr = &m_generationInfo[nodePtr->virtualRegister()];
+ if (!infoPtr->alive() || infoPtr->nodeIndex() != valueSource.nodeIndex()) {
+ // Try to see if there is an alternate node that would contain the value we want.
+ // There are four possibilities:
+ //
+ // ValueToNumber: If the only live version of the value is a ValueToNumber node
+ // then it means that all remaining uses of the value would have performed a
+ // ValueToNumber conversion anyway. Thus, we can substitute ValueToNumber.
+ //
+ // ValueToInt32: Likewise, if the only remaining live version of the value is
+ // ValueToInt32, then we can use it. But if there is both a ValueToInt32
+ // and a ValueToNumber, then we better go with ValueToNumber because it
+ // means that some remaining uses would have converted to number while
+ // others would have converted to Int32.
+ //
+ // UInt32ToNumber: If the only live version of the value is a UInt32ToNumber
+ // then the only remaining uses are ones that want a properly formed number
+ // rather than a UInt32 intermediate.
+ //
+ // The reverse of the above: This node could be a UInt32ToNumber, but its
+ // alternative is still alive. This means that the only remaining uses of
+ // the number would be fine with a UInt32 intermediate.
+
+ bool found = false;
+
+ // First, the "reverse" case: if this node is a UInt32ToNumber, its
+ // child may still be live and usable directly.
+ if (nodePtr->op == UInt32ToNumber) {
+ NodeIndex nodeIndex = nodePtr->child1();
+ nodePtr = &m_jit.graph()[nodeIndex];
+ infoPtr = &m_generationInfo[nodePtr->virtualRegister()];
+ if (infoPtr->alive() && infoPtr->nodeIndex() == nodeIndex)
+ found = true;
+ }
+
+ if (!found) {
+ // Scan all live generation info for a conversion node whose child
+ // is the value we want; remember one of each interesting kind.
+ NodeIndex valueToNumberIndex = NoNode;
+ NodeIndex valueToInt32Index = NoNode;
+ NodeIndex uint32ToNumberIndex = NoNode;
+
+ for (unsigned virtualRegister = 0; virtualRegister < m_generationInfo.size(); ++virtualRegister) {
+ GenerationInfo& info = m_generationInfo[virtualRegister];
+ if (!info.alive())
+ continue;
+ if (info.nodeIndex() == NoNode)
+ continue;
+ Node& node = m_jit.graph()[info.nodeIndex()];
+ if (node.child1Unchecked() != valueSource.nodeIndex())
+ continue;
+ switch (node.op) {
+ case ValueToNumber:
+ case ValueToDouble:
+ valueToNumberIndex = info.nodeIndex();
+ break;
+ case ValueToInt32:
+ valueToInt32Index = info.nodeIndex();
+ break;
+ case UInt32ToNumber:
+ uint32ToNumberIndex = info.nodeIndex();
+ break;
+ default:
+ break;
+ }
+ }
+
+ // Preference order per the comment above: ValueToNumber first,
+ // then ValueToInt32, then UInt32ToNumber.
+ NodeIndex nodeIndexToUse;
+ if (valueToNumberIndex != NoNode)
+ nodeIndexToUse = valueToNumberIndex;
+ else if (valueToInt32Index != NoNode)
+ nodeIndexToUse = valueToInt32Index;
+ else if (uint32ToNumberIndex != NoNode)
+ nodeIndexToUse = uint32ToNumberIndex;
+ else
+ nodeIndexToUse = NoNode;
+
+ if (nodeIndexToUse != NoNode) {
+ nodePtr = &m_jit.graph()[nodeIndexToUse];
+ infoPtr = &m_generationInfo[nodePtr->virtualRegister()];
+ ASSERT(infoPtr->alive() && infoPtr->nodeIndex() == nodeIndexToUse);
+ found = true;
+ }
+ }
+
+ // No live alias exists; treat the value as dead (see comment above).
+ if (!found)
+ return ValueRecovery::constant(jsUndefined());
+ }
+
+ ASSERT(infoPtr->alive());
+
+ // Value is live in machine registers.
+ if (infoPtr->registerFormat() != DataFormatNone) {
+ if (infoPtr->registerFormat() == DataFormatDouble)
+ return ValueRecovery::inFPR(infoPtr->fpr());
+ // JSVALUE32_64: a boxed JSValue occupies a tag/payload GPR pair.
+ if (infoPtr->registerFormat() & DataFormatJS)
+ return ValueRecovery::inPair(infoPtr->tagGPR(), infoPtr->payloadGPR());
+ return ValueRecovery::inGPR(infoPtr->gpr(), infoPtr->registerFormat());
+ }
+ // Value was spilled: recover it from the node's virtual register slot.
+ if (infoPtr->spillFormat() != DataFormatNone)
+ return ValueRecovery::displacedInRegisterFile(static_cast<VirtualRegister>(nodePtr->virtualRegister()));
+
+ ASSERT_NOT_REACHED();
+ return ValueRecovery();
+}
+
+} } // namespace JSC::DFG
+
+#endif
+#endif
class PropertySlot;
class PutPropertySlot;
class UString;
+#if ENABLE(DFG_JIT)
+ namespace DFG {
+ class JITCompiler;
+ class JITCodeGenerator;
+ class SpeculativeJIT;
+ }
+#endif
struct ClassInfo;
struct Instruction;
friend class JITStubCall;
friend class JSInterfaceJIT;
friend class SpecializedThunkJIT;
+#if ENABLE(DFG_JIT)
+ friend class DFG::JITCompiler;
+ friend class DFG::JITCodeGenerator;
+ friend class DFG::SpeculativeJIT;
+#endif
public:
static EncodedJSValue encode(JSValue);
SET(WTF_SOURCES
Assertions.cpp
+ BitVector.cpp
ByteArray.cpp
CryptographicallyRandomNumber.cpp
CurrentTime.cpp