https://bugs.webkit.org/show_bug.cgi?id=68320
Reviewed by Oliver Hunt.
This adds the ability to access properties directly, by offset.
This optimization kicks in when at the time of DFG compilation,
it appears that the given get_by_id is self-cached by the old JIT.
Two new opcodes get introduced: CheckStructure and GetByOffset.
CheckStructure performs a speculation check on the object's
structure, and returns the storage pointer. GetByOffset performs
a direct read of the field from the storage pointer. Both
CheckStructure and GetByOffset can be CSE'd, so that we can
eliminate redundant structure checks, and redundant reads of the
same field.
This is a 4% speed-up on V8, a 2% slow-down on Kraken, and
neutral on SunSpider.
* bytecode/PredictedType.cpp:
(JSC::predictionFromClassInfo):
(JSC::predictionFromStructure):
(JSC::predictionFromCell):
* bytecode/PredictedType.h:
* dfg/DFGByteCodeParser.cpp:
(JSC::DFG::ByteCodeParser::parseBlock):
* dfg/DFGGenerationInfo.h:
(JSC::DFG::dataFormatToString):
(JSC::DFG::needDataFormatConversion):
(JSC::DFG::GenerationInfo::initStorage):
(JSC::DFG::GenerationInfo::spill):
(JSC::DFG::GenerationInfo::fillStorage):
* dfg/DFGGraph.h:
(JSC::DFG::Graph::predict):
(JSC::DFG::Graph::getPrediction):
* dfg/DFGJITCodeGenerator.cpp:
(JSC::DFG::JITCodeGenerator::fillInteger):
(JSC::DFG::JITCodeGenerator::fillDouble):
(JSC::DFG::JITCodeGenerator::fillJSValue):
(JSC::DFG::JITCodeGenerator::fillStorage):
(JSC::DFG::GPRTemporary::GPRTemporary):
* dfg/DFGJITCodeGenerator.h:
(JSC::DFG::JITCodeGenerator::silentSpillGPR):
(JSC::DFG::JITCodeGenerator::silentFillGPR):
(JSC::DFG::JITCodeGenerator::spill):
(JSC::DFG::JITCodeGenerator::storageResult):
(JSC::DFG::StorageOperand::StorageOperand):
(JSC::DFG::StorageOperand::~StorageOperand):
(JSC::DFG::StorageOperand::index):
(JSC::DFG::StorageOperand::gpr):
(JSC::DFG::StorageOperand::use):
* dfg/DFGNode.h:
(JSC::DFG::OpInfo::OpInfo):
(JSC::DFG::Node::Node):
(JSC::DFG::Node::hasPrediction):
(JSC::DFG::Node::hasStructure):
(JSC::DFG::Node::structure):
(JSC::DFG::Node::hasStorageAccessData):
(JSC::DFG::Node::storageAccessDataIndex):
* dfg/DFGPropagator.cpp:
(JSC::DFG::Propagator::propagateNode):
(JSC::DFG::Propagator::globalVarLoadElimination):
(JSC::DFG::Propagator::getMethodLoadElimination):
(JSC::DFG::Propagator::checkStructureLoadElimination):
(JSC::DFG::Propagator::getByOffsetLoadElimination):
(JSC::DFG::Propagator::performNodeCSE):
* dfg/DFGSpeculativeJIT.cpp:
(JSC::DFG::SpeculativeJIT::fillSpeculateIntInternal):
(JSC::DFG::SpeculativeJIT::fillSpeculateDouble):
(JSC::DFG::SpeculativeJIT::fillSpeculateCell):
(JSC::DFG::SpeculativeJIT::fillSpeculateBoolean):
(JSC::DFG::SpeculativeJIT::compile):
* wtf/StdLibExtras.h:
(WTF::safeCast):
git-svn-id: http://svn.webkit.org/repository/webkit/trunk@95523 268f45cc-cd09-0410-ab3c-d52691b4dbfc
+2011-09-18 Filip Pizlo <fpizlo@apple.com>
+
+ DFG JIT does not speculate aggressively enough on GetById
+ https://bugs.webkit.org/show_bug.cgi?id=68320
+
+ Reviewed by Oliver Hunt.
+
+ This adds the ability to access properties directly, by offset.
+ This optimization kicks in when at the time of DFG compilation,
+ it appears that the given get_by_id is self-cached by the old JIT.
+ Two new opcodes get introduced: CheckStructure and GetByOffset.
+ CheckStructure performs a speculation check on the object's
+ structure, and returns the storage pointer. GetByOffset performs
+ a direct read of the field from the storage pointer. Both
+ CheckStructure and GetByOffset can be CSE'd, so that we can
+ eliminate redundant structure checks, and redundant reads of the
+ same field.
+
+ This is a 4% speed-up on V8, a 2% slow-down on Kraken, and
+ neutral on SunSpider.
+
+ * bytecode/PredictedType.cpp:
+ (JSC::predictionFromClassInfo):
+ (JSC::predictionFromStructure):
+ (JSC::predictionFromCell):
+ * bytecode/PredictedType.h:
+ * dfg/DFGByteCodeParser.cpp:
+ (JSC::DFG::ByteCodeParser::parseBlock):
+ * dfg/DFGGenerationInfo.h:
+ (JSC::DFG::dataFormatToString):
+ (JSC::DFG::needDataFormatConversion):
+ (JSC::DFG::GenerationInfo::initStorage):
+ (JSC::DFG::GenerationInfo::spill):
+ (JSC::DFG::GenerationInfo::fillStorage):
+ * dfg/DFGGraph.h:
+ (JSC::DFG::Graph::predict):
+ (JSC::DFG::Graph::getPrediction):
+ * dfg/DFGJITCodeGenerator.cpp:
+ (JSC::DFG::JITCodeGenerator::fillInteger):
+ (JSC::DFG::JITCodeGenerator::fillDouble):
+ (JSC::DFG::JITCodeGenerator::fillJSValue):
+ (JSC::DFG::JITCodeGenerator::fillStorage):
+ (JSC::DFG::GPRTemporary::GPRTemporary):
+ * dfg/DFGJITCodeGenerator.h:
+ (JSC::DFG::JITCodeGenerator::silentSpillGPR):
+ (JSC::DFG::JITCodeGenerator::silentFillGPR):
+ (JSC::DFG::JITCodeGenerator::spill):
+ (JSC::DFG::JITCodeGenerator::storageResult):
+ (JSC::DFG::StorageOperand::StorageOperand):
+ (JSC::DFG::StorageOperand::~StorageOperand):
+ (JSC::DFG::StorageOperand::index):
+ (JSC::DFG::StorageOperand::gpr):
+ (JSC::DFG::StorageOperand::use):
+ * dfg/DFGNode.h:
+ (JSC::DFG::OpInfo::OpInfo):
+ (JSC::DFG::Node::Node):
+ (JSC::DFG::Node::hasPrediction):
+ (JSC::DFG::Node::hasStructure):
+ (JSC::DFG::Node::structure):
+ (JSC::DFG::Node::hasStorageAccessData):
+ (JSC::DFG::Node::storageAccessDataIndex):
+ * dfg/DFGPropagator.cpp:
+ (JSC::DFG::Propagator::propagateNode):
+ (JSC::DFG::Propagator::globalVarLoadElimination):
+ (JSC::DFG::Propagator::getMethodLoadElimination):
+ (JSC::DFG::Propagator::checkStructureLoadElimination):
+ (JSC::DFG::Propagator::getByOffsetLoadElimination):
+ (JSC::DFG::Propagator::performNodeCSE):
+ * dfg/DFGSpeculativeJIT.cpp:
+ (JSC::DFG::SpeculativeJIT::fillSpeculateIntInternal):
+ (JSC::DFG::SpeculativeJIT::fillSpeculateDouble):
+ (JSC::DFG::SpeculativeJIT::fillSpeculateCell):
+ (JSC::DFG::SpeculativeJIT::fillSpeculateBoolean):
+ (JSC::DFG::SpeculativeJIT::compile):
+ * wtf/StdLibExtras.h:
+ (WTF::safeCast):
+
2011-09-19 Mark Hahnenberg <mhahnenberg@apple.com>
Remove toPrimitive from JSCell
}
#endif
-PredictedType predictionFromCell(JSCell* cell)
+PredictedType predictionFromClassInfo(const ClassInfo* classInfo)
{
- const ClassInfo* classInfo = cell->structure()->classInfo();
-
if (classInfo == &JSFinalObject::s_info)
return PredictFinalObject;
return PredictCellOther;
}
+PredictedType predictionFromStructure(Structure* structure)
+{
+ return predictionFromClassInfo(structure->classInfo());
+}
+
+PredictedType predictionFromCell(JSCell* cell)
+{
+ return predictionFromStructure(cell->structure());
+}
+
PredictedType predictionFromValue(JSValue value)
{
if (value.isInt32())
namespace JSC {
+class Structure;
+
typedef uint16_t PredictedType;
static const PredictedType PredictNone = 0x0000; // We don't know anything yet.
static const PredictedType PredictFinalObject = 0x0001; // It's definitely a JSFinalObject.
return type | (source == StrongPrediction ? StrongPredictionTag : 0);
}
+PredictedType predictionFromClassInfo(const ClassInfo*);
+PredictedType predictionFromStructure(Structure*);
PredictedType predictionFromCell(JSCell*);
PredictedType predictionFromValue(JSValue);
case op_get_by_id: {
NodeIndex base = get(currentInstruction[2].u.operand);
- unsigned identifier = currentInstruction[3].u.operand;
+ unsigned identifierNumber = currentInstruction[3].u.operand;
+
+ StructureStubInfo& stubInfo = m_profiledBlock->getStubInfo(m_currentIndex);
+
+ NodeIndex getById = NoNode;
+ if (stubInfo.seen && stubInfo.accessType == access_get_by_id_self) {
+ Structure* structure = stubInfo.u.getByIdSelf.baseObjectStructure.get();
+ Identifier identifier = m_codeBlock->identifier(identifierNumber);
+ size_t offset = structure->get(*m_globalData, identifier);
+
+ if (offset != notFound) {
+ getById = addToGraph(GetByOffset, OpInfo(m_graph.m_storageAccessData.size()), OpInfo(PredictNone), addToGraph(CheckStructure, OpInfo(structure), base));
+
+ StorageAccessData storageAccessData;
+ storageAccessData.offset = offset;
+ storageAccessData.identifierNumber = identifierNumber;
+ m_graph.m_storageAccessData.append(storageAccessData);
+ }
+ }
+
+ if (getById == NoNode)
+ getById = addToGraph(GetById, OpInfo(identifierNumber), OpInfo(PredictNone), base);
- NodeIndex getById = addToGraph(GetById, OpInfo(identifier), OpInfo(PredictNone), base);
set(currentInstruction[1].u.operand, getById);
stronglyPredict(getById);
DataFormatDouble = 2,
DataFormatBoolean = 3,
DataFormatCell = 4,
+ DataFormatStorage = 5,
DataFormatJS = 8,
DataFormatJSInteger = DataFormatJS | DataFormatInteger,
DataFormatJSDouble = DataFormatJS | DataFormatDouble,
return "Cell";
case DataFormatBoolean:
return "Boolean";
+ case DataFormatStorage:
+ return "Storage";
case DataFormatJS:
return "JS";
case DataFormatJSInteger:
// This captures DataFormatBoolean, which is currently unused.
ASSERT_NOT_REACHED();
}
+ case DataFormatStorage:
+ ASSERT(to == DataFormatStorage);
+ return false;
default:
// This captures DataFormatBoolean, which is currently unused.
ASSERT_NOT_REACHED();
m_canFill = false;
u.fpr = fpr;
}
+ void initStorage(NodeIndex nodeIndex, uint32_t useCount, GPRReg gpr)
+ {
+ m_nodeIndex = nodeIndex;
+ m_useCount = useCount;
+ m_registerFormat = DataFormatStorage;
+ m_spillFormat = DataFormatNone;
+ m_canFill = false;
+ u.gpr = gpr;
+ }
// Get the index of the node that produced this value.
NodeIndex nodeIndex() { return m_nodeIndex; }
ASSERT(m_spillFormat == DataFormatNone);
// We should only be spilling values that are currently in machine registers.
ASSERT(m_registerFormat != DataFormatNone);
- // We only spill values that have been boxed as a JSValue; otherwise the GC
- // would need a way to distinguish cell pointers from numeric primitives.
- ASSERT(spillFormat & DataFormatJS);
+ // We only spill values that have been boxed as a JSValue because historically
+ // we assumed that the GC would want to be able to precisely identify heap
+ // pointers. This is not true anymore, but we still assume, in the fill code,
+ // that any spill slot for a JS value is boxed. For storage pointers, there is
+ // nothing we can do to box them, so we allow that to be an exception.
+ ASSERT((spillFormat & DataFormatJS) || spillFormat == DataFormatStorage);
m_registerFormat = DataFormatNone;
m_spillFormat = spillFormat;
m_registerFormat = DataFormatDouble;
u.fpr = fpr;
}
+ void fillStorage(GPRReg gpr)
+ {
+ m_registerFormat = DataFormatStorage;
+ u.gpr = gpr;
+ }
bool alive()
{
}
};
+struct StorageAccessData {
+ size_t offset;
+ unsigned identifierNumber;
+
+ // NOTE: the offset and identifierNumber do not by themselves
+ // uniquely identify a property. The identifierNumber and a
+ // Structure* do. If those two match, then the offset should
+ // be the same, as well. For any Node that has a StorageAccessData,
+ // it is possible to retrieve the Structure* by looking at the
+ // first child. It should be a CheckStructure, which has the
+ // Structure*.
+};
+
typedef Vector <BlockIndex, 2> PredecessorList;
struct BasicBlock {
case GetByVal:
case Call:
case Construct:
+ case GetByOffset:
return node.predict(prediction, source);
default:
return false;
case GetByVal:
case Call:
case Construct:
+ case GetByOffset:
return nodePtr->getPrediction();
case CheckMethod:
return getMethodCheckPrediction(*nodePtr);
Vector< OwnPtr<BasicBlock> , 8> m_blocks;
Vector<NodeIndex, 16> m_varArgChildren;
Vector<MethodCheckData> m_methodCheckData;
+ Vector<StorageAccessData> m_storageAccessData;
unsigned m_preservedVars;
unsigned m_parameterSlots;
private:
case DataFormatJSCell:
case DataFormatBoolean:
case DataFormatJSBoolean:
+ case DataFormatStorage:
// Should only be calling this function if we know this operand to be integer.
ASSERT_NOT_REACHED();
case DataFormatJSCell:
case DataFormatBoolean:
case DataFormatJSBoolean:
+ case DataFormatStorage:
// Should only be calling this function if we know this operand to be numeric.
ASSERT_NOT_REACHED();
}
case DataFormatBoolean:
+ case DataFormatStorage:
// this type currently never occurs
ASSERT_NOT_REACHED();
}
return InvalidGPRReg;
}
+GPRReg JITCodeGenerator::fillStorage(NodeIndex nodeIndex)
+{
+ Node& node = m_jit.graph()[nodeIndex];
+ VirtualRegister virtualRegister = node.virtualRegister();
+ GenerationInfo& info = m_generationInfo[virtualRegister];
+
+ switch (info.registerFormat()) {
+ case DataFormatNone: {
+ GPRReg gpr = allocate();
+ ASSERT(info.spillFormat() == DataFormatStorage);
+ m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
+ m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr);
+ info.fillStorage(gpr);
+ return gpr;
+ }
+
+ case DataFormatStorage: {
+ GPRReg gpr = info.gpr();
+ m_gprs.lock(gpr);
+ return gpr;
+ }
+
+ default:
+ ASSERT_NOT_REACHED();
+ }
+
+ return InvalidGPRReg;
+}
+
void JITCodeGenerator::useChildren(Node& node)
{
if (node.op & NodeHasVarArgs) {
jsValueResult(scratchReg, m_compileIndex, UseChildrenCalledExplicitly);
}
-template<typename To, typename From>
-inline To safeCast(From value)
-{
- To result = static_cast<To>(value);
- ASSERT(result == value);
- return result;
-}
-
JITCompiler::Call JITCodeGenerator::cachedGetById(GPRReg baseGPR, GPRReg resultGPR, GPRReg scratchGPR, unsigned identifierNumber, JITCompiler::Jump slowPathTarget, NodeType nodeType)
{
JITCompiler::DataLabelPtr structureToCompare;
m_gpr = m_jit->allocate();
}
+GPRTemporary::GPRTemporary(JITCodeGenerator* jit, StorageOperand& op1)
+ : m_jit(jit)
+ , m_gpr(InvalidGPRReg)
+{
+ if (m_jit->canReuse(op1.index()))
+ m_gpr = m_jit->reuse(op1.gpr());
+ else
+ m_gpr = m_jit->allocate();
+}
+
FPRTemporary::FPRTemporary(JITCodeGenerator* jit)
: m_jit(jit)
, m_fpr(InvalidFPRReg)
// the register allocator.
enum SpillOrder {
SpillOrderConstant = 1, // no spill, and cheap fill
- SpillOrderSpilled = 2, // no spill
- SpillOrderJS = 4, // needs spill
- SpillOrderCell = 4, // needs spill
- SpillOrderInteger = 5, // needs spill and box
- SpillOrderDouble = 6, // needs spill and convert
+ SpillOrderSpilled = 2, // no spill
+ SpillOrderJS = 4, // needs spill
+ SpillOrderCell = 4, // needs spill
+ SpillOrderStorage = 4, // needs spill
+ SpillOrderInteger = 5, // needs spill and box
+ SpillOrderDouble = 6, // needs spill and convert
};
enum UseChildrenMode { CallUseChildren, UseChildrenCalledExplicitly };
GPRReg fillInteger(NodeIndex, DataFormat& returnFormat);
FPRReg fillDouble(NodeIndex);
GPRReg fillJSValue(NodeIndex);
+ GPRReg fillStorage(NodeIndex);
// lock and unlock GPR & FPR registers.
void lock(GPRReg reg)
DataFormat registerFormat = info.registerFormat();
- if (registerFormat == DataFormatInteger) {
+ if (registerFormat == DataFormatInteger)
m_jit.store32(info.gpr(), JITCompiler::addressFor(spillMe));
- } else {
- ASSERT(registerFormat & DataFormatJS || registerFormat == DataFormatCell);
+ else {
+ ASSERT(registerFormat & DataFormatJS || registerFormat == DataFormatCell || registerFormat == DataFormatStorage);
m_jit.storePtr(info.gpr(), JITCompiler::addressFor(spillMe));
}
}
if (node.hasConstant())
m_jit.move(valueOfJSConstantAsImmPtr(nodeIndex), info.gpr());
else {
- ASSERT(registerFormat & DataFormatJS || registerFormat == DataFormatCell);
+ ASSERT(registerFormat & DataFormatJS || registerFormat == DataFormatCell || registerFormat == DataFormatStorage);
m_jit.loadPtr(JITCompiler::addressFor(spillMe), info.gpr());
}
}
}
DataFormat spillFormat = info.registerFormat();
- if (spillFormat == DataFormatDouble) {
+ switch (spillFormat) {
+ case DataFormatDouble: {
// All values are spilled as JSValues, so box the double via a temporary gpr.
GPRReg gpr = boxDouble(info.fpr());
m_jit.storePtr(gpr, JITCompiler::addressFor(spillMe));
info.spill(DataFormatJSDouble);
return;
}
+
+ case DataFormatStorage: {
+ // This is special, since it's not a JS value - as in it's not visible to JS
+ // code.
+ m_jit.storePtr(info.gpr(), JITCompiler::addressFor(spillMe));
+ info.spill(DataFormatStorage);
+ return;
+ }
- // The following code handles JSValues, int32s, and cells.
- ASSERT(spillFormat == DataFormatInteger || spillFormat == DataFormatCell || spillFormat & DataFormatJS);
-
- GPRReg reg = info.gpr();
- // We need to box int32 and cell values ...
- // but on JSVALUE64 boxing a cell is a no-op!
- if (spillFormat == DataFormatInteger)
- m_jit.orPtr(GPRInfo::tagTypeNumberRegister, reg);
-
- // Spill the value, and record it as spilled in its boxed form.
- m_jit.storePtr(reg, JITCompiler::addressFor(spillMe));
- info.spill((DataFormat)(spillFormat | DataFormatJS));
+ default:
+ // The following code handles JSValues, int32s, and cells.
+ ASSERT(spillFormat == DataFormatInteger || spillFormat == DataFormatCell || spillFormat & DataFormatJS);
+
+ GPRReg reg = info.gpr();
+ // We need to box int32 and cell values ...
+ // but on JSVALUE64 boxing a cell is a no-op!
+ if (spillFormat == DataFormatInteger)
+ m_jit.orPtr(GPRInfo::tagTypeNumberRegister, reg);
+
+ // Spill the value, and record it as spilled in its boxed form.
+ m_jit.storePtr(reg, JITCompiler::addressFor(spillMe));
+ info.spill((DataFormat)(spillFormat | DataFormatJS));
+ return;
+ }
}
bool isStrictInt32(NodeIndex);
{
jsValueResult(reg, nodeIndex, DataFormatJS, mode);
}
+ void storageResult(GPRReg reg, NodeIndex nodeIndex, UseChildrenMode mode = CallUseChildren)
+ {
+ Node& node = m_jit.graph()[nodeIndex];
+ if (mode == CallUseChildren)
+ useChildren(node);
+
+ VirtualRegister virtualRegister = node.virtualRegister();
+ m_gprs.retain(reg, virtualRegister, SpillOrderStorage);
+ GenerationInfo& info = m_generationInfo[virtualRegister];
+ info.initStorage(nodeIndex, node.refCount(), reg);
+ }
void doubleResult(FPRReg reg, NodeIndex nodeIndex, UseChildrenMode mode = CallUseChildren)
{
Node& node = m_jit.graph()[nodeIndex];
GPRReg m_gprOrInvalid;
};
+class StorageOperand {
+public:
+ explicit StorageOperand(JITCodeGenerator* jit, NodeIndex index)
+ : m_jit(jit)
+ , m_index(index)
+ , m_gprOrInvalid(InvalidGPRReg)
+ {
+ ASSERT(m_jit);
+ if (jit->isFilled(index))
+ gpr();
+ }
+
+ ~StorageOperand()
+ {
+ ASSERT(m_gprOrInvalid != InvalidGPRReg);
+ m_jit->unlock(m_gprOrInvalid);
+ }
+
+ NodeIndex index() const
+ {
+ return m_index;
+ }
+
+ GPRReg gpr()
+ {
+ if (m_gprOrInvalid == InvalidGPRReg)
+ m_gprOrInvalid = m_jit->fillStorage(index());
+ return m_gprOrInvalid;
+ }
+
+ void use()
+ {
+ m_jit->use(m_index);
+ }
+
+private:
+ JITCodeGenerator* m_jit;
+ NodeIndex m_index;
+ GPRReg m_gprOrInvalid;
+};
+
// === Temporaries ===
//
GPRTemporary(JITCodeGenerator*, SpeculateCellOperand&);
GPRTemporary(JITCodeGenerator*, SpeculateBooleanOperand&);
GPRTemporary(JITCodeGenerator*, JSValueOperand&);
+ GPRTemporary(JITCodeGenerator*, StorageOperand&);
~GPRTemporary()
{
#define NodeMightClobber 0x800000
// These values record the result type of the node (as checked by NodeResultMask, above), 0 for no result.
-#define NodeResultJS 0x1000
-#define NodeResultNumber 0x2000
-#define NodeResultInt32 0x3000
-#define NodeResultBoolean 0x4000
+#define NodeResultJS 0x1000
+#define NodeResultNumber 0x2000
+#define NodeResultInt32 0x3000
+#define NodeResultBoolean 0x4000
+#define NodeResultStorage 0x5000
// This macro defines a set of information about all known node types, used to populate NodeId, NodeType below.
#define FOR_EACH_DFG_OP(macro) \
macro(GetById, NodeResultJS | NodeMustGenerate | NodeClobbersWorld) \
macro(PutById, NodeMustGenerate | NodeClobbersWorld) \
macro(PutByIdDirect, NodeMustGenerate | NodeClobbersWorld) \
+ macro(CheckStructure, NodeResultStorage | NodeMustGenerate) \
+ macro(GetByOffset, NodeResultJS) \
macro(GetMethod, NodeResultJS | NodeMustGenerate) \
macro(CheckMethod, NodeResultJS | NodeMustGenerate) \
macro(GetGlobalVar, NodeResultJS | NodeMustGenerate) \
// distinguishes an immediate value (typically an index into a CodeBlock data structure -
// a constant index, argument, or identifier) from a NodeIndex.
struct OpInfo {
- explicit OpInfo(unsigned value) : m_value(value) {}
- unsigned m_value;
+ explicit OpInfo(int value) : m_value(value) { }
+ explicit OpInfo(unsigned value) : m_value(value) { }
+ explicit OpInfo(uintptr_t value) : m_value(value) { }
+ explicit OpInfo(void* value) : m_value(reinterpret_cast<uintptr_t>(value)) { }
+ uintptr_t m_value;
};
// === Node ===
, m_virtualRegister(InvalidVirtualRegister)
, m_refCount(0)
, m_opInfo(imm1.m_value)
- , m_opInfo2(imm2.m_value)
+ , m_opInfo2(safeCast<unsigned>(imm2.m_value))
{
ASSERT(!(op & NodeHasVarArgs));
children.fixed.child1 = child1;
, m_virtualRegister(InvalidVirtualRegister)
, m_refCount(0)
, m_opInfo(imm1.m_value)
- , m_opInfo2(imm2.m_value)
+ , m_opInfo2(safeCast<unsigned>(imm2.m_value))
{
ASSERT(op & NodeHasVarArgs);
children.variable.firstChild = firstChild;
case GetByVal:
case Call:
case Construct:
+ case GetByOffset:
return true;
default:
return false;
return m_opInfo2;
}
+ bool hasStructure()
+ {
+ return op == CheckStructure;
+ }
+
+ Structure* structure()
+ {
+ return reinterpret_cast<Structure*>(m_opInfo);
+ }
+
+ bool hasStorageAccessData()
+ {
+ return op == GetByOffset;
+ }
+
+ unsigned storageAccessDataIndex()
+ {
+ return m_opInfo;
+ }
+
bool hasVirtualRegister()
{
return m_virtualRegister != InvalidVirtualRegister;
VirtualRegister m_virtualRegister;
// The number of uses of the result of this operation (+1 for 'must generate' nodes, which have side-effects).
unsigned m_refCount;
- // Immediate values, accesses type-checked via accessors above.
- unsigned m_opInfo, m_opInfo2;
+ // Immediate values, accesses type-checked via accessors above. The first one is
+ // big enough to store a pointer.
+ uintptr_t m_opInfo;
+ unsigned m_opInfo2;
};
} } // namespace JSC::DFG
break;
}
+ case CheckStructure: {
+ // We backward propagate what this CheckStructure tells us. Maybe that's the right way
+ // to go; maybe it isn't. I'm not sure. What we'd really want is flow-sensitive
+ // forward propagation of what we learn from having executed CheckStructure. But for
+ // now we preserve the flow-insensitive nature of this analysis, because it's cheap to
+ // run and seems to work well enough.
+ changed |= mergeUse(node.child1(), predictionFromStructure(node.structure()) | StrongPredictionTag);
+ changed |= setPrediction(PredictOther | StrongPredictionTag);
+ break;
+ }
+
+ case GetByOffset: {
+ changed |= node.predict(m_uses[m_compileIndex] & ~PredictionTagMask, StrongPrediction);
+ if (isStrongPrediction(node.getPrediction()))
+ changed |= setPrediction(node.getPrediction());
+ break;
+ }
+
case CheckMethod: {
changed |= mergeUse(node.child1(), PredictObjectUnknown | StrongPredictionTag);
changed |= setPrediction(m_graph.getMethodCheckPrediction(node));
NodeIndex globalVarLoadElimination(unsigned varNumber)
{
- NodeIndex start = startIndex();
+ NodeIndex start = startIndexForChildren();
for (NodeIndex index = m_compileIndex; index-- > start;) {
Node& node = m_graph[index];
switch (node.op) {
NodeIndex getMethodLoadElimination(const MethodCheckData& methodCheckData, unsigned identifierNumber, NodeIndex child1)
{
- NodeIndex start = startIndex();
+ NodeIndex start = startIndexForChildren(child1);
for (NodeIndex index = m_compileIndex; index-- > start;) {
Node& node = m_graph[index];
if (node.op == CheckMethod
return NoNode;
}
+ NodeIndex checkStructureLoadElimination(Structure* structure, NodeIndex child1)
+ {
+ NodeIndex start = startIndexForChildren(child1);
+ for (NodeIndex index = m_compileIndex; index-- > start;) {
+ Node& node = m_graph[index];
+ if (node.op == CheckStructure
+ && node.child1() == child1
+ && node.structure() == structure)
+ return index;
+ if (clobbersWorld(index))
+ break;
+ }
+ return NoNode;
+ }
+
+ NodeIndex getByOffsetLoadElimination(unsigned identifierNumber, NodeIndex child1)
+ {
+ NodeIndex start = startIndexForChildren(child1);
+ for (NodeIndex index = m_compileIndex; index-- > start;) {
+ Node& node = m_graph[index];
+ if (node.op == GetByOffset
+ && node.child1() == child1
+ && m_graph.m_storageAccessData[node.storageAccessDataIndex()].identifierNumber == identifierNumber)
+ return index;
+ if (clobbersWorld(index))
+ break;
+ }
+ return NoNode;
+ }
+
void performSubstitution(NodeIndex& child)
{
// Check if this operand is actually unused.
setReplacement(getMethodLoadElimination(m_graph.m_methodCheckData[node.methodCheckDataIndex()], node.identifierNumber(), node.child1()));
break;
+ case CheckStructure:
+ setReplacement(checkStructureLoadElimination(node.structure(), node.child1()));
+ break;
+
+ case GetByOffset:
+ setReplacement(getByOffsetLoadElimination(m_graph.m_storageAccessData[node.storageAccessDataIndex()].identifierNumber, node.child1()));
+ break;
+
default:
// do nothing.
break;
returnFormat = DataFormatInteger;
return allocate();
}
+
+ case DataFormatStorage:
+ ASSERT_NOT_REACHED();
}
ASSERT_NOT_REACHED();
switch (info.registerFormat()) {
case DataFormatNone: // Should have filled, above.
case DataFormatBoolean: // This type never occurs.
+ case DataFormatStorage:
ASSERT_NOT_REACHED();
case DataFormatCell:
terminateSpeculativeExecution();
return allocate();
}
+
+ case DataFormatStorage:
+ ASSERT_NOT_REACHED();
}
ASSERT_NOT_REACHED();
terminateSpeculativeExecution();
return allocate();
}
+
+ case DataFormatStorage:
+ ASSERT_NOT_REACHED();
}
ASSERT_NOT_REACHED();
jsValueResult(resultGPR, m_compileIndex, UseChildrenCalledExplicitly);
break;
}
+
+ case CheckStructure: {
+ SpeculateCellOperand base(this, node.child1());
+ GPRTemporary result(this, base);
+
+ GPRReg baseGPR = base.gpr();
+ GPRReg resultGPR = result.gpr();
+
+ speculationCheck(m_jit.branchPtr(JITCompiler::NotEqual, JITCompiler::Address(baseGPR, JSCell::structureOffset()), JITCompiler::TrustedImmPtr(node.structure())));
+
+ m_jit.loadPtr(JITCompiler::Address(baseGPR, JSObject::offsetOfPropertyStorage()), resultGPR);
+
+ storageResult(resultGPR, m_compileIndex);
+ break;
+ }
+
+ case GetByOffset: {
+ StorageOperand storage(this, node.child1());
+ GPRTemporary result(this, storage);
+
+ GPRReg storageGPR = storage.gpr();
+ GPRReg resultGPR = result.gpr();
+
+ StorageAccessData& storageAccessData = m_jit.graph().m_storageAccessData[node.storageAccessDataIndex()];
+
+ m_jit.loadPtr(JITCompiler::Address(storageGPR, storageAccessData.offset * sizeof(EncodedJSValue)), resultGPR);
+
+ jsValueResult(resultGPR, m_compileIndex);
+ break;
+ }
case GetMethod: {
SpeculateCellOperand base(this, node.child1());
#ifndef WTF_StdLibExtras_h
#define WTF_StdLibExtras_h
-#include <wtf/Assertions.h>
+#include "CheckedArithmetic.h"
+#include "Assertions.h"
// Use these to declare and define a static local variable (static T;) so that
// it is leaked so that its destructors are not called at exit. Using this
return u.to;
}
+template<typename To, typename From>
+inline To safeCast(From value)
+{
+ ASSERT(isInBounds<To>(value));
+ return static_cast<To>(value);
+}
+
// Returns a count of the number of bits set in 'bits'.
inline size_t bitCount(unsigned bits)
{
using WTF::binarySearch;
using WTF::bitwise_cast;
+using WTF::safeCast;
#endif // WTF_StdLibExtras_h