return CanCompile;
case op_call_varargs:
- return ShouldProfile;
+ return MayInline;
default:
return CannotCompile;
// Put things here that must be defined even if ENABLE(DFG_JIT) is false.
-enum CapabilityLevel { CannotCompile, ShouldProfile, CanCompile, CapabilityLevelNotSet };
+enum CapabilityLevel { CannotCompile, MayInline, CanCompile, CapabilityLevelNotSet };
} } // namespace JSC::DFG
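The rename keeps MayInline in the slot ShouldProfile occupied, so the enumerators stay ordered CannotCompile < MayInline < CanCompile and the existing ">=" comparisons later in this patch keep their meaning. A minimal standalone sketch of that ordering; the helper name is hypothetical, not part of the patch:

    #include <cassert>

    // Enumerators copied from the declaration above; only their relative
    // order matters to the call sites shown later in this patch.
    enum CapabilityLevel { CannotCompile, MayInline, CanCompile, CapabilityLevelNotSet };

    // Hypothetical helper mirroring the "canCompileWithDFG() >= DFG::MayInline"
    // checks: profiling is worth maintaining for anything at or above MayInline,
    // because the DFG will either compile that code or inline it elsewhere.
    static bool profilingWillBeConsumed(CapabilityLevel level)
    {
        return level >= MayInline;
    }

    int main()
    {
        assert(!profilingWillBeConsumed(CannotCompile));
        assert(profilingWillBeConsumed(MayInline));
        assert(profilingWillBeConsumed(CanCompile));
        return 0;
    }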
m_canBeOptimized = false;
+ m_canBeOptimizedOrInlined = false;
m_shouldEmitProfiling = false;
break;
- case DFG::ShouldProfile:
+ case DFG::MayInline:
m_canBeOptimized = false;
+ m_canBeOptimizedOrInlined = true;
m_shouldEmitProfiling = true;
break;
case DFG::CanCompile:
m_canBeOptimized = true;
+ m_canBeOptimizedOrInlined = true;
m_shouldEmitProfiling = true;
break;
default:
}
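The switch establishes a simple invariant: m_canBeOptimizedOrInlined is set whenever m_canBeOptimized is set, and additionally for MayInline code blocks, so it answers "will the DFG look at this code at all?". A compact sketch of that relationship, reusing the CapabilityLevel sketch above; the struct and function names are hypothetical:

    // Hypothetical flag bundle mirroring the JIT members assigned above.
    struct CompilationFlags {
        bool canBeOptimized;
        bool canBeOptimizedOrInlined;
        bool shouldEmitProfiling;
    };

    // Mirrors the switch: CanCompile enables full optimization, MayInline only
    // enables inlining into an optimized caller, and both keep profiling on;
    // CannotCompile turns everything off.
    static CompilationFlags flagsForLevel(CapabilityLevel level)
    {
        switch (level) {
        case CanCompile:
            return { true, true, true };
        case MayInline:
            return { false, true, true };
        default:
            return { false, false, false };
        }
    }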
#if ENABLE(DFG_JIT) || ENABLE(LLINT)
- if (canBeOptimized()
+ if (canBeOptimizedOrInlined()
#if ENABLE(LLINT)
|| true
#endif
#if ENABLE(DFG_JIT)
bool canBeOptimized() { return m_canBeOptimized; }
+ bool canBeOptimizedOrInlined() { return m_canBeOptimizedOrInlined; }
bool shouldEmitProfiling() { return m_shouldEmitProfiling; }
#else
bool canBeOptimized() { return false; }
+ bool canBeOptimizedOrInlined() { return false; }
// Enables use of value profiler with tiered compilation turned off,
// in which case all code gets profiled.
bool shouldEmitProfiling() { return true; }
#if ENABLE(VALUE_PROFILER)
bool m_canBeOptimized;
+ bool m_canBeOptimizedOrInlined;
bool m_shouldEmitProfiling;
#endif
} JIT_CLASS_ALIGNMENT;
emitFastArithIntToImmNoCheck(regT1, regT1);
emitPutVirtualRegister(srcDst, regT1);
emitPutVirtualRegister(result);
+ if (canBeOptimizedOrInlined())
+ killLastResultRegister();
}
void JIT::emitSlow_op_post_inc(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
emitFastArithIntToImmNoCheck(regT1, regT1);
emitPutVirtualRegister(srcDst, regT1);
emitPutVirtualRegister(result);
+ if (canBeOptimizedOrInlined())
+ killLastResultRegister();
}
void JIT::emitSlow_op_post_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
emitStoreInt32(srcDst, regT2, true);
emitStoreAndMapInt32(dst, regT1, regT0, false, OPCODE_LENGTH(op_post_inc));
+ if (canBeOptimizedOrInlined())
+ unmap();
}
void JIT::emitSlow_op_post_inc(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
emitStoreInt32(srcDst, regT2, true);
emitStoreAndMapInt32(dst, regT1, regT0, false, OPCODE_LENGTH(op_post_dec));
+ if (canBeOptimizedOrInlined())
+ unmap();
}
void JIT::emitSlow_op_post_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
int dst = instruction[1].u.operand;
emitValueProfilingSite();
emitPutVirtualRegister(dst);
- if (canBeOptimized())
+ if (canBeOptimizedOrInlined())
killLastResultRegister(); // Make lastResultRegister tracking simpler in the DFG.
}
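killLastResultRegister() invalidates the baseline JIT's one-entry cache of "which virtual register still has its value live in the result register"; clearing it after a profiled store keeps the machine state in the simpler shape the DFG assumes for code it may inline or OSR-enter, as the comment above notes. A rough, purely illustrative sketch of that cache, not the JIT's actual data structure:

    // Illustrative only: a one-entry cache of the virtual register whose value
    // is still sitting in the result machine register after the last operation.
    struct LastResultCache {
        int cachedVirtualRegister = -1; // -1 means nothing is cached

        void recordResult(int virtualRegister) { cachedVirtualRegister = virtualRegister; }
        bool canReuseFor(int virtualRegister) const { return cachedVirtualRegister == virtualRegister; }
        void kill() { cachedVirtualRegister = -1; } // conceptually what killLastResultRegister() does
    };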
m_mappedTag = tag;
m_mappedPayload = payload;
- ASSERT(!canBeOptimized() || m_mappedPayload == regT0);
- ASSERT(!canBeOptimized() || m_mappedTag == regT1);
+ ASSERT(!canBeOptimizedOrInlined() || m_mappedPayload == regT0);
+ ASSERT(!canBeOptimizedOrInlined() || m_mappedTag == regT1);
}
inline void JIT::unmap(RegisterID registerID)
int dst = currentInstruction[1].u.operand;
int src = currentInstruction[2].u.operand;
- if (canBeOptimized()) {
+ if (canBeOptimizedOrInlined()) {
// Use simpler approach, since the DFG thinks that the last result register
// is always set to the destination on every operation.
emitGetVirtualRegister(src, regT0);
// If we succeed in all of our checks, and the code was optimizable, then make sure we
// decrement the rare case counter.
#if ENABLE(VALUE_PROFILER)
- if (m_codeBlock->canCompileWithDFG() >= DFG::ShouldProfile) {
+ if (m_codeBlock->canCompileWithDFG() >= DFG::MayInline) {
sub32(
TrustedImm32(1),
AbsoluteAddress(&m_codeBlock->rareCaseProfileForBytecodeOffset(stubInfo->bytecodeIndex)->m_counter));
// If we succeed in all of our checks, and the code was optimizable, then make sure we
// decrement the rare case counter.
#if ENABLE(VALUE_PROFILER)
- if (m_codeBlock->canCompileWithDFG() >= DFG::ShouldProfile) {
+ if (m_codeBlock->canCompileWithDFG() >= DFG::MayInline) {
sub32(
TrustedImm32(1),
AbsoluteAddress(&m_codeBlock->rareCaseProfileForBytecodeOffset(stubInfo->bytecodeIndex)->m_counter));
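Conceptually, the slow path has already counted this execution toward the per-bytecode rare-case profile, so a repatch that passes all of its checks subtracts that hit back out; otherwise any block at or above MayInline would look like it takes its slow case more often than it really does. A stripped-down sketch of that accounting, with hypothetical names:

    // Hypothetical mirror of the accounting above: the execution was already
    // counted as "rare", so a successful fast-path repatch credits it back,
    // matching the emitted sub32(TrustedImm32(1), ...).
    struct RareCaseProfile {
        int32_t m_counter = 0;
    };

    static void creditSuccessfulRepatch(RareCaseProfile& profile)
    {
        profile.m_counter--;
    }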
#if !defined(ENABLE_DFG_JIT) && ENABLE(JIT)
/* Enable the DFG JIT on X86 and X86_64. Only tested on Mac and GNU/Linux. */
-#if (CPU(X86) || CPU(X86_64)) && (PLATFORM(MAC) || OS(LINUX)) && !OS(TIZEN)
+#if (CPU(X86) || CPU(X86_64)) && (PLATFORM(MAC) || OS(LINUX))
#define ENABLE_DFG_JIT 1
#endif
/* Enable the DFG JIT on ARMv7. Only tested on iOS. */