There are two kinds of transition penalties:
1. Transition from 256-bit AVX code to 128-bit legacy SSE code.
2. Transition from 128-bit legacy SSE code to either 128-bit or
256-bit AVX code. This only happens if it was preceded by a
256-bit AVX to legacy SSE transition (penalty 1).
The standalone sketch below illustrates both situations.
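For illustration only (not part of this change): a minimal standalone C++ sketch, using compiler
intrinsics, of the two code shapes involved. Assume avx256_add is compiled with AVX enabled and
legacy_sse_add lives in a module built without AVX, so its 128-bit instructions are encoded as
non-VEX legacy SSE; both function names are made up for the example.

#include <immintrin.h>

// 256-bit AVX: leaves the upper 128 bits of the YMM registers dirty.
void avx256_add(float* dst, const float* a, const float* b)
{
    __m256 va = _mm256_loadu_ps(a);
    __m256 vb = _mm256_loadu_ps(b);
    _mm256_storeu_ps(dst, _mm256_add_ps(va, vb));

    // Without this, the first legacy SSE instruction executed afterwards pays
    // penalty 1, and the first VEX-encoded instruction after that pays penalty 2
    // (on hardware that implements the transition as a state-saving assist).
    _mm256_zeroupper();
}

// Imagine this in a separate module built without AVX: the load, add and store
// below are emitted as legacy (non-VEX) SSE instructions.
void legacy_sse_add(float* dst, const float* a, const float* b)
{
    __m128 va = _mm_loadu_ps(a);
    __m128 vb = _mm_loadu_ps(b);
    _mm_storeu_ps(dst, _mm_add_ps(va, vb));
}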
The primary goal is to remove penalty 1, the AVX to SSE transition penalty.
Added two emitter flags: contains256bitAVXInstruction indicates whether
the JIT method contains 256-bit AVX code; containsAVXInstruction
indicates whether the method contains any 128-bit or 256-bit AVX code.
Issue VZEROUPPER in the prolog if the method contains 128-bit or 256-bit
AVX code, to avoid the legacy SSE to AVX transition penalty; this can
happen in the Reverse PInvoke situation. Issue VZEROUPPER in the epilog
if the method contains 256-bit AVX code, to avoid the AVX to legacy
SSE transition penalty.
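The prolog/epilog policy is centralized in one small helper; the following is a condensed sketch
of the genVzeroupperIfNeeded helper added in the diff below (the prolog save path calls it with
check256bitOnly = false, the epilog restore path with the default true):

bool CodeGen::genVzeroupperIfNeeded(bool check256bitOnly /* = true */)
{
    bool issued = false;
#ifdef FEATURE_AVX_SUPPORT
    if (compiler->getFloatingPointInstructionSet() == InstructionSet_AVX)
    {
        // Epilog (check256bitOnly == true): only 256-bit AVX code needs a vzeroupper.
        // Prolog (check256bitOnly == false): any AVX code does.
        bool needed = check256bitOnly ? getEmitter()->Contains256bitAVX() : getEmitter()->ContainsAVX();
        if (needed)
        {
            instGen(INS_vzeroupper);
            issued = true;
        }
    }
#endif
    return issued;
}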
To limit the code size increase, we only issue VZEROUPPER before a
PInvoke call to a user-defined function when the JIT method contains
256-bit AVX code, assuming the user-defined function contains legacy
SSE code. There is no need to issue VZEROUPPER after the PInvoke call,
because penalty 2 (SSE to AVX) cannot occur once penalty 1 (AVX to SSE)
has been taken care of before the call; see the call-site sketch below.
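Condensed from the call-emission change further down in the diff, the call-site check reduces to
the fragment below; treating the native callee as legacy SSE code is a heuristic assumption, not
something the JIT can verify:

#ifdef FEATURE_AVX_SUPPORT
    if (call->IsPInvoke() && (call->gtCallType == CT_USER_FUNC) &&
        (compiler->getFloatingPointInstructionSet() == InstructionSet_AVX) &&
        getEmitter()->Contains256bitAVX())
    {
        // Zero the upper YMM halves before calling out to the (presumed) legacy SSE callee.
        instGen(INS_vzeroupper);
    }
#endif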
We measured a ~1% to 3% performance gain on TechEmpower plaintext and
verified that the VTune AVX/SSE events OTHER_ASSISTS.AVX_TO_SSE and
OTHER_ASSISTS.SSE_TO_AVX have been reduced to 0.
Fix dotnet/coreclr#7240
move setContainsAVX flags to lower, refactor to a smaller method
refactor, fix typo in comments
fix format error
Commit migrated from https://github.com/dotnet/coreclr/commit/
cc169eac6736693c4bdbd9f61ae821146252e4cb
// Save/Restore callee saved float regs to stack
void genPreserveCalleeSavedFltRegs(unsigned lclFrameSize);
void genRestoreCalleeSavedFltRegs(unsigned lclFrameSize);
+ // Generate VZeroupper instruction to avoid AVX/SSE transition penalty
+ bool genVzeroupperIfNeeded(bool check256bitOnly = true);
#endif // _TARGET_XARCH_ && FEATURE_STACK_FP_X87
// funclet frames: this will be FuncletInfo.fiSpDelta.
void CodeGen::genPreserveCalleeSavedFltRegs(unsigned lclFrameSize)
{
- regMaskTP regMask = compiler->compCalleeFPRegsSavedMask;
+ bool bVzeroupperIssued = genVzeroupperIfNeeded(false);
+ regMaskTP regMask = compiler->compCalleeFPRegsSavedMask;
// Only callee saved floating point registers should be in regMask
assert((regMask & RBM_FLT_CALLEE_SAVED) == regMask);
regMaskTP regBit = genRegMask(reg);
if ((regBit & regMask) != 0)
{
+#ifdef FEATURE_AVX_SUPPORT
+ // When we reach here, the method may not contain any AVX instruction so far (in which case no
+ // vzeroupper has been issued above). However, since copyIns can itself be an AVX instruction such as
+ // vmovupd, check and issue a vzeroupper before the copyIns to avoid a legacy SSE (e.g. native code
+ // reached via Reverse PInvoke) to AVX transition penalty
+ if (compiler->getFloatingPointInstructionSet() == InstructionSet_AVX && !bVzeroupperIssued &&
+ getEmitter()->IsAVXInstruction(copyIns))
+ {
+ instGen(INS_vzeroupper);
+ bVzeroupperIssued = true;
+ }
+#endif
// ABI requires us to preserve lower 128-bits of YMM register.
getEmitter()->emitIns_AR_R(copyIns,
EA_8BYTE, // TODO-XArch-Cleanup: size specified here doesn't matter but should be
offset -= XMM_REGSIZE_BYTES;
}
}
-
-#ifdef FEATURE_AVX_SUPPORT
- // Just before restoring float registers issue a Vzeroupper to zero out upper 128-bits of all YMM regs.
- // This is to avoid penalty if this routine is using AVX-256 and now returning to a routine that is
- // using SSE2.
- if (compiler->getFloatingPointInstructionSet() == InstructionSet_AVX)
- {
- instGen(INS_vzeroupper);
- }
-#endif
}
// Save/Restore compCalleeFPRegsPushed with the smallest register number saved at [RSP+offset], working
// fast path return
if (regMask == RBM_NONE)
{
+ genVzeroupperIfNeeded();
return;
}
assert((offset % 16) == 0);
#endif // _TARGET_AMD64_
-#ifdef FEATURE_AVX_SUPPORT
- // Just before restoring float registers issue a Vzeroupper to zero out upper 128-bits of all YMM regs.
- // This is to avoid penalty if this routine is using AVX-256 and now returning to a routine that is
- // using SSE2.
- if (compiler->getFloatingPointInstructionSet() == InstructionSet_AVX)
- {
- instGen(INS_vzeroupper);
- }
-#endif
-
for (regNumber reg = REG_FLT_CALLEE_SAVED_FIRST; regMask != RBM_NONE; reg = REG_NEXT(reg))
{
regMaskTP regBit = genRegMask(reg);
offset -= XMM_REGSIZE_BYTES;
}
}
+ genVzeroupperIfNeeded();
+}
+
+// Generate a Vzeroupper instruction as needed to zero out the upper 128 bits of all YMM registers, so
+// that AVX/legacy SSE transition penalties can be avoided
+//
+// Params
+// check256bitOnly - if true, generate Vzeroupper only when the function contains 256-bit AVX
+// instructions; otherwise generate it when the function contains any AVX instruction
+// (either 128-bit or 256-bit).
+//
+// Return Value:
+// true if Vzeroupper instruction is issued, false otherwise.
+//
+bool CodeGen::genVzeroupperIfNeeded(bool check256bitOnly)
+{
+ bool bVzeroupperIssued = false;
+#ifdef FEATURE_AVX_SUPPORT
+ if (compiler->getFloatingPointInstructionSet() == InstructionSet_AVX)
+ {
+ if (check256bitOnly)
+ {
+ if (getEmitter()->Contains256bitAVX())
+ {
+ instGen(INS_vzeroupper);
+ bVzeroupperIssued = true;
+ }
+ }
+ else
+ {
+ if (getEmitter()->ContainsAVX())
+ {
+ instGen(INS_vzeroupper);
+ bVzeroupperIssued = true;
+ }
+ }
+ }
+#endif
+ return bVzeroupperIssued;
}
+
#endif // defined(_TARGET_XARCH_) && !FEATURE_STACK_FP_X87
//-----------------------------------------------------------------------------------
#endif // defined(_TARGET_X86_)
+#ifdef FEATURE_AVX_SUPPORT
+ // When this is a PInvoke call and the call type is a user function, issue VZEROUPPER here if the
+ // function contains 256-bit AVX instructions. This avoids the AVX-256 to legacy SSE transition
+ // penalty, assuming the user function contains legacy SSE instructions
+ if (call->IsPInvoke() && call->gtCallType == CT_USER_FUNC &&
+ compiler->getFloatingPointInstructionSet() == InstructionSet_AVX)
+ {
+ if (getEmitter()->Contains256bitAVX())
+ {
+ instGen(INS_vzeroupper);
+ }
+ }
+#endif
+
if (target != nullptr)
{
#ifdef _TARGET_X86_
if (opts.compCanUseAVX)
{
codeGen->getEmitter()->SetUseAVX(true);
+ // Assume, to begin with, that the JITted method does not contain any AVX instructions
+ codeGen->getEmitter()->SetContainsAVX(false);
+ codeGen->getEmitter()->SetContains256bitAVX(false);
}
else
#endif // FEATURE_AVX_SUPPORT
useAVXEncodings = value;
}
+bool containsAVXInstruction;
+bool ContainsAVX()
+{
+ return containsAVXInstruction;
+}
+void SetContainsAVX(bool value)
+{
+ containsAVXInstruction = value;
+}
+
+bool contains256bitAVXInstruction;
+bool Contains256bitAVX()
+{
+ return contains256bitAVXInstruction;
+}
+void SetContains256bitAVX(bool value)
+{
+ contains256bitAVXInstruction = value;
+}
+
bool IsThreeOperandBinaryAVXInstruction(instruction ins);
bool IsThreeOperandMoveAVXInstruction(instruction ins);
bool IsThreeOperandAVXInstruction(instruction ins)
{
return false;
}
+bool ContainsAVX()
+{
+ return false;
+}
+bool Contains256bitAVX()
+{
+ return false;
+}
bool hasVexPrefix(code_t code)
{
return false;
#if defined(_TARGET_XARCH_)
void SetMulOpCounts(GenTreePtr tree);
+ void SetContainsAVXFlags(bool isFloatingType = true, unsigned sizeOfSIMDVector = 0);
#endif // defined(_TARGET_XARCH_)
#if !CPU_LOAD_STORE_ARCH
Compiler* compiler = comp;
TreeNodeInfo* info = &(tree->gtLsraInfo);
-
+ // A floating-point operand generates AVX instructions (vmovss etc.), so set the flag
+ SetContainsAVXFlags(varTypeIsFloating(tree->TypeGet()));
switch (tree->OperGet())
{
GenTree* op1;
{
MakeSrcContained(blkNode, source);
}
+ // An XMM register is used to fill with constants; this is an AVX instruction, so set the flag
+ SetContainsAVXFlags();
}
blkNode->gtBlkOpKind = GenTreeBlk::BlkOpKindUnroll;
// series of 16-byte loads and stores.
blkNode->gtLsraInfo.internalFloatCount = 1;
blkNode->gtLsraInfo.addInternalCandidates(l, l->internalFloatRegCandidates());
+ // XMM registers are used for the loads and stores; set the flag for AVX instructions
+ SetContainsAVXFlags();
}
// If src or dst are on stack, we don't have to generate the address into a register
TreeNodeInfo* info = &(tree->gtLsraInfo);
LinearScan* lsra = m_lsra;
info->dstCount = 1;
+ SetContainsAVXFlags(true, simdTree->gtSIMDSize);
switch (simdTree->gtSIMDIntrinsicID)
{
GenTree* op1;
}
//------------------------------------------------------------------------------
+// SetContainsAVXFlags: set the ContainsAVX flag when isFloatingType is true (the default); when the
+// SIMD vector size is 32 bytes, the instruction is a 256-bit AVX instruction, so also set the
+// Contains256bitAVX flag
+//
+// Arguments:
+// isFloatingType - whether the value is of a floating-point type
+// sizeOfSIMDVector - the SIMD vector size in bytes
+//
+void Lowering::SetContainsAVXFlags(bool isFloatingType, unsigned sizeOfSIMDVector)
+{
+#ifdef FEATURE_AVX_SUPPORT
+ if (comp->getSIMDInstructionSet() == InstructionSet_AVX)
+ {
+ if (isFloatingType)
+ {
+ comp->getEmitter()->SetContainsAVX(true);
+ if (sizeOfSIMDVector == 32)
+ {
+ comp->codeGen->getEmitter()->SetContains256bitAVX(true);
+ }
+ }
+ }
+#endif
+}
+
+//------------------------------------------------------------------------------
// isRMWRegOper: Can this binary tree node be used in a Read-Modify-Write format
//
// Arguments: