- jitosr_stress
- jitosr_pgo
- jitosr_stress_random
+ - jit_stress_splitting
- jitpartialcompilation
- jitpartialcompilation_pgo
void unwindReserveFuncHelper(FuncInfoDsc* func, bool isHotCode);
void unwindEmitFuncHelper(FuncInfoDsc* func, void* pHotCode, void* pColdCode, bool isHotCode);
+#ifdef DEBUG
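+    // Reports unwind info for the single contiguous hot+cold region produced by fake splitting.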
+ void fakeUnwindEmitFuncHelper(FuncInfoDsc* func, void* pHotCode);
+#endif // DEBUG
+
#endif // TARGET_AMD64 || (TARGET_X86 && FEATURE_EH_FUNCLETS)
UNATIVE_OFFSET unwindGetCurrentOffset(FuncInfoDsc* func);
void Compiler::eeAllocMem(AllocMemArgs* args)
{
#ifdef DEBUG
- // Fake splitting implementation: hot section = hot code + 4K buffer + cold code
- const UNATIVE_OFFSET hotSizeRequest = args->hotCodeSize;
- const UNATIVE_OFFSET coldSizeRequest = args->coldCodeSize;
- const UNATIVE_OFFSET fakeSplittingBuffer = 4096;
+ const UNATIVE_OFFSET hotSizeRequest = args->hotCodeSize;
+ const UNATIVE_OFFSET coldSizeRequest = args->coldCodeSize;
+ // Fake splitting implementation: place hot/cold code in contiguous section
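+    // Illustrative layout of the adjusted request: the VM sees a single hot region sized
+    // to hold both parts, and no cold region at all.
+    //
+    //   |<-------------- args->hotCodeSize -------------->|
+    //   |<- hotSizeRequest ->|<---- coldSizeRequest ----->|      (args->coldCodeSize == 0)
+    //
+    // The block returned by the VM is carved back into hot and "cold" pointers below.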
if (JitConfig.JitFakeProcedureSplitting() && (coldSizeRequest > 0))
{
- args->hotCodeSize = hotSizeRequest + fakeSplittingBuffer + coldSizeRequest;
+ args->hotCodeSize = hotSizeRequest + coldSizeRequest;
args->coldCodeSize = 0;
}
#endif
if (JitConfig.JitFakeProcedureSplitting() && (coldSizeRequest > 0))
{
// Fix up hot/cold code pointers
- args->coldCodeBlock = ((BYTE*)args->hotCodeBlock) + hotSizeRequest + fakeSplittingBuffer;
- args->coldCodeBlockRW = ((BYTE*)args->hotCodeBlockRW) + hotSizeRequest + fakeSplittingBuffer;
+ args->coldCodeBlock = ((BYTE*)args->hotCodeBlock) + hotSizeRequest;
+ args->coldCodeBlockRW = ((BYTE*)args->hotCodeBlockRW) + hotSizeRequest;
// Reset args' hot/cold code sizes in case caller reads them later
args->hotCodeSize = hotSizeRequest;
args->coldCodeSize = coldSizeRequest;
printf("reserveUnwindInfo(isFunclet=%s, isColdCode=%s, unwindSize=0x%x)\n", isFunclet ? "true" : "false",
isColdCode ? "true" : "false", unwindSize);
}
-
- // Fake splitting currently does not handle unwind info for cold code
- if (isColdCode && JitConfig.JitFakeProcedureSplitting())
- {
- JITDUMP("reserveUnwindInfo for cold code with JitFakeProcedureSplitting enabled: ignoring cold unwind info\n");
- return;
- }
#endif // DEBUG
if (info.compMatchedVM)
}
printf(")\n");
}
-
- // Fake splitting currently does not handle unwind info for cold code
- if (pColdCode && JitConfig.JitFakeProcedureSplitting())
- {
- JITDUMP("allocUnwindInfo for cold code with JitFakeProcedureSplitting enabled: ignoring cold unwind info\n");
- return;
- }
#endif // DEBUG
if (info.compMatchedVM)
{
// The allocated chunk is bigger than used, fill in unused space in it.
unsigned unusedSize = allocatedHotCodeSize - emitCurCodeOffs(cp);
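+    // The code buffer may be double-mapped (W^X): cp points at the executable view, so
+    // write the padding through the writeable view at cp + writeableOffset.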
+ BYTE* cpRW = cp + writeableOffset;
for (unsigned i = 0; i < unusedSize; ++i)
{
- *cp++ = DEFAULT_CODE_BUFFER_INIT;
+ *cpRW++ = DEFAULT_CODE_BUFFER_INIT;
}
+
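+    // Switch back to the executable-view pointer so the offset check below is unchanged.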
+ cp = cpRW - writeableOffset;
assert(allocatedHotCodeSize == emitCurCodeOffs(cp));
}
}
CONFIG_METHODSET(JitEHDump, W("JitEHDump")) // Dump the EH table for the method, as reported to the VM
CONFIG_METHODSET(JitExclude, W("JitExclude"))
CONFIG_INTEGER(JitFakeProcedureSplitting, W("JitFakeProcedureSplitting"), 0) // Do code splitting independent of VM.
- // For now, this disables unwind info for
- // cold sections, breaking stack walks.
- // Set COMPlus_GCgen0size=1000000 to avoid
- // running the GC, which requires
- // stack-walking.
CONFIG_METHODSET(JitForceProcedureSplitting, W("JitForceProcedureSplitting"))
CONFIG_METHODSET(JitGCDump, W("JitGCDump"))
CONFIG_METHODSET(JitDebugDump, W("JitDebugDump"))
//
void Compiler::unwindReserveFunc(FuncInfoDsc* func)
{
- unwindReserveFuncHelper(func, true);
-
- if (fgFirstColdBlock != nullptr)
+#ifdef DEBUG
+ if (JitConfig.JitFakeProcedureSplitting() && (fgFirstColdBlock != nullptr))
{
- unwindReserveFuncHelper(func, false);
+ assert(func->funKind == FUNC_ROOT); // No fake-splitting of funclets.
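+        // Only the hot-code reservation is needed here: the "cold" code is contiguous with
+        // the hot code and is covered by the same unwind info.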
+ unwindReserveFuncHelper(func, true);
+ }
+ else
+#endif // DEBUG
+ {
+ unwindReserveFuncHelper(func, true);
+
+ if (fgFirstColdBlock != nullptr)
+ {
+ unwindReserveFuncHelper(func, false);
+ }
}
}
static_assert_no_msg(FUNC_HANDLER == (FuncKind)CORJIT_FUNC_HANDLER);
static_assert_no_msg(FUNC_FILTER == (FuncKind)CORJIT_FUNC_FILTER);
- unwindEmitFuncHelper(func, pHotCode, pColdCode, true);
+#ifdef DEBUG
+ if (JitConfig.JitFakeProcedureSplitting() && (pColdCode != nullptr))
+ {
+ fakeUnwindEmitFuncHelper(func, pHotCode);
+ }
+ else
+#endif // DEBUG
+ {
+ unwindEmitFuncHelper(func, pHotCode, pColdCode, true);
+
+ if (pColdCode != nullptr)
+ {
+ unwindEmitFuncHelper(func, pHotCode, pColdCode, false);
+ }
+ }
+}
+
+#ifdef DEBUG
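+// fakeUnwindEmitFuncHelper: report unwind info for a fake-split method. The "cold" code is
+// contiguous with the hot code, so a single record covering [0, compNativeCodeSize) is
+// reported for the whole method.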
+void Compiler::fakeUnwindEmitFuncHelper(FuncInfoDsc* func, void* pHotCode)
+{
+ assert(fgFirstColdBlock != nullptr);
+    assert(func->funKind == FUNC_ROOT); // No fake-splitting of funclets.
+
- if (pColdCode != nullptr)
+ const UNATIVE_OFFSET startOffset = 0;
+ const UNATIVE_OFFSET endOffset = info.compNativeCodeSize;
+ const DWORD unwindCodeBytes = sizeof(func->unwindCodes) - func->unwindCodeSlot;
+ BYTE* pUnwindBlock = &func->unwindCodes[func->unwindCodeSlot];
+
+ if (opts.dspUnwind)
{
- unwindEmitFuncHelper(func, pHotCode, pColdCode, false);
+ DumpUnwindInfo(true, startOffset, endOffset, (const UNWIND_INFO* const)pUnwindBlock);
}
+
+ // Pass pColdCode = nullptr; VM allocs unwind info for combined hot/cold section
+ eeAllocUnwindInfo((BYTE*)pHotCode, nullptr, startOffset, endOffset, unwindCodeBytes, pUnwindBlock,
+ (CorJitFuncKind)func->funKind);
}
+#endif // DEBUG
#endif // TARGET_AMD64
//
void Compiler::unwindReserveFunc(FuncInfoDsc* func)
{
- unwindReserveFuncHelper(func, true);
-
- if (fgFirstColdBlock != nullptr)
+#ifdef DEBUG
+ if (JitConfig.JitFakeProcedureSplitting() && (fgFirstColdBlock != nullptr))
{
- unwindReserveFuncHelper(func, false);
+ assert(func->funKind == FUNC_ROOT); // No fake-splitting of funclets.
+ unwindReserveFuncHelper(func, true);
+ }
+ else
+#endif // DEBUG
+ {
+ unwindReserveFuncHelper(func, true);
+
+ if (fgFirstColdBlock != nullptr)
+ {
+ unwindReserveFuncHelper(func, false);
+ }
}
}
static_assert_no_msg(FUNC_HANDLER == (FuncKind)CORJIT_FUNC_HANDLER);
static_assert_no_msg(FUNC_FILTER == (FuncKind)CORJIT_FUNC_FILTER);
- unwindEmitFuncHelper(func, pHotCode, pColdCode, true);
-
- if (pColdCode != nullptr)
+#ifdef DEBUG
+ if (JitConfig.JitFakeProcedureSplitting() && (pColdCode != nullptr))
+ {
+ fakeUnwindEmitFuncHelper(func, pHotCode);
+ }
+ else
+#endif // DEBUG
{
- unwindEmitFuncHelper(func, pHotCode, pColdCode, false);
+ unwindEmitFuncHelper(func, pHotCode, pColdCode, true);
+
+ if (pColdCode != nullptr)
+ {
+ unwindEmitFuncHelper(func, pHotCode, pColdCode, false);
+ }
}
}
eeAllocUnwindInfo((BYTE*)pHotCode, (BYTE*)pColdCode, startOffset, endOffset, sizeof(UNWIND_INFO),
(BYTE*)&unwindInfo, (CorJitFuncKind)func->funKind);
}
+
+#ifdef DEBUG
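+// fakeUnwindEmitFuncHelper: report a single UNWIND_INFO whose FunctionLength spans the whole
+// method, since fake splitting keeps the "cold" code contiguous with the hot code.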
+void Compiler::fakeUnwindEmitFuncHelper(FuncInfoDsc* func, void* pHotCode)
+{
+ assert(fgFirstColdBlock != nullptr);
+ assert(func->funKind == FUNC_ROOT); // No fake-splitting of funclets.
+
+ const UNATIVE_OFFSET startOffset = 0;
+ const UNATIVE_OFFSET endOffset = info.compNativeCodeSize;
+
+ UNWIND_INFO unwindInfo;
+ unwindInfo.FunctionLength = (ULONG)(endOffset);
+
+ // Pass pColdCode = nullptr; VM allocs unwind info for combined hot/cold section
+ eeAllocUnwindInfo((BYTE*)pHotCode, nullptr, startOffset, endOffset, sizeof(UNWIND_INFO), (BYTE*)&unwindInfo,
+ (CorJitFuncKind)func->funKind);
+}
+#endif // DEBUG
+
#endif // FEATURE_EH_FUNCLETS
COMPlus_HeapVerify;
COMPlus_JITMinOpts;
COMPlus_JitELTHookEnabled;
+ COMPlus_JitFakeProcedureSplitting;
COMPlus_JitStress;
+ COMPlus_JitStressProcedureSplitting;
COMPlus_JitStressRegs;
COMPlus_TailcallStress;
COMPlus_ReadyToRun;
<TestEnvironment Include="jitosr" TC_OnStackReplacement="1" TC_QuickJitForLoops="1" TieredCompilation="1" />
<TestEnvironment Include="jitosr_stress" TC_OnStackReplacement="1" TC_QuickJitForLoops="1" TC_OnStackReplacement_InitialCounter="1" OSR_HitLimit="1" TieredCompilation="1" />
<TestEnvironment Include="jitosr_stress_random" TC_OnStackReplacement="1" TC_QuickJitForLoops="1" TC_OnStackReplacement_InitialCounter="1" OSR_HitLimit="2" TieredCompilation="1" JitRandomOnStackReplacement="15"/>
+ <TestEnvironment Include="jit_stress_splitting" JitFakeProcedureSplitting="1" JitStressProcedureSplitting="1" />
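+    <!-- jit_stress_splitting: stress procedure splitting, with the JIT faking the hot/cold split as contiguous sections -->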
<TestEnvironment Include="jitosr_pgo" TC_OnStackReplacement="1" TC_QuickJitForLoops="1" TieredCompilation="1" TieredPGO="1" />
<TestEnvironment Include="jitpartialcompilation" TC_PartialCompilation="1" TC_QuickJitForLoops="1" TieredCompilation="1" />
<TestEnvironment Include="jitpartialcompilation_pgo" TC_PartialCompilation="1" TC_QuickJitForLoops="1" TieredCompilation="1" TieredPGO="1" />