Always try to merge "branch to next" blocks when building the initial flow graph
if BBINSTR or BBOPT is set.
Fixes #85856.
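
For context, a minimal sketch of what "branch to next" detection amounts to (illustrative names, not the actual JIT code): an unconditional branch whose jump distance is zero targets the instruction immediately following it, so the importer can treat it as a NOP instead of starting a new basic block at the target.

    // Sketch only: assumes the JIT's OPCODE enum and CEE_* opcode names.
    static bool IsBranchToNext(OPCODE opcode, int jmpDist)
    {
        const bool isUncondBranch =
            (opcode == CEE_LEAVE) || (opcode == CEE_LEAVE_S) || (opcode == CEE_BR) || (opcode == CEE_BR_S);

        // A zero jump distance means the target is the next IL instruction,
        // so the branch has no control-flow effect and can be skipped.
        return isUncondBranch && (jmpDist == 0);
    }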
return jitFlags->IsSet(JitFlags::JIT_FLAG_BBINSTR);
}
- bool IsInstrumentedOptimized() const
+ bool IsInstrumentedAndOptimized() const
{
- return IsInstrumented() && jitFlags->IsSet(JitFlags::JIT_FLAG_TIER1);
+ return IsInstrumented() && jitFlags->IsSet(JitFlags::JIT_FLAG_BBOPT);
+ }
+
+ bool IsInstrumentedOrOptimized() const
+ {
+ return IsInstrumented() || jitFlags->IsSet(JitFlags::JIT_FLAG_BBOPT);
}
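
For clarity, the intended semantics of the two predicates over the instrumentation and optimization flags (a sketch inferred from the diff above; IsInstrumented() checks JIT_FLAG_BBINSTR):

    // BBINSTR  BBOPT   IsInstrumentedAndOptimized()  IsInstrumentedOrOptimized()
    // -------  -----   ----------------------------  ---------------------------
    // unset    unset   false                         false
    // unset    set     false                         true
    // set      unset   false                         true
    // set      set     true                          true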
// true if we should use the PINVOKE_{BEGIN,END} helpers instead of generating
// Compute jump target address
signed jmpDist = (sz == 1) ? getI1LittleEndian(codeAddr) : getI4LittleEndian(codeAddr);
- if (compIsForInlining() && jmpDist == 0 &&
- (opcode == CEE_LEAVE || opcode == CEE_LEAVE_S || opcode == CEE_BR || opcode == CEE_BR_S))
+ if ((jmpDist == 0) &&
+ (opcode == CEE_LEAVE || opcode == CEE_LEAVE_S || opcode == CEE_BR || opcode == CEE_BR_S) &&
+ opts.IsInstrumentedOrOptimized())
{
break; /* NOP */
}
jmpDist = (sz == 1) ? getI1LittleEndian(codeAddr) : getI4LittleEndian(codeAddr);
- if (compIsForInlining() && jmpDist == 0 && (opcode == CEE_BR || opcode == CEE_BR_S))
+ if ((jmpDist == 0) && (opcode == CEE_BR || opcode == CEE_BR_S) && opts.IsInstrumentedOrOptimized())
{
continue; /* NOP */
}
{
// We only see such blocks when optimizing. They are flagged by the importer.
//
- if (!m_comp->opts.IsInstrumentedOptimized() || ((m_comp->optMethodFlags & OMF_HAS_TAILCALL_SUCCESSOR) == 0))
+ if (!m_comp->opts.IsInstrumentedAndOptimized() || ((m_comp->optMethodFlags & OMF_HAS_TAILCALL_SUCCESSOR) == 0))
{
// No problematic blocks to worry about.
//
{
// We only see such blocks when optimizing. They are flagged by the importer.
//
- if (!m_comp->opts.IsInstrumentedOptimized() || ((m_comp->optMethodFlags & OMF_HAS_TAILCALL_SUCCESSOR) == 0))
+ if (!m_comp->opts.IsInstrumentedAndOptimized() || ((m_comp->optMethodFlags & OMF_HAS_TAILCALL_SUCCESSOR) == 0))
{
// No problematic blocks to worry about.
//
case CEE_BR_S:
jmpDist = (sz == 1) ? getI1LittleEndian(codeAddr) : getI4LittleEndian(codeAddr);
- if (compIsForInlining() && jmpDist == 0)
+ if ((jmpDist == 0) && opts.IsInstrumentedOrOptimized())
{
break; /* NOP */
}
// have to check for anything that might introduce a recursive tail call.
// * We only instrument root method blocks in OSR methods,
//
- if ((opts.IsInstrumentedOptimized() || opts.IsOSR()) && !compIsForInlining())
+ if ((opts.IsInstrumentedAndOptimized() || opts.IsOSR()) && !compIsForInlining())
{
// If a root method tail call candidate block is not a BBJ_RETURN, it should have a unique
// BBJ_RETURN successor. Mark that successor so we can handle it specially during profile
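
A hedged sketch of the marking described in the comment above, using block-kind and flag names from the JIT sources (treat the exact accessors as assumptions, not the literal change):

    // Sketch: flag the unique BBJ_RETURN successor of a non-return tail
    // call candidate block so profile instrumentation can special-case it.
    if (compCurBB->bbJumpKind != BBJ_RETURN)
    {
        BasicBlock* const successor = compCurBB->GetUniqueSucc(); // assumed accessor
        assert(successor->bbJumpKind == BBJ_RETURN);
        successor->bbFlags |= BBF_TAILCALL_SUCCESSOR;
        optMethodFlags |= OMF_HAS_TAILCALL_SUCCESSOR;
    }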
return false;
}
- assert(opts.OptimizationDisabled() || opts.IsInstrumentedOptimized());
+ assert(opts.OptimizationDisabled() || opts.IsInstrumentedAndOptimized());
assert(!compIsForInlining());
// During importation, optionally flag this block as one that