compJmpOpUsed = false;
compLongUsed = false;
compTailCallUsed = false;
+ compLocallocSeen = false;
compLocallocUsed = false;
compLocallocOptimized = false;
compQmarkRationalized = false;
bool compLongUsed; // Does the method use TYP_LONG
bool compFloatingPointUsed; // Does the method use TYP_FLOAT or TYP_DOUBLE
bool compTailCallUsed; // Does the method do a tailcall
+ bool compLocallocSeen; // Does the method IL have localloc opcode
bool compLocallocUsed; // Does the method use localloc.
bool compLocallocOptimized; // Does the method have an optimized localloc
bool compQmarkUsed; // Does the method use GT_QMARK/GT_COLON
return !info.compInitMem && opts.compDbgCode;
}
+ // Returns true if the jit supports having patchpoints in this method.
+ // Optionally, get the reason why not.
+ bool compCanHavePatchpoints(const char** reason = nullptr);
+
#if defined(DEBUG)
void compDispLocalVars();
m_lvRefCntWtd = newValue;
}
+//------------------------------------------------------------------------------
+// compCanHavePatchpoints: return true if patchpoints are supported in this
+// method.
+//
+// Arguments:
+// reason - [out, optional] reason why patchpoints are not supported
+//
+// Returns:
+// True if patchpoints are supported in this method.
+//
+inline bool Compiler::compCanHavePatchpoints(const char** reason)
+{
+ const char* whyNot = nullptr;
+
+#ifdef FEATURE_ON_STACK_REPLACEMENT
+ if (compLocallocSeen)
+ {
+ whyNot = "localloc";
+ }
+ else if ((info.compFlags & CORINFO_FLG_SYNCH) != 0)
+ {
+ whyNot = "synchronized method";
+ }
+ else if (opts.IsReversePInvoke())
+ {
+ whyNot = "reverse pinvoke";
+ }
+#else
+ whyNot = "OSR feature not defined in build";
+#endif
+
+ if (reason != nullptr)
+ {
+ *reason = whyNot;
+ }
+
+ return whyNot == nullptr;
+}
+
/*****************************************************************************/
#endif //_COMPILER_HPP_
/*****************************************************************************/
case CEE_LOCALLOC:
+ compLocallocSeen = true;
+
// We now allow localloc callees to become candidates in some cases.
if (makeInlineObservations)
{
// Are there any places in the method where we might add a patchpoint?
if (compHasBackwardJump)
{
- // Are patchpoints enabled?
- if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_TIER0) && (JitConfig.TC_OnStackReplacement() > 0))
+ // Are patchpoints enabled and supported?
+ if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_TIER0) && (JitConfig.TC_OnStackReplacement() > 0) &&
+ compCanHavePatchpoints())
{
// We don't inline at Tier0, if we do, we may need rethink our approach.
// Could probably support inlines that don't introduce flow.
assert(!compIsForInlining());
// Is the start of this block a suitable patchpoint?
- // Current strategy is blocks that are stack-empty and backwards branch targets
- if (block->bbFlags & BBF_BACKWARD_JUMP_TARGET && (verCurrentState.esStackDepth == 0))
+ // Current strategy is blocks that are stack-empty and backwards branch targets and not in a handler
+ //
+ // Todo (perhaps): bail out of OSR and jit this method with optimization.
+ //
+ if (!block->hasHndIndex() && ((block->bbFlags & BBF_BACKWARD_JUMP_TARGET) != 0) &&
+ (verCurrentState.esStackDepth == 0))
{
block->bbFlags |= BBF_PATCHPOINT;
setMethodHasPatchpoint();
//
// Todo: stress mode...
//
- if ((JitConfig.TC_PartialCompilation() > 0) && opts.jitFlags->IsSet(JitFlags::JIT_FLAG_TIER0) &&
- (block != fgFirstBB) && block->isRunRarely() && (verCurrentState.esStackDepth == 0) &&
- ((block->bbFlags & BBF_PATCHPOINT) == 0))
+ if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_TIER0) && (JitConfig.TC_PartialCompilation() > 0) &&
+ compCanHavePatchpoints())
{
- block->bbFlags |= BBF_PARTIAL_COMPILATION_PATCHPOINT;
- setMethodHasPartialCompilationPatchpoint();
+ // Is this block a good place for partial compilation?
+ //
+ if ((block != fgFirstBB) && block->isRunRarely() && (verCurrentState.esStackDepth == 0) &&
+ ((block->bbFlags & BBF_PATCHPOINT) == 0) && !block->hasHndIndex())
+ {
+ JITDUMP("\nBlock " FMT_BB " will be a partial compilation patchpoint -- not importing\n", block->bbNum);
+ block->bbFlags |= BBF_PARTIAL_COMPILATION_PATCHPOINT;
+ setMethodHasPartialCompilationPatchpoint();
+
+ // Change block to BBJ_THROW so we won't trigger importation of successors.
+ //
+ block->bbJumpKind = BBJ_THROW;
+
+ // If this method has an explicit generic context, the only uses of it may be in
+ // the IL for this block. So assume it's used.
+ //
+ if (info.compMethodInfo->options &
+ (CORINFO_GENERICS_CTXT_FROM_METHODDESC | CORINFO_GENERICS_CTXT_FROM_METHODTABLE))
+ {
+ lvaGenericsContextInUse = true;
+ }
+
+ return;
+ }
}
#endif // FEATURE_ON_STACK_REPLACEMENT
{
if (block->bbFlags & BBF_PATCHPOINT)
{
+ // We can't OSR from funclets.
+ //
+ assert(!block->hasHndIndex());
+
// Clear the patchpoint flag.
//
block->bbFlags &= ~BBF_PATCHPOINT;
- // If block is in a handler region, don't insert a patchpoint.
- // We can't OSR from funclets.
- //
- // TODO: check this earlier, somehow, and fall back to fully
- // optimizing the method (ala QJFL=0).
- if (compiler->ehGetBlockHndDsc(block) != nullptr)
- {
- JITDUMP("Patchpoint: skipping patchpoint for " FMT_BB " as it is in a handler\n", block->bbNum);
- continue;
- }
-
- JITDUMP("Patchpoint: loop patchpoint in " FMT_BB "\n", block->bbNum);
- assert(block != compiler->fgFirstBB);
+ JITDUMP("Patchpoint: regular patchpoint in " FMT_BB "\n", block->bbNum);
TransformBlock(block);
count++;
}
else if (block->bbFlags & BBF_PARTIAL_COMPILATION_PATCHPOINT)
{
- if (compiler->ehGetBlockHndDsc(block) != nullptr)
- {
- JITDUMP("Patchpoint: skipping partial compilation patchpoint for " FMT_BB
- " as it is in a handler\n",
- block->bbNum);
- continue;
- }
+ // We can't OSR from funclets.
+ // Also, we don't import the IL for these blocks.
+ //
+ assert(!block->hasHndIndex());
+
+ // If we're instrumenting, we should not have decided to
+ // put class probes here, as that is driven by looking at IL.
+ //
+ assert((block->bbFlags & BBF_HAS_CLASS_PROFILE) == 0);
+
+ // Clear the partial comp flag.
+ //
+ block->bbFlags &= ~BBF_PARTIAL_COMPILATION_PATCHPOINT;
JITDUMP("Patchpoint: partial compilation patchpoint in " FMT_BB "\n", block->bbNum);
TransformPartialCompilation(block);
compiler->gtNewHelperCallNode(CORINFO_HELP_PARTIAL_COMPILATION_PATCHPOINT, TYP_VOID, helperArgs);
compiler->fgNewStmtAtEnd(block, helperCall);
-
- // This block will no longer have class probes.
- // (They will appear in the OSR variant).
- //
- if ((block->bbFlags & BBF_HAS_CLASS_PROFILE) != 0)
- {
- JITDUMP("No longer adding class probes to " FMT_BB "\n", block->bbNum);
- block->bbFlags &= ~BBF_HAS_CLASS_PROFILE;
- }
}
};
// We should only be adding patchpoints at Tier0, so should not be in an inlinee
assert(!compIsForInlining());
- // We currently can't do OSR in methods with localloc.
- // Such methods don't have a fixed relationship between frame and stack pointers.
- //
- // This is true whether or not the localloc was executed in the original method.
- //
- // TODO: handle this case, or else check this earlier and fall back to fully
- // optimizing the method (ala QJFL=0).
- if (compLocallocUsed)
- {
- JITDUMP("\n -- unable to handle methods with localloc\n");
- return PhaseStatus::MODIFIED_NOTHING;
- }
-
- // We currently can't do OSR in synchronized methods. We need to alter
- // the logic in fgAddSyncMethodEnterExit for OSR to not try and obtain the
- // monitor (since the original method will have done so) and set the monitor
- // obtained flag to true (or reuse the original method slot value).
- if ((info.compFlags & CORINFO_FLG_SYNCH) != 0)
- {
- JITDUMP("\n -- unable to handle synchronized methods\n");
- return PhaseStatus::MODIFIED_NOTHING;
- }
-
- if (opts.IsReversePInvoke())
- {
- JITDUMP(" -- unable to handle Reverse P/Invoke\n");
- return PhaseStatus::MODIFIED_NOTHING;
- }
+ // We should be allowed to have patchpoints in this method.
+ assert(compCanHavePatchpoints());
PatchpointTransformer ppTransformer(this);
int count = ppTransformer.Run();