return false;
}
+ // At this point we know target is BBJ_COND.
+ //
+ // Bail out if OSR, as we can have unusual flow into loops. If one
+ // of target's successors is also a backedge target, this optimization
+ // may mess up loop recognition by creating too many non-loop preds.
+ //
+ if (opts.IsOSR())
+ {
+ assert(target->bbJumpKind == BBJ_COND);
+
+ if ((target->bbNext->bbFlags & BBF_BACKWARD_JUMP_TARGET) != 0)
+ {
+ JITDUMP("Deferring: " FMT_BB " --> " FMT_BB "; latter looks like loop top\n", target->bbNum,
+ target->bbNext->bbNum);
+ return false;
+ }
+
+ if ((target->bbJumpDest->bbFlags & BBF_BACKWARD_JUMP_TARGET) != 0)
+ {
+ JITDUMP("Deferring: " FMT_BB " --> " FMT_BB "; latter looks like loop top\n", target->bbNum,
+ target->bbJumpDest->bbNum);
+ return false;
+ }
+ }
+
// See if this block assigns constant or other interesting tree to that same local.
//
if (!fgBlockEndFavorsTailDuplication(block, lclNum))
// OSR is not yet supported for methods with explicit tail calls.
//
- // But we also do not have to switch these methods to be optimized as we should be
+ // But we also do not have to switch these methods to be optimized, as we should be
// able to avoid getting trapped in Tier0 code by normal call counting.
// So instead, just suppress adding patchpoints.
//
if (!compTailPrefixSeen)
{
- // The normaly policy is only to add patchpoints to the targets of lexically
- // backwards branches.
+ // We only need to add patchpoints if the method can loop.
//
if (compHasBackwardJump)
{
assert(compCanHavePatchpoints());
- // Is the start of this block a suitable patchpoint?
+ // By default we use the "adaptive" strategy.
//
- if (((block->bbFlags & BBF_BACKWARD_JUMP_TARGET) != 0) && (verCurrentState.esStackDepth == 0))
+ // This can create both source and target patchpoints within a given
+ // loop structure, which isn't ideal, but is not incorrect. We will
+ // just have some extra Tier0 overhead.
+ //
+ // Todo: implement support for mid-block patchpoints. If `block`
+ // is truly a backedge source (and not in a handler) then we should be
+ // able to find a stack empty point somewhere in the block.
+ //
+ const int patchpointStrategy = JitConfig.TC_PatchpointStrategy();
+ bool addPatchpoint = false;
+ bool mustUseTargetPatchpoint = false;
+
+ switch (patchpointStrategy)
{
- // We should have noted this earlier and bailed out of OSR.
- //
- assert(!block->hasHndIndex());
+ default:
+ {
+ // Patchpoints at backedge sources, if possible, otherwise targets.
+ //
+ addPatchpoint = ((block->bbFlags & BBF_BACKWARD_JUMP_SOURCE) == BBF_BACKWARD_JUMP_SOURCE);
+ mustUseTargetPatchpoint = (verCurrentState.esStackDepth != 0) || block->hasHndIndex();
+ break;
+ }
+
+ case 1:
+ {
+ // Patchpoints at stackempty backedge targets.
+ // Note if we have loops where the IL stack is not empty on the backedge we can't patchpoint
+ // them.
+ //
+ // We should not have allowed OSR if there were backedges in handlers.
+ //
+ assert(!block->hasHndIndex());
+ addPatchpoint = ((block->bbFlags & BBF_BACKWARD_JUMP_TARGET) == BBF_BACKWARD_JUMP_TARGET) &&
+ (verCurrentState.esStackDepth == 0);
+ break;
+ }
+
+ case 2:
+ {
+ // Adaptive strategy.
+ //
+ // Patchpoints at backedge targets if there are multiple backedges,
+ // otherwise at backedge sources, if possible. Note a block can be both; if so we
+ // just need one patchpoint.
+ //
+ if ((block->bbFlags & BBF_BACKWARD_JUMP_TARGET) == BBF_BACKWARD_JUMP_TARGET)
+ {
+ // We don't know backedge count, so just use ref count.
+ //
+ addPatchpoint = (block->bbRefs > 1) && (verCurrentState.esStackDepth == 0);
+ }
+
+ if (!addPatchpoint && ((block->bbFlags & BBF_BACKWARD_JUMP_SOURCE) == BBF_BACKWARD_JUMP_SOURCE))
+ {
+ addPatchpoint = true;
+ mustUseTargetPatchpoint = (verCurrentState.esStackDepth != 0) || block->hasHndIndex();
+
+ // Also force target patchpoint if target block has multiple (backedge) preds.
+ //
+ if (!mustUseTargetPatchpoint)
+ {
+ for (BasicBlock* const succBlock : block->Succs(this))
+ {
+ if ((succBlock->bbNum <= block->bbNum) && (succBlock->bbRefs > 1))
+ {
+ mustUseTargetPatchpoint = true;
+ break;
+ }
+ }
+ }
+ }
+ break;
+ }
+ }
+
+ if (addPatchpoint)
+ {
+ if (mustUseTargetPatchpoint)
+ {
+ // We wanted a source patchpoint, but could not have one.
+ // So, add patchpoints to the backedge targets.
+ //
+ for (BasicBlock* const succBlock : block->Succs(this))
+ {
+ if (succBlock->bbNum <= block->bbNum)
+ {
+ // The succBlock had better agree it's a target.
+ //
+ assert((succBlock->bbFlags & BBF_BACKWARD_JUMP_TARGET) == BBF_BACKWARD_JUMP_TARGET);
+
+ // We may already have decided to put a patchpoint in succBlock. If not, add one.
+ //
+ if ((succBlock->bbFlags & BBF_PATCHPOINT) == 0)
+ {
+ // In some cases the target may not be stack-empty at entry.
+ // If so, we will bypass patchpoints for this backedge.
+ //
+ if (succBlock->bbStackDepthOnEntry() > 0)
+ {
+ JITDUMP("\nCan't set source patchpoint at " FMT_BB ", can't use target " FMT_BB
+ " as it has non-empty stack on entry.\n",
+ block->bbNum, succBlock->bbNum);
+ }
+ else
+ {
+ JITDUMP("\nCan't set source patchpoint at " FMT_BB ", using target " FMT_BB
+ " instead\n",
+ block->bbNum, succBlock->bbNum);
+
+ assert(!succBlock->hasHndIndex());
+ succBlock->bbFlags |= BBF_PATCHPOINT;
+ }
+ }
+ }
+ }
+ }
+ else
+ {
+ assert(!block->hasHndIndex());
+ block->bbFlags |= BBF_PATCHPOINT;
+ }
- block->bbFlags |= BBF_PATCHPOINT;
setMethodHasPatchpoint();
}
}
else
{
- // Should not see backward branch targets w/o backwards branches
- assert((block->bbFlags & BBF_BACKWARD_JUMP_TARGET) == 0);
+ // Should not see backward branch targets w/o backwards branches.
+ // So if !compHasBackwardJump, these flags should never be set.
+ //
+ assert((block->bbFlags & (BBF_BACKWARD_JUMP_TARGET | BBF_BACKWARD_JUMP_SOURCE)) == 0);
}
}