// If there is nothing next, that means the result is thrown away, so this value is not live.
// However, for minopts or debuggable code, we keep it live to support managed return value debugging.
- if ((call->gtNext == nullptr) && !compiler->opts.MinOpts() && !compiler->opts.compDbgCode)
+ if ((call->gtNext == nullptr) && compiler->opts.OptimizationEnabled())
{
gcInfo.gcMarkRegSetNpt(RBM_INTRET);
}
/* Control the optimizations */
- if (opts.MinOpts() || opts.compDbgCode)
+ if (opts.OptimizationDisabled())
{
opts.compFlags &= ~CLFLG_MAXOPT;
opts.compFlags |= CLFLG_MINOPT;
codeGen->setFramePointerRequired(false);
codeGen->setFrameRequired(false);
- if (opts.MinOpts() || opts.compDbgCode)
+ if (opts.OptimizationDisabled())
{
codeGen->setFrameRequired(true);
}
}
}
- info.compUnwrapContextful = !opts.MinOpts() && !opts.compDbgCode;
+ info.compUnwrapContextful = opts.OptimizationEnabled();
fgCanRelocateEHRegions = true;
}
#endif // FEATURE_EH_FUNCLETS
- if (!opts.MinOpts() && !opts.compDbgCode)
+ if (opts.OptimizationEnabled())
{
optOptimizeLayout();
EndPhase(PHASE_OPTIMIZE_LAYOUT);
EndPhase(PHASE_COMPUTE_REACHABILITY);
}
- if (!opts.MinOpts() && !opts.compDbgCode)
+ if (opts.OptimizationEnabled())
{
/* Perform loop inversion (i.e. transform "while" loops into
"repeat" loops) and discover and classify natural loops
//
assert(lvaLocalVarRefCounted());
- if (!opts.MinOpts() && !opts.compDbgCode)
+ if (opts.OptimizationEnabled())
{
/* Optimize boolean conditions */
#endif
// At this point we know if we are fully interruptible or not
- if (!opts.MinOpts() && !opts.compDbgCode)
+ if (opts.OptimizationEnabled())
{
bool doSsa = true;
bool doEarlyProp = true;
return compMinOptsIsSet;
}
#endif // !DEBUG
+
+ // OptimizationDisabled: true when the jit is not optimizing this method —
+ // i.e. MinOpts is in effect or we are generating debuggable code.
+ // Single point of truth replacing the scattered `MinOpts() || compDbgCode` checks.
+ inline bool OptimizationDisabled()
+ {
+ return MinOpts() || compDbgCode;
+ }
+ // OptimizationEnabled: convenience negation of OptimizationDisabled, for
+ // call sites that previously tested `!MinOpts() && !compDbgCode`.
+ inline bool OptimizationEnabled()
+ {
+ return !OptimizationDisabled();
+ }
+
inline void SetMinOpts(bool val)
{
assert(!compMinOptsIsUsed);
// this new local will be referenced.
if (lvaLocalVarRefCounted())
{
- if (opts.MinOpts() || opts.compDbgCode)
+ if (opts.OptimizationDisabled())
{
lvaTable[tempNum].lvImplicitlyReferenced = 1;
}
inline void LclVarDsc::incRefCnts(BasicBlock::weight_t weight, Compiler* comp, RefCountState state, bool propagate)
{
// In minopts and debug codegen, we don't maintain normal ref counts.
- if ((state == RCS_NORMAL) && (comp->opts.MinOpts() || comp->opts.compDbgCode))
+ if ((state == RCS_NORMAL) && comp->opts.OptimizationDisabled())
{
// Note, at least, that there is at least one reference.
lvImplicitlyReferenced = 1;
}
#endif // DEBUG
- if (!(opts.MinOpts() || opts.compDbgCode))
+ if (opts.OptimizationEnabled())
{
// Remove polls from well formed loops with a constant upper bound.
for (unsigned lnum = 0; lnum < optLoopCount; ++lnum)
// can't or don't want to emit an inline check. Check all of those. If after all of that we still
// have INLINE, then emit an inline check.
- if (opts.MinOpts() || opts.compDbgCode)
+ if (opts.OptimizationDisabled())
{
#ifdef DEBUG
if (verbose)
// past the epilog. We should never split blocks unless we're optimizing.
if (createdPollBlocks)
{
- noway_assert(!opts.MinOpts() && !opts.compDbgCode);
+ noway_assert(opts.OptimizationEnabled());
fgReorderBlocks();
}
}
JITDUMP("*************** In fgComputeBlockAndEdgeWeights()\n");
const bool usingProfileWeights = fgIsUsingProfileWeights();
- const bool isOptimizing = !opts.MinOpts() && !opts.compDbgCode;
+ const bool isOptimizing = opts.OptimizationEnabled();
fgHaveValidEdgeWeights = false;
fgCalledCount = BB_UNITY_WEIGHT;
/* This should never be called for debuggable code */
- noway_assert(!opts.MinOpts() && !opts.compDbgCode);
+ noway_assert(opts.OptimizationEnabled());
#ifdef DEBUG
if (verbose)
}
else if ((kind & GTK_BINOP) && op1 && tree->gtOp.gtOp2 &&
// Don't take out conditionals for debugging
- !((opts.compDbgCode || opts.MinOpts()) && tree->OperIsCompare()))
+ (opts.OptimizationEnabled() || !tree->OperIsCompare()))
{
GenTree* op2 = tree->gtOp.gtOp2;
}
// Defer folding if not optimizing.
- if (opts.compDbgCode || opts.MinOpts())
+ if (opts.OptimizationDisabled())
{
return call;
}
GenTree* retNode = nullptr;
// Under debug and minopts, only expand what is required.
- if (!mustExpand && (opts.compDbgCode || opts.MinOpts()))
+ if (!mustExpand && opts.OptimizationDisabled())
{
*pIntrinsicID = CORINFO_INTRINSIC_Illegal;
return retNode;
case CORINFO_INTRINSIC_StringLength:
op1 = impPopStack().val;
- if (!opts.MinOpts() && !opts.compDbgCode)
+ if (opts.OptimizationEnabled())
{
GenTreeArrLen* arrLen = gtNewArrLen(TYP_INT, op1, OFFSETOF__CORINFO_String__stringLen);
op1 = arrLen;
// structs is cheap.
JITDUMP("\nCompiler::impImportAndPushBox -- handling BOX(value class) via");
bool canExpandInline = (boxHelper == CORINFO_HELP_BOX);
- bool optForSize = !exprToBox->IsCall() && (operCls != nullptr) && (opts.compDbgCode || opts.MinOpts());
+ bool optForSize = !exprToBox->IsCall() && (operCls != nullptr) && opts.OptimizationDisabled();
bool expandInline = canExpandInline && !optForSize;
if (expandInline)
// and the other you get
// *(temp+4) = expr
- if (opts.MinOpts() || opts.compDbgCode)
+ if (opts.OptimizationDisabled())
{
// For minopts/debug code, try and minimize the total number
// of box temps by reusing an existing temp when possible.
return false;
}
- if (opts.compDbgCode || opts.MinOpts())
+ if (opts.OptimizationDisabled())
{
return false;
}
assert(op1->TypeGet() == TYP_REF);
// Don't optimize for minopts or debug codegen.
- if (opts.compDbgCode || opts.MinOpts())
+ if (opts.OptimizationDisabled())
{
return nullptr;
}
// Don't bother with inline expansion when jit is trying to
// generate code quickly, or the cast is in code that won't run very
// often, or the method already is pretty big.
- if (compCurBB->isRunRarely() || opts.compDbgCode || opts.MinOpts())
+ if (compCurBB->isRunRarely() || opts.OptimizationDisabled())
{
// not worth the code expansion if jitting fast or in a rarely run block
shouldExpandInline = false;
type = op1->TypeGet();
// brfalse and brtrue is only allowed on I4, refs, and byrefs.
- if (!opts.MinOpts() && !opts.compDbgCode && block->bbJumpDest == block->bbNext)
+ if (opts.OptimizationEnabled() && (block->bbJumpDest == block->bbNext))
{
block->bbJumpKind = BBJ_NONE;
varTypeIsI(op1->TypeGet()) && varTypeIsI(op2->TypeGet()) ||
varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType));
- if (!opts.MinOpts() && !opts.compDbgCode && block->bbJumpDest == block->bbNext)
+ if (opts.OptimizationEnabled() && (block->bbJumpDest == block->bbNext))
{
block->bbJumpKind = BBJ_NONE;
// Check legality and profitability of inline expansion for unboxing.
const bool canExpandInline = (helper == CORINFO_HELP_UNBOX);
- const bool shouldExpandInline = !(compCurBB->isRunRarely() || opts.compDbgCode || opts.MinOpts());
+ const bool shouldExpandInline = !compCurBB->isRunRarely() && opts.OptimizationEnabled();
if (canExpandInline && shouldExpandInline)
{
}
op1 = impPopStack().val;
- if (!opts.MinOpts() && !opts.compDbgCode)
+ if (opts.OptimizationEnabled())
{
/* Use GT_ARR_LENGTH operator so rng check opts see this */
GenTreeArrLen* arrLen = gtNewArrLen(TYP_INT, op1, OFFSETOF__CORINFO_Array__length);
assert(call->IsVirtual());
// Bail if not optimizing
- if (opts.MinOpts())
- {
- return;
- }
-
- // Bail if debuggable codegen
- if (opts.compDbgCode)
+ if (opts.OptimizationDisabled())
{
return;
}
}
// Bail if not optimizing or the call site is very likely cold
- if (compCurBB->isRunRarely() || opts.compDbgCode || opts.MinOpts())
+ if (compCurBB->isRunRarely() || opts.OptimizationDisabled())
{
JITDUMP("NOT Marking call [%06u] as guarded devirtualization candidate -- rare / dbg / minopts\n",
dspTreeID(call));
lvaComputeRefCounts(isRecompute, setSlotNumbers);
// If we're not optimizing, we're done.
- if (opts.MinOpts() || opts.compDbgCode)
+ if (opts.OptimizationDisabled())
{
return;
}
#if ASSERTION_PROP
- assert(!opts.MinOpts() && !opts.compDbgCode);
+ assert(opts.OptimizationEnabled());
// Note: optAddCopies() depends on lvaRefBlks, which is set in lvaMarkLocalVars(BasicBlock*), called above.
optAddCopies();
//
// On first compute: mark all locals as implicitly referenced and untracked.
// On recompute: do nothing.
- if (opts.MinOpts() || opts.compDbgCode)
+ if (opts.OptimizationDisabled())
{
if (isRecompute)
{
JITDUMP("In fgLocalVarLivenessInit\n");
// Sort locals first, if we're optimizing
- if (!opts.MinOpts() && !opts.compDbgCode)
+ if (opts.OptimizationEnabled())
{
lvaSortByRefCount();
}
if (targetCnt == 1)
{
JITDUMP("Lowering switch " FMT_BB ": single target; converting to BBJ_ALWAYS\n", originalSwitchBB->bbNum);
- noway_assert(comp->opts.MinOpts() || comp->opts.compDbgCode);
+ noway_assert(comp->opts.OptimizationDisabled());
if (originalSwitchBB->bbNext == jumpTab[0])
{
originalSwitchBB->bbJumpKind = BBJ_NONE;
comp->fgLocalVarLiveness();
// local var liveness can delete code, which may create empty blocks
- if (!comp->opts.MinOpts() && !comp->opts.compDbgCode)
+ if (comp->opts.OptimizationEnabled())
{
comp->optLoopsMarked = false;
bool modified = comp->fgUpdateFlowGraph();
//
// We can't determine that all of the time, but if there is only
// one use and the method has no loops, then this use must be the last.
- if (!(opts.compDbgCode || opts.MinOpts()))
+ if (opts.OptimizationEnabled())
{
GenTreeLclVarCommon* lcl = nullptr;
/* Try to change *(&lcl + cns) into lcl[cns] to prevent materialization of &lcl */
if (op1->gtOp.gtOp1->OperGet() == GT_ADDR && op1->gtOp.gtOp2->OperGet() == GT_CNS_INT &&
- (!(opts.MinOpts() || opts.compDbgCode)))
+ opts.OptimizationEnabled())
{
// No overflow arithmetic with pointers
noway_assert(!op1->gtOverflow());
bool result = false;
// We don't want to make any code unreachable
- if (opts.compDbgCode || opts.MinOpts())
+ if (opts.OptimizationDisabled())
{
return false;
}
continue;
}
#ifdef FEATURE_SIMD
- if (!opts.MinOpts() && stmt->gtStmtExpr->TypeGet() == TYP_FLOAT && stmt->gtStmtExpr->OperGet() == GT_ASG)
+ if (opts.OptimizationEnabled() && stmt->gtStmtExpr->TypeGet() == TYP_FLOAT &&
+ stmt->gtStmtExpr->OperGet() == GT_ASG)
{
fgMorphCombineSIMDFieldAssignments(block, stmt);
}
//
// Local assertion prop is enabled if we are optimized
//
- optLocalAssertionProp = (!opts.compDbgCode && !opts.MinOpts());
+ optLocalAssertionProp = opts.OptimizationEnabled();
if (optLocalAssertionProp)
{
// TODO-ObjectStackAllocation: Enable the optimization for architectures using
// JIT32_GCENCODER (i.e., x86).
#ifndef JIT32_GCENCODER
- if (JitConfig.JitObjectStackAllocation() && !opts.MinOpts() && !opts.compDbgCode)
+ if (JitConfig.JitObjectStackAllocation() && opts.OptimizationEnabled())
{
objectAllocator.EnableObjectStackAllocation();
}
void Compiler::optSetBlockWeights()
{
- noway_assert(!opts.MinOpts() && !opts.compDbgCode);
+ noway_assert(opts.OptimizationEnabled());
assert(fgDomsComputed);
#ifdef DEBUG
void Compiler::fgOptWhileLoop(BasicBlock* block)
{
- noway_assert(!opts.MinOpts() && !opts.compDbgCode);
+ noway_assert(opts.OptimizationEnabled());
noway_assert(compCodeOpt() != SMALL_CODE);
/*
void Compiler::optOptimizeLayout()
{
- noway_assert(!opts.MinOpts() && !opts.compDbgCode);
+ noway_assert(opts.OptimizationEnabled());
#ifdef DEBUG
if (verbose)
void Compiler::optOptimizeLoops()
{
- noway_assert(!opts.MinOpts() && !opts.compDbgCode);
+ noway_assert(opts.OptimizationEnabled());
#ifdef DEBUG
if (verbose)
#endif
#if ETW_EBP_FRAMED
- if (!result && (opts.MinOpts() || opts.compDbgCode))
+ if (!result && opts.OptimizationDisabled())
{
INDEBUG(reason = "Debug Code");
result = true;
void RegSet::verifyRegistersUsed(regMaskTP regMask)
{
- if (m_rsCompiler->opts.MinOpts() || m_rsCompiler->opts.compDbgCode)
+ if (m_rsCompiler->opts.OptimizationDisabled())
{
return;
}
// For debuggable or minopts code, scopes can begin only on block boundaries.
// For other codegen modes (eg minopts/tier0) we currently won't report any
// untracked locals.
- if (compiler->opts.compDbgCode || compiler->opts.MinOpts())
+ if (compiler->opts.OptimizationDisabled())
{
// Check if there are any scopes on the current block's start boundary.
VarScopeDsc* varScope = nullptr;
void Compiler::impMarkContiguousSIMDFieldAssignments(GenTree* stmt)
{
- if (!featureSIMD || opts.MinOpts())
+ if (!featureSIMD || opts.OptimizationDisabled())
{
return;
}