void inst_TT(instruction ins, GenTree* tree, unsigned offs = 0, int shfv = 0, emitAttr size = EA_UNKNOWN);
- void inst_TT_RV(instruction ins,
- GenTree* tree,
- regNumber reg,
- unsigned offs = 0,
- emitAttr size = EA_UNKNOWN,
- insFlags flags = INS_FLAGS_DONT_CARE);
+ void inst_TT_RV(instruction ins, emitAttr size, GenTree* tree, regNumber reg);
void inst_RV_TT(instruction ins,
regNumber reg,
varDsc = compiler->lvaTable + varNum;
#ifndef _TARGET_64BIT_
- // If not a stack arg go to the next one
+ // If this arg is never on the stack, go to the next one.
if (varDsc->lvType == TYP_LONG)
{
if (regArgTab[argNum].slot == 1 && !regArgTab[argNum].stackArg)
else
#endif // !_TARGET_64BIT_
{
- // If not a stack arg go to the next one
+ // If this arg is never on the stack, go to the next one.
if (!regArgTab[argNum].stackArg)
{
continue;
instruction storeIns = ins_Store(lclTyp, compiler->isSIMDTypeLocalAligned(varNum));
assert(varDsc->GetRegNum() == tree->GetRegNum());
- inst_TT_RV(storeIns, tree, tree->GetRegNum(), 0, size);
+ inst_TT_RV(storeIns, size, tree, tree->GetRegNum());
genUpdateRegLife(varDsc, /*isBorn*/ false, /*isDying*/ true DEBUGARG(tree));
gcInfo.gcMarkRegSetNpt(varDsc->lvRegMask());
unsigned varNum = tree->AsLclVarCommon()->GetLclNum();
assert(!compiler->lvaTable[varNum].lvNormalizeOnStore() ||
(tree->TypeGet() == genActualType(compiler->lvaTable[varNum].TypeGet())));
- inst_TT_RV(ins_Store(tree->gtType, compiler->isSIMDTypeLocalAligned(varNum)), tree, tree->GetRegNum());
+ inst_TT_RV(ins_Store(tree->gtType, compiler->isSIMDTypeLocalAligned(varNum)), emitTypeSize(tree->TypeGet()),
+ tree, tree->GetRegNum());
}
else
{
{
op1->gtFlags |= GTF_SPILL;
inst_TT_RV(ins_Store(op1->gtType, compiler->isSIMDTypeLocalAligned(op1->AsLclVarCommon()->GetLclNum())),
- op1, op1->GetRegNum());
+ emitTypeSize(op1->TypeGet()), op1, op1->GetRegNum());
}
// Now, load it to the fp stack.
GetEmitter()->emitIns_S(INS_fld, emitTypeSize(op1), op1->AsLclVarCommon()->GetLclNum(), 0);
// Getters and setters for address-exposed and do-not-enregister local var properties.
bool lvaVarAddrExposed(unsigned varNum);
void lvaSetVarAddrExposed(unsigned varNum);
+ void lvaSetVarLiveInOutOfHandler(unsigned varNum);
bool lvaVarDoNotEnregister(unsigned varNum);
#ifdef DEBUG
// Reasons why we can't enregister. Some of these correspond to debug properties of local vars.
}
}
-/*****************************************************************************
- *
- * Generate an instruction that has one operand given by a tree (which has
- * been made addressable) and another that is a register.
- */
-
-void CodeGen::inst_TT_RV(instruction ins, GenTree* tree, regNumber reg, unsigned offs, emitAttr size, insFlags flags)
+//------------------------------------------------------------------------
+// inst_TT_RV: Generate a store of a lclVar
+//
+// Arguments:
+// ins - the instruction to generate
+// size - the size attributes for the store
+// tree - the lclVar node
+// reg - the register currently holding the value of the local
+//
+// Notes:
+// 'tree' must be a GT_LCL_VAR or GT_STORE_LCL_VAR, and its value must be
+// valid in 'reg' (not GTF_SPILLED). The one exception is a write-thru
+// GT_STORE_LCL_VAR also marked GTF_SPILL, where GTF_SPILLED denotes that
+// the value is additionally valid in memory. The store is always emitted
+// at offset 0 of the local's stack slot.
+//
+void CodeGen::inst_TT_RV(instruction ins, emitAttr size, GenTree* tree, regNumber reg)
{
+#ifdef DEBUG
+ // The tree must have a valid register value.
assert(reg != REG_STK);
-
-AGAIN:
-
- /* Is this a spilled value? */
-
- if (tree->gtFlags & GTF_SPILLED)
+ bool isValidInReg = ((tree->gtFlags & GTF_SPILLED) == 0);
+ if (!isValidInReg)
{
- assert(!"ISSUE: If this can happen, we need to generate 'ins [ebp+spill]'");
- }
-
- if (size == EA_UNKNOWN)
- {
- if (instIsFP(ins))
+ // Is this the special case of a write-thru lclVar?
+ // We mark it as SPILLED to denote that its value is valid in memory.
+ if (((tree->gtFlags & GTF_SPILL) != 0) && tree->gtOper == GT_STORE_LCL_VAR)
{
- size = EA_ATTR(genTypeSize(tree->TypeGet()));
- }
- else
- {
- size = emitTypeSize(tree->TypeGet());
+ isValidInReg = true;
}
}
+ assert(isValidInReg);
+ assert(size != EA_UNKNOWN);
+ assert(tree->OperIs(GT_LCL_VAR, GT_STORE_LCL_VAR));
+#endif // DEBUG
- switch (tree->gtOper)
- {
- unsigned varNum;
-
- case GT_LCL_VAR:
-
- inst_set_SV_var(tree);
- goto LCL;
-
- case GT_LCL_FLD:
- case GT_STORE_LCL_FLD:
- offs += tree->AsLclFld()->GetLclOffs();
- goto LCL;
-
- LCL:
-
- varNum = tree->AsLclVarCommon()->GetLclNum();
- assert(varNum < compiler->lvaCount);
-
+ unsigned varNum = tree->AsLclVarCommon()->GetLclNum();
+ assert(varNum < compiler->lvaCount);
#if CPU_LOAD_STORE_ARCH
- if (!GetEmitter()->emitInsIsStore(ins))
- {
- // TODO-LdStArch-Bug: Should regTmp be a dst on the node or an internal reg?
- // Either way, it is not currently being handled by Lowering.
- regNumber regTmp = tree->GetRegNum();
- assert(regTmp != REG_NA);
- GetEmitter()->emitIns_R_S(ins_Load(tree->TypeGet()), size, regTmp, varNum, offs);
- GetEmitter()->emitIns_R_R(ins, size, regTmp, reg, flags);
- GetEmitter()->emitIns_S_R(ins_Store(tree->TypeGet()), size, regTmp, varNum, offs);
-
- regSet.verifyRegUsed(regTmp);
- }
- else
+ // On load/store architectures this helper only handles stores now.
+ assert(GetEmitter()->emitInsIsStore(ins));
#endif
- {
- // ins is a Store instruction
- //
- GetEmitter()->emitIns_S_R(ins, size, reg, varNum, offs);
-#ifdef _TARGET_ARM_
- // If we need to set the flags then add an extra movs reg,reg instruction
- if (flags == INS_FLAGS_SET)
- GetEmitter()->emitIns_R_R(INS_mov, size, reg, reg, INS_FLAGS_SET);
-#endif
- }
- return;
-
- case GT_CLS_VAR:
- // Make sure FP instruction size matches the operand size
- // (We optimized constant doubles to floats when we can, just want to
- // make sure that we don't mistakenly use 8 bytes when the
- // constant).
- assert(!isFloatRegType(tree->gtType) || genTypeSize(tree->gtType) == EA_SIZE_IN_BYTES(size));
-
-#if CPU_LOAD_STORE_ARCH
- if (!GetEmitter()->emitInsIsStore(ins))
- {
- NYI("Store of GT_CLS_VAR not supported for ARM");
- }
- else
-#endif // CPU_LOAD_STORE_ARCH
- {
- GetEmitter()->emitIns_C_R(ins, size, tree->AsClsVar()->gtClsVarHnd, reg, offs);
- }
- return;
-
- case GT_IND:
- case GT_NULLCHECK:
- case GT_ARR_ELEM:
- {
- assert(!"inst_TT_RV not supported for GT_IND, GT_NULLCHECK or GT_ARR_ELEM");
- }
- break;
-
- case GT_COMMA:
- // tree->AsOp()->gtOp1 - already processed by genCreateAddrMode()
- tree = tree->AsOp()->gtOp2;
- goto AGAIN;
-
- default:
- assert(!"invalid address");
- }
+ // Emit the store of 'reg' to the local's home stack slot.
+ GetEmitter()->emitIns_S_R(ins, size, reg, varNum, 0);
}
/*****************************************************************************
lvaSetVarDoNotEnregister(varNum DEBUGARG(DNER_AddrExposed));
}
+//------------------------------------------------------------------------
+// lvaSetVarLiveInOutOfHandler: Set the local varNum as being live in and/or out of a handler
+//
+// Arguments:
+// varNum - the varNum of the local
+//
+// Notes:
+// Marks the local - and, if it is a promoted struct, each of its field
+// locals - as do-not-enregister with reason DNER_LiveInOutOfHandler.
+// The lvLiveInOutOfHndlr bit itself is recorded in DEBUG builds only.
+//
+void Compiler::lvaSetVarLiveInOutOfHandler(unsigned varNum)
+{
+ LclVarDsc* varDsc = lvaGetDesc(varNum);
+
+ INDEBUG(varDsc->lvLiveInOutOfHndlr = 1);
+
+ if (varDsc->lvPromoted)
+ {
+ noway_assert(varTypeIsStruct(varDsc));
+
+ // Propagate the property to every field local of the promoted struct,
+ // since the fields share the parent's stack home across the handler.
+ for (unsigned i = varDsc->lvFieldLclStart; i < varDsc->lvFieldLclStart + varDsc->lvFieldCnt; ++i)
+ {
+ noway_assert(lvaTable[i].lvIsStructField);
+ INDEBUG(lvaTable[i].lvLiveInOutOfHndlr = 1);
+ lvaSetVarDoNotEnregister(i DEBUGARG(DNER_LiveInOutOfHandler));
+ }
+ }
+
+ lvaSetVarDoNotEnregister(varNum DEBUGARG(DNER_LiveInOutOfHandler));
+}
+
/*****************************************************************************
*
* Record that the local var "varNum" should not be enregistered (for one of several reasons.)
printf(" HFA(%s) ", varTypeName(varDsc->GetHfaType()));
}
+ if (varDsc->lvLiveInOutOfHndlr)
+ {
+ printf(" EH");
+ }
+
if (varDsc->lvDoNotEnregister)
{
printf(" do-not-enreg[");
}
// Mark all variables that are live on entry to an exception handler
- // or on exit from a filter handler or finally as DoNotEnregister */
+ // or on exit from a filter handler or finally.
- if (VarSetOps::IsMember(this, exceptVars, varDsc->lvVarIndex) ||
+ bool isFinallyVar = VarSetOps::IsMember(this, finallyVars, varDsc->lvVarIndex);
+ if (isFinallyVar || VarSetOps::IsMember(this, exceptVars, varDsc->lvVarIndex) ||
VarSetOps::IsMember(this, filterVars, varDsc->lvVarIndex))
{
- /* Mark the variable appropriately */
- lvaSetVarDoNotEnregister(varNum DEBUGARG(DNER_LiveInOutOfHandler));
- }
-
- /* Mark all pointer variables live on exit from a 'finally'
- block as either volatile for non-GC ref types or as
- 'explicitly initialized' (volatile and must-init) for GC-ref types */
-
- if (VarSetOps::IsMember(this, finallyVars, varDsc->lvVarIndex))
- {
- lvaSetVarDoNotEnregister(varNum DEBUGARG(DNER_LiveInOutOfHandler));
+ // Mark the variable appropriately.
+ lvaSetVarLiveInOutOfHandler(varNum);
- /* Don't set lvMustInit unless we have a non-arg, GC pointer */
+ // Mark all pointer variables live on exit from a 'finally' block as
+ // 'explicitly initialized' (must-init) for GC-ref types.
- if (varDsc->lvIsParam)
+ if (isFinallyVar)
{
- continue;
- }
-
- if (!varTypeIsGC(varDsc->TypeGet()))
- {
- continue;
+ // Set lvMustInit only if we have a non-arg, GC pointer.
+ if (!varDsc->lvIsParam && varTypeIsGC(varDsc->TypeGet()))
+ {
+ varDsc->lvMustInit = true;
+ }
}
-
- /* Mark it */
- varDsc->lvMustInit = true;
}
}
//
void LinearScan::identifyCandidatesExceptionDataflow()
{
- VarSetOps::AssignNoCopy(compiler, exceptVars, VarSetOps::MakeEmpty(compiler));
#ifdef DEBUG
VARSET_TP finallyVars(VarSetOps::MakeEmpty(compiler));
#endif
return;
}
+ VarSetOps::AssignNoCopy(compiler, exceptVars, VarSetOps::MakeEmpty(compiler));
if (compiler->compHndBBtabCount > 0)
{
identifyCandidatesExceptionDataflow();
BasicBlock* prevBlock DEBUGARG(bool* pPredBlockIsAllocated))
{
BasicBlock* predBlock = nullptr;
+ assert(*pPredBlockIsAllocated == false);
// Blocks with exception flow on entry use no predecessor blocks, as all incoming vars
// are on the stack.
if (blockInfo[block->bbNum].hasEHBoundaryIn)
{
+ JITDUMP("\n\nIncoming EH boundary; ");
+ return nullptr;
+ }
+
+ if (block == compiler->fgFirstBB)
+ {
return nullptr;
}
+ if (block->bbPreds == nullptr)
+ {
+ // We may have unreachable blocks, due to optimization.
+ // We don't want to set the predecessor as null in this case, since that will result in
+ // unnecessary DummyDefs, and possibly result in inconsistencies requiring resolution
+ // (since these unreachable blocks can have reachable successors).
+ assert((block != compiler->fgFirstBB) || (prevBlock != nullptr));
+ JITDUMP("\n\nNo predecessor; ");
+ return prevBlock;
+ }
+
#ifdef DEBUG
- assert(*pPredBlockIsAllocated == false);
if (getLsraBlockBoundaryLocations() == LSRA_BLOCK_BOUNDARY_LAYOUT)
{
if (prevBlock != nullptr)
}
else
#endif // DEBUG
- if (block != compiler->fgFirstBB)
{
predBlock = block->GetUniquePred(compiler);
if (predBlock != nullptr)
// Special handling to improve matching on backedges.
BasicBlock* otherBlock = (block == predBlock->bbNext) ? predBlock->bbJumpDest : predBlock->bbNext;
noway_assert(otherBlock != nullptr);
- if (isBlockVisited(otherBlock))
+ if (isBlockVisited(otherBlock) && !blockInfo[otherBlock->bbNum].hasEHBoundaryIn)
{
// This is the case when we have a conditional branch where one target has already
// been visited. It would be best to use the same incoming regs as that block,
if (predBBNum == 0)
{
#if DEBUG
- // This should still be in its initialized empty state.
- for (unsigned varIndex = 0; varIndex < compiler->lvaTrackedCount; varIndex++)
+ if (blockInfo[currentBlock->bbNum].hasEHBoundaryIn || !allocationPassComplete)
{
- // In the case where we're extending lifetimes for stress, we are intentionally modeling variables
- // as live when they really aren't to create extra register pressure & constraints.
- // However, this means that non-EH-vars will be live into EH regions. We can and should ignore the
- // locations of these. Note that they aren't reported to codegen anyway.
- if (!getLsraExtendLifeTimes() || VarSetOps::IsMember(compiler, currentBlock->bbLiveIn, varIndex))
+ // This should still be in its initialized empty state.
+ for (unsigned varIndex = 0; varIndex < compiler->lvaTrackedCount; varIndex++)
{
- assert(inVarToRegMap[varIndex] == REG_STK);
+ // In the case where we're extending lifetimes for stress, we are intentionally modeling variables
+ // as live when they really aren't to create extra register pressure & constraints.
+ // However, this means that non-EH-vars will be live into EH regions. We can and should ignore the
+ // locations of these. Note that they aren't reported to codegen anyway.
+ if (!getLsraExtendLifeTimes() || VarSetOps::IsMember(compiler, currentBlock->bbLiveIn, varIndex))
+ {
+ assert(inVarToRegMap[varIndex] == REG_STK);
+ }
}
}
#endif // DEBUG
bool predBlockIsAllocated = false;
predBlock = findPredBlockForLiveIn(block, prevBlock DEBUGARG(&predBlockIsAllocated));
- if (predBlock)
+ if (predBlock != nullptr)
{
JITDUMP("\n\nSetting " FMT_BB " as the predecessor for determining incoming variable registers of " FMT_BB
"\n",
// Any lclVars live-in to a block are resolution candidates.
VarSetOps::UnionD(compiler, resolutionCandidateVars, currentLiveVars);
- // Determine if we need any DummyDefs.
- // We need DummyDefs for cases where "predBlock" isn't really a predecessor.
- // Note that it's possible to have uses of unitialized variables, in which case even the first
- // block may require DummyDefs, which we are not currently adding - this means that these variables
- // will always be considered to be in memory on entry (and reloaded when the use is encountered).
- // TODO-CQ: Consider how best to tune this. Currently, if we create DummyDefs for uninitialized
- // variables (which may actually be initialized along the dynamically executed paths, but not
- // on all static paths), we wind up with excessive liveranges for some of these variables.
- VARSET_TP newLiveIn(VarSetOps::MakeCopy(compiler, currentLiveVars));
- if (predBlock)
+ if (!blockInfo[block->bbNum].hasEHBoundaryIn)
{
- // Compute set difference: newLiveIn = currentLiveVars - predBlock->bbLiveOut
- VarSetOps::DiffD(compiler, newLiveIn, predBlock->bbLiveOut);
- }
- bool needsDummyDefs = (!VarSetOps::IsEmpty(compiler, newLiveIn) && block != compiler->fgFirstBB);
-
- // Create dummy def RefPositions
+ // Determine if we need any DummyDefs.
+ // We need DummyDefs for cases where "predBlock" isn't really a predecessor.
+ // Note that it's possible to have uses of unitialized variables, in which case even the first
+ // block may require DummyDefs, which we are not currently adding - this means that these variables
+ // will always be considered to be in memory on entry (and reloaded when the use is encountered).
+ // TODO-CQ: Consider how best to tune this. Currently, if we create DummyDefs for uninitialized
+ // variables (which may actually be initialized along the dynamically executed paths, but not
+ // on all static paths), we wind up with excessive liveranges for some of these variables.
+
+ VARSET_TP newLiveIn(VarSetOps::MakeCopy(compiler, currentLiveVars));
+ if (predBlock != nullptr)
+ {
+ // Compute set difference: newLiveIn = currentLiveVars - predBlock->bbLiveOut
+ VarSetOps::DiffD(compiler, newLiveIn, predBlock->bbLiveOut);
+ }
+ bool needsDummyDefs = (!VarSetOps::IsEmpty(compiler, newLiveIn) && block != compiler->fgFirstBB);
- if (needsDummyDefs)
- {
- // If we are using locations from a predecessor, we should never require DummyDefs.
- assert(!predBlockIsAllocated);
+ // Create dummy def RefPositions
- JITDUMP("Creating dummy definitions\n");
- VarSetOps::Iter iter(compiler, newLiveIn);
- unsigned varIndex = 0;
- while (iter.NextElem(&varIndex))
+ if (needsDummyDefs)
{
- LclVarDsc* varDsc = compiler->lvaGetDescByTrackedIndex(varIndex);
- // Add a dummyDef for any candidate vars that are in the "newLiveIn" set.
- // If this is the entry block, don't add any incoming parameters (they're handled with ParamDefs).
- if (isCandidateVar(varDsc) && (predBlock != nullptr || !varDsc->lvIsParam))
+ // If we are using locations from a predecessor, we should never require DummyDefs.
+ assert(!predBlockIsAllocated);
+
+ JITDUMP("Creating dummy definitions\n");
+ VarSetOps::Iter iter(compiler, newLiveIn);
+ unsigned varIndex = 0;
+ while (iter.NextElem(&varIndex))
{
+ // Add a dummyDef for any candidate vars that are in the "newLiveIn" set.
+ LclVarDsc* varDsc = compiler->lvaGetDescByTrackedIndex(varIndex);
+ assert(isCandidateVar(varDsc));
Interval* interval = getIntervalForLocalVar(varIndex);
RefPosition* pos = newRefPosition(interval, currentLoc, RefTypeDummyDef, nullptr,
allRegs(interval->registerType));
pos->setRegOptional(true);
}
+ JITDUMP("Finished creating dummy definitions\n\n");
}
- JITDUMP("Finished creating dummy definitions\n\n");
}
}