#ifndef LEGACY_BACKEND
assert(nogen == true);
#endif // !LEGACY_BACKEND
+
/*
The following indirections are valid address modes on x86/x64:
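   For example (an illustrative subset; the original comment enumerates the full list):

       [reg]                      e.g. mov eax, dword ptr [rbx]
       [reg + icon]               e.g. mov eax, dword ptr [rbx + 8]
       [reg + reg2 * mul]         e.g. mov eax, dword ptr [rbx + rsi*4]      (mul is 1, 2, 4, or 8)
       [reg + reg2 * mul + icon]  e.g. mov eax, dword ptr [rbx + rsi*4 + 8]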
compiler->eeSetLIdone();
}
-/*****************************************************************************/
#endif // DEBUGGING_SUPPORT
-/*****************************************************************************/
-
/*============================================================================
*
switch (dstType)
{
- case TYP_INT: // conv.ovf.i8.i4
+ case TYP_INT:
+ // conv.ovf.i8.i4
/* Generate the following sequence
test loDWord, loDWord // set flags
compiler->eeSetLVinfo(which, startOffs, length, ilVarNum, LVnum, name, avail, varLoc);
}
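
For reference, the check that the conv.ovf.i8.i4 sequence above implements can be
restated in C++ (an illustrative sketch, not the emitted code; the helper name is
made up):

    // A signed 64-bit value fits in a signed 32-bit one exactly when the high
    // DWORD is the sign extension of the low DWORD.
    static bool FitsInI4(__int64 val)
    {
        int loDWord = (int)val;
        int hiDWord = (int)(val >> 32);
        return hiDWord == (loDWord >> 31); // 0 if lo is non-negative, -1 if negative
    }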
-/*****************************************************************************/
#endif // DEBUGGING_SUPPORT
-/*****************************************************************************/
/*****************************************************************************
*
if (rpFrameType == FT_EBP_FRAME)
printf("%3s | ", STR_FPBASE);
else if (rpFrameType == FT_ESP_FRAME)
+ {
printf("%3s | ", STR_SPBASE );
+ }
#if DOUBLE_ALIGN
else if (rpFrameType == FT_DOUBLE_ALIGN_FRAME)
+ {
printf("dbl | ");
+ }
#endif
else // (rpFrameType == FT_NOT_SET)
+ {
printf("??? | ");
+ }
if (fgHasLoops)
printf("LOOP |");
return emitComp->eeGetMethodFullName(methHnd);
}
-/*****************************************************************************/
#endif // DEBUG
+
/*****************************************************************************
*
* Be very careful: some instruction descriptors are allocated as "tiny" and
*/
srcInstrOffs = jmpIG->igOffs + jmp->idjOffs;
- dstOffs = tgtIG->igOffs; /* Note that the destination is always the beginning of an IG, so no need for an offset inside it */
+
+ /* Note that the destination is always the beginning of an IG, so no need for an offset inside it */
+ dstOffs = tgtIG->igOffs;
#if defined(_TARGET_ARM_)
srcEncodingOffs = srcInstrOffs + 4; // For relative branches, ARM PC is always considered to be the instruction address + 4
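// Worked example (illustrative, reusing the locals above): a branch at IG offset
// 0x10 targeting an IG that starts at offset 0x40 encodes its distance against
// PC = instruction address + 4:
//
//     jmpDist = dstOffs - srcEncodingOffs
//             = 0x40 - (0x10 + 4) = 0x2C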
#if defined(_TARGET_XARCH_)
jmp->idCodeSize(jsz);
#elif defined(_TARGET_ARM_)
+#if 0
// This is done as part of emitSetShortJump():
- // insSize isz = emitInsSize(jmp->idInsFmt());
- // jmp->idInsSize(isz);
+ insSize isz = emitInsSize(jmp->idInsFmt());
+ jmp->idInsSize(isz);
+#endif
#elif defined(_TARGET_ARM64_)
// The sizes of IF_LARGEJMP/IF_LARGEADR/IF_LARGELDC are 8 or 12.
// All other instructions have a code size of 4.
{
private:
#if defined(_TARGET_XARCH_) && !defined(LEGACY_BACKEND)
- instruction _idIns :9; // The assembly instruction
+ // The assembly instruction
+ instruction _idIns :9;
#else // !defined(_TARGET_XARCH_) || defined(LEGACY_BACKEND)
- instruction _idIns :8; // The assembly instruction
+ // The assembly instruction
+ instruction _idIns :8;
#endif // !defined(_TARGET_XARCH_) || defined(LEGACY_BACKEND)
- insFormat _idInsFmt :8; // The format for the instruction
+ // The format for the instruction
+ insFormat _idInsFmt :8;
public:
instruction idIns() const { return _idIns; }
// At this point we have fully consumed the first DWORD so that the next field
// doesn't cross a byte boundary.
#elif defined(_TARGET_ARM64_)
- // Moved the definition of '_idOpSize' later
- // so that we don't cross a 32-bit boundary when laying out bitfields
+// Moved the definition of '_idOpSize' later so that we don't cross a 32-bit boundary when laying out bitfields
#else // ARM or x86-LEGACY_BACKEND
opSize _idOpSize :2; // operand size: 0=1, 1=2, 2=4, 3=8
#endif // ARM or x86-LEGACY_BACKEND
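
The 2-bit _idOpSize encoding above is simply log2 of the operand size in bytes; as a
sketch (the helper name is hypothetical, not the emitter's actual accessor):

    inline unsigned opSizeInBytes(unsigned encodedOpSize) // encodedOpSize in [0..3]
    {
        return 1u << encodedOpSize; // 0 -> 1, 1 -> 2, 2 -> 4, 3 -> 8
    }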
printf(", ");
#ifdef RELOC_SUPPORT
if (cnsVal.cnsReloc)
+ {
emitDispReloc(val);
+ }
else
#endif
+ {
goto PRINT_CONSTANT;
+ }
}
break;
printf(", ");
#ifdef RELOC_SUPPORT
if (id->idIsCnsReloc())
+ {
emitDispReloc(val);
+ }
else
#endif
+ {
goto PRINT_CONSTANT;
+ }
break;
case IF_RRD:
case IF_RRW_MRD:
if (ins == INS_movsx || ins == INS_movzx)
+ {
attr = EA_PTRSIZE;
+ }
#ifdef _TARGET_AMD64_
else if (ins == INS_movsxd)
{
val = emitGetInsSC(id);
#ifdef RELOC_SUPPORT
if (id->idIsCnsReloc())
+ {
emitDispReloc(val);
+ }
else
#endif
+ {
goto PRINT_CONSTANT;
+ }
break;
#if FEATURE_STACK_FP_X87
gtfType = " nullcheck";
if (tree->gtFlags & GTF_CALL_VIRT_VTABLE)
+ {
gtfType = " ind";
+ }
else if (tree->gtFlags & GTF_CALL_VIRT_STUB)
+ {
gtfType = " stub";
+ }
#ifdef FEATURE_READYTORUN_COMPILER
else if (tree->gtCall.IsR2RRelativeIndir())
+ {
gtfType = " r2r_ind";
+ }
#endif // FEATURE_READYTORUN_COMPILER
else if (tree->gtFlags & GTF_CALL_UNMANAGED)
{
else if (lclNum == lvaInlinedPInvokeFrameVar)
ilName = "PInvokeFrame";
else if (lclNum == lvaGSSecurityCookie)
+ {
ilName = "GsCookie";
+ }
#if FEATURE_FIXED_OUT_ARGS
else if (lclNum == lvaPInvokeFrameRegSaveVar)
+ {
ilName = "PInvokeFrameRegSave";
+ }
else if (lclNum == lvaOutgoingArgSpaceVar)
+ {
ilName = "OutArgs";
+ }
#endif // FEATURE_FIXED_OUT_ARGS
#ifdef _TARGET_ARM_
else if (lclNum == lvaPromotedStructAssemblyScratchVar)
+ {
ilName = "PromotedStructScratch";
+ }
#endif // _TARGET_ARM_
#if !FEATURE_EH_FUNCLETS
else if (lclNum == lvaShadowSPslotsVar)
+ {
ilName = "EHSlots";
+ }
#endif // !FEATURE_EH_FUNCLETS
else if (lclNum == lvaLocAllocSPvar)
+ {
ilName = "LocAllocSP";
+ }
#if FEATURE_EH_FUNCLETS
else if (lclNum == lvaPSPSym)
+ {
ilName = "PSPSym";
+ }
#endif // FEATURE_EH_FUNCLETS
else
{
break;
}
else if (gtOp.gtOp1->OperGet() == GT_CNS_INT)
- {
+ {
gtOp.gtOp2->LabelIndex(comp, isConst);
break;
}
}
}
+// Note that the value of the field below doesn't matter; it exists only to provide a distinguished address.
+//
// static
-FieldSeqNode FieldSeqStore::s_notAField(NULL, NULL); // Value doesn't matter; exists only to provide a distinguished address.
+FieldSeqNode FieldSeqStore::s_notAField(NULL, NULL);
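
The same distinguished-address idiom in miniature (an illustrative sketch; the helper
name is made up and is not the JIT's actual accessor):

    // Any object with static storage duration works as a sentinel: no other
    // allocation can ever compare equal to its address.
    static bool IsNotAField(FieldSeqNode* node)
    {
        return node == &FieldSeqStore::s_notAField;
    }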
// FieldSeqStore methods.
FieldSeqStore::FieldSeqStore(IAllocator* alloc) : m_alloc(alloc), m_canonMap(new (alloc) FieldSeqNodeCanonMap(alloc))
{
regionIndex = HBtab->ebdEnclosingTryIndex;
if (regionIndex == EHblkDsc::NO_ENCLOSING_INDEX)
- break; // No enclosing 'try'; we're done
+ {
+ // No enclosing 'try'; we're done
+ break;
+ }
HBtab = ehGetDsc(regionIndex);
if (!EHblkDsc::ebdIsSameILTry(ehDscRoot, HBtab))
- break; // Found an enclosing 'try' that has a different 'try' region (is not mutually-protect with the original region). Return it.
+ {
+ // Found an enclosing 'try' that has a different 'try' region (is not mutually-protect with the
+ // original region). Return it.
+ break;
+ }
}
return regionIndex;
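// For reference, "mutually protect" means handlers guarding the exact same IL 'try'
// extent, which share one try region -- that is why the loop above keeps walking
// outward until it finds an enclosing 'try' with a different extent. Illustrative
// C# shape:
//
//     try { ... }                        // a single IL try region
//     catch (ArgumentException) { ... }  // handler 1
//     catch (Exception)         { ... }  // handler 2: mutually protect with handler 1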
fgWalkAllTreesPre(lvaStressLclFldCB, &Args);
}
-/*****************************************************************************/
#endif // DEBUG
+
/*****************************************************************************
*
* A little routine that displays a local variable bitset.
}
else
#endif // DEBUG
- // Set preferences so that this register set will be preferred for earlier refs
- theInterval->updateRegisterPreferences(rp->registerAssignment);
+ {
+ // Set preferences so that this register set will be preferred for earlier refs
+ theInterval->updateRegisterPreferences(rp->registerAssignment);
+ }
}
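// Why the braces matter here (for reference): the 'else' above exists only in DEBUG
// builds. Without braces, a second statement added to this spot later would appear
// to belong to the 'else' but would actually run unconditionally in DEBUG builds --
// a latent configuration-dependent bug. Braces keep the body's extent explicit in
// both builds.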
void
optValnumCSE_phase = false;
}
-/*****************************************************************************/
#endif // FEATURE_VALNUM_CSE
-/*****************************************************************************/
-
/*****************************************************************************
*
}
#ifdef LEGACY_BACKEND // We don't use any of the old register allocator functions when LSRA is used instead.
+
/*****************************************************************************
*
* Predict which variables will be assigned to registers