regMaskTP LinearScan::allRegs(RegisterType rt)
{
    assert((rt != TYP_UNDEF) && (rt != TYP_STRUCT));
-    if (rt == TYP_FLOAT)
-    {
-        return availableFloatRegs;
-    }
-    else if (rt == TYP_DOUBLE)
-    {
-        return availableDoubleRegs;
-    }
-#ifdef FEATURE_SIMD
-    // TODO-Cleanup: Add an RBM_ALLSIMD
-    else if (varTypeIsSIMD(rt))
-    {
-        return availableDoubleRegs;
-    }
-#endif // FEATURE_SIMD
-    else
-    {
-        return availableIntRegs;
-    }
+    return *availableRegs[rt];
}
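
With the table in place (see the constructor change below), the hot-path branch chain collapses to a single indexed load. A minimal standalone sketch of the pattern, using made-up masks and a trimmed `var_types` enum rather than the real JIT definitions:

```cpp
#include <cassert>
#include <cstdint>
#include <cstdio>

using regMaskTP = uint64_t;
enum var_types { TYP_INT, TYP_FLOAT, TYP_DOUBLE, TYP_COUNT };

struct RegSets
{
    // Made-up masks standing in for the PhasedVar<regMaskTP> members.
    regMaskTP availableIntRegs    = 0x00FF;
    regMaskTP availableFloatRegs  = 0xFF00;
    regMaskTP availableDoubleRegs = 0xFF00;

    // One-time setup, mirroring the availableRegs table built in the
    // LinearScan constructor.
    regMaskTP* availableRegs[TYP_COUNT];
    RegSets()
    {
        availableRegs[TYP_INT]    = &availableIntRegs;
        availableRegs[TYP_FLOAT]  = &availableFloatRegs;
        availableRegs[TYP_DOUBLE] = &availableDoubleRegs;
    }

    // The per-call if/else chain becomes one load through the table.
    regMaskTP allRegs(var_types rt) const
    {
        assert(rt < TYP_COUNT);
        return *availableRegs[rt];
    }
};

int main()
{
    RegSets r;
    printf("0x%llx\n", (unsigned long long)r.allRegs(TYP_DOUBLE)); // 0xff00
}
```
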
regMaskTP LinearScan::internalFloatRegCandidates()
{
    if (compiler->compFloatingPointUsed)
    {
-        return allRegs(TYP_FLOAT);
+        return availableFloatRegs;
    }
    else
    {
        return RBM_FLT_CALLEE_TRUST;
    }
}
        availableDoubleRegs &= ~RBM_CALLEE_SAVED;
    }
#endif // TARGET_AMD64 || TARGET_ARM64
+
+    for (unsigned int i = 0; i < TYP_COUNT; i++)
+    {
+        var_types thisType = (var_types)genActualTypes[i];
+        if (thisType == TYP_FLOAT)
+        {
+            availableRegs[i] = &availableFloatRegs;
+        }
+        else if (thisType == TYP_DOUBLE)
+        {
+            availableRegs[i] = &availableDoubleRegs;
+        }
+#ifdef FEATURE_SIMD
+        else if ((thisType >= TYP_SIMD8) && (thisType <= TYP_SIMD32))
+        {
+            availableRegs[i] = &availableDoubleRegs;
+        }
+#endif
+        else
+        {
+            availableRegs[i] = &availableIntRegs;
+        }
+    }
+
    compiler->rpFrameType           = FT_NOT_SET;
    compiler->rpMustCreateEBPCalled = false;
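
Indexing the setup loop above through `genActualTypes[i]` normalizes each type to its widened "actual" type, so small integer types (and, in the real tables, GC pointers such as TYP_REF) fall through to the final `else` and share `availableIntRegs`; every `var_types` index ends up with a valid pointer, which is why `allRegs` needs nothing beyond its assert. A sketch of that normalize-then-map step, with a hypothetical four-entry type table rather than the JIT's real one:

```cpp
#include <cstdio>

// Hypothetical stand-in: the real genActualTypes comes from the JIT's
// type tables and covers every var_types value.
enum var_types { TYP_BYTE, TYP_SHORT, TYP_INT, TYP_FLOAT, TYP_COUNT };
static const var_types genActualTypes[TYP_COUNT] = {TYP_INT, TYP_INT, TYP_INT, TYP_FLOAT};

int main()
{
    unsigned  availableIntRegs = 0x00FF, availableFloatRegs = 0xFF00;
    unsigned* availableRegs[TYP_COUNT];

    for (int i = 0; i < TYP_COUNT; i++)
    {
        // Small int types widen to TYP_INT, so TYP_BYTE and TYP_SHORT
        // land on the same entry as TYP_INT.
        availableRegs[i] = (genActualTypes[i] == TYP_FLOAT) ? &availableFloatRegs : &availableIntRegs;
    }

    printf("0x%x\n", *availableRegs[TYP_BYTE]); // 0xff: byte locals use int regs
}
```
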
    // A temporary VarToRegMap used during the resolution of critical edges.
    VarToRegMap sharedCriticalVarToRegMap;
-    PhasedVar<regMaskTP> availableIntRegs;
-    PhasedVar<regMaskTP> availableFloatRegs;
-    PhasedVar<regMaskTP> availableDoubleRegs;
+    PhasedVar<regMaskTP>  availableIntRegs;
+    PhasedVar<regMaskTP>  availableFloatRegs;
+    PhasedVar<regMaskTP>  availableDoubleRegs;
+    PhasedVar<regMaskTP>* availableRegs[TYP_COUNT];
    // Register mask of argument registers currently occupied because we saw a
    // PUTARG_REG node. Tracked between the PUTARG_REG and its corresponding
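
The new member is a table of `PhasedVar<regMaskTP>*` rather than mask copies, so any narrowing applied to the underlying masks after the table is built (for example the callee-saved stripping in the constructor above) remains visible through `allRegs`. A toy pointer-versus-copy illustration, not JIT code:

```cpp
#include <cstdio>

int main()
{
    unsigned floatRegs = 0xFF00;

    unsigned  copy = floatRegs;  // snapshot taken at setup: goes stale
    unsigned* ptr  = &floatRegs; // what the table stores: stays current

    floatRegs &= ~0xF000u; // later narrowing, e.g. dropping callee-saved regs

    printf("copy=0x%x via-ptr=0x%x\n", copy, *ptr); // copy=0xff00 via-ptr=0xf00
}
```
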
    if (compiler->killGCRefs(tree))
    {
        RefPosition* pos =
-            newRefPosition((Interval*)nullptr, currentLoc, RefTypeKillGCRefs, tree, (allRegs(TYP_REF) & ~RBM_ARG_REGS));
+            newRefPosition((Interval*)nullptr, currentLoc, RefTypeKillGCRefs, tree, (availableIntRegs & ~RBM_ARG_REGS));
        insertedKills = true;
    }
RefPosition* LinearScan::buildInternalIntRegisterDefForNode(GenTree* tree, regMaskTP internalCands)
{
    // The candidate set should contain only integer registers.
-    assert((internalCands & ~allRegs(TYP_INT)) == RBM_NONE);
+    assert((internalCands & ~availableIntRegs) == RBM_NONE);
    RefPosition* defRefPosition = defineNewInternalTemp(tree, IntRegisterType, internalCands);
    return defRefPosition;
RefPosition* LinearScan::buildInternalFloatRegisterDefForNode(GenTree* tree, regMaskTP internalCands)
{
    // The candidate set should contain only float registers.
-    assert((internalCands & ~allRegs(TYP_FLOAT)) == RBM_NONE);
+    assert((internalCands & ~availableFloatRegs) == RBM_NONE);
    RefPosition* defRefPosition = defineNewInternalTemp(tree, FloatRegisterType, internalCands);
    return defRefPosition;
    {
        if (dstCandidates == RBM_NONE)
        {
-            dstCandidates = allRegs(TYP_INT);
+            dstCandidates = availableIntRegs;
        }
        dstCandidates &= ~RBM_NON_BYTE_REGS;
        assert(dstCandidates != RBM_NONE);
            // Comparand is preferenced to RAX.
            // The remaining two operands can be in any reg other than RAX.
-            BuildUse(tree->AsCmpXchg()->gtOpLocation, allRegs(TYP_INT) & ~RBM_RAX);
-            BuildUse(tree->AsCmpXchg()->gtOpValue, allRegs(TYP_INT) & ~RBM_RAX);
+            BuildUse(tree->AsCmpXchg()->gtOpLocation, availableIntRegs & ~RBM_RAX);
+            BuildUse(tree->AsCmpXchg()->gtOpValue, availableIntRegs & ~RBM_RAX);
            BuildUse(tree->AsCmpXchg()->gtOpComparand, RBM_RAX);
            BuildDef(tree, RBM_RAX);
        }
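
The fixed RBM_RAX candidates fall straight out of the ISA: `cmpxchg` implicitly compares its memory operand against RAX and reloads RAX on failure, so the comparand has no other legal home and the remaining operands must keep clear of it. A toy demonstration (GCC/Clang inline asm, x86-64 only, not JIT code):

```cpp
#include <cstdio>

int main()
{
    long mem = 42, comparand = 42, value = 7;

    // "+a" pins the comparand to RAX, just as the RefPositions above do.
    __asm__ volatile("lock cmpxchgq %2, %1"
                     : "+a"(comparand), "+m"(mem)
                     : "r"(value)
                     : "cc");

    printf("%ld\n", mem); // 7: compare succeeded, value was exchanged in
}
```
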
#endif
    else
    {
-        srcCandidates = allRegs(TYP_INT) & ~RBM_RCX;
-        dstCandidates = allRegs(TYP_INT) & ~RBM_RCX;
+        srcCandidates = availableIntRegs & ~RBM_RCX;
+        dstCandidates = availableIntRegs & ~RBM_RCX;
    }
    // Note that Rotate Left/Right instructions don't set ZF and SF flags.
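
The RCX carve-out above is likewise a hardware constraint: x86 variable shifts and rotates take their count only in CL, so RCX must stay free for the count operand. A toy demonstration (GCC/Clang inline asm, x86-64 only):

```cpp
#include <cstdio>

int main()
{
    unsigned long v = 1, count = 5;

    // "c" pins the shift count to RCX/CL; the value can live anywhere else.
    __asm__("shlq %%cl, %0" : "+r"(v) : "c"(count) : "cc");

    printf("%lu\n", v); // 32
}
```
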
            // Don't assign the call target to any of the argument registers because
            // we will use them to also pass floating point arguments as required
            // by Amd64 ABI.
-            ctrlExprCandidates = allRegs(TYP_INT) & ~(RBM_ARG_REGS);
+            ctrlExprCandidates = availableIntRegs & ~(RBM_ARG_REGS);
        }
        srcCount += BuildOperandUses(ctrlExpr, ctrlExprCandidates);
    }
            case GenTreeBlk::BlkOpKindUnroll:
                if ((size % XMM_REGSIZE_BYTES) != 0)
                {
-                    regMaskTP regMask = allRegs(TYP_INT);
+                    regMaskTP regMask = availableIntRegs;
#ifdef TARGET_X86
                    if ((size & 1) != 0)
                    {
            // If we have a remainder smaller than XMM_REGSIZE_BYTES, we need an integer temp reg.
            if ((loadSize % XMM_REGSIZE_BYTES) != 0)
            {
-                regMaskTP regMask = allRegs(TYP_INT);
+                regMaskTP regMask = availableIntRegs;
#ifdef TARGET_X86
                // Storing at byte granularity requires a byteable register.
                if ((loadSize & 1) != 0)
        srcCount = 1;
    }
-    srcCount += BuildDelayFreeUses(op2, op1, allRegs(TYP_INT) & ~(RBM_RAX | RBM_RDX));
+    srcCount += BuildDelayFreeUses(op2, op1, availableIntRegs & ~(RBM_RAX | RBM_RDX));
    buildInternalRegisterUses();
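
The RAX/RDX exclusion mirrors the div/idiv encoding: the dividend is consumed from RDX:RAX and the quotient and remainder are written back to that same pair, so the divisor, built here as a delay-freed use, must live elsewhere. A toy demonstration (GCC/Clang inline asm, x86-64 only):

```cpp
#include <cstdio>

int main()
{
    unsigned long quot, rem;

    // Dividend enters in RDX:RAX ("d","a"); results come back in the same
    // pair, so the divisor operand must use some other register ("r").
    __asm__("divq %4"
            : "=a"(quot), "=d"(rem)
            : "a"(45UL), "d"(0UL), "r"(7UL)
            : "cc");

    printf("%lu r%lu\n", quot, rem); // 6 r3
}
```
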