Move TreeNodeInfoInit to LinearScan
author: Carol Eidt <carol.eidt@microsoft.com>
Fri, 25 Aug 2017 20:36:43 +0000 (13:36 -0700)
committer: Carol Eidt <carol.eidt@microsoft.com>
Fri, 25 Aug 2017 20:39:02 +0000 (13:39 -0700)
In preparation for computing the TreeNodeInfo on the fly, move all the TreeNodeInfoInit and related methods to the LinearScan class.

src/jit/lower.cpp
src/jit/lower.h
src/jit/lowerarm.cpp
src/jit/lowerarm64.cpp
src/jit/lowerxarch.cpp
src/jit/lsra.cpp
src/jit/lsra.h
src/jit/lsraarm.cpp
src/jit/lsraarm64.cpp
src/jit/lsraarmarch.cpp
src/jit/lsraxarch.cpp

index 5ac3682..6921b8b 100644 (file)
@@ -98,44 +98,6 @@ bool Lowering::IsSafeToContainMem(GenTree* parentNode, GenTree* childNode)
 }
 
 //------------------------------------------------------------------------
-// IsContainableMemoryOp: Checks whether this is a memory op that can be contained.
-//
-// Arguments:
-//    node        - the node of interest.
-//
-// Return value:
-//    True if this will definitely be a memory reference that could be contained.
-//
-// Notes:
-//    This differs from the isMemoryOp() method on GenTree because it checks for
-//    the case of doNotEnregister local. This won't include locals that
-//    for some other reason do not become register candidates, nor those that get
-//    spilled.
-//    Also, because we usually call this before we redo dataflow, any new lclVars
-//    introduced after the last dataflow analysis will not yet be marked lvTracked,
-//    so we don't use that.
-//
-bool Lowering::IsContainableMemoryOp(GenTree* node)
-{
-#ifdef _TARGET_XARCH_
-    if (node->isMemoryOp())
-    {
-        return true;
-    }
-    if (node->IsLocal())
-    {
-        if (!m_lsra->enregisterLocalVars)
-        {
-            return true;
-        }
-        LclVarDsc* varDsc = &comp->lvaTable[node->AsLclVar()->gtLclNum];
-        return varDsc->lvDoNotEnregister;
-    }
-#endif // _TARGET_XARCH_
-    return false;
-}
-
-//------------------------------------------------------------------------
 
 // This is the main entry point for Lowering.
 GenTree* Lowering::LowerNode(GenTree* node)
@@ -2437,7 +2399,7 @@ void Lowering::LowerCompare(GenTree* cmp)
         GenTreeIntCon* op2      = cmp->gtGetOp2()->AsIntCon();
         ssize_t        op2Value = op2->IconValue();
 
-        if (IsContainableMemoryOp(op1) && varTypeIsSmall(op1Type) && genTypeCanRepresentValue(op1Type, op2Value))
+        if (m_lsra->isContainableMemoryOp(op1) && varTypeIsSmall(op1Type) && genTypeCanRepresentValue(op1Type, op2Value))
         {
             //
             // If op1's type is small then try to narrow op2 so it has the same type as op1.
@@ -2467,7 +2429,7 @@ void Lowering::LowerCompare(GenTree* cmp)
                 // the result of bool returning calls.
                 //
 
-                if (castOp->OperIs(GT_CALL, GT_LCL_VAR) || castOp->OperIsLogical() || IsContainableMemoryOp(castOp))
+                if (castOp->OperIs(GT_CALL, GT_LCL_VAR) || castOp->OperIsLogical() || m_lsra->isContainableMemoryOp(castOp))
                 {
                     assert(!castOp->gtOverflowEx()); // Must not be an overflow checking operation
 
@@ -2529,7 +2491,7 @@ void Lowering::LowerCompare(GenTree* cmp)
                 andOp1->ClearContained();
                 andOp2->ClearContained();
 
-                if (IsContainableMemoryOp(andOp1) && andOp2->IsIntegralConst())
+                if (m_lsra->isContainableMemoryOp(andOp1) && andOp2->IsIntegralConst())
                 {
                     //
                     // For "test" we only care about the bits that are set in the second operand (mask).
@@ -4844,8 +4806,6 @@ void Lowering::DoPhase()
     }
 #endif
 
-    // The initialization code for the TreeNodeInfo map was initially part of a single full IR
-    // traversal and it has been split because the order of traversal performed by fgWalkTreePost
     // does not necessarily lower nodes in execution order and also, it could potentially
     // add new BasicBlocks on the fly as part of the Lowering pass so the traversal won't be complete.
     //
@@ -4893,7 +4853,7 @@ void Lowering::DoPhase()
 
             currentLoc += 2;
 
-            TreeNodeInfoInit(node);
+            m_lsra->TreeNodeInfoInit(node);
 
             // Only nodes that produce values should have a non-zero dstCount.
             assert((node->gtLsraInfo.dstCount == 0) || node->IsValue());
@@ -5425,43 +5385,6 @@ void Lowering::ContainCheckNode(GenTree* node)
 }
 
 //------------------------------------------------------------------------
-// GetIndirSourceCount: Get the source registers for an indirection that might be contained.
-//
-// Arguments:
-//    node      - The node of interest
-//
-// Return Value:
-//    The number of source registers used by the *parent* of this node.
-//
-int Lowering::GetIndirSourceCount(GenTreeIndir* indirTree)
-{
-    GenTree* const addr = indirTree->gtOp1;
-    if (!addr->isContained())
-    {
-        return 1;
-    }
-    if (!addr->OperIs(GT_LEA))
-    {
-        return 0;
-    }
-
-    GenTreeAddrMode* const addrMode = addr->AsAddrMode();
-
-    unsigned srcCount = 0;
-    if ((addrMode->Base() != nullptr) && !addrMode->Base()->isContained())
-    {
-        srcCount++;
-    }
-    if (addrMode->Index() != nullptr)
-    {
-        // We never have a contained index.
-        assert(!addrMode->Index()->isContained());
-        srcCount++;
-    }
-    return srcCount;
-}
-
-//------------------------------------------------------------------------
 // ContainCheckDivOrMod: determine which operands of a div/mod should be contained.
 //
 // Arguments:
@@ -5481,7 +5404,7 @@ void Lowering::ContainCheckDivOrMod(GenTreeOp* node)
         // everything is made explicit by adding casts.
         assert(dividend->TypeGet() == divisor->TypeGet());
 
-        if (IsContainableMemoryOp(divisor) || divisor->IsCnsNonZeroFltOrDbl())
+        if (m_lsra->isContainableMemoryOp(divisor) || divisor->IsCnsNonZeroFltOrDbl())
         {
             MakeSrcContained(node, divisor);
         }
@@ -5503,7 +5426,7 @@ void Lowering::ContainCheckDivOrMod(GenTreeOp* node)
 #endif
 
     // divisor can be an r/m, but the memory indirection must be of the same size as the divide
-    if (IsContainableMemoryOp(divisor) && (divisor->TypeGet() == node->TypeGet()))
+    if (m_lsra->isContainableMemoryOp(divisor) && (divisor->TypeGet() == node->TypeGet()))
     {
         MakeSrcContained(node, divisor);
     }
index ebee641..eb0c4fd 100644 (file)
@@ -210,25 +210,10 @@ private:
         return oldUseNode->AsLclVarCommon()->gtLclNum;
     }
 
-    // returns true if the tree can use the read-modify-write memory instruction form
-    bool isRMWRegOper(GenTreePtr tree);
-
     // return true if this call target is within range of a pc-rel call on the machine
     bool IsCallTargetInRange(void* addr);
 
-#ifdef _TARGET_X86_
-    bool ExcludeNonByteableRegisters(GenTree* tree);
-#endif
-
-    void TreeNodeInfoInit(GenTree* stmt);
-
-    void TreeNodeInfoInitCheckByteable(GenTree* tree);
-
-    void SetDelayFree(GenTree* delayUseSrc);
-
 #if defined(_TARGET_XARCH_)
-    void TreeNodeInfoInitSimple(GenTree* tree);
-
     //----------------------------------------------------------------------
     // SetRegOptional - sets a bit to indicate to LSRA that register
     // for a given tree node is optional for codegen purpose.  If no
@@ -289,37 +274,13 @@ private:
     }
 #endif // defined(_TARGET_XARCH_)
 
-    // TreeNodeInfoInit methods
-
-    int GetOperandSourceCount(GenTree* node);
-    int GetIndirSourceCount(GenTreeIndir* indirTree);
-    void HandleFloatVarArgs(GenTreeCall* call, GenTree* argNode, bool* callHasFloatRegArgs);
-
-    void TreeNodeInfoInitStoreLoc(GenTree* tree);
-    void TreeNodeInfoInitReturn(GenTree* tree);
-    void TreeNodeInfoInitShiftRotate(GenTree* tree);
-    void TreeNodeInfoInitPutArgReg(GenTreeUnOp* node);
-    void TreeNodeInfoInitCall(GenTreeCall* call);
-    void TreeNodeInfoInitCmp(GenTreePtr tree);
-    void TreeNodeInfoInitStructArg(GenTreePtr structArg);
-    void TreeNodeInfoInitBlockStore(GenTreeBlk* blkNode);
-    void TreeNodeInfoInitModDiv(GenTree* tree);
-    void TreeNodeInfoInitIntrinsic(GenTree* tree);
-    void TreeNodeInfoInitStoreLoc(GenTreeLclVarCommon* tree);
-    void TreeNodeInfoInitIndir(GenTreeIndir* indirTree);
-    void TreeNodeInfoInitGCWriteBarrier(GenTree* tree);
-    void TreeNodeInfoInitCast(GenTree* tree);
-
-#if defined(_TARGET_XARCH_)
-    void TreeNodeInfoInitMul(GenTreePtr tree);
-    void SetContainsAVXFlags(bool isFloatingPointType = true, unsigned sizeOfSIMDVector = 0);
-#endif // defined(_TARGET_XARCH_)
-
-#ifdef FEATURE_SIMD
-    void TreeNodeInfoInitSIMD(GenTreeSIMD* tree);
-#endif // FEATURE_SIMD
-
-    void TreeNodeInfoInitPutArgStk(GenTreePutArgStk* argNode);
+    // Per tree node member functions
+    void LowerStoreIndir(GenTreeIndir* node);
+    GenTree* LowerAdd(GenTree* node);
+    bool LowerUnsignedDivOrMod(GenTreeOp* divMod);
+    GenTree* LowerConstIntDivOrMod(GenTree* node);
+    GenTree* LowerSignedDivOrMod(GenTree* node);
+    void LowerBlockStore(GenTreeBlk* blkNode);
 #ifdef _TARGET_ARM64_
     void LowerPutArgStk(GenTreePutArgStk* argNode, fgArgTabEntryPtr info);
 #endif // _TARGET_ARM64_
@@ -327,21 +288,8 @@ private:
     void LowerPutArgStk(GenTreePutArgStk* argNode, fgArgTabEntryPtr info);
 #endif // _TARGET_ARM64_
     void LowerPutArgStk(GenTreePutArgStk* tree);
-#ifdef _TARGET_ARM_
-    void TreeNodeInfoInitPutArgSplit(GenTreePutArgSplit* tree);
-#endif
-    void TreeNodeInfoInitLclHeap(GenTree* tree);
-
     void DumpNodeInfoMap();
 
-    // Per tree node member functions
-    void LowerStoreIndir(GenTreeIndir* node);
-    GenTree* LowerAdd(GenTree* node);
-    bool LowerUnsignedDivOrMod(GenTreeOp* divMod);
-    GenTree* LowerConstIntDivOrMod(GenTree* node);
-    GenTree* LowerSignedDivOrMod(GenTree* node);
-    void LowerBlockStore(GenTreeBlk* blkNode);
-
     GenTree* TryCreateAddrMode(LIR::Use&& use, bool isIndir);
     void AddrModeCleanupHelper(GenTreeAddrMode* addrMode, GenTree* node);
 
@@ -380,9 +328,6 @@ private:
     //  for example small enough and non-relocatable
     bool IsContainableImmed(GenTree* parentNode, GenTree* childNode);
 
-    // Return true if 'node' is a containable memory op.
-    bool IsContainableMemoryOp(GenTree* node);
-
     // Makes 'childNode' contained in the 'parentNode'
     void MakeSrcContained(GenTreePtr parentNode, GenTreePtr childNode);
 
index 1d94caa..8cf5225 100644 (file)
@@ -29,18 +29,6 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
 #include "sideeffects.h"
 #include "lower.h"
 
-//------------------------------------------------------------------------
-// isRMWRegOper: Can use the read-mofify-write memory instruction form?
-//
-// Return Value:
-//    True if the tree can use the read-modify-write memory instruction form
-//
-bool Lowering::isRMWRegOper(GenTreePtr tree)
-{
-    NYI_ARM("isRMWRegOper() is never used and tested for ARM");
-    return false;
-}
-
 #endif // _TARGET_ARM_
 
 #endif // !LEGACY_BACKEND
index 38a82c7..060a3a9 100644 (file)
@@ -29,12 +29,6 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
 #include "sideeffects.h"
 #include "lower.h"
 
-// returns true if the tree can use the read-modify-write memory instruction form
-bool Lowering::isRMWRegOper(GenTreePtr tree)
-{
-    return false;
-}
-
 #endif // _TARGET_ARM64_
 
 #endif // !LEGACY_BACKEND
index 2b5ad0c..3d07c6f 100644 (file)
@@ -1191,48 +1191,6 @@ bool Lowering::IsRMWMemOpRootedAtStoreInd(GenTreePtr tree, GenTreePtr* outIndirC
     return true;
 }
 
-//------------------------------------------------------------------------------
-// isRMWRegOper: Can this binary tree node be used in a Read-Modify-Write format
-//
-// Arguments:
-//    tree      - a binary tree node
-//
-// Return Value:
-//    Returns true if we can use the read-modify-write instruction form
-//
-// Notes:
-//    This is used to determine whether to preference the source to the destination register.
-//
-bool Lowering::isRMWRegOper(GenTreePtr tree)
-{
-    // TODO-XArch-CQ: Make this more accurate.
-    // For now, We assume that most binary operators are of the RMW form.
-    assert(tree->OperIsBinary());
-
-    if (tree->OperIsCompare() || tree->OperIs(GT_CMP))
-    {
-        return false;
-    }
-
-    switch (tree->OperGet())
-    {
-        // These Opers either support a three op form (i.e. GT_LEA), or do not read/write their first operand
-        case GT_LEA:
-        case GT_STOREIND:
-        case GT_ARR_INDEX:
-        case GT_STORE_BLK:
-        case GT_STORE_OBJ:
-            return false;
-
-        // x86/x64 does support a three op multiply when op2|op1 is a contained immediate
-        case GT_MUL:
-            return (!IsContainableImmed(tree, tree->gtOp.gtOp2) && !IsContainableImmed(tree, tree->gtOp.gtOp1));
-
-        default:
-            return true;
-    }
-}
-
 // anything is in range for AMD64
 bool Lowering::IsCallTargetInRange(void* addr)
 {
@@ -1576,11 +1534,11 @@ void Lowering::ContainCheckMul(GenTreeOp* node)
     {
         assert(node->OperGet() == GT_MUL);
 
-        if (IsContainableMemoryOp(op2) || op2->IsCnsNonZeroFltOrDbl())
+        if (m_lsra->isContainableMemoryOp(op2) || op2->IsCnsNonZeroFltOrDbl())
         {
             MakeSrcContained(node, op2);
         }
-        else if (op1->IsCnsNonZeroFltOrDbl() || (IsContainableMemoryOp(op1) && IsSafeToContainMem(node, op1)))
+        else if (op1->IsCnsNonZeroFltOrDbl() || (m_lsra->isContainableMemoryOp(op1) && IsSafeToContainMem(node, op1)))
         {
             // Since  GT_MUL is commutative, we will try to re-order operands if it is safe to
             // generate more efficient code sequence for the case of GT_MUL(op1=memOp, op2=non-memOp)
@@ -1644,7 +1602,7 @@ void Lowering::ContainCheckMul(GenTreeOp* node)
         }
 
         MakeSrcContained(node, imm); // The imm is always contained
-        if (IsContainableMemoryOp(other))
+        if (m_lsra->isContainableMemoryOp(other))
         {
             memOp = other; // memOp may be contained below
         }
@@ -1657,11 +1615,11 @@ void Lowering::ContainCheckMul(GenTreeOp* node)
     //
     if (memOp == nullptr)
     {
-        if (IsContainableMemoryOp(op2) && (op2->TypeGet() == node->TypeGet()) && IsSafeToContainMem(node, op2))
+        if (m_lsra->isContainableMemoryOp(op2) && (op2->TypeGet() == node->TypeGet()) && IsSafeToContainMem(node, op2))
         {
             memOp = op2;
         }
-        else if (IsContainableMemoryOp(op1) && (op1->TypeGet() == node->TypeGet()) && IsSafeToContainMem(node, op1))
+        else if (m_lsra->isContainableMemoryOp(op1) && (op1->TypeGet() == node->TypeGet()) && IsSafeToContainMem(node, op1))
         {
             memOp = op1;
         }
@@ -1803,7 +1761,7 @@ void Lowering::ContainCheckCast(GenTreeCast* node)
         // U8 -> R8 conversion requires that the operand be in a register.
         if (srcType != TYP_ULONG)
         {
-            if (IsContainableMemoryOp(castOp) || castOp->IsCnsNonZeroFltOrDbl())
+            if (m_lsra->isContainableMemoryOp(castOp) || castOp->IsCnsNonZeroFltOrDbl())
             {
                 MakeSrcContained(node, castOp);
             }
@@ -1878,7 +1836,7 @@ void Lowering::ContainCheckCompare(GenTreeOp* cmp)
         {
             MakeSrcContained(cmp, otherOp);
         }
-        else if (IsContainableMemoryOp(otherOp) && ((otherOp == op2) || IsSafeToContainMem(cmp, otherOp)))
+        else if (m_lsra->isContainableMemoryOp(otherOp) && ((otherOp == op2) || IsSafeToContainMem(cmp, otherOp)))
         {
             MakeSrcContained(cmp, otherOp);
         }
@@ -1901,7 +1859,7 @@ void Lowering::ContainCheckCompare(GenTreeOp* cmp)
         // we can treat the MemoryOp as contained.
         if (op1Type == op2Type)
         {
-            if (IsContainableMemoryOp(op1))
+            if (m_lsra->isContainableMemoryOp(op1))
             {
                 MakeSrcContained(cmp, op1);
             }
@@ -1951,11 +1909,11 @@ void Lowering::ContainCheckCompare(GenTreeOp* cmp)
         // Note that TEST does not have a r,rm encoding like CMP has but we can still
         // contain the second operand because the emitter maps both r,rm and rm,r to
         // the same instruction code. This avoids the need to special case TEST here.
-        if (IsContainableMemoryOp(op2))
+        if (m_lsra->isContainableMemoryOp(op2))
         {
             MakeSrcContained(cmp, op2);
         }
-        else if (IsContainableMemoryOp(op1) && IsSafeToContainMem(cmp, op1))
+        else if (m_lsra->isContainableMemoryOp(op1) && IsSafeToContainMem(cmp, op1))
         {
             MakeSrcContained(cmp, op1);
         }
@@ -2039,7 +1997,7 @@ bool Lowering::LowerRMWMemOp(GenTreeIndir* storeInd)
         // On Xarch RMW operations require the source to be an immediate or in a register.
         // Therefore, if we have previously marked the indirOpSource as contained while lowering
         // the binary node, we need to reset that now.
-        if (IsContainableMemoryOp(indirOpSource))
+        if (m_lsra->isContainableMemoryOp(indirOpSource))
         {
             indirOpSource->ClearContained();
         }
@@ -2145,7 +2103,7 @@ void Lowering::ContainCheckBinary(GenTreeOp* node)
         if (!binOpInRMW)
         {
             const unsigned operatorSize = genTypeSize(node->TypeGet());
-            if (IsContainableMemoryOp(op2) && (genTypeSize(op2->TypeGet()) == operatorSize))
+            if (m_lsra->isContainableMemoryOp(op2) && (genTypeSize(op2->TypeGet()) == operatorSize))
             {
                 directlyEncodable = true;
                 operand           = op2;
@@ -2153,7 +2111,7 @@ void Lowering::ContainCheckBinary(GenTreeOp* node)
             else if (node->OperIsCommutative())
             {
                 if (IsContainableImmed(node, op1) ||
-                    (IsContainableMemoryOp(op1) && (genTypeSize(op1->TypeGet()) == operatorSize) &&
+                    (m_lsra->isContainableMemoryOp(op1) && (genTypeSize(op1->TypeGet()) == operatorSize) &&
                      IsSafeToContainMem(node, op1)))
                 {
                     // If it is safe, we can reverse the order of operands of commutative operations for efficient
@@ -2197,7 +2155,7 @@ void Lowering::ContainCheckBoundsChk(GenTreeBoundsChk* node)
     {
         other = node->gtIndex;
     }
-    else if (IsContainableMemoryOp(node->gtIndex))
+    else if (m_lsra->isContainableMemoryOp(node->gtIndex))
     {
         other = node->gtIndex;
     }
@@ -2208,7 +2166,7 @@ void Lowering::ContainCheckBoundsChk(GenTreeBoundsChk* node)
 
     if (node->gtIndex->TypeGet() == node->gtArrLen->TypeGet())
     {
-        if (IsContainableMemoryOp(other))
+        if (m_lsra->isContainableMemoryOp(other))
         {
             MakeSrcContained(node, other);
         }
@@ -2232,7 +2190,7 @@ void Lowering::ContainCheckIntrinsic(GenTreeOp* node)
     if (node->gtIntrinsic.gtIntrinsicId == CORINFO_INTRINSIC_Sqrt)
     {
         GenTree* op1 = node->gtGetOp1();
-        if (IsContainableMemoryOp(op1) || op1->IsCnsNonZeroFltOrDbl())
+        if (m_lsra->isContainableMemoryOp(op1) || op1->IsCnsNonZeroFltOrDbl())
         {
             MakeSrcContained(node, op1);
         }
@@ -2330,7 +2288,7 @@ void Lowering::ContainCheckSIMD(GenTreeSIMD* simdNode)
             // If the index is a constant, mark it as contained.
             CheckImmedAndMakeContained(simdNode, op2);
 
-            if (IsContainableMemoryOp(op1))
+            if (m_lsra->isContainableMemoryOp(op1))
             {
                 MakeSrcContained(simdNode, op1);
                 if (op1->OperGet() == GT_IND)
@@ -2373,12 +2331,12 @@ void Lowering::ContainCheckFloatBinary(GenTreeOp* node)
     // everything is made explicit by adding casts.
     assert(op1->TypeGet() == op2->TypeGet());
 
-    if (IsContainableMemoryOp(op2) || op2->IsCnsNonZeroFltOrDbl())
+    if (m_lsra->isContainableMemoryOp(op2) || op2->IsCnsNonZeroFltOrDbl())
     {
         MakeSrcContained(node, op2);
     }
     else if (node->OperIsCommutative() &&
-             (op1->IsCnsNonZeroFltOrDbl() || (IsContainableMemoryOp(op1) && IsSafeToContainMem(node, op1))))
+             (op1->IsCnsNonZeroFltOrDbl() || (m_lsra->isContainableMemoryOp(op1) && IsSafeToContainMem(node, op1))))
     {
         // Though we have GT_ADD(op1=memOp, op2=non-memOp, we try to reorder the operands
         // as long as it is safe so that the following efficient code sequence is generated:
index 67e3658..c30f10f 100644 (file)
@@ -1939,6 +1939,44 @@ void LinearScan::identifyCandidatesExceptionDataflow()
     }
 }
 
+//------------------------------------------------------------------------
+// IsContainableMemoryOp: Checks whether this is a memory op that can be contained.
+//
+// Arguments:
+//    node        - the node of interest.
+//
+// Return value:
+//    True if this will definitely be a memory reference that could be contained.
+//
+// Notes:
+//    This differs from the isMemoryOp() method on GenTree because it checks for
+//    the case of doNotEnregister local. This won't include locals that
+//    for some other reason do not become register candidates, nor those that get
+//    spilled.
+//    Also, because we usually call this before we redo dataflow, any new lclVars
+//    introduced after the last dataflow analysis will not yet be marked lvTracked,
+//    so we don't use that.
+//
+bool LinearScan::isContainableMemoryOp(GenTree* node)
+{
+#ifdef _TARGET_XARCH_
+    if (node->isMemoryOp())
+    {
+        return true;
+    }
+    if (node->IsLocal())
+    {
+        if (!enregisterLocalVars)
+        {
+            return true;
+        }
+        LclVarDsc* varDsc = &compiler->lvaTable[node->AsLclVar()->gtLclNum];
+        return varDsc->lvDoNotEnregister;
+    }
+#endif // _TARGET_XARCH_
+    return false;
+}
+
 bool LinearScan::isRegCandidate(LclVarDsc* varDsc)
 {
     // We shouldn't be called if opt settings do not permit register variables.
@@ -4483,9 +4521,6 @@ void LinearScan::buildIntervals()
 {
     BasicBlock* block;
 
-    // start numbering at 1; 0 is the entry
-    LsraLocation currentLoc = 1;
-
     JITDUMP("\nbuildIntervals ========\n");
 
     // Now build (empty) records for all of the physical registers
@@ -4530,7 +4565,7 @@ void LinearScan::buildIntervals()
 
     // second part:
     JITDUMP("\nbuildIntervals second part ========\n");
-    currentLoc = 0;
+    LsraLocation currentLoc = 0;
 
     // Next, create ParamDef RefPositions for all the tracked parameters,
     // in order of their varIndex
@@ -10704,6 +10739,43 @@ void LinearScan::resolveEdge(BasicBlock*      fromBlock,
     }
 }
 
+//------------------------------------------------------------------------
+// GetIndirSourceCount: Get the source registers for an indirection that might be contained.
+//
+// Arguments:
+//    node      - The node of interest
+//
+// Return Value:
+//    The number of source registers used by the *parent* of this node.
+//
+int LinearScan::GetIndirSourceCount(GenTreeIndir* indirTree)
+{
+    GenTree* const addr = indirTree->gtOp1;
+    if (!addr->isContained())
+    {
+        return 1;
+    }
+    if (!addr->OperIs(GT_LEA))
+    {
+        return 0;
+    }
+
+    GenTreeAddrMode* const addrMode = addr->AsAddrMode();
+
+    unsigned srcCount = 0;
+    if ((addrMode->Base() != nullptr) && !addrMode->Base()->isContained())
+    {
+        srcCount++;
+    }
+    if (addrMode->Index() != nullptr)
+    {
+        // We never have a contained index.
+        assert(!addrMode->Index()->isContained());
+        srcCount++;
+    }
+    return srcCount;
+}
+
 void TreeNodeInfo::Initialize(LinearScan* lsra, GenTree* node, LsraLocation location)
 {
     regMaskTP dstCandidates;
index fa7da8d..767dff1 100644 (file)
@@ -672,6 +672,8 @@ public:
     // Used by Lowering when considering whether to split Longs, as well as by identifyCandidates().
     bool isRegCandidate(LclVarDsc* varDsc);
 
+    bool isContainableMemoryOp(GenTree* node);
+
 private:
     // Determine which locals are candidates for allocation
     void identifyCandidates();
@@ -1212,6 +1214,57 @@ private:
     // Set of large vector (TYP_SIMD32 on AVX) variables to consider for callee-save registers.
     VARSET_TP largeVectorCalleeSaveCandidateVars;
 #endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE
+
+    //-----------------------------------------------------------------------
+    // TreeNodeInfo methods
+    //-----------------------------------------------------------------------
+
+    void TreeNodeInfoInit(GenTree* stmt);
+
+    void TreeNodeInfoInitCheckByteable(GenTree* tree);
+
+    void SetDelayFree(GenTree* delayUseSrc);
+
+    void TreeNodeInfoInitSimple(GenTree* tree);
+    int GetOperandSourceCount(GenTree* node);
+    int GetIndirSourceCount(GenTreeIndir* indirTree);
+    void HandleFloatVarArgs(GenTreeCall* call, GenTree* argNode, bool* callHasFloatRegArgs);
+
+    void TreeNodeInfoInitStoreLoc(GenTree* tree);
+    void TreeNodeInfoInitReturn(GenTree* tree);
+    void TreeNodeInfoInitShiftRotate(GenTree* tree);
+    void TreeNodeInfoInitPutArgReg(GenTreeUnOp* node);
+    void TreeNodeInfoInitCall(GenTreeCall* call);
+    void TreeNodeInfoInitCmp(GenTreePtr tree);
+    void TreeNodeInfoInitStructArg(GenTreePtr structArg);
+    void TreeNodeInfoInitBlockStore(GenTreeBlk* blkNode);
+    void TreeNodeInfoInitModDiv(GenTree* tree);
+    void TreeNodeInfoInitIntrinsic(GenTree* tree);
+    void TreeNodeInfoInitStoreLoc(GenTreeLclVarCommon* tree);
+    void TreeNodeInfoInitIndir(GenTreeIndir* indirTree);
+    void TreeNodeInfoInitGCWriteBarrier(GenTree* tree);
+    void TreeNodeInfoInitCast(GenTree* tree);
+
+#ifdef _TARGET_X86_
+    bool ExcludeNonByteableRegisters(GenTree* tree);
+#endif
+
+#if defined(_TARGET_XARCH_)
+    // returns true if the tree can use the read-modify-write memory instruction form
+    bool isRMWRegOper(GenTreePtr tree);
+    void TreeNodeInfoInitMul(GenTreePtr tree);
+    void SetContainsAVXFlags(bool isFloatingPointType = true, unsigned sizeOfSIMDVector = 0);
+#endif // defined(_TARGET_XARCH_)
+
+#ifdef FEATURE_SIMD
+    void TreeNodeInfoInitSIMD(GenTreeSIMD* tree);
+#endif // FEATURE_SIMD
+
+    void TreeNodeInfoInitPutArgStk(GenTreePutArgStk* argNode);
+#ifdef _TARGET_ARM_
+    void TreeNodeInfoInitPutArgSplit(GenTreePutArgSplit* tree);
+#endif
+    void TreeNodeInfoInitLclHeap(GenTree* tree);
 };
 
 /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
index 8e313d7..2598e27 100644 (file)
@@ -38,11 +38,9 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
 // Return Value:
 //    None.
 //
-void Lowering::TreeNodeInfoInitReturn(GenTree* tree)
+void LinearScan::TreeNodeInfoInitReturn(GenTree* tree)
 {
     TreeNodeInfo* info     = &(tree->gtLsraInfo);
-    LinearScan*   l        = m_lsra;
-    Compiler*     compiler = comp;
     GenTree*      op1      = tree->gtGetOp1();
 
     assert(info->dstCount == 0);
@@ -52,8 +50,8 @@ void Lowering::TreeNodeInfoInitReturn(GenTree* tree)
         GenTree* loVal = op1->gtGetOp1();
         GenTree* hiVal = op1->gtGetOp2();
         info->srcCount = 2;
-        loVal->gtLsraInfo.setSrcCandidates(l, RBM_LNGRET_LO);
-        hiVal->gtLsraInfo.setSrcCandidates(l, RBM_LNGRET_HI);
+        loVal->gtLsraInfo.setSrcCandidates(this, RBM_LNGRET_LO);
+        hiVal->gtLsraInfo.setSrcCandidates(this, RBM_LNGRET_HI);
     }
     else
     {
@@ -98,16 +96,14 @@ void Lowering::TreeNodeInfoInitReturn(GenTree* tree)
 
         if (useCandidates != RBM_NONE)
         {
-            tree->gtOp.gtOp1->gtLsraInfo.setSrcCandidates(l, useCandidates);
+            tree->gtOp.gtOp1->gtLsraInfo.setSrcCandidates(this, useCandidates);
         }
     }
 }
 
-void Lowering::TreeNodeInfoInitLclHeap(GenTree* tree)
+void LinearScan::TreeNodeInfoInitLclHeap(GenTree* tree)
 {
     TreeNodeInfo* info     = &(tree->gtLsraInfo);
-    LinearScan*   l        = m_lsra;
-    Compiler*     compiler = comp;
 
     assert(info->dstCount == 1);
 
@@ -204,11 +200,8 @@ void Lowering::TreeNodeInfoInitLclHeap(GenTree* tree)
 //    requirements needed by LSRA to build the Interval Table (source,
 //    destination and internal [temp] register counts).
 //
-void Lowering::TreeNodeInfoInit(GenTree* tree)
+void LinearScan::TreeNodeInfoInit(GenTree* tree)
 {
-    LinearScan* l        = m_lsra;
-    Compiler*   compiler = comp;
-
     unsigned      kind         = tree->OperKind();
     TreeNodeInfo* info         = &(tree->gtLsraInfo);
     RegisterType  registerType = TypeGet(tree);
@@ -264,7 +257,7 @@ void Lowering::TreeNodeInfoInit(GenTree* tree)
                     assert(info->dstCount == 1);
                     break;
                 default:
-                    NYI_ARM("Lowering::TreeNodeInfoInit for GT_INTRINSIC");
+                    NYI_ARM("LinearScan::TreeNodeInfoInit for GT_INTRINSIC");
                     break;
             }
         }
@@ -307,15 +300,15 @@ void Lowering::TreeNodeInfoInit(GenTree* tree)
             // FloatToIntCast needs a temporary register
             if (varTypeIsFloating(castOpType) && varTypeIsIntOrI(tree))
             {
-                info->setInternalCandidates(m_lsra, RBM_ALLFLOAT);
+                info->setInternalCandidates(this, RBM_ALLFLOAT);
                 info->internalFloatCount     = 1;
                 info->isInternalRegDelayFree = true;
             }
 
-            CastInfo castInfo;
+            Lowering::CastInfo castInfo;
 
             // Get information about the cast.
-            getCastDescription(tree, &castInfo);
+            Lowering::getCastDescription(tree, &castInfo);
 
             if (castInfo.requiresOverflowCheck)
             {
@@ -506,8 +499,8 @@ void Lowering::TreeNodeInfoInit(GenTree* tree)
                 assert(tree->TypeGet() == TYP_INT);
 
                 info->srcCount = 1;
-                info->setSrcCandidates(l, RBM_INTRET);
-                tree->gtOp.gtOp1->gtLsraInfo.setSrcCandidates(l, RBM_INTRET);
+                info->setSrcCandidates(this, RBM_INTRET);
+                tree->gtOp.gtOp1->gtLsraInfo.setSrcCandidates(this, RBM_INTRET);
             }
             break;
 
@@ -639,7 +632,6 @@ void Lowering::TreeNodeInfoInit(GenTree* tree)
         case GT_STORE_BLK:
         case GT_STORE_OBJ:
         case GT_STORE_DYN_BLK:
-            LowerBlockStore(tree->AsBlk());
             TreeNodeInfoInitBlockStore(tree->AsBlk());
             break;
 
@@ -687,7 +679,7 @@ void Lowering::TreeNodeInfoInit(GenTree* tree)
         case GT_CATCH_ARG:
             info->srcCount = 0;
             assert(info->dstCount == 1);
-            info->setDstCandidates(l, RBM_EXCEPTION_OBJECT);
+            info->setDstCandidates(this, RBM_EXCEPTION_OBJECT);
             break;
 
         case GT_CLS_VAR:
index aae14ca..64695cb 100644 (file)
@@ -44,11 +44,8 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
 //    requirements needed by LSRA to build the Interval Table (source,
 //    destination and internal [temp] register counts).
 //
-void Lowering::TreeNodeInfoInit(GenTree* tree)
+void LinearScan::TreeNodeInfoInit(GenTree* tree)
 {
-    LinearScan* l        = m_lsra;
-    Compiler*   compiler = comp;
-
     unsigned      kind         = tree->OperKind();
     TreeNodeInfo* info         = &(tree->gtLsraInfo);
     RegisterType  registerType = TypeGet(tree);
@@ -152,8 +149,8 @@ void Lowering::TreeNodeInfoInit(GenTree* tree)
                 info->srcCount = 1;
                 assert(info->dstCount == 0);
 
-                info->setSrcCandidates(l, RBM_INTRET);
-                tree->gtOp.gtOp1->gtLsraInfo.setSrcCandidates(l, RBM_INTRET);
+                info->setSrcCandidates(this, RBM_INTRET);
+                tree->gtOp.gtOp1->gtLsraInfo.setSrcCandidates(this, RBM_INTRET);
             }
             break;
 
@@ -318,10 +315,9 @@ void Lowering::TreeNodeInfoInit(GenTree* tree)
 #endif // DEBUG
             // Some overflow checks need a temp reg
 
-            CastInfo castInfo;
-
+            Lowering::CastInfo castInfo;
             // Get information about the cast.
-            getCastDescription(tree, &castInfo);
+            Lowering::getCastDescription(tree, &castInfo);
 
             if (castInfo.requiresOverflowCheck)
             {
@@ -404,10 +400,10 @@ void Lowering::TreeNodeInfoInit(GenTree* tree)
         {
             // For a GT_ADDR, the child node should not be evaluated into a register
             GenTreePtr child = tree->gtOp.gtOp1;
-            assert(!l->isCandidateLocalRef(child));
-            MakeSrcContained(tree, child);
-            info->srcCount = 0;
+            assert(!isCandidateLocalRef(child));
+            assert(child->isContained());
             assert(info->dstCount == 1);
+            info->srcCount = 0;
         }
         break;
 
@@ -421,7 +417,6 @@ void Lowering::TreeNodeInfoInit(GenTree* tree)
         case GT_STORE_BLK:
         case GT_STORE_OBJ:
         case GT_STORE_DYN_BLK:
-            LowerBlockStore(tree->AsBlk());
             TreeNodeInfoInitBlockStore(tree->AsBlk());
             break;
 
@@ -651,7 +646,7 @@ void Lowering::TreeNodeInfoInit(GenTree* tree)
         case GT_CATCH_ARG:
             info->srcCount = 0;
             assert(info->dstCount == 1);
-            info->setDstCandidates(l, RBM_EXCEPTION_OBJECT);
+            info->setDstCandidates(this, RBM_EXCEPTION_OBJECT);
             break;
 
         case GT_CLS_VAR:
@@ -686,11 +681,9 @@ void Lowering::TreeNodeInfoInit(GenTree* tree)
 // Return Value:
 //    None.
 //
-void Lowering::TreeNodeInfoInitReturn(GenTree* tree)
+void LinearScan::TreeNodeInfoInitReturn(GenTree* tree)
 {
     TreeNodeInfo* info     = &(tree->gtLsraInfo);
-    LinearScan*   l        = m_lsra;
-    Compiler*     compiler = comp;
 
     GenTree*  op1           = tree->gtGetOp1();
     regMaskTP useCandidates = RBM_NONE;
@@ -735,7 +728,7 @@ void Lowering::TreeNodeInfoInitReturn(GenTree* tree)
 
     if (useCandidates != RBM_NONE)
     {
-        tree->gtOp.gtOp1->gtLsraInfo.setSrcCandidates(l, useCandidates);
+        tree->gtOp.gtOp1->gtLsraInfo.setSrcCandidates(this, useCandidates);
     }
 }
 
index 69bb4de..177d409 100644 (file)
@@ -39,7 +39,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
 //    - Setting the appropriate candidates for a store of a multi-reg call return value.
 //    - Handling of contained immediates.
 //
-void Lowering::TreeNodeInfoInitStoreLoc(GenTreeLclVarCommon* storeLoc)
+void LinearScan::TreeNodeInfoInitStoreLoc(GenTreeLclVarCommon* storeLoc)
 {
     TreeNodeInfo* info = &(storeLoc->gtLsraInfo);
     GenTree*      op1  = storeLoc->gtGetOp1();
@@ -70,8 +70,8 @@ void Lowering::TreeNodeInfoInitStoreLoc(GenTreeLclVarCommon* storeLoc)
         info->srcCount              = retTypeDesc->GetReturnRegCount();
 
         // Call node srcCandidates = Bitwise-OR(allregs(GetReturnRegType(i))) for all i=0..RetRegCount-1
-        regMaskTP srcCandidates = m_lsra->allMultiRegCallNodeRegs(call);
-        op1->gtLsraInfo.setSrcCandidates(m_lsra, srcCandidates);
+        regMaskTP srcCandidates = allMultiRegCallNodeRegs(call);
+        op1->gtLsraInfo.setSrcCandidates(this, srcCandidates);
     }
     else
     {
@@ -88,7 +88,7 @@ void Lowering::TreeNodeInfoInitStoreLoc(GenTreeLclVarCommon* storeLoc)
 // Return Value:
 //    None.
 //
-void Lowering::TreeNodeInfoInitCmp(GenTreePtr tree)
+void LinearScan::TreeNodeInfoInitCmp(GenTreePtr tree)
 {
     TreeNodeInfo* info = &(tree->gtLsraInfo);
 
@@ -103,7 +103,7 @@ void Lowering::TreeNodeInfoInitCmp(GenTreePtr tree)
     }
 }
 
-void Lowering::TreeNodeInfoInitGCWriteBarrier(GenTree* tree)
+void LinearScan::TreeNodeInfoInitGCWriteBarrier(GenTree* tree)
 {
     GenTreePtr dst  = tree;
     GenTreePtr addr = tree->gtOp.gtOp1;
@@ -137,15 +137,15 @@ void Lowering::TreeNodeInfoInitGCWriteBarrier(GenTree* tree)
     // the 'addr' goes into x14 (REG_WRITE_BARRIER_DST_BYREF)
     // the 'src'  goes into x15 (REG_WRITE_BARRIER)
     //
-    addr->gtLsraInfo.setSrcCandidates(m_lsra, RBM_WRITE_BARRIER_DST_BYREF);
-    src->gtLsraInfo.setSrcCandidates(m_lsra, RBM_WRITE_BARRIER);
+    addr->gtLsraInfo.setSrcCandidates(this, RBM_WRITE_BARRIER_DST_BYREF);
+    src->gtLsraInfo.setSrcCandidates(this, RBM_WRITE_BARRIER);
 #else
     // For the standard JIT Helper calls
     // op1 goes into REG_ARG_0 and
     // op2 goes into REG_ARG_1
     //
-    addr->gtLsraInfo.setSrcCandidates(m_lsra, RBM_ARG_0);
-    src->gtLsraInfo.setSrcCandidates(m_lsra, RBM_ARG_1);
+    addr->gtLsraInfo.setSrcCandidates(this, RBM_ARG_0);
+    src->gtLsraInfo.setSrcCandidates(this, RBM_ARG_1);
 #endif // NOGC_WRITE_BARRIERS
 
     // Both src and dst must reside in a register, which they should since we haven't set
@@ -161,7 +161,7 @@ void Lowering::TreeNodeInfoInitGCWriteBarrier(GenTree* tree)
 // Arguments:
 //    indirTree - GT_IND, GT_STOREIND, block node or GT_NULLCHECK gentree node
 //
-void Lowering::TreeNodeInfoInitIndir(GenTreeIndir* indirTree)
+void LinearScan::TreeNodeInfoInitIndir(GenTreeIndir* indirTree)
 {
     // If this is the rhs of a block copy (i.e. non-enregisterable struct),
     // it has no register requirements.
@@ -234,10 +234,9 @@ void Lowering::TreeNodeInfoInitIndir(GenTreeIndir* indirTree)
 // Return Value:
 //    None.
 //
-void Lowering::TreeNodeInfoInitShiftRotate(GenTree* tree)
+void LinearScan::TreeNodeInfoInitShiftRotate(GenTree* tree)
 {
     TreeNodeInfo* info = &(tree->gtLsraInfo);
-    LinearScan*   l    = m_lsra;
 
     GenTreePtr shiftBy = tree->gtOp.gtOp2;
     info->srcCount     = shiftBy->isContained() ? 1 : 2;
@@ -284,7 +283,7 @@ void Lowering::TreeNodeInfoInitShiftRotate(GenTree* tree)
 // Return Value:
 //    None.
 //
-void Lowering::TreeNodeInfoInitPutArgReg(GenTreeUnOp* node)
+void LinearScan::TreeNodeInfoInitPutArgReg(GenTreeUnOp* node)
 {
     assert(node != nullptr);
     assert(node->OperIsPutArgReg());
@@ -304,12 +303,12 @@ void Lowering::TreeNodeInfoInitPutArgReg(GenTreeUnOp* node)
         argMask |= genRegMask(REG_NEXT(argReg));
     }
 #endif // ARM_SOFTFP
-    node->gtLsraInfo.setDstCandidates(m_lsra, argMask);
-    node->gtLsraInfo.setSrcCandidates(m_lsra, argMask);
+    node->gtLsraInfo.setDstCandidates(this, argMask);
+    node->gtLsraInfo.setSrcCandidates(this, argMask);
 
     // To avoid redundant moves, have the argument operand computed in the
     // register in which the argument is passed to the call.
-    node->gtOp.gtOp1->gtLsraInfo.setSrcCandidates(m_lsra, m_lsra->getUseCandidates(node));
+    node->gtOp.gtOp1->gtLsraInfo.setSrcCandidates(this, getUseCandidates(node));
 }
 
 //------------------------------------------------------------------------
@@ -328,7 +327,7 @@ void Lowering::TreeNodeInfoInitPutArgReg(GenTreeUnOp* node)
 //    Since the integer register is not associated with the arg node, we will reserve it as
 //    an internal register on the call so that it is not used during the evaluation of the call node
 //    (e.g. for the target).
-void Lowering::HandleFloatVarArgs(GenTreeCall* call, GenTree* argNode, bool* callHasFloatRegArgs)
+void LinearScan::HandleFloatVarArgs(GenTreeCall* call, GenTree* argNode, bool* callHasFloatRegArgs)
 {
 #if FEATURE_VARARG
     if (call->IsVarargs() && varTypeIsFloating(argNode))
@@ -336,9 +335,9 @@ void Lowering::HandleFloatVarArgs(GenTreeCall* call, GenTree* argNode, bool* cal
         *callHasFloatRegArgs = true;
 
         regNumber argReg    = argNode->gtRegNum;
-        regNumber targetReg = comp->getCallArgIntRegister(argReg);
+        regNumber targetReg = compiler->getCallArgIntRegister(argReg);
         call->gtLsraInfo.setInternalIntCount(call->gtLsraInfo.internalIntCount + 1);
-        call->gtLsraInfo.addInternalCandidates(m_lsra, genRegMask(targetReg));
+        call->gtLsraInfo.addInternalCandidates(this, genRegMask(targetReg));
     }
 #endif // FEATURE_VARARG
 }
@@ -352,11 +351,9 @@ void Lowering::HandleFloatVarArgs(GenTreeCall* call, GenTree* argNode, bool* cal
 // Return Value:
 //    None.
 //
-void Lowering::TreeNodeInfoInitCall(GenTreeCall* call)
+void LinearScan::TreeNodeInfoInitCall(GenTreeCall* call)
 {
     TreeNodeInfo*   info              = &(call->gtLsraInfo);
-    LinearScan*     l                 = m_lsra;
-    Compiler*       compiler          = comp;
     bool            hasMultiRegRetVal = false;
     ReturnTypeDesc* retTypeDesc       = nullptr;
 
@@ -407,7 +404,7 @@ void Lowering::TreeNodeInfoInitCall(GenTreeCall* call)
 #ifdef _TARGET_ARM64_
             // Fast tail call - make sure that call target is always computed in IP0
             // so that epilog sequence can generate "br xip0" to achieve fast tail call.
-            ctrlExpr->gtLsraInfo.setSrcCandidates(l, genRegMask(REG_IP0));
+            ctrlExpr->gtLsraInfo.setSrcCandidates(this, genRegMask(REG_IP0));
 #endif // _TARGET_ARM64_
         }
     }
@@ -427,26 +424,26 @@ void Lowering::TreeNodeInfoInitCall(GenTreeCall* call)
     {
         // The ARM CORINFO_HELP_INIT_PINVOKE_FRAME helper uses a custom calling convention that returns with
         // TCB in REG_PINVOKE_TCB. fgMorphCall() sets the correct argument registers.
-        info->setDstCandidates(l, RBM_PINVOKE_TCB);
+        info->setDstCandidates(this, RBM_PINVOKE_TCB);
     }
     else
 #endif // _TARGET_ARM_
         if (hasMultiRegRetVal)
     {
         assert(retTypeDesc != nullptr);
-        info->setDstCandidates(l, retTypeDesc->GetABIReturnRegs());
+        info->setDstCandidates(this, retTypeDesc->GetABIReturnRegs());
     }
     else if (varTypeIsFloating(registerType))
     {
-        info->setDstCandidates(l, RBM_FLOATRET);
+        info->setDstCandidates(this, RBM_FLOATRET);
     }
     else if (registerType == TYP_LONG)
     {
-        info->setDstCandidates(l, RBM_LNGRET);
+        info->setDstCandidates(this, RBM_LNGRET);
     }
     else
     {
-        info->setDstCandidates(l, RBM_INTRET);
+        info->setDstCandidates(this, RBM_INTRET);
     }
 
     // First, count reg args
@@ -594,7 +591,7 @@ void Lowering::TreeNodeInfoInitCall(GenTreeCall* call)
         // Don't assign the call target to any of the argument registers because
         // we will use them to also pass floating point arguments as required
         // by Arm64 ABI.
-        ctrlExpr->gtLsraInfo.setSrcCandidates(l, l->allRegs(TYP_INT) & ~(RBM_ARG_REGS));
+        ctrlExpr->gtLsraInfo.setSrcCandidates(this, allRegs(TYP_INT) & ~(RBM_ARG_REGS));
     }
 
 #ifdef _TARGET_ARM_
@@ -619,7 +616,7 @@ void Lowering::TreeNodeInfoInitCall(GenTreeCall* call)
 // Notes:
 //    Set the child node(s) to be contained when we have a multireg arg
 //
-void Lowering::TreeNodeInfoInitPutArgStk(GenTreePutArgStk* argNode)
+void LinearScan::TreeNodeInfoInitPutArgStk(GenTreePutArgStk* argNode)
 {
     assert(argNode->gtOper == GT_PUTARG_STK);
 
@@ -702,7 +699,7 @@ void Lowering::TreeNodeInfoInitPutArgStk(GenTreePutArgStk* argNode)
 // Notes:
 //    Set the child node(s) to be contained
 //
-void Lowering::TreeNodeInfoInitPutArgSplit(GenTreePutArgSplit* argNode)
+void LinearScan::TreeNodeInfoInitPutArgSplit(GenTreePutArgSplit* argNode)
 {
     assert(argNode->gtOper == GT_PUTARG_SPLIT);
 
@@ -717,8 +714,8 @@ void Lowering::TreeNodeInfoInitPutArgSplit(GenTreePutArgSplit* argNode)
     {
         argMask |= genRegMask((regNumber)((unsigned)argReg + i));
     }
-    argNode->gtLsraInfo.setDstCandidates(m_lsra, argMask);
-    argNode->gtLsraInfo.setSrcCandidates(m_lsra, argMask);
+    argNode->gtLsraInfo.setDstCandidates(this, argMask);
+    argNode->gtLsraInfo.setSrcCandidates(this, argMask);
 
     if (putArgChild->OperGet() == GT_FIELD_LIST)
     {
@@ -736,7 +733,7 @@ void Lowering::TreeNodeInfoInitPutArgSplit(GenTreePutArgSplit* argNode)
             if (idx < argNode->gtNumRegs)
             {
                 GenTreePtr node = fieldListPtr->gtGetOp1();
-                node->gtLsraInfo.setSrcCandidates(m_lsra, genRegMask((regNumber)((unsigned)argReg + idx)));
+                node->gtLsraInfo.setSrcCandidates(this, genRegMask((regNumber)((unsigned)argReg + idx)));
             }
             else
             {
@@ -754,7 +751,7 @@ void Lowering::TreeNodeInfoInitPutArgSplit(GenTreePutArgSplit* argNode)
         // We can use a ldr/str sequence so we need an internal register
         argNode->gtLsraInfo.internalIntCount = 1;
         regMaskTP internalMask               = RBM_ALLINT & ~argMask;
-        argNode->gtLsraInfo.setInternalCandidates(m_lsra, internalMask);
+        argNode->gtLsraInfo.setInternalCandidates(this, internalMask);
 
         GenTreePtr objChild = putArgChild->gtOp.gtOp1;
         if (objChild->OperGet() == GT_LCL_VAR_ADDR)
@@ -782,13 +779,11 @@ void Lowering::TreeNodeInfoInitPutArgSplit(GenTreePutArgSplit* argNode)
 // Return Value:
 //    None.
 //
-void Lowering::TreeNodeInfoInitBlockStore(GenTreeBlk* blkNode)
+void LinearScan::TreeNodeInfoInitBlockStore(GenTreeBlk* blkNode)
 {
     GenTree*    dstAddr  = blkNode->Addr();
     unsigned    size     = blkNode->gtBlkSize;
     GenTree*    source   = blkNode->Data();
-    LinearScan* l        = m_lsra;
-    Compiler*   compiler = comp;
 
     // Sources are dest address and initVal or source.
     // We may require an additional source or temp register for the size.
@@ -823,14 +818,14 @@ void Lowering::TreeNodeInfoInitBlockStore(GenTreeBlk* blkNode)
         {
             assert(blkNode->gtBlkOpKind == GenTreeBlk::BlkOpKindHelper);
             // The helper follows the regular ABI.
-            dstAddr->gtLsraInfo.setSrcCandidates(l, RBM_ARG_0);
+            dstAddr->gtLsraInfo.setSrcCandidates(this, RBM_ARG_0);
             assert(!initVal->isContained());
             blkNode->gtLsraInfo.srcCount++;
-            initVal->gtLsraInfo.setSrcCandidates(l, RBM_ARG_1);
+            initVal->gtLsraInfo.setSrcCandidates(this, RBM_ARG_1);
             if (size != 0)
             {
                 // Reserve a temp register for the block size argument.
-                blkNode->gtLsraInfo.setInternalCandidates(l, RBM_ARG_2);
+                blkNode->gtLsraInfo.setInternalCandidates(this, RBM_ARG_2);
                 blkNode->gtLsraInfo.internalIntCount = 1;
             }
             else
@@ -839,7 +834,7 @@ void Lowering::TreeNodeInfoInitBlockStore(GenTreeBlk* blkNode)
                 noway_assert(blkNode->gtOper == GT_STORE_DYN_BLK);
                 blkNode->gtLsraInfo.setSrcCount(3);
                 GenTree* sizeNode = blkNode->AsDynBlk()->gtDynamicSize;
-                sizeNode->gtLsraInfo.setSrcCandidates(l, RBM_ARG_2);
+                sizeNode->gtLsraInfo.setSrcCandidates(this, RBM_ARG_2);
             }
         }
     }
@@ -867,17 +862,17 @@ void Lowering::TreeNodeInfoInitBlockStore(GenTreeBlk* blkNode)
 
             // We can't use the special Write Barrier registers, so exclude them from the mask
             regMaskTP internalIntCandidates = RBM_ALLINT & ~(RBM_WRITE_BARRIER_DST_BYREF | RBM_WRITE_BARRIER_SRC_BYREF);
-            blkNode->gtLsraInfo.setInternalCandidates(l, internalIntCandidates);
+            blkNode->gtLsraInfo.setInternalCandidates(this, internalIntCandidates);
 
             // If we have a dest address we want it in RBM_WRITE_BARRIER_DST_BYREF.
-            dstAddr->gtLsraInfo.setSrcCandidates(l, RBM_WRITE_BARRIER_DST_BYREF);
+            dstAddr->gtLsraInfo.setSrcCandidates(this, RBM_WRITE_BARRIER_DST_BYREF);
 
             // If we have a source address we want it in REG_WRITE_BARRIER_SRC_BYREF.
             // Otherwise, if it is a local, codegen will put its address in REG_WRITE_BARRIER_SRC_BYREF,
             // which is killed by a StoreObj (and thus needn't be reserved).
             if (srcAddrOrFill != nullptr)
             {
-                srcAddrOrFill->gtLsraInfo.setSrcCandidates(l, RBM_WRITE_BARRIER_SRC_BYREF);
+                srcAddrOrFill->gtLsraInfo.setSrcCandidates(this, RBM_WRITE_BARRIER_SRC_BYREF);
             }
         }
         else
@@ -911,11 +906,11 @@ void Lowering::TreeNodeInfoInitBlockStore(GenTreeBlk* blkNode)
             else
             {
                 assert(blkNode->gtBlkOpKind == GenTreeBlk::BlkOpKindHelper);
-                dstAddr->gtLsraInfo.setSrcCandidates(l, RBM_ARG_0);
+                dstAddr->gtLsraInfo.setSrcCandidates(this, RBM_ARG_0);
                 // The srcAddr goes in arg1.
                 if (srcAddrOrFill != nullptr)
                 {
-                    srcAddrOrFill->gtLsraInfo.setSrcCandidates(l, RBM_ARG_1);
+                    srcAddrOrFill->gtLsraInfo.setSrcCandidates(this, RBM_ARG_1);
                 }
                 if (size != 0)
                 {
@@ -929,13 +924,13 @@ void Lowering::TreeNodeInfoInitBlockStore(GenTreeBlk* blkNode)
                     assert(blkNode->gtOper == GT_STORE_DYN_BLK);
                     blkNode->gtLsraInfo.srcCount++;
                     GenTree* blockSize = blkNode->AsDynBlk()->gtDynamicSize;
-                    blockSize->gtLsraInfo.setSrcCandidates(l, RBM_ARG_2);
+                    blockSize->gtLsraInfo.setSrcCandidates(this, RBM_ARG_2);
                 }
             }
             if (internalIntCount != 0)
             {
                 blkNode->gtLsraInfo.internalIntCount = internalIntCount;
-                blkNode->gtLsraInfo.setInternalCandidates(l, internalIntCandidates);
+                blkNode->gtLsraInfo.setInternalCandidates(this, internalIntCandidates);
             }
         }
         blkNode->gtLsraInfo.srcCount += GetOperandSourceCount(source);
@@ -951,7 +946,7 @@ void Lowering::TreeNodeInfoInitBlockStore(GenTreeBlk* blkNode)
 // Return Value:
 //    The number of source registers used by the *parent* of this node.
 //
-int Lowering::GetOperandSourceCount(GenTree* node)
+int LinearScan::GetOperandSourceCount(GenTree* node)
 {
     if (!node->isContained())
     {
index ef9eebf..8f7aca8 100644 (file)
@@ -39,7 +39,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
 //    - Setting the appropriate candidates for a store of a multi-reg call return value.
 //    - Requesting an internal register for SIMD12 stores.
 //
-void Lowering::TreeNodeInfoInitStoreLoc(GenTreeLclVarCommon* storeLoc)
+void LinearScan::TreeNodeInfoInitStoreLoc(GenTreeLclVarCommon* storeLoc)
 {
     TreeNodeInfo* info = &(storeLoc->gtLsraInfo);
     assert(info->dstCount == 0);
@@ -70,8 +70,8 @@ void Lowering::TreeNodeInfoInitStoreLoc(GenTreeLclVarCommon* storeLoc)
         info->srcCount              = retTypeDesc->GetReturnRegCount();
 
         // Call node srcCandidates = Bitwise-OR(allregs(GetReturnRegType(i))) for all i=0..RetRegCount-1
-        regMaskTP srcCandidates = m_lsra->allMultiRegCallNodeRegs(call);
-        op1->gtLsraInfo.setSrcCandidates(m_lsra, srcCandidates);
+        regMaskTP srcCandidates = allMultiRegCallNodeRegs(call);
+        op1->gtLsraInfo.setSrcCandidates(this, srcCandidates);
         return;
     }
     else
@@ -86,7 +86,7 @@ void Lowering::TreeNodeInfoInitStoreLoc(GenTreeLclVarCommon* storeLoc)
         {
             // Need an additional register to extract upper 4 bytes of Vector3.
             info->internalFloatCount = 1;
-            info->setInternalCandidates(m_lsra, m_lsra->allSIMDRegs());
+            info->setInternalCandidates(this, allSIMDRegs());
         }
         return;
     }
@@ -108,11 +108,8 @@ void Lowering::TreeNodeInfoInitStoreLoc(GenTreeLclVarCommon* storeLoc)
 //    requirements needed by LSRA to build the Interval Table (source,
 //    destination and internal [temp] register counts).
 //
-void Lowering::TreeNodeInfoInit(GenTree* tree)
+void LinearScan::TreeNodeInfoInit(GenTree* tree)
 {
-    LinearScan* l        = m_lsra;
-    Compiler*   compiler = comp;
-
     TreeNodeInfo* info = &(tree->gtLsraInfo);
 
     if (tree->isContained())
@@ -168,7 +165,7 @@ void Lowering::TreeNodeInfoInit(GenTree* tree)
                 // because both targetReg and internal reg will be in use at the same time.
                 info->internalFloatCount     = 1;
                 info->isInternalRegDelayFree = true;
-                info->setInternalCandidates(m_lsra, m_lsra->allSIMDRegs());
+                info->setInternalCandidates(this, allSIMDRegs());
             }
 #endif
             break;
@@ -237,8 +234,8 @@ void Lowering::TreeNodeInfoInit(GenTree* tree)
 
                 info->srcCount = 1;
 
-                info->setSrcCandidates(l, RBM_INTRET);
-                tree->gtOp.gtOp1->gtLsraInfo.setSrcCandidates(l, RBM_INTRET);
+                info->setSrcCandidates(this, RBM_INTRET);
+                tree->gtOp.gtOp1->gtLsraInfo.setSrcCandidates(this, RBM_INTRET);
             }
             break;
 
@@ -290,7 +287,7 @@ void Lowering::TreeNodeInfoInit(GenTree* tree)
             info->srcCount = 0;
             assert(info->dstCount == 1);
 #ifdef _TARGET_X86_
-            info->setDstCandidates(m_lsra, RBM_BYTE_REGS);
+            info->setDstCandidates(this, RBM_BYTE_REGS);
 #endif // _TARGET_X86_
             break;
 
@@ -355,7 +352,7 @@ void Lowering::TreeNodeInfoInit(GenTree* tree)
             info->srcCount = tree->gtOp.gtOp1->isContained() ? 0 : 1;
             assert(info->dstCount == 0);
             info->internalIntCount = 1;
-            info->setInternalCandidates(l, l->allRegs(TYP_INT));
+            info->setInternalCandidates(this, allRegs(TYP_INT));
             break;
 
         case GT_MOD:
@@ -415,7 +412,7 @@ void Lowering::TreeNodeInfoInit(GenTree* tree)
             if (varTypeIsFloating(tree))
             {
                 info->internalFloatCount = 1;
-                info->setInternalCandidates(l, l->internalFloatRegCandidates());
+                info->setInternalCandidates(this, internalFloatRegCandidates());
             }
             break;
 
@@ -459,10 +456,10 @@ void Lowering::TreeNodeInfoInit(GenTree* tree)
 
             // comparand is preferenced to RAX.
             // Remaining two operands can be in any reg other than RAX.
-            tree->gtCmpXchg.gtOpComparand->gtLsraInfo.setSrcCandidates(l, RBM_RAX);
-            tree->gtCmpXchg.gtOpLocation->gtLsraInfo.setSrcCandidates(l, l->allRegs(TYP_INT) & ~RBM_RAX);
-            tree->gtCmpXchg.gtOpValue->gtLsraInfo.setSrcCandidates(l, l->allRegs(TYP_INT) & ~RBM_RAX);
-            tree->gtLsraInfo.setDstCandidates(l, RBM_RAX);
+            tree->gtCmpXchg.gtOpComparand->gtLsraInfo.setSrcCandidates(this, RBM_RAX);
+            tree->gtCmpXchg.gtOpLocation->gtLsraInfo.setSrcCandidates(this, allRegs(TYP_INT) & ~RBM_RAX);
+            tree->gtCmpXchg.gtOpValue->gtLsraInfo.setSrcCandidates(this, allRegs(TYP_INT) & ~RBM_RAX);
+            tree->gtLsraInfo.setDstCandidates(this, RBM_RAX);
             break;
 
         case GT_LOCKADD:
@@ -483,7 +480,7 @@ void Lowering::TreeNodeInfoInit(GenTree* tree)
         {
             // For a GT_ADDR, the child node should not be evaluated into a register
             GenTreePtr child = tree->gtOp.gtOp1;
-            assert(!l->isCandidateLocalRef(child));
+            assert(!isCandidateLocalRef(child));
             assert(child->isContained());
             assert(info->dstCount == 1);
             info->srcCount = 0;
@@ -602,7 +599,7 @@ void Lowering::TreeNodeInfoInit(GenTree* tree)
         case GT_CATCH_ARG:
             info->srcCount = 0;
             assert(info->dstCount == 1);
-            info->setDstCandidates(l, RBM_EXCEPTION_OBJECT);
+            info->setDstCandidates(this, RBM_EXCEPTION_OBJECT);
             break;
 
 #if !FEATURE_EH_FUNCLETS
@@ -712,7 +709,7 @@ void Lowering::TreeNodeInfoInit(GenTree* tree)
                 delayUseSrc = op1;
             }
             else if ((op2 != nullptr) &&
-                     (!tree->OperIsCommutative() || (IsContainableMemoryOp(op2) && (op2->gtLsraInfo.srcCount == 0))))
+                     (!tree->OperIsCommutative() || (isContainableMemoryOp(op2) && (op2->gtLsraInfo.srcCount == 0))))
             {
                 delayUseSrc = op2;
             }
@@ -730,7 +727,7 @@ void Lowering::TreeNodeInfoInit(GenTree* tree)
     assert((info->dstCount < 2) || (tree->IsMultiRegCall() && info->dstCount == MAX_RET_REG_COUNT));
 }
 
-void Lowering::SetDelayFree(GenTree* delayUseSrc)
+void LinearScan::SetDelayFree(GenTree* delayUseSrc)
 {
     // If delayUseSrc is an indirection and it doesn't produce a result, then we need to set "delayFree'
     // on the base & index, if any.
@@ -764,10 +761,9 @@ void Lowering::SetDelayFree(GenTree* delayUseSrc)
 // Return Value:
 //    None.
 //
-void Lowering::TreeNodeInfoInitCheckByteable(GenTree* tree)
+void LinearScan::TreeNodeInfoInitCheckByteable(GenTree* tree)
 {
 #ifdef _TARGET_X86_
-    LinearScan*   l    = m_lsra;
     TreeNodeInfo* info = &(tree->gtLsraInfo);
 
     // Exclude RBM_NON_BYTE_REGS from dst candidates of tree node and src candidates of operands
@@ -782,9 +778,9 @@ void Lowering::TreeNodeInfoInitCheckByteable(GenTree* tree)
         regMaskTP regMask;
         if (info->dstCount > 0)
         {
-            regMask = info->getDstCandidates(l);
+            regMask = info->getDstCandidates(this);
             assert(regMask != RBM_NONE);
-            info->setDstCandidates(l, regMask & ~RBM_NON_BYTE_REGS);
+            info->setDstCandidates(this, regMask & ~RBM_NON_BYTE_REGS);
         }
 
         if (tree->OperIsSimple())
@@ -795,9 +791,9 @@ void Lowering::TreeNodeInfoInitCheckByteable(GenTree* tree)
                 // No need to set src candidates on a contained child operand.
                 if (!op->isContained())
                 {
-                    regMask = op->gtLsraInfo.getSrcCandidates(l);
+                    regMask = op->gtLsraInfo.getSrcCandidates(this);
                     assert(regMask != RBM_NONE);
-                    op->gtLsraInfo.setSrcCandidates(l, regMask & ~RBM_NON_BYTE_REGS);
+                    op->gtLsraInfo.setSrcCandidates(this, regMask & ~RBM_NON_BYTE_REGS);
                 }
             }
 
@@ -806,9 +802,9 @@ void Lowering::TreeNodeInfoInitCheckByteable(GenTree* tree)
                 op = tree->gtOp.gtOp2;
                 if (!op->isContained())
                 {
-                    regMask = op->gtLsraInfo.getSrcCandidates(l);
+                    regMask = op->gtLsraInfo.getSrcCandidates(this);
                     assert(regMask != RBM_NONE);
-                    op->gtLsraInfo.setSrcCandidates(l, regMask & ~RBM_NON_BYTE_REGS);
+                    op->gtLsraInfo.setSrcCandidates(this, regMask & ~RBM_NON_BYTE_REGS);
                 }
             }
         }
@@ -816,6 +812,48 @@ void Lowering::TreeNodeInfoInitCheckByteable(GenTree* tree)
 #endif //_TARGET_X86_
 }
 
+//------------------------------------------------------------------------------
+// isRMWRegOper: Can this binary tree node be used in a Read-Modify-Write format
+//
+// Arguments:
+//    tree      - a binary tree node
+//
+// Return Value:
+//    Returns true if we can use the read-modify-write instruction form
+//
+// Notes:
+//    This is used to determine whether to preference the source to the destination register.
+//
+bool LinearScan::isRMWRegOper(GenTreePtr tree)
+{
+    // TODO-XArch-CQ: Make this more accurate.
+    // For now, We assume that most binary operators are of the RMW form.
+    assert(tree->OperIsBinary());
+
+    if (tree->OperIsCompare() || tree->OperIs(GT_CMP))
+    {
+        return false;
+    }
+
+    switch (tree->OperGet())
+    {
+        // These Opers either support a three op form (e.g. GT_LEA), or do not read/write their first operand
+        case GT_LEA:
+        case GT_STOREIND:
+        case GT_ARR_INDEX:
+        case GT_STORE_BLK:
+        case GT_STORE_OBJ:
+            return false;
+
+        // x86/x64 does support a three op multiply when op2|op1 is a contained immediate
+        case GT_MUL:
+            return (!tree->gtOp.gtOp2->isContainedIntOrIImmed() && !tree->gtOp.gtOp1->isContainedIntOrIImmed());
+
+        default:
+            return true;
+    }
+}
+
 //------------------------------------------------------------------------
 // TreeNodeInfoInitSimple: Sets the srcCount for all the trees
 // without special handling based on the tree node type.
@@ -826,7 +864,7 @@ void Lowering::TreeNodeInfoInitCheckByteable(GenTree* tree)
 // Return Value:
 //    None.
 //
-void Lowering::TreeNodeInfoInitSimple(GenTree* tree)
+void LinearScan::TreeNodeInfoInitSimple(GenTree* tree)
 {
     TreeNodeInfo* info = &(tree->gtLsraInfo);
     if (tree->isContained())
@@ -862,11 +900,9 @@ void Lowering::TreeNodeInfoInitSimple(GenTree* tree)
 // Return Value:
 //    None.
 //
-void Lowering::TreeNodeInfoInitReturn(GenTree* tree)
+void LinearScan::TreeNodeInfoInitReturn(GenTree* tree)
 {
     TreeNodeInfo* info     = &(tree->gtLsraInfo);
-    LinearScan*   l        = m_lsra;
-    Compiler*     compiler = comp;
     GenTree*      op1      = tree->gtGetOp1();
 
 #if !defined(_TARGET_64BIT_)
@@ -876,8 +912,8 @@ void Lowering::TreeNodeInfoInitReturn(GenTree* tree)
         GenTree* loVal = op1->gtGetOp1();
         GenTree* hiVal = op1->gtGetOp2();
         info->srcCount = 2;
-        loVal->gtLsraInfo.setSrcCandidates(l, RBM_LNGRET_LO);
-        hiVal->gtLsraInfo.setSrcCandidates(l, RBM_LNGRET_HI);
+        loVal->gtLsraInfo.setSrcCandidates(this, RBM_LNGRET_LO);
+        hiVal->gtLsraInfo.setSrcCandidates(this, RBM_LNGRET_HI);
         assert(info->dstCount == 0);
     }
     else
@@ -928,7 +964,7 @@ void Lowering::TreeNodeInfoInitReturn(GenTree* tree)
 
         if (useCandidates != RBM_NONE)
         {
-            op1->gtLsraInfo.setSrcCandidates(l, useCandidates);
+            op1->gtLsraInfo.setSrcCandidates(this, useCandidates);
         }
     }
 }
@@ -942,10 +978,9 @@ void Lowering::TreeNodeInfoInitReturn(GenTree* tree)
 // Return Value:
 //    None.
 //
-void Lowering::TreeNodeInfoInitShiftRotate(GenTree* tree)
+void LinearScan::TreeNodeInfoInitShiftRotate(GenTree* tree)
 {
     TreeNodeInfo* info = &(tree->gtLsraInfo);
-    LinearScan*   l    = m_lsra;
 
     // For shift operations, we need that the number
     // of bits moved gets stored in CL in case
@@ -957,9 +992,9 @@ void Lowering::TreeNodeInfoInitShiftRotate(GenTree* tree)
     // We will allow whatever can be encoded - hope you know what you are doing.
     if (!shiftBy->isContained())
     {
-        source->gtLsraInfo.setSrcCandidates(l, l->allRegs(TYP_INT) & ~RBM_RCX);
-        shiftBy->gtLsraInfo.setSrcCandidates(l, RBM_RCX);
-        info->setDstCandidates(l, l->allRegs(TYP_INT) & ~RBM_RCX);
+        source->gtLsraInfo.setSrcCandidates(this, allRegs(TYP_INT) & ~RBM_RCX);
+        shiftBy->gtLsraInfo.setSrcCandidates(this, RBM_RCX);
+        info->setDstCandidates(this, allRegs(TYP_INT) & ~RBM_RCX);
         if (!tree->isContained())
         {
             info->srcCount = 2;
@@ -1022,7 +1057,7 @@ void Lowering::TreeNodeInfoInitShiftRotate(GenTree* tree)
 // Return Value:
 //    None.
 //
-void Lowering::TreeNodeInfoInitPutArgReg(GenTreeUnOp* node)
+void LinearScan::TreeNodeInfoInitPutArgReg(GenTreeUnOp* node)
 {
     assert(node != nullptr);
     assert(node->OperIsPutArgReg());
@@ -1032,12 +1067,12 @@ void Lowering::TreeNodeInfoInitPutArgReg(GenTreeUnOp* node)
 
     // Set the register requirements for the node.
     const regMaskTP argMask = genRegMask(argReg);
-    node->gtLsraInfo.setDstCandidates(m_lsra, argMask);
-    node->gtLsraInfo.setSrcCandidates(m_lsra, argMask);
+    node->gtLsraInfo.setDstCandidates(this, argMask);
+    node->gtLsraInfo.setSrcCandidates(this, argMask);
 
     // To avoid redundant moves, have the argument operand computed in the
     // register in which the argument is passed to the call.
-    node->gtOp.gtOp1->gtLsraInfo.setSrcCandidates(m_lsra, m_lsra->getUseCandidates(node));
+    node->gtOp.gtOp1->gtLsraInfo.setSrcCandidates(this, getUseCandidates(node));
 }
 
 //------------------------------------------------------------------------
@@ -1056,7 +1091,7 @@ void Lowering::TreeNodeInfoInitPutArgReg(GenTreeUnOp* node)
 //    Since the integer register is not associated with the arg node, we will reserve it as
 //    an internal register on the call so that it is not used during the evaluation of the call node
 //    (e.g. for the target).
-void Lowering::HandleFloatVarArgs(GenTreeCall* call, GenTree* argNode, bool* callHasFloatRegArgs)
+void LinearScan::HandleFloatVarArgs(GenTreeCall* call, GenTree* argNode, bool* callHasFloatRegArgs)
 {
 #if FEATURE_VARARG
     if (call->IsVarargs() && varTypeIsFloating(argNode))
@@ -1064,9 +1099,9 @@ void Lowering::HandleFloatVarArgs(GenTreeCall* call, GenTree* argNode, bool* cal
         *callHasFloatRegArgs = true;
 
         regNumber argReg    = argNode->gtRegNum;
-        regNumber targetReg = comp->getCallArgIntRegister(argReg);
+        regNumber targetReg = compiler->getCallArgIntRegister(argReg);
         call->gtLsraInfo.setInternalIntCount(call->gtLsraInfo.internalIntCount + 1);
-        call->gtLsraInfo.addInternalCandidates(m_lsra, genRegMask(targetReg));
+        call->gtLsraInfo.addInternalCandidates(this, genRegMask(targetReg));
     }
 #endif // FEATURE_VARARG
 }
@@ -1080,11 +1115,9 @@ void Lowering::HandleFloatVarArgs(GenTreeCall* call, GenTree* argNode, bool* cal
 // Return Value:
 //    None.
 //
-void Lowering::TreeNodeInfoInitCall(GenTreeCall* call)
+void LinearScan::TreeNodeInfoInitCall(GenTreeCall* call)
 {
     TreeNodeInfo*   info              = &(call->gtLsraInfo);
-    LinearScan*     l                 = m_lsra;
-    Compiler*       compiler          = comp;
     bool            hasMultiRegRetVal = false;
     ReturnTypeDesc* retTypeDesc       = nullptr;
 
@@ -1125,7 +1158,7 @@ void Lowering::TreeNodeInfoInitCall(GenTreeCall* call)
             {
                 // Fast tail call - make sure that call target is always computed in RAX
                 // so that epilog sequence can generate "jmp rax" to achieve fast tail call.
-                ctrlExpr->gtLsraInfo.setSrcCandidates(l, RBM_RAX);
+                ctrlExpr->gtLsraInfo.setSrcCandidates(this, RBM_RAX);
             }
         }
 #ifdef _TARGET_X86_
@@ -1141,7 +1174,7 @@ void Lowering::TreeNodeInfoInitCall(GenTreeCall* call)
             if (call->IsVirtualStub() && (call->gtCallType == CT_INDIRECT))
             {
                 assert(ctrlExpr->isIndir() && ctrlExpr->isContained());
-                ctrlExpr->gtGetOp1()->gtLsraInfo.setSrcCandidates(l, RBM_VIRTUAL_STUB_TARGET);
+                ctrlExpr->gtGetOp1()->gtLsraInfo.setSrcCandidates(this, RBM_VIRTUAL_STUB_TARGET);
             }
         }
 #endif // _TARGET_X86_
@@ -1154,7 +1187,7 @@ void Lowering::TreeNodeInfoInitCall(GenTreeCall* call)
     // the individual specific registers will have no effect.
     if (call->IsVarargs())
     {
-        info->setInternalCandidates(l, RBM_NONE);
+        info->setInternalCandidates(this, RBM_NONE);
     }
 
     RegisterType registerType = call->TypeGet();
@@ -1168,31 +1201,31 @@ void Lowering::TreeNodeInfoInitCall(GenTreeCall* call)
         // The x86 CORINFO_HELP_INIT_PINVOKE_FRAME helper uses a custom calling convention that returns with
         // TCB in REG_PINVOKE_TCB. AMD64/ARM64 use the standard calling convention. fgMorphCall() sets the
         // correct argument registers.
-        info->setDstCandidates(l, RBM_PINVOKE_TCB);
+        info->setDstCandidates(this, RBM_PINVOKE_TCB);
     }
     else
 #endif // _TARGET_X86_
         if (hasMultiRegRetVal)
     {
         assert(retTypeDesc != nullptr);
-        info->setDstCandidates(l, retTypeDesc->GetABIReturnRegs());
+        info->setDstCandidates(this, retTypeDesc->GetABIReturnRegs());
     }
     else if (varTypeIsFloating(registerType))
     {
 #ifdef _TARGET_X86_
         // The return value will be on the X87 stack, and we will need to move it.
-        info->setDstCandidates(l, l->allRegs(registerType));
+        info->setDstCandidates(this, allRegs(registerType));
 #else  // !_TARGET_X86_
-        info->setDstCandidates(l, RBM_FLOATRET);
+        info->setDstCandidates(this, RBM_FLOATRET);
 #endif // !_TARGET_X86_
     }
     else if (registerType == TYP_LONG)
     {
-        info->setDstCandidates(l, RBM_LNGRET);
+        info->setDstCandidates(this, RBM_LNGRET);
     }
     else
     {
-        info->setDstCandidates(l, RBM_INTRET);
+        info->setDstCandidates(this, RBM_INTRET);
     }
 
     // number of args to a call =
@@ -1215,7 +1248,7 @@ void Lowering::TreeNodeInfoInitCall(GenTreeCall* call)
         // - a field list
         // - a put arg
         //
-        // Note that this property is statically checked by Lowering::CheckBlock.
+        // Note that this property is statically checked by LinearScan::CheckBlock.
         GenTreePtr argNode = list->Current();
 
         // Each register argument corresponds to one source.
@@ -1316,7 +1349,7 @@ void Lowering::TreeNodeInfoInitCall(GenTreeCall* call)
         // Don't assign the call target to any of the argument registers because
         // we will use them to also pass floating point arguments as required
         // by Amd64 ABI.
-        ctrlExpr->gtLsraInfo.setSrcCandidates(l, l->allRegs(TYP_INT) & ~(RBM_ARG_REGS));
+        ctrlExpr->gtLsraInfo.setSrcCandidates(this, allRegs(TYP_INT) & ~(RBM_ARG_REGS));
     }
 #endif // !FEATURE_VARARG
 }
@@ -1330,19 +1363,17 @@ void Lowering::TreeNodeInfoInitCall(GenTreeCall* call)
 // Return Value:
 //    None.
 //
-void Lowering::TreeNodeInfoInitBlockStore(GenTreeBlk* blkNode)
+void LinearScan::TreeNodeInfoInitBlockStore(GenTreeBlk* blkNode)
 {
     GenTree*    dstAddr  = blkNode->Addr();
     unsigned    size     = blkNode->gtBlkSize;
     GenTree*    source   = blkNode->Data();
-    LinearScan* l        = m_lsra;
-    Compiler*   compiler = comp;
 
     // Sources are dest address, initVal or source.
     // We may require an additional source or temp register for the size.
     blkNode->gtLsraInfo.srcCount = GetOperandSourceCount(dstAddr);
     assert(blkNode->gtLsraInfo.dstCount == 0);
-    blkNode->gtLsraInfo.setInternalCandidates(l, RBM_NONE);
+    blkNode->gtLsraInfo.setInternalCandidates(this, RBM_NONE);
     GenTreePtr srcAddrOrFill = nullptr;
     bool       isInitBlk     = blkNode->OperIsInitBlkOp();
 
@@ -1372,7 +1403,7 @@ void Lowering::TreeNodeInfoInitBlockStore(GenTreeBlk* blkNode)
                 {
                     // Reserve an XMM register to fill it with a pack of 16 init value constants.
                     blkNode->gtLsraInfo.internalFloatCount = 1;
-                    blkNode->gtLsraInfo.setInternalCandidates(l, l->internalFloatRegCandidates());
+                    blkNode->gtLsraInfo.setInternalCandidates(this, internalFloatRegCandidates());
                     // use XMM register to fill with constants, it's AVX instruction and set the flag
                     SetContainsAVXFlags();
                 }
@@ -1448,7 +1479,7 @@ void Lowering::TreeNodeInfoInitBlockStore(GenTreeBlk* blkNode)
                     if ((size & (XMM_REGSIZE_BYTES - 1)) != 0)
                     {
                         blkNode->gtLsraInfo.internalIntCount++;
-                        regMaskTP regMask = l->allRegs(TYP_INT);
+                        regMaskTP regMask = allRegs(TYP_INT);
 
 #ifdef _TARGET_X86_
                         if ((size & 1) != 0)
@@ -1456,7 +1487,7 @@ void Lowering::TreeNodeInfoInitBlockStore(GenTreeBlk* blkNode)
                             regMask &= ~RBM_NON_BYTE_REGS;
                         }
 #endif
-                        blkNode->gtLsraInfo.setInternalCandidates(l, regMask);
+                        blkNode->gtLsraInfo.setInternalCandidates(this, regMask);
                     }
 
                     if (size >= XMM_REGSIZE_BYTES)
@@ -1465,7 +1496,7 @@ void Lowering::TreeNodeInfoInitBlockStore(GenTreeBlk* blkNode)
                         // reserve an XMM register to use it for a
                         // series of 16-byte loads and stores.
                         blkNode->gtLsraInfo.internalFloatCount = 1;
-                        blkNode->gtLsraInfo.addInternalCandidates(l, l->internalFloatRegCandidates());
+                        blkNode->gtLsraInfo.addInternalCandidates(this, internalFloatRegCandidates());
                         // Uses XMM reg for load and store and hence check to see whether AVX instructions
                         // are used for codegen, set ContainsAVX flag
                         SetContainsAVXFlags();
@@ -1504,18 +1535,18 @@ void Lowering::TreeNodeInfoInitBlockStore(GenTreeBlk* blkNode)
 
     if (dstAddrRegMask != RBM_NONE)
     {
-        dstAddr->gtLsraInfo.setSrcCandidates(l, dstAddrRegMask);
+        dstAddr->gtLsraInfo.setSrcCandidates(this, dstAddrRegMask);
     }
     if (sourceRegMask != RBM_NONE)
     {
         if (srcAddrOrFill != nullptr)
         {
-            srcAddrOrFill->gtLsraInfo.setSrcCandidates(l, sourceRegMask);
+            srcAddrOrFill->gtLsraInfo.setSrcCandidates(this, sourceRegMask);
         }
         else
         {
             // This is a local source; we'll use a temp register for its address.
-            blkNode->gtLsraInfo.addInternalCandidates(l, sourceRegMask);
+            blkNode->gtLsraInfo.addInternalCandidates(this, sourceRegMask);
             blkNode->gtLsraInfo.internalIntCount++;
         }
     }
@@ -1524,7 +1555,7 @@ void Lowering::TreeNodeInfoInitBlockStore(GenTreeBlk* blkNode)
         if (size != 0)
         {
             // Reserve a temp register for the block size argument.
-            blkNode->gtLsraInfo.addInternalCandidates(l, blkSizeRegMask);
+            blkNode->gtLsraInfo.addInternalCandidates(this, blkSizeRegMask);
             blkNode->gtLsraInfo.internalIntCount++;
         }
         else
@@ -1533,7 +1564,7 @@ void Lowering::TreeNodeInfoInitBlockStore(GenTreeBlk* blkNode)
             assert(blkNode->gtOper == GT_STORE_DYN_BLK);
             blkNode->gtLsraInfo.setSrcCount(3);
             GenTree* blockSize = blkNode->AsDynBlk()->gtDynamicSize;
-            blockSize->gtLsraInfo.setSrcCandidates(l, blkSizeRegMask);
+            blockSize->gtLsraInfo.setSrcCandidates(this, blkSizeRegMask);
         }
     }
 }
@@ -1548,10 +1579,9 @@ void Lowering::TreeNodeInfoInitBlockStore(GenTreeBlk* blkNode)
 // Return Value:
 //    None.
 //
-void Lowering::TreeNodeInfoInitPutArgStk(GenTreePutArgStk* putArgStk)
+void LinearScan::TreeNodeInfoInitPutArgStk(GenTreePutArgStk* putArgStk)
 {
     TreeNodeInfo* info = &(putArgStk->gtLsraInfo);
-    LinearScan*   l    = m_lsra;
     info->srcCount     = 0;
     assert(info->dstCount == 0);
 
@@ -1613,12 +1643,12 @@ void Lowering::TreeNodeInfoInitPutArgStk(GenTreePutArgStk* putArgStk)
             // If any of the fields cannot be stored with an actual push, we may need a temporary
             // register to load the value before storing it to the stack location.
             info->internalIntCount = 1;
-            regMaskTP regMask      = l->allRegs(TYP_INT);
+            regMaskTP regMask      = allRegs(TYP_INT);
             if (needsByteTemp)
             {
                 regMask &= ~RBM_NON_BYTE_REGS;
             }
-            info->setInternalCandidates(l, regMask);
+            info->setInternalCandidates(this, regMask);
         }
 
 #if defined(FEATURE_SIMD)
@@ -1628,7 +1658,7 @@ void Lowering::TreeNodeInfoInitPutArgStk(GenTreePutArgStk* putArgStk)
             info->srcCount = putArgStk->gtOp1->gtLsraInfo.dstCount;
             assert(info->dstCount == 0);
             info->internalFloatCount += 1;
-            info->addInternalCandidates(l, l->allSIMDRegs());
+            info->addInternalCandidates(this, allSIMDRegs());
         }
 #endif // defined(FEATURE_SIMD)
 
@@ -1642,7 +1672,7 @@ void Lowering::TreeNodeInfoInitPutArgStk(GenTreePutArgStk* putArgStk)
     {
         info->srcCount           = putArgStk->gtOp1->gtLsraInfo.dstCount;
         info->internalFloatCount = 1;
-        info->setInternalCandidates(l, l->allSIMDRegs());
+        info->setInternalCandidates(this, allSIMDRegs());
         return;
     }
 #endif // defined(FEATURE_SIMD) && defined(_TARGET_X86_)
@@ -1676,7 +1706,7 @@ void Lowering::TreeNodeInfoInitPutArgStk(GenTreePutArgStk* putArgStk)
             if ((putArgStk->gtNumberReferenceSlots == 0) && (size & (XMM_REGSIZE_BYTES - 1)) != 0)
             {
                 info->internalIntCount++;
-                regMaskTP regMask = l->allRegs(TYP_INT);
+                regMaskTP regMask = allRegs(TYP_INT);
 
 #ifdef _TARGET_X86_
                 if ((size % 2) != 0)
@@ -1684,7 +1714,7 @@ void Lowering::TreeNodeInfoInitPutArgStk(GenTreePutArgStk* putArgStk)
                     regMask &= ~RBM_NON_BYTE_REGS;
                 }
 #endif
-                info->setInternalCandidates(l, regMask);
+                info->setInternalCandidates(this, regMask);
             }
 
 #ifdef _TARGET_X86_
@@ -1697,14 +1727,14 @@ void Lowering::TreeNodeInfoInitPutArgStk(GenTreePutArgStk* putArgStk)
                 // or larger than or equal to 8 bytes on x86, reserve an XMM register to use it for a
                 // series of 16-byte loads and stores.
                 info->internalFloatCount = 1;
-                info->addInternalCandidates(l, l->internalFloatRegCandidates());
+                info->addInternalCandidates(this, internalFloatRegCandidates());
                 SetContainsAVXFlags();
             }
             break;
 
         case GenTreePutArgStk::Kind::RepInstr:
             info->internalIntCount += 3;
-            info->setInternalCandidates(l, (RBM_RDI | RBM_RCX | RBM_RSI));
+            info->setInternalCandidates(this, (RBM_RDI | RBM_RCX | RBM_RSI));
             break;
 
         default:
@@ -1722,11 +1752,9 @@ void Lowering::TreeNodeInfoInitPutArgStk(GenTreePutArgStk* putArgStk)
 // Return Value:
 //    None.
 //
-void Lowering::TreeNodeInfoInitLclHeap(GenTree* tree)
+void LinearScan::TreeNodeInfoInitLclHeap(GenTree* tree)
 {
     TreeNodeInfo* info     = &(tree->gtLsraInfo);
-    LinearScan*   l        = m_lsra;
-    Compiler*     compiler = comp;
 
     info->srcCount = 1;
     assert(info->dstCount == 1);
@@ -1819,10 +1847,9 @@ void Lowering::TreeNodeInfoInitLclHeap(GenTree* tree)
 // Return Value:
 //    None.
 //
-void Lowering::TreeNodeInfoInitModDiv(GenTree* tree)
+void LinearScan::TreeNodeInfoInitModDiv(GenTree* tree)
 {
     TreeNodeInfo* info = &(tree->gtLsraInfo);
-    LinearScan*   l    = m_lsra;
 
     GenTree* op1 = tree->gtGetOp1();
     GenTree* op2 = tree->gtGetOp2();
@@ -1844,13 +1871,13 @@ void Lowering::TreeNodeInfoInitModDiv(GenTree* tree)
     {
         // We are interested in just the remainder.
         // RAX is used as a trashable register during computation of remainder.
-        info->setDstCandidates(l, RBM_RDX);
+        info->setDstCandidates(this, RBM_RDX);
     }
     else
     {
         // We are interested in just the quotient.
         // RDX gets used as trashable register during computation of quotient
-        info->setDstCandidates(l, RBM_RAX);
+        info->setDstCandidates(this, RBM_RAX);
     }
 
 #ifdef _TARGET_X86_
@@ -1867,21 +1894,21 @@ void Lowering::TreeNodeInfoInitModDiv(GenTree* tree)
 
         // This situation also requires an internal register.
         info->internalIntCount = 1;
-        info->setInternalCandidates(l, l->allRegs(TYP_INT));
+        info->setInternalCandidates(this, allRegs(TYP_INT));
 
-        loVal->gtLsraInfo.setSrcCandidates(l, RBM_EAX);
-        hiVal->gtLsraInfo.setSrcCandidates(l, RBM_EDX);
+        loVal->gtLsraInfo.setSrcCandidates(this, RBM_EAX);
+        hiVal->gtLsraInfo.setSrcCandidates(this, RBM_EDX);
     }
     else
 #endif
     {
         // If possible would like to have op1 in RAX to avoid a register move
-        op1->gtLsraInfo.setSrcCandidates(l, RBM_RAX);
+        op1->gtLsraInfo.setSrcCandidates(this, RBM_RAX);
     }
 
     if (!op2->isContained())
     {
-        op2->gtLsraInfo.setSrcCandidates(l, l->allRegs(TYP_INT) & ~(RBM_RAX | RBM_RDX));
+        op2->gtLsraInfo.setSrcCandidates(this, allRegs(TYP_INT) & ~(RBM_RAX | RBM_RDX));
     }
 }
 
@@ -1894,10 +1921,9 @@ void Lowering::TreeNodeInfoInitModDiv(GenTree* tree)
 // Return Value:
 //    None.
 //
-void Lowering::TreeNodeInfoInitIntrinsic(GenTree* tree)
+void LinearScan::TreeNodeInfoInitIntrinsic(GenTree* tree)
 {
     TreeNodeInfo* info = &(tree->gtLsraInfo);
-    LinearScan*   l    = m_lsra;
 
     // Both operand and its result must be of floating point type.
     GenTree* op1 = tree->gtGetOp1();
@@ -1928,7 +1954,7 @@ void Lowering::TreeNodeInfoInitIntrinsic(GenTree* tree)
             if (tree->gtIntrinsic.gtIntrinsicId == CORINFO_INTRINSIC_Abs)
             {
                 info->internalFloatCount = 1;
-                info->setInternalCandidates(l, l->internalFloatRegCandidates());
+                info->setInternalCandidates(this, internalFloatRegCandidates());
             }
             break;
 
@@ -1958,10 +1984,10 @@ void Lowering::TreeNodeInfoInitIntrinsic(GenTree* tree)
 // Return Value:
 //    None.
 
-void Lowering::TreeNodeInfoInitSIMD(GenTreeSIMD* simdTree)
+void LinearScan::TreeNodeInfoInitSIMD(GenTreeSIMD* simdTree)
 {
     TreeNodeInfo* info = &(simdTree->gtLsraInfo);
-    LinearScan*   lsra = m_lsra;
+
     // Only SIMDIntrinsicInit can be contained. Other than that,
     // only SIMDIntrinsicOpEquality and SIMDIntrinsicOpInEquality can have 0 dstCount.
     if (simdTree->isContained())
@@ -2020,7 +2046,7 @@ void Lowering::TreeNodeInfoInitSIMD(GenTreeSIMD* simdTree)
                 {
                     // need a temp
                     info->internalFloatCount = 1;
-                    info->setInternalCandidates(lsra, lsra->allSIMDRegs());
+                    info->setInternalCandidates(this, allSIMDRegs());
                     info->isInternalRegDelayFree = true;
                     info->srcCount               = 2;
                 }
@@ -2039,7 +2065,7 @@ void Lowering::TreeNodeInfoInitSIMD(GenTreeSIMD* simdTree)
 
             // Need an internal register to stitch together all the values into a single vector in a SIMD reg.
             info->internalFloatCount = 1;
-            info->setInternalCandidates(lsra, lsra->allSIMDRegs());
+            info->setInternalCandidates(this, allSIMDRegs());
         }
         break;
 
@@ -2061,7 +2087,7 @@ void Lowering::TreeNodeInfoInitSIMD(GenTreeSIMD* simdTree)
             // Must be a Vector<int> or Vector<short> Vector<sbyte>
             assert(simdTree->gtSIMDBaseType == TYP_INT || simdTree->gtSIMDBaseType == TYP_SHORT ||
                    simdTree->gtSIMDBaseType == TYP_BYTE);
-            assert(comp->getSIMDInstructionSet() >= InstructionSet_SSE3_4);
+            assert(compiler->getSIMDInstructionSet() >= InstructionSet_SSE3_4);
             info->srcCount = 1;
             break;
 
@@ -2084,10 +2110,10 @@ void Lowering::TreeNodeInfoInitSIMD(GenTreeSIMD* simdTree)
 
             // SSE2 32-bit integer multiplication requires two temp regs
             if (simdTree->gtSIMDIntrinsicID == SIMDIntrinsicMul && simdTree->gtSIMDBaseType == TYP_INT &&
-                comp->getSIMDInstructionSet() == InstructionSet_SSE2)
+                compiler->getSIMDInstructionSet() == InstructionSet_SSE2)
             {
                 info->internalFloatCount = 2;
-                info->setInternalCandidates(lsra, lsra->allSIMDRegs());
+                info->setInternalCandidates(this, allSIMDRegs());
             }
             break;
 
@@ -2133,7 +2159,7 @@ void Lowering::TreeNodeInfoInitSIMD(GenTreeSIMD* simdTree)
                 // registers reserved are guaranteed to be different from target
                 // integer register without explicitly specifying.
                 info->internalFloatCount = 1;
-                info->setInternalCandidates(lsra, lsra->allSIMDRegs());
+                info->setInternalCandidates(this, allSIMDRegs());
             }
             if (info->isNoRegCompare)
             {
@@ -2142,10 +2168,10 @@ void Lowering::TreeNodeInfoInitSIMD(GenTreeSIMD* simdTree)
                 // A target reg is not needed on AVX when comparing against Vector Zero.
                 // In all other cases we need to reserve an int type internal register if we
                 // don't have a target register on the compare.
-                if (!comp->canUseAVX() || !simdTree->gtGetOp2()->IsIntegralConstVector(0))
+                if (!compiler->canUseAVX() || !simdTree->gtGetOp2()->IsIntegralConstVector(0))
                 {
                     info->internalIntCount = 1;
-                    info->addInternalCandidates(lsra, lsra->allRegs(TYP_INT));
+                    info->addInternalCandidates(this, allRegs(TYP_INT));
                 }
             }
             break;
@@ -2166,24 +2192,24 @@ void Lowering::TreeNodeInfoInitSIMD(GenTreeSIMD* simdTree)
             // and the need for scratch registers.
             if (varTypeIsFloating(simdTree->gtSIMDBaseType))
             {
-                if ((comp->getSIMDInstructionSet() == InstructionSet_SSE2) ||
+                if ((compiler->getSIMDInstructionSet() == InstructionSet_SSE2) ||
                     (simdTree->gtOp.gtOp1->TypeGet() == TYP_SIMD32))
                 {
                     info->internalFloatCount     = 1;
                     info->isInternalRegDelayFree = true;
-                    info->setInternalCandidates(lsra, lsra->allSIMDRegs());
+                    info->setInternalCandidates(this, allSIMDRegs());
                 }
                 // else don't need scratch reg(s).
             }
             else
             {
-                assert(simdTree->gtSIMDBaseType == TYP_INT && comp->getSIMDInstructionSet() >= InstructionSet_SSE3_4);
+                assert(simdTree->gtSIMDBaseType == TYP_INT && compiler->getSIMDInstructionSet() >= InstructionSet_SSE3_4);
 
                 // No need to set isInternalRegDelayFree since targetReg is a
                 // an int type reg and guaranteed to be different from xmm/ymm
                 // regs.
-                info->internalFloatCount = comp->canUseAVX() ? 2 : 1;
-                info->setInternalCandidates(lsra, lsra->allSIMDRegs());
+                info->internalFloatCount = compiler->canUseAVX() ? 2 : 1;
+                info->setInternalCandidates(this, allSIMDRegs());
             }
             info->srcCount = 2;
             break;
@@ -2229,13 +2255,13 @@ void Lowering::TreeNodeInfoInitSIMD(GenTreeSIMD* simdTree)
                 info->srcCount++;
                 if (!op2->IsCnsIntOrI())
                 {
-                    (void)comp->getSIMDInitTempVarNum();
+                    (void)compiler->getSIMDInitTempVarNum();
                 }
                 else if (!varTypeIsFloating(simdTree->gtSIMDBaseType))
                 {
                     bool needFloatTemp;
                     if (varTypeIsSmallInt(simdTree->gtSIMDBaseType) &&
-                        (comp->getSIMDInstructionSet() == InstructionSet_AVX))
+                        (compiler->getSIMDInstructionSet() == InstructionSet_AVX))
                     {
                         int byteShiftCnt = (int)op2->AsIntCon()->gtIconVal * genTypeSize(simdTree->gtSIMDBaseType);
                         needFloatTemp    = (byteShiftCnt >= 16);
@@ -2248,7 +2274,7 @@ void Lowering::TreeNodeInfoInitSIMD(GenTreeSIMD* simdTree)
                     if (needFloatTemp)
                     {
                         info->internalFloatCount = 1;
-                        info->setInternalCandidates(lsra, lsra->allSIMDRegs());
+                        info->setInternalCandidates(this, allSIMDRegs());
                     }
                 }
             }
@@ -2262,10 +2288,10 @@ void Lowering::TreeNodeInfoInitSIMD(GenTreeSIMD* simdTree)
             info->srcCount = 2;
 
             // We need an internal integer register for SSE2 codegen
-            if (comp->getSIMDInstructionSet() == InstructionSet_SSE2)
+            if (compiler->getSIMDInstructionSet() == InstructionSet_SSE2)
             {
                 info->internalIntCount = 1;
-                info->setInternalCandidates(lsra, lsra->allRegs(TYP_INT));
+                info->setInternalCandidates(this, allRegs(TYP_INT));
             }
 
             break;
@@ -2282,7 +2308,7 @@ void Lowering::TreeNodeInfoInitSIMD(GenTreeSIMD* simdTree)
                 info->isInternalRegDelayFree = true;
                 info->internalIntCount       = 1;
                 info->internalFloatCount     = 2;
-                info->setInternalCandidates(lsra, lsra->allSIMDRegs() | lsra->allRegs(TYP_INT));
+                info->setInternalCandidates(this, allSIMDRegs() | allRegs(TYP_INT));
             }
             break;
 
@@ -2299,7 +2325,7 @@ void Lowering::TreeNodeInfoInitSIMD(GenTreeSIMD* simdTree)
                 // We need an internal register different from targetReg.
                 info->isInternalRegDelayFree = true;
                 info->internalFloatCount     = 1;
-                info->setInternalCandidates(lsra, lsra->allSIMDRegs());
+                info->setInternalCandidates(this, allSIMDRegs());
             }
             break;
 
@@ -2309,7 +2335,7 @@ void Lowering::TreeNodeInfoInitSIMD(GenTreeSIMD* simdTree)
             info->isInternalRegDelayFree = true;
             info->srcCount               = 1;
             info->internalIntCount       = 1;
-            if (comp->getSIMDInstructionSet() == InstructionSet_AVX)
+            if (compiler->getSIMDInstructionSet() == InstructionSet_AVX)
             {
                 info->internalFloatCount = 2;
             }
@@ -2317,7 +2343,7 @@ void Lowering::TreeNodeInfoInitSIMD(GenTreeSIMD* simdTree)
             {
                 info->internalFloatCount = 1;
             }
-            info->setInternalCandidates(lsra, lsra->allSIMDRegs() | lsra->allRegs(TYP_INT));
+            info->setInternalCandidates(this, allSIMDRegs() | allRegs(TYP_INT));
             break;
 
         case SIMDIntrinsicConvertToDouble:
@@ -2332,7 +2358,7 @@ void Lowering::TreeNodeInfoInitSIMD(GenTreeSIMD* simdTree)
             }
             else
 #endif
-                if ((comp->getSIMDInstructionSet() == InstructionSet_AVX) || (simdTree->gtSIMDBaseType == TYP_ULONG))
+                if ((compiler->getSIMDInstructionSet() == InstructionSet_AVX) || (simdTree->gtSIMDBaseType == TYP_ULONG))
             {
                 info->internalFloatCount = 2;
             }
@@ -2340,14 +2366,14 @@ void Lowering::TreeNodeInfoInitSIMD(GenTreeSIMD* simdTree)
             {
                 info->internalFloatCount = 1;
             }
-            info->setInternalCandidates(lsra, lsra->allSIMDRegs() | lsra->allRegs(TYP_INT));
+            info->setInternalCandidates(this, allSIMDRegs() | allRegs(TYP_INT));
             break;
 
         case SIMDIntrinsicNarrow:
             // We need an internal register different from targetReg.
             info->isInternalRegDelayFree = true;
             info->srcCount               = 2;
-            if ((comp->getSIMDInstructionSet() == InstructionSet_AVX) && (simdTree->gtSIMDBaseType != TYP_DOUBLE))
+            if ((compiler->getSIMDInstructionSet() == InstructionSet_AVX) && (simdTree->gtSIMDBaseType != TYP_DOUBLE))
             {
                 info->internalFloatCount = 2;
             }
@@ -2355,7 +2381,7 @@ void Lowering::TreeNodeInfoInitSIMD(GenTreeSIMD* simdTree)
             {
                 info->internalFloatCount = 1;
             }
-            info->setInternalCandidates(lsra, lsra->allSIMDRegs());
+            info->setInternalCandidates(this, allSIMDRegs());
             break;
 
         case SIMDIntrinsicShuffleSSE2:
@@ -2391,7 +2417,7 @@ void Lowering::TreeNodeInfoInitSIMD(GenTreeSIMD* simdTree)
 // Return Value:
 //    None.
 //
-void Lowering::TreeNodeInfoInitCast(GenTree* tree)
+void LinearScan::TreeNodeInfoInitCast(GenTree* tree)
 {
     TreeNodeInfo* info = &(tree->gtLsraInfo);
 
@@ -2435,7 +2461,7 @@ void Lowering::TreeNodeInfoInitCast(GenTree* tree)
 // Return Value:
 //    None.
 //
-void Lowering::TreeNodeInfoInitGCWriteBarrier(GenTree* tree)
+void LinearScan::TreeNodeInfoInitGCWriteBarrier(GenTree* tree)
 {
     assert(tree->OperGet() == GT_STOREIND);
 
@@ -2457,7 +2483,7 @@ void Lowering::TreeNodeInfoInitGCWriteBarrier(GenTree* tree)
 
     useOptimizedWriteBarrierHelper = true; // On x86, use the optimized write barriers by default.
 #ifdef DEBUG
-    GCInfo::WriteBarrierForm wbf = comp->codeGen->gcInfo.gcIsWriteBarrierCandidate(tree, src);
+    GCInfo::WriteBarrierForm wbf = compiler->codeGen->gcInfo.gcIsWriteBarrierCandidate(tree, src);
     if (wbf == GCInfo::WBF_NoBarrier_CheckNotHeapInDebug) // This one is always a call to a C++ method.
     {
         useOptimizedWriteBarrierHelper = false;
@@ -2469,8 +2495,8 @@ void Lowering::TreeNodeInfoInitGCWriteBarrier(GenTree* tree)
         // Special write barrier:
         // op1 (addr) goes into REG_WRITE_BARRIER (rdx) and
         // op2 (src) goes into any int register.
-        addr->gtLsraInfo.setSrcCandidates(m_lsra, RBM_WRITE_BARRIER);
-        src->gtLsraInfo.setSrcCandidates(m_lsra, RBM_WRITE_BARRIER_SRC);
+        addr->gtLsraInfo.setSrcCandidates(this, RBM_WRITE_BARRIER);
+        src->gtLsraInfo.setSrcCandidates(this, RBM_WRITE_BARRIER_SRC);
     }
 
 #else // !defined(_TARGET_X86_)
@@ -2484,8 +2510,8 @@ void Lowering::TreeNodeInfoInitGCWriteBarrier(GenTree* tree)
         // For the standard JIT Helper calls:
         // op1 (addr) goes into REG_ARG_0 and
         // op2 (src) goes into REG_ARG_1
-        addr->gtLsraInfo.setSrcCandidates(m_lsra, RBM_ARG_0);
-        src->gtLsraInfo.setSrcCandidates(m_lsra, RBM_ARG_1);
+        addr->gtLsraInfo.setSrcCandidates(this, RBM_ARG_0);
+        src->gtLsraInfo.setSrcCandidates(this, RBM_ARG_1);
     }
 
     // Both src and dst must reside in a register, which they should since we haven't set
@@ -2500,7 +2526,7 @@ void Lowering::TreeNodeInfoInitGCWriteBarrier(GenTree* tree)
 // Arguments:
 //    indirTree    -   GT_IND or GT_STOREIND gentree node
 //
-void Lowering::TreeNodeInfoInitIndir(GenTreeIndir* indirTree)
+void LinearScan::TreeNodeInfoInitIndir(GenTreeIndir* indirTree)
 {
     // If this is the rhs of a block copy (i.e. non-enregisterable struct),
     // it has no register requirements.
@@ -2545,10 +2571,10 @@ void Lowering::TreeNodeInfoInitIndir(GenTreeIndir* indirTree)
                 if (varTypeIsByte(indirTree) && !nonMemSource->isContained())
                 {
                     // If storeInd is of TYP_BYTE, set source to byteable registers.
-                    regMaskTP regMask = nonMemSource->gtLsraInfo.getSrcCandidates(m_lsra);
+                    regMaskTP regMask = nonMemSource->gtLsraInfo.getSrcCandidates(this);
                     regMask &= ~RBM_NON_BYTE_REGS;
                     assert(regMask != RBM_NONE);
-                    nonMemSource->gtLsraInfo.setSrcCandidates(m_lsra, regMask);
+                    nonMemSource->gtLsraInfo.setSrcCandidates(this, regMask);
                 }
 #endif
             }
@@ -2561,10 +2587,10 @@ void Lowering::TreeNodeInfoInitIndir(GenTreeIndir* indirTree)
         if (varTypeIsByte(indirTree) && !source->isContained())
         {
             // If storeInd is of TYP_BYTE, set source to byteable registers.
-            regMaskTP regMask = source->gtLsraInfo.getSrcCandidates(m_lsra);
+            regMaskTP regMask = source->gtLsraInfo.getSrcCandidates(this);
             regMask &= ~RBM_NON_BYTE_REGS;
             assert(regMask != RBM_NONE);
-            source->gtLsraInfo.setSrcCandidates(m_lsra, regMask);
+            source->gtLsraInfo.setSrcCandidates(this, regMask);
         }
 #endif
     }
@@ -2587,7 +2613,7 @@ void Lowering::TreeNodeInfoInitIndir(GenTreeIndir* indirTree)
             info->isInternalRegDelayFree = true;
         }
 
-        info->setInternalCandidates(m_lsra, m_lsra->allSIMDRegs());
+        info->setInternalCandidates(this, allSIMDRegs());
 
         return;
     }
@@ -2605,7 +2631,7 @@ void Lowering::TreeNodeInfoInitIndir(GenTreeIndir* indirTree)
 // Return Value:
 //    None.
 //
-void Lowering::TreeNodeInfoInitCmp(GenTreePtr tree)
+void LinearScan::TreeNodeInfoInitCmp(GenTreePtr tree)
 {
     assert(tree->OperIsCompare() || tree->OperIs(GT_CMP));
 
@@ -2626,7 +2652,7 @@ void Lowering::TreeNodeInfoInitCmp(GenTreePtr tree)
     // We always set the dst candidates, though, because if this is compare is consumed by a jump, they
     // won't be used. We might be able to use GTF_RELOP_JMP_USED to determine this case, but it's not clear
     // that flag is maintained until this location (especially for decomposed long compares).
-    info->setDstCandidates(m_lsra, RBM_BYTE_REGS);
+    info->setDstCandidates(this, RBM_BYTE_REGS);
 #endif // _TARGET_X86_
 
     GenTreePtr op1     = tree->gtOp.gtOp1;
@@ -2664,7 +2690,7 @@ void Lowering::TreeNodeInfoInitCmp(GenTreePtr tree)
 // Return Value:
 //    None.
 //
-void Lowering::TreeNodeInfoInitMul(GenTreePtr tree)
+void LinearScan::TreeNodeInfoInitMul(GenTreePtr tree)
 {
 #if defined(_TARGET_X86_)
     assert(tree->OperIs(GT_MUL, GT_MULHI, GT_MUL_LONG));
@@ -2711,19 +2737,19 @@ void Lowering::TreeNodeInfoInitMul(GenTreePtr tree)
         // Here we set RAX as the only destination candidate
         // In LSRA we set the kill set for this operation to RBM_RAX|RBM_RDX
         //
-        info->setDstCandidates(m_lsra, RBM_RAX);
+        info->setDstCandidates(this, RBM_RAX);
     }
     else if (tree->OperGet() == GT_MULHI)
     {
         // Have to use the encoding:RDX:RAX = RAX * rm. Since we only care about the
         // upper 32 bits of the result set the destination candidate to REG_RDX.
-        info->setDstCandidates(m_lsra, RBM_RDX);
+        info->setDstCandidates(this, RBM_RDX);
     }
 #if defined(_TARGET_X86_)
     else if (tree->OperGet() == GT_MUL_LONG)
     {
         // have to use the encoding:RDX:RAX = RAX * rm
-        info->setDstCandidates(m_lsra, RBM_RAX);
+        info->setDstCandidates(this, RBM_RAX);
     }
 #endif
     GenTree* containedMemOp = nullptr;
@@ -2751,18 +2777,18 @@ void Lowering::TreeNodeInfoInitMul(GenTreePtr tree)
 //    isFloatingPointType   - true if it is floating point type
 //    sizeOfSIMDVector      - SIMD Vector size
 //
-void Lowering::SetContainsAVXFlags(bool isFloatingPointType /* = true */, unsigned sizeOfSIMDVector /* = 0*/)
+void LinearScan::SetContainsAVXFlags(bool isFloatingPointType /* = true */, unsigned sizeOfSIMDVector /* = 0*/)
 {
 #ifdef FEATURE_AVX_SUPPORT
     if (isFloatingPointType)
     {
-        if (comp->getFloatingPointInstructionSet() == InstructionSet_AVX)
+        if (compiler->getFloatingPointInstructionSet() == InstructionSet_AVX)
         {
-            comp->getEmitter()->SetContainsAVX(true);
+            compiler->getEmitter()->SetContainsAVX(true);
         }
-        if (sizeOfSIMDVector == 32 && comp->getSIMDInstructionSet() == InstructionSet_AVX)
+        if (sizeOfSIMDVector == 32 && compiler->getSIMDInstructionSet() == InstructionSet_AVX)
         {
-            comp->getEmitter()->SetContains256bitAVX(true);
+            compiler->getEmitter()->SetContains256bitAVX(true);
         }
     }
 #endif
@@ -2779,7 +2805,7 @@ void Lowering::SetContainsAVXFlags(bool isFloatingPointType /* = true */, unsign
 // Return Value:
 //    If we need to exclude non-byteable registers
 //
-bool Lowering::ExcludeNonByteableRegisters(GenTree* tree)
+bool LinearScan::ExcludeNonByteableRegisters(GenTree* tree)
 {
     // Example1: GT_STOREIND(byte, addr, op2) - storeind of byte sized value from op2 into mem 'addr'
     // Storeind itself will not produce any value and hence dstCount=0. But op2 could be TYP_INT
@@ -2847,7 +2873,7 @@ bool Lowering::ExcludeNonByteableRegisters(GenTree* tree)
                 GenTree*  op1      = simdNode->gtGetOp1();
                 GenTree*  op2      = simdNode->gtGetOp2();
                 var_types baseType = simdNode->gtSIMDBaseType;
-                if (!IsContainableMemoryOp(op1) && op2->IsCnsIntOrI() && varTypeIsSmallInt(baseType))
+                if (!isContainableMemoryOp(op1) && op2->IsCnsIntOrI() && varTypeIsSmallInt(baseType))
                 {
                     bool     ZeroOrSignExtnReqd = true;
                     unsigned baseSize           = genTypeSize(baseType);
@@ -2890,7 +2916,7 @@ bool Lowering::ExcludeNonByteableRegisters(GenTree* tree)
 // Return Value:
 //    The number of source registers used by the *parent* of this node.
 //
-int Lowering::GetOperandSourceCount(GenTree* node)
+int LinearScan::GetOperandSourceCount(GenTree* node)
 {
     if (!node->isContained())
     {