These are mostly refactoring changes, in preparation for the change to the IR for block assignments.
CNS_COMMON:
{
+ // TODO-1stClassStructs: handle constant propagation to struct types.
+ if (varTypeIsStruct(lclVar))
+ {
+ goto DONE_ASSERTION;
+ }
//
// Must either be an OAK_EQUAL or an OAK_NOT_EQUAL assertion
//
{
case GT_ASG:
// VN takes care of non local assertions for assignments and data flow.
- if (optLocalAssertionProp)
+ // TODO-1stClassStructs: Enable assertion prop for struct types.
+ if (varTypeIsStruct(tree))
+ {
+ // Do nothing.
+ }
+ else if (optLocalAssertionProp)
{
assertionIndex = optCreateAssertion(tree->gtOp.gtOp1, tree->gtOp.gtOp2, OAK_EQUAL);
}
break;
case GT_IND:
+ // TODO-1stClassStructs: All indirections should be considered to create a non-null
+ // assertion, but previously, when these indirections were implicit due to a block
+ // copy or init, they were not being considered to do so.
+ if (tree->gtType == TYP_STRUCT)
+ {
+ GenTree* parent = tree->gtGetParent(nullptr);
+ if ((parent != nullptr) && (parent->gtOper == GT_ASG))
+ {
+ break;
+ }
+ }
case GT_NULLCHECK:
- // An indirection can create a non-null assertion
case GT_ARR_LENGTH:
// An array length can create a non-null assertion
assertionIndex = optCreateAssertion(tree->gtOp.gtOp1, nullptr, OAK_NOT_EQUAL);
break;
case O2K_IND_CNS_INT:
- // This is the ngen case where we have a GT_IND of an address.
+ // This is the ngen case where we have an indirection of an address.
noway_assert((impAssertion->op1.kind == O1K_EXACT_TYPE) || (impAssertion->op1.kind == O1K_SUBTYPE));
__fallthrough;
//
// Description:
// Performs value number based non-null propagation on GT_CALL and
-// GT_IND/GT_NULLCHECK. This is different from flow based assertions and helps
+// indirections. This is different from flow based assertions and helps
// unify VN based constant prop and non-null prop in a single pre-order walk.
//
void Compiler::optVnNonNullPropCurStmt(BasicBlock* block, GenTreePtr stmt, GenTreePtr tree)
}
else if (tree->OperGet() == GT_IND || tree->OperGet() == GT_NULLCHECK)
{
+ // TODO-1stClassStructs: All indirections should be handled here, but
+ // previously, when these indirections were GT_OBJ, or implicit due to a block
+ // copy or init, they were not being handled.
newTree = optAssertionProp_Ind(empty, tree, stmt);
}
if (newTree)
LclVarDsc* varDsc = lvaTable + lclNum;
#ifdef DEBUG
-#if !defined(_TARGET_AMD64_) // no addr nodes on AMD and experimenting with with encountering vars in 'random' order
+#if !defined(_TARGET_AMD64_)
+ // There are no addr nodes on ARM and we are experimenting with encountering vars in 'random' order.
// Struct fields are not traversed in a consistent order, so ignore them when
// verifying that we see the var nodes in execution order
if (ForCodeGen)
{
- if (tree->gtOper == GT_OBJ)
- {
- // The tree must have the particular form OBJ(ADDR(LCL)); no need to do the check below.
- assert(indirAddrLocal != NULL);
- }
- else if (tree->OperIsIndir())
+ if (tree->OperIsIndir())
{
assert(indirAddrLocal != NULL);
}
GenTreePtr opsPtr[3];
regMaskTP regsPtr[3];
- noway_assert(oper == GT_COPYBLK || oper == GT_INITBLK);
+ const bool isCopyBlk = (oper == GT_COPYBLK);
+ const bool isInitBlk = (oper == GT_INITBLK);
+ const bool sizeIsConst = op2->IsCnsIntOrI();
+ noway_assert(isCopyBlk || isInitBlk);
noway_assert(op1->IsList());
#ifdef _TARGET_ARM_
destPtr = op1->gtOp.gtOp1;
srcPtrOrVal = op1->gtOp.gtOp2;
noway_assert(destPtr->TypeGet() == TYP_BYREF || varTypeIsIntegral(destPtr->TypeGet()));
- noway_assert((oper == GT_COPYBLK &&
+ noway_assert((isCopyBlk &&
(srcPtrOrVal->TypeGet() == TYP_BYREF || varTypeIsIntegral(srcPtrOrVal->TypeGet()))) ||
- (oper == GT_INITBLK && varTypeIsIntegral(srcPtrOrVal->TypeGet())));
+ (isInitBlk && varTypeIsIntegral(srcPtrOrVal->TypeGet())));
noway_assert(op1 && op1->IsList());
noway_assert(destPtr && srcPtrOrVal);
#if CPU_USES_BLOCK_MOVE
- regs = (oper == GT_INITBLK) ? RBM_EAX : RBM_ESI; // What is the needReg for Val/Src
+ regs = isInitBlk ? RBM_EAX : RBM_ESI; // What is the needReg for Val/Src
/* Some special code for block moves/inits for constant sizes */
// Is this a fixed size COPYBLK?
// or a fixed size INITBLK with a constant init value?
//
- if ((op2->IsCnsIntOrI()) && ((oper == GT_COPYBLK) || (srcPtrOrVal->IsCnsIntOrI())))
+ if ((sizeIsConst) && (isCopyBlk || (srcPtrOrVal->IsCnsIntOrI())))
{
size_t length = (size_t)op2->gtIntCon.gtIconVal;
size_t initVal = 0;
instruction ins_P, ins_PR, ins_B;
- if (oper == GT_INITBLK)
+ if (isInitBlk)
{
ins_P = INS_stosp;
ins_PR = INS_r_stosp;
ins_B = INS_stosb;
/* Properly extend the init constant from a U1 to a U4 */
- initVal = 0xFF & ((unsigned)op1->gtOp.gtOp2->gtIntCon.gtIconVal);
+ initVal = 0xFF & ((unsigned)srcPtrOrVal->gtIntCon.gtIconVal);
/* If it is a non-zero value we have to replicate */
/* the byte value four times to form the DWORD */
}
else
{
- op1->gtOp.gtOp2->gtType = TYP_INT;
+ srcPtrOrVal->gtType = TYP_INT;
}
#endif // _TARGET_64BIT_
}
- op1->gtOp.gtOp2->gtIntCon.gtIconVal = initVal;
+ srcPtrOrVal->gtIntCon.gtIconVal = initVal;
}
else
{
movqLenMax = 48;
}
- if ((compiler->compCodeOpt() == Compiler::FAST_CODE) || (oper == GT_INITBLK))
+ if ((compiler->compCodeOpt() == Compiler::FAST_CODE) || isInitBlk)
{
// Be more aggressive when optimizing for speed
// InitBlk uses fewer instructions
if ((length % 8) == 0)
{
bWillUseOnlySSE2 = true;
- if (oper == GT_INITBLK && (initVal == 0))
+ if (isInitBlk && (initVal == 0))
{
bNeedEvaluateCnst = false;
noway_assert((op1->gtOp.gtOp2->OperGet() == GT_CNS_INT));
#endif // !_TARGET_64BIT_
- const bool bWillTrashRegSrc = ((oper == GT_COPYBLK) && !bWillUseOnlySSE2);
+ const bool bWillTrashRegSrc = (isCopyBlk && !bWillUseOnlySSE2);
/* Evaluate dest and src/val */
if (op1->gtFlags & GTF_REVERSE_OPS)
int blkDisp = 0;
regNumber xmmReg = REG_XMM0;
- if (oper == GT_INITBLK)
+ if (isInitBlk)
{
if (initVal)
{
JITLOG_THIS(compiler,
(LL_INFO100, "Using XMM instructions for %3d byte %s while compiling %s\n", length,
- (oper == GT_INITBLK) ? "initblk" : "copyblk", compiler->info.compFullName));
+ isInitBlk ? "initblk" : "copyblk", compiler->info.compFullName));
while (length > 7)
{
- if (oper == GT_INITBLK)
+ if (isInitBlk)
{
getEmitter()->emitIns_AR_R(INS_movq, EA_8BYTE, xmmReg, REG_EDI, blkDisp);
}
noway_assert(bNeedEvaluateCnst);
noway_assert(!bWillUseOnlySSE2);
- if (oper == GT_COPYBLK)
+ if (isCopyBlk)
{
inst_RV_IV(INS_add, REG_ESI, blkDisp, emitActualTypeSize(srcPtrOrVal->TypeGet()));
bTrashedESI = true;
}
bTrashedEDI = true;
- if (oper == GT_COPYBLK)
+ if (isCopyBlk)
bTrashedESI = true;
}
else
regTracker.rsTrackRegTrash(REG_ECX);
bTrashedEDI = true;
- if (oper == GT_COPYBLK)
+ if (isCopyBlk)
bTrashedESI = true;
}
noway_assert(bNeedEvaluateCnst);
noway_assert(length < 8);
- instGen((oper == GT_INITBLK) ? INS_stosd : INS_movsd);
+ instGen((isInitBlk) ? INS_stosd : INS_movsd);
length -= 4;
bTrashedEDI = true;
- if (oper == GT_COPYBLK)
+ if (isCopyBlk)
bTrashedESI = true;
}
}
bTrashedEDI = true;
- if (oper == GT_COPYBLK)
+ if (isCopyBlk)
bTrashedESI = true;
}
compiler->fgOrderBlockOps(tree, RBM_EDI, regs, RBM_ECX, opsPtr, regsPtr); // OUT arguments
- noway_assert(((oper == GT_INITBLK) && (regs == RBM_EAX)) || ((oper == GT_COPYBLK) && (regs == RBM_ESI)));
+ noway_assert((isInitBlk && (regs == RBM_EAX)) || (isCopyBlk && (regs == RBM_ESI)));
genComputeReg(opsPtr[0], regsPtr[0], RegSet::EXACT_REG, RegSet::KEEP_REG, (regsPtr[0] != RBM_EAX));
genComputeReg(opsPtr[1], regsPtr[1], RegSet::EXACT_REG, RegSet::KEEP_REG, (regsPtr[1] != RBM_EAX));
genComputeReg(opsPtr[2], regsPtr[2], RegSet::EXACT_REG, RegSet::KEEP_REG, (regsPtr[2] != RBM_EAX));
noway_assert((op2->gtFlags & GTF_REG_VAL) && // Size
(op2->gtRegNum == REG_ECX));
- if (oper == GT_INITBLK)
+ if (isInitBlk)
instGen(INS_r_stosb);
else
instGen(INS_r_movsb);
regTracker.rsTrackRegTrash(REG_EDI);
regTracker.rsTrackRegTrash(REG_ECX);
- if (oper == GT_COPYBLK)
+ if (isCopyBlk)
regTracker.rsTrackRegTrash(REG_ESI);
// else No need to trash EAX as it wasnt destroyed by the "rep stos"
// Is this a fixed size COPYBLK?
// or a fixed size INITBLK with a constant init value?
//
- if ((op2->OperGet() == GT_CNS_INT) && ((oper == GT_COPYBLK) || (srcPtrOrVal->OperGet() == GT_CNS_INT)))
+ if ((op2->OperGet() == GT_CNS_INT) && (isCopyBlk || (srcPtrOrVal->OperGet() == GT_CNS_INT)))
{
GenTreePtr dstOp = op1->gtOp.gtOp1;
GenTreePtr srcOp = op1->gtOp.gtOp2;
unsigned initVal = 0;
bool useLoop = false;
- if (oper == GT_INITBLK)
+ if (isInitBlk)
{
/* Properly extend the init constant from a U1 to a U4 */
initVal = 0xFF & ((unsigned)srcOp->gtIntCon.gtIconVal);
}
// Will we be using a loop to implement this INITBLK/COPYBLK?
- if (((oper == GT_COPYBLK) && (fullStoreCount >= 8)) || ((oper == GT_INITBLK) && (fullStoreCount >= 16)))
+ if ((isCopyBlk && (fullStoreCount >= 8)) || (isInitBlk && (fullStoreCount >= 16)))
{
useLoop = true;
}
emitAttr dstType = (varTypeIsGC(dstOp) && !dstIsOnStack) ? EA_BYREF : EA_PTRSIZE;
emitAttr srcType;
- if (oper == GT_COPYBLK)
+ if (isCopyBlk)
{
// Prefer a low register,but avoid one of the ones we've already grabbed
regTemp = regSet.rsGrabReg(regSet.rsNarrowHint(regSet.rsRegMaskCanGrab() & ~usedRegs, RBM_LOW_REGS));
{
for (unsigned i = 0; i < fullStoreCount; i++)
{
- if (oper == GT_COPYBLK)
+ if (isCopyBlk)
{
getEmitter()->emitIns_R_R_I(loadIns, EA_4BYTE, regTemp, regSrc, i * TARGET_POINTER_SIZE);
getEmitter()->emitIns_R_R_I(storeIns, EA_4BYTE, regTemp, regDst, i * TARGET_POINTER_SIZE);
// We need a second temp register for CopyBlk
regNumber regTemp2 = REG_STK;
- if (oper == GT_COPYBLK)
+ if (isCopyBlk)
{
// Prefer a low register, but avoid one of the ones we've already grabbed
regTemp2 =
genDefineTempLabel(loopTopBlock);
// The loop body
- if (oper == GT_COPYBLK)
+ if (isCopyBlk)
{
getEmitter()->emitIns_R_R_I(loadIns, EA_4BYTE, regTemp, regSrc, 0);
getEmitter()->emitIns_R_R_I(loadIns, EA_4BYTE, regTemp2, regSrc, TARGET_POINTER_SIZE);
if (length & TARGET_POINTER_SIZE)
{
- if (oper == GT_COPYBLK)
+ if (isCopyBlk)
{
getEmitter()->emitIns_R_R_I(loadIns, EA_4BYTE, regTemp, regSrc, 0);
getEmitter()->emitIns_R_R_I(storeIns, EA_4BYTE, regTemp, regDst, 0);
loadIns = ins_Load(TYP_USHORT); // INS_ldrh
storeIns = ins_Store(TYP_USHORT); // INS_strh
- if (oper == GT_COPYBLK)
+ if (isCopyBlk)
{
getEmitter()->emitIns_R_R_I(loadIns, EA_2BYTE, regTemp, regSrc, finalOffset);
getEmitter()->emitIns_R_R_I(storeIns, EA_2BYTE, regTemp, regDst, finalOffset);
loadIns = ins_Load(TYP_UBYTE); // INS_ldrb
storeIns = ins_Store(TYP_UBYTE); // INS_strb
- if (oper == GT_COPYBLK)
+ if (isCopyBlk)
{
getEmitter()->emitIns_R_R_I(loadIns, EA_1BYTE, regTemp, regSrc, finalOffset);
getEmitter()->emitIns_R_R_I(storeIns, EA_1BYTE, regTemp, regDst, finalOffset);
regSet.rsLockUsedReg(RBM_ARG_0 | RBM_ARG_1 | RBM_ARG_2);
- genEmitHelperCall(oper == GT_COPYBLK ? CORINFO_HELP_MEMCPY
+ genEmitHelperCall(isCopyBlk ? CORINFO_HELP_MEMCPY
/* GT_INITBLK */
: CORINFO_HELP_MEMSET,
0, EA_UNKNOWN);
genReleaseReg(opsPtr[2]);
}
- if ((oper == GT_COPYBLK) && tree->AsBlkOp()->IsVolatile())
+ if (isCopyBlk && tree->AsBlkOp()->IsVolatile())
{
// Emit a memory barrier instruction after the CopyBlk
instGen_MemoryBarrier();
break;
}
#endif // !defined(_TARGET_64BIT_)
+
+#ifdef FEATURE_SIMD
+ if (varTypeIsSIMD(targetType) && (targetReg != REG_NA) && op1->IsCnsIntOrI())
+ {
+ // This is only possible for a zero-init.
+ noway_assert(op1->IsIntegralConst(0));
+ genSIMDZero(targetType, varDsc->lvBaseType, targetReg);
+ genProduceReg(treeNode);
+ break;
+ }
+#endif // FEATURE_SIMD
genConsumeRegs(op1);
{
// stack store
emit->emitInsMov(ins_Store(targetType, compiler->isSIMDTypeLocalAligned(lclNum)),
- emitTypeSize(treeNode), treeNode);
+ emitTypeSize(targetType), treeNode);
varDsc->lvRegNum = REG_STK;
}
else
objectRefPtr = tree->gtOp.gtOp1;
propKind = optPropKind::OPK_ARRAYLEN;
}
- else if (tree->OperGet() == GT_IND)
+ else if ((tree->OperGet() == GT_IND) && !varTypeIsStruct(tree))
{
+ // TODO-1stClassStructs: The above condition should apply equally to all indirections,
+ // but previously the implicit indirections due to a struct assignment were not
+ // considered, so we are currently limiting it to non-structs to preserve existing
+ // behavior.
// optFoldNullCheck takes care of updating statement info if a null check is removed.
optFoldNullCheck(tree);
bool OperIsInitBlkOp() const;
bool OperIsDynBlkOp();
+ // OperIsBlk: Returns true if "gtOper" is a block node.
+ // Currently only GT_OBJ qualifies; this predicate exists so that the set
+ // can grow as the planned block-assignment IR changes introduce more
+ // block-typed node forms.
+ static
+ bool OperIsBlk(genTreeOps gtOper)
+ {
+ return (gtOper == GT_OBJ);
+ }
+
+ // OperIsBlk: Returns true if this node is a block node (see static overload).
+ bool OperIsBlk() const
+ {
+ return OperIsBlk(OperGet());
+ }
+
bool OperIsPutArgStk() const
{
return gtOper == GT_PUTARG_STK;
void ChangeOper(genTreeOps oper, ValueNumberUpdate vnUpdate = CLEAR_VN);
void ChangeOperUnchecked(genTreeOps oper);
+ // ChangeType: Change the gtType of this node to "newType".
+ // If this node is a GT_COMMA, the op2 of each comma node down the chain is
+ // retyped as well, since a comma's value type must match that of its op2.
+ // Each node in the chain is asserted to have carried the old type before
+ // the change.
+ void ChangeType(var_types newType)
+ {
+ var_types oldType = gtType;
+ gtType = newType;
+ GenTree* node = this;
+ while (node->gtOper == GT_COMMA)
+ {
+ node = node->gtGetOp2();
+ assert(node->gtType == oldType);
+ node->gtType = newType;
+ }
+ }
+
bool IsLocal() const
{
return OperIsLocal(OperGet());
GenTreePtr expr = stmt->gtStmt.gtStmtExpr;
unsigned flags = expr->gtFlags & GTF_GLOB_EFFECT;
- /* Assignment to (unaliased) locals don't count as a side-effect as
- we handle them specially using impSpillLclRefs(). Temp locals should
- be fine too. */
+ // Assignment to (unaliased) locals don't count as a side-effect as
+ // we handle them specially using impSpillLclRefs(). Temp locals should
+ // be fine too.
+ // TODO-1stClassStructs: The check below should apply equally to struct assignments,
+ // but previously the block ops were always being marked GTF_GLOB_REF, even if
+ // the operands could not be global refs.
if ((expr->gtOper == GT_ASG) && (expr->gtOp.gtOp1->gtOper == GT_LCL_VAR) &&
- !(expr->gtOp.gtOp1->gtFlags & GTF_GLOB_REF) && !gtHasLocalsWithAddrOp(expr->gtOp.gtOp2))
+ !(expr->gtOp.gtOp1->gtFlags & GTF_GLOB_REF) && !gtHasLocalsWithAddrOp(expr->gtOp.gtOp2) &&
+ !varTypeIsStruct(expr->gtOp.gtOp1))
{
unsigned op2Flags = expr->gtOp.gtOp2->gtFlags & GTF_GLOB_EFFECT;
assert(flags == (op2Flags | GTF_ASG));
GenTreePtr destAddr;
- if (dest->gtOper == GT_IND || dest->gtOper == GT_OBJ)
+ if (dest->gtOper == GT_IND || dest->OperIsBlk())
{
destAddr = dest->gtOp.gtOp1;
}
}
else
{
- // In general a OBJ is an IND and could raise an exception
+ // In general a OBJ is an indirection and could raise an exception.
structObj->gtFlags |= GTF_EXCEPT;
}
return (structObj);
//
// At this point we are ready to commit to implementing the InitializeArray
- // instrinsic using a struct assignment. Pop the arguments from the stack and
+ // intrinsic using a struct assignment. Pop the arguments from the stack and
// return the struct assignment node.
//
impPopStack();
impPopStack();
- GenTreePtr dst;
+ const unsigned blkSize = size.Value();
+ GenTreePtr dst;
if (isMDArray)
{
dst = gtNewOperNode(GT_ADDR, TYP_BYREF, gtNewIndexRef(elementType, arrayLocalNode, gtNewIconNode(0)));
}
+ GenTreePtr srcAddr = gtNewIconHandleNode((size_t) initData, GTF_ICON_STATIC_HDL);
return gtNewBlkOpNode(GT_COPYBLK,
- dst, // dst
- gtNewIconHandleNode((size_t)initData, GTF_ICON_STATIC_HDL), // src
- gtNewIconNode(size.Value()), // size
+ dst, // dst
+ srcAddr, // src
+ gtNewIconNode(blkSize), // size
false);
}
assert(!"Unexpected fieldAccessor");
}
- /* Create the member assignment, unless we have a struct */
+ // Create the member assignment, unless we have a struct.
+ // TODO-1stClassStructs: This could be limited to TYP_STRUCT, to avoid extra copies.
bool deferStructAssign = varTypeIsStruct(lclTyp);
if (!deferStructAssign)
return;
}
- // if this is a child of an indir, let the parent handle it
- if (data->parentStack->Index(1)->OperIsIndir())
+ // If this is a child of an indir, and it is not a block op, let the parent handle it.
+ GenTree* parent = data->parentStack->Index(1);
+ if (parent->OperIsIndir() && !varTypeIsStruct(parent))
{
return;
}
// if there is a chain of adds, only look at the topmost one
- if (data->parentStack->Index(1)->gtOper == GT_ADD)
+ if (parent->gtOper == GT_ADD)
{
return;
}
arg->gtFlags |= GTF_DONT_CSE;
+#else // !FEATURE_UNIX_AMD64_STRUCT_PASSING
+ // Can this type be passed in a single register?
+ // If so, the following call will return the corresponding primitive type.
+ // Otherwise, it will return TYP_UNKNOWN and we will pass by reference.
+
+ bool passedInRegisters = false;
+ structPassingKind kind;
+ CORINFO_CLASS_HANDLE clsHnd = varDsc->lvVerTypeInfo.GetClassHandle();
+ var_types structBaseType = getPrimitiveTypeForStruct(lvaLclExactSize(tmpVarNum), clsHnd);
+
+ if (structBaseType != TYP_UNKNOWN)
+ {
+ passedInRegisters = true;
+ type = structBaseType;
+ }
+#endif // !FEATURE_UNIX_AMD64_STRUCT_PASSING
+
// If it is passed in registers, don't get the address of the var. Make it a
// field instead. It will be loaded in registers with putarg_reg tree in lower.
if (passedInRegisters)
{
arg = gtNewOperNode(GT_ADDR, type, arg);
addrNode = arg;
- }
-
-#else // !FEATURE_UNIX_AMD64_STRUCT_PASSING
-
- unsigned structSize = lvaLclExactSize(tmpVarNum);
- switch (structSize)
- {
- case 1:
- type = TYP_BYTE;
- break;
- case 2:
- type = TYP_SHORT;
- break;
-#if defined(_TARGET_AMD64_)
- case 4:
- type = TYP_INT;
- break;
-#elif defined(_TARGET_ARM64_)
- case 3:
- case 4:
- type = TYP_INT;
- break;
- case 5:
- case 6:
- case 7:
- type = TYP_I_IMPL;
- break;
-#endif // defined (_TARGET_ARM64_)
- case 8:
- switch (*lvaGetGcLayout(tmpVarNum))
- {
- case TYPE_GC_NONE:
- type = TYP_I_IMPL;
- break;
- case TYPE_GC_REF:
- type = TYP_REF;
- break;
- case TYPE_GC_BYREF:
- type = TYP_BYREF;
- break;
- default:
- unreached();
- }
- break;
- default:
- break;
- }
-
- // If we didn't change the type of the struct, it means
- // its structure doesn't support to be passed directly through a
- // register, so we need to pass a pointer to the destination where
- // where we copied the struct to.
- if (type == varDsc->TypeGet())
- {
#if FEATURE_MULTIREG_ARGS
#ifdef _TARGET_ARM64_
assert(varTypeIsStruct(type));
{
// ToDo-ARM64: Consider using: arg->ChangeOper(GT_LCL_FLD);
// as that is how FEATURE_UNIX_AMD64_STRUCT_PASSING works.
- // Create a GT_OBJ for the argument
- // This will be passed by value in two registers
- arg = gtNewOperNode(GT_ADDR, TYP_BYREF, arg);
- addrNode = arg;
+ // We will create a GT_OBJ for the argument below.
+ // This will be passed by value in two registers.
+ assert(addrNode != nullptr);
// Create an Obj of the temp to use it as a call argument.
arg = gtNewObjNode(lvaGetStruct(tmpVarNum), arg);
}
- else
#endif // _TARGET_ARM64_
#endif // FEATURE_MULTIREG_ARGS
- {
- arg = gtNewOperNode(GT_ADDR, TYP_I_IMPL, arg);
- addrNode = arg;
- }
- }
- else // type was changed from a struct to a scalar type
- {
- arg->ChangeOper(GT_LCL_FLD);
- arg->gtType = type;
}
-#endif // !FEATURE_UNIX_AMD64_STRUCT_PASSING
#else // not (_TARGET_AMD64_ or _TARGET_ARM64_)
// other targets, we pass the struct by value
assert(varTypeIsStruct(type));
- arg = gtNewOperNode(GT_ADDR, TYP_BYREF, arg);
- addrNode = arg;
+ addrNode = gtNewOperNode(GT_ADDR, TYP_BYREF, arg);
// Get a new Obj node temp to use it as a call argument.
// gtNewObjNode will set the GTF_EXCEPT flag if this is not a local stack object.
- arg = gtNewObjNode(lvaGetStruct(tmpVarNum), arg);
+ arg = gtNewObjNode(lvaGetStruct(tmpVarNum), addrNode);
#endif // not (_TARGET_AMD64_ or _TARGET_ARM64_)
argObj = argObj->gtOp.gtOp2;
}
+ // TODO-1stClassStructs: An OBJ node should not be required for lclVars.
if (argObj->gtOper != GT_OBJ)
{
BADCODE("illegal argument tree in fgMorphArgs");
GenTreePtr dest = gtNewLclvNode(tmp, lvaTable[tmp].lvType);
dest->gtFlags |= (GTF_DONT_CSE | GTF_VAR_DEF); // This is a def of the local, "entire" by construction.
dest = gtNewOperNode(GT_ADDR, TYP_BYREF, dest);
+
+ // TODO-Cleanup: This probably shouldn't be done here because arg morphing is done prior
+ // to ref counting of the lclVars.
lvaTable[tmp].incRefCnts(compCurBB->getBBWeight(this), this);
GenTreePtr src;
{
genTreeOps oper = tree->gtOper;
- // Only xxBlk opcodes are possible
+ // This must be a block assignment.
noway_assert(tree->OperIsBlkOp());
-
- GenTreePtr dest = tree->gtOp.gtOp1->gtOp.gtOp1; // Dest address
- GenTreePtr src = tree->gtOp.gtOp1->gtOp.gtOp2; // Src
- GenTreePtr blkShape = tree->gtOp.gtOp2; // [size/clsHnd]
- bool volatil = tree->AsBlkOp()->IsVolatile();
+ var_types asgType = TYP_STRUCT;
+
+ GenTreePtr dest = tree->gtOp.gtOp1->gtOp.gtOp1; // Dest address
+ GenTreePtr src = tree->gtOp.gtOp1->gtOp.gtOp2; // Src
+ GenTreePtr blkShape = tree->gtOp.gtOp2; // [size/clsHnd]
+ bool volatil = tree->AsBlkOp()->IsVolatile();
+ unsigned destVarNum = BAD_VAR_NUM;
+ LclVarDsc* destVarDsc = nullptr;
GenTreePtr result;
- GenTreePtr lclVarTree;
+ GenTreePtr lclVarTree = nullptr;
+ bool isCopyBlock = tree->OperIsCopyBlkOp();
+ bool isInitBlock = !isCopyBlock;
// The dest must be an address
noway_assert(genActualType(dest->gtType) == TYP_I_IMPL || dest->gtType == TYP_BYREF);
// For COPYBLK the src must be an address
- noway_assert(!tree->OperIsCopyBlkOp() || (genActualType(src->gtType) == TYP_I_IMPL || src->gtType == TYP_BYREF));
+ noway_assert(!isCopyBlock || (genActualType( src->gtType) == TYP_I_IMPL || src->gtType == TYP_BYREF));
// For INITBLK the src must be a TYP_INT
noway_assert(oper != GT_INITBLK || (genActualType(src->gtType) == TYP_INT));
CORINFO_CLASS_HANDLE clsHnd;
size_t size;
- var_types type = TYP_UNDEF;
if (blkShape->gtOper != GT_CNS_INT)
{
- goto GENERAL_BLKOP;
+ return nullptr;
}
#ifdef FEATURE_SIMD
// routine will prevent rationalizer logic and we might end up with
// GT_ADDR(GT_SIMD) node post rationalization, leading to a noway assert
// in codegen.
+ // TODO-1stClassStructs: This is here to preserve old behavior.
+ // It should be eliminated.
if (src->OperGet() == GT_ADDR && src->gtGetOp1()->OperGet() == GT_SIMD)
{
- goto GENERAL_BLKOP;
+ return nullptr;
}
#endif
if (!blkShape->IsIconHandle())
{
- clsHnd = nullptr;
+ clsHnd = NO_CLASS_HANDLE;
size = blkShape->gtIntCon.gtIconVal;
-
- /* A four byte BLK_COPY can be treated as an integer asignment */
- if (size == 4)
- {
- type = TYP_INT;
- }
-#ifdef _TARGET_64BIT_
- if (size == 8)
- {
- type = TYP_LONG;
- }
-#endif
}
else
{
// The EE currently does not allow this, but we may change. Lets assert it
// just to be safe.
noway_assert(info.compCompHnd->getClassSize(clsHnd) == size);
-
- if (size == REGSIZE_BYTES)
- {
- BYTE gcPtr;
- info.compCompHnd->getClassGClayout(clsHnd, &gcPtr);
- type = getJitGCType(gcPtr);
- }
}
//
// [dest] [src]
//
- switch (size)
+ if (size == REGSIZE_BYTES)
{
- case 1:
- type = TYP_BYTE;
- goto ONE_SIMPLE_ASG;
- case 2:
- type = TYP_SHORT;
- goto ONE_SIMPLE_ASG;
+ if (clsHnd == NO_CLASS_HANDLE)
+ {
+ // A register-sized cpblk can be treated as an integer assignment.
+ asgType = TYP_I_IMPL;
+ }
+ else
+ {
+ BYTE gcPtr;
+ info.compCompHnd->getClassGClayout(clsHnd, &gcPtr);
+ asgType = getJitGCType(gcPtr);
+ }
+ }
+ else
+ {
+ switch (size)
+ {
+ case 1:
+ asgType = TYP_BYTE;
+ break;
+ case 2:
+ asgType = TYP_SHORT;
+ break;
#ifdef _TARGET_64BIT_
- case 4:
- type = TYP_INT;
- goto ONE_SIMPLE_ASG;
+ case 4:
+ asgType = TYP_INT;
+ break;
#endif // _TARGET_64BIT_
+ }
+ }
- case REGSIZE_BYTES:
- noway_assert(type != TYP_UNDEF);
-
- ONE_SIMPLE_ASG:
-
- noway_assert(size <= REGSIZE_BYTES);
-
- // For INITBLK, a non constant source is not going to allow us to fiddle
- // with the bits to create a single assigment.
+ // TODO-1stClassStructs: Change this to asgType != TYP_STRUCT.
+ if (!varTypeIsStruct(asgType))
+ {
+ // For initBlk, a non-constant source is not going to allow us to fiddle
+ // with the bits to create a single assignment.
+ noway_assert(size <= REGSIZE_BYTES);
- if ((oper == GT_INITBLK) && (src->gtOper != GT_CNS_INT))
- {
- goto GENERAL_BLKOP;
- }
+ if (isInitBlock && (src->gtOper != GT_CNS_INT))
+ {
+ return nullptr;
+ }
- if (impIsAddressInLocal(dest, &lclVarTree))
- {
+ bool needsIndirection = true;
+ if (impIsAddressInLocal(dest, &lclVarTree))
+ {
+ destVarNum = lclVarTree->AsLclVarCommon()->gtLclNum;
+ destVarDsc = &(lvaTable[destVarNum]);
+ }
+ if (destVarDsc != nullptr)
+ {
#if LOCAL_ASSERTION_PROP
- // Kill everything about dest
- if (optLocalAssertionProp)
+ // Kill everything about dest
+ if (optLocalAssertionProp)
+ {
+ if (optAssertionCount > 0)
{
- if (optAssertionCount > 0)
- {
- fgKillDependentAssertions(lclVarTree->gtLclVarCommon.gtLclNum DEBUGARG(tree));
- }
+ fgKillDependentAssertions(destVarNum DEBUGARG(tree));
}
+ }
#endif // LOCAL_ASSERTION_PROP
- unsigned lclNum = lclVarTree->gtLclVarCommon.gtLclNum;
- // A previous incarnation of this code also required the local not to be
- // address-exposed(=taken). That seems orthogonal to the decision of whether
- // to do field-wise assignments: being address-exposed will cause it to be
- // "dependently" promoted, so it will be in the right memory location. One possible
- // further reason for avoiding field-wise stores is that the struct might have alignment-induced
- // holes, whose contents could be meaningful in unsafe code. If we decide that's a valid
- // concern, then we could compromise, and say that address-exposed + fields do not completely cover the
- // memory of the struct prevent field-wise assignments. Same situation exists for the "src" decision.
- if (varTypeIsStruct(lclVarTree) && (lvaTable[lclNum].lvPromoted || lclVarIsSIMDType(lclNum)))
- {
-
- // Let fgMorphInitBlock handle it. (Since we'll need to do field-var-wise assignments.)
- goto GENERAL_BLKOP;
- }
- else if (!varTypeIsFloating(lclVarTree->TypeGet()) &&
- size == genTypeSize(var_types(lvaTable[lclNum].lvType)))
- {
- // Use the dest local var directly.
- dest = lclVarTree;
- type = lvaTable[lclNum].lvType; // Make the type used in the GT_IND node match
-
- // If the block operation had been a write to a local var of a small int type,
- // of the exact size of the small int type, and the var is NormalizeOnStore,
- // we would have labeled it GTF_VAR_USEASG, because the block operation wouldn't
- // have done that normalization. If we're now making it into an assignment,
- // the NormalizeOnStore will work, and it can be a full def.
- if (lvaTable[lclNum].lvNormalizeOnStore())
- {
- dest->gtFlags &= (~GTF_VAR_USEASG);
- }
+ // A previous incarnation of this code also required the local not to be
+ // address-exposed(=taken). That seems orthogonal to the decision of whether
+ // to do field-wise assignments: being address-exposed will cause it to be
+ // "dependently" promoted, so it will be in the right memory location. One possible
+ // further reason for avoiding field-wise stores is that the struct might have alignment-induced
+ // holes, whose contents could be meaningful in unsafe code. If we decide that's a valid
+ // concern, then we could compromise, and say that being address-exposed, plus having fields that do not
+ // completely cover the memory of the struct, prevents field-wise assignments. Same situation exists for the "src" decision.
+ if (varTypeIsStruct(lclVarTree) && (destVarDsc->lvPromoted || destVarDsc->lvIsSIMDType()))
+ {
+ // Let fgMorphInitBlock handle it. (Since we'll need to do field-var-wise assignments.)
+ return nullptr;
+ }
+ else if (!varTypeIsFloating(lclVarTree->TypeGet()) &&
+ size == genTypeSize(var_types(lvaTable[destVarNum].lvType)))
+ {
+ // Use the dest local var directly.
+ dest = lclVarTree;
+ asgType = destVarDsc->lvType; // Make the type used in the GT_IND node match
+ needsIndirection = false;
- goto _DoneDest;
- }
- else
+ // If the block operation had been a write to a local var of a small int type,
+ // of the exact size of the small int type, and the var is NormalizeOnStore,
+ // we would have labeled it GTF_VAR_USEASG, because the block operation wouldn't
+ // have done that normalization. If we're now making it into an assignment,
+ // the NormalizeOnStore will work, and it can be a full def.
+ if (destVarDsc->lvNormalizeOnStore())
{
- // Could be a non-promoted struct, or a floating point type local, or
- // an int subject to a partial write. Don't enregister.
- lvaSetVarDoNotEnregister(lclNum DEBUGARG(DNER_LocalField));
- // Fall through to indirect the dest node.
+ dest->gtFlags &= (~GTF_VAR_USEASG);
}
+ }
+ else
+ {
+ // Could be a non-promoted struct, or a floating point type local, or
+ // an int subject to a partial write. Don't enregister.
+ lvaSetVarDoNotEnregister(destVarNum DEBUGARG(DNER_LocalField));
+
// Mark the local var tree as a definition point of the local.
lclVarTree->gtFlags |= GTF_VAR_DEF;
- if (size < lvaTable[lclNum].lvExactSize)
+ if (size < destVarDsc->lvExactSize)
{ // If it's not a full-width assignment....
lclVarTree->gtFlags |= GTF_VAR_USEASG;
}
}
+ }
+ if (needsIndirection)
+ {
// Check to ensure we are not creating a reducible *(& ... )
if (dest->gtOper == GT_ADDR)
{
GenTreePtr addrOp = dest->gtOp.gtOp1;
// Ignore reinterpret casts between int/gc
- if ((addrOp->TypeGet() == type) ||
+ if ((addrOp->TypeGet() == asgType) ||
(varTypeIsIntegralOrI(addrOp) && (genTypeSize(addrOp->TypeGet()) == size)))
{
dest = addrOp;
- type = addrOp->TypeGet();
- goto _DoneDest;
+ asgType = addrOp->TypeGet();
+ needsIndirection = false;
}
}
+ }
+ if (needsIndirection)
+ {
// Indirect the dest node.
- dest = gtNewOperNode(GT_IND, type, dest);
+ dest = gtNewOperNode(GT_IND, asgType, dest);
// If we have no information about the destination, we have to assume it could
// live anywhere (not just in the GC heap).
{
dest->gtFlags |= (GTF_EXCEPT | GTF_GLOB_REF | GTF_IND_TGTANYWHERE);
}
+ }
- _DoneDest:;
-
- if (volatil)
- {
- dest->gtFlags |= GTF_DONT_CSE;
- }
+ if (volatil)
+ {
+ dest->gtFlags |= GTF_DONT_CSE;
+ }
- if (tree->OperIsCopyBlkOp())
+ LclVarDsc* srcVarDsc = nullptr;
+ if (isCopyBlock)
+ {
+ if (impIsAddressInLocal(src, &lclVarTree))
{
- if (impIsAddressInLocal(src, &lclVarTree))
+ srcVarDsc = &(lvaTable[lclVarTree->AsLclVarCommon()->gtLclNum]);
+ if (varTypeIsStruct(lclVarTree) && (srcVarDsc->lvPromoted || srcVarDsc->lvIsSIMDType()))
+ {
+ // Let fgMorphCopyBlock handle it.
+ return nullptr;
+ }
+ else if (!varTypeIsFloating(lclVarTree->TypeGet()) &&
+ size == genTypeSize(genActualType(lclVarTree->TypeGet())))
+ {
+ // Use the src local var directly.
+ src = lclVarTree;
+ }
+ else
{
- unsigned lclNum = lclVarTree->gtLclVarCommon.gtLclNum;
- if (varTypeIsStruct(lclVarTree) && (lvaTable[lclNum].lvPromoted || lclVarIsSIMDType(lclNum)))
- {
- // Let fgMorphCopyBlock handle it.
- goto GENERAL_BLKOP;
- }
- else if (!varTypeIsFloating(lclVarTree->TypeGet()) &&
- size == genTypeSize(genActualType(lclVarTree->TypeGet())))
- {
- /* Use the src local var directly */
- src = lclVarTree;
- goto _DoneSrc;
- }
- else
- {
#ifndef LEGACY_BACKEND
- // The source argument of the copyblk can potentially
- // be accessed only through indir(addr(lclVar))
- // or indir(lclVarAddr) in rational form and liveness
- // won't account for these uses. That said,
- // we have to mark this local as address exposed so
- // we don't delete it as a dead store later on.
- unsigned lclVarNum = lclVarTree->gtLclVarCommon.gtLclNum;
- lvaTable[lclVarNum].lvAddrExposed = true;
- lvaSetVarDoNotEnregister(lclVarNum DEBUGARG(DNER_AddrExposed));
+ // The source argument of the copyblk can potentially
+ // be accessed only through indir(addr(lclVar))
+ // or indir(lclVarAddr) in rational form and liveness
+ // won't account for these uses. That said,
+ // we have to mark this local as address exposed so
+ // we don't delete it as a dead store later on.
+ unsigned lclVarNum = lclVarTree->gtLclVarCommon.gtLclNum;
+ lvaTable[lclVarNum].lvAddrExposed = true;
+ lvaSetVarDoNotEnregister(lclVarNum DEBUGARG(DNER_AddrExposed));
#else // LEGACY_BACKEND
- lvaSetVarDoNotEnregister(lclVarTree->gtLclVarCommon.gtLclNum DEBUGARG(DNER_LocalField));
+ lvaSetVarDoNotEnregister(lclVarTree->gtLclVarCommon.gtLclNum DEBUGARG(DNER_LocalField));
#endif // LEGACY_BACKEND
-
- // Fall through to indirect the src node.
- }
}
+ }
+ if (src != lclVarTree)
+ {
// Indirect the src node.
- src = gtNewOperNode(GT_IND, type, src);
+ src = gtNewOperNode(GT_IND, asgType, src);
// If we have no information about the src, we have to assume it could
// live anywhere (not just in the GC heap).
{
src->gtFlags |= (GTF_EXCEPT | GTF_GLOB_REF | GTF_IND_TGTANYWHERE);
}
-
- _DoneSrc:;
-
- if (volatil)
- {
- src->gtFlags |= GTF_DONT_CSE;
- }
}
- else // (oper == GT_INITBLK)
+
+ if (volatil)
{
- // This will mutate the integer constant, in place, to be the correct
- // value for the type were are using in the assignment.
- src->AsIntCon()->FixupInitBlkValue(type);
+ src->gtFlags |= GTF_DONT_CSE;
}
+ }
+ else // (oper == GT_INITBLK)
+ {
+ // This will mutate the integer constant, in place, to be the correct
+ // value for the type we are using in the assignment.
+ src->AsIntCon()->FixupInitBlkValue(asgType);
+ }
- /* Create the assignment node */
+ // Create the assignment node.
- result = gtNewAssignNode(dest, src);
- result->gtType = type;
+ result = gtNewAssignNode(dest, src);
+ result->gtType = asgType;
- return result;
+ return result;
}
-GENERAL_BLKOP:
-
return nullptr;
}
}
else
{
- // Change X into *X
- // First, save the original type, then change the tree type to TYP_BYREF (otherwise we
- // will get an assert when we try to clone the lclVar node because the lclVar is now TYP_BYREF
- // and the types have to match). The reason we clone the lclVar is that we don't pass a
- // possible-modified tree back to the caller, so we modify the original lclVar node in-place
- // to the GT_IND.
+ // Change X into *X.
var_types structType = tree->gtType;
- lclVarTree = gtClone(tree);
- // Now, set the types appropriately.
- lclVarTree->gtType = TYP_BYREF;
- tree->gtType = structType;
- // Now, "insert" the GT_IND by changing the oper of the original node and setting its op1.
- tree->SetOper(GT_IND);
- tree->gtOp.gtOp1 = lclVarTree;
+ tree->gtType = TYP_BYREF;
+ tree = gtNewOperNode(GT_IND, structType, tree);
// TODO-CQ: If the VM ever stops violating the ABI and passing heap references
// we could remove TGTANYWHERE
tree->gtFlags = ((tree->gtFlags & GTF_COMMON_MASK) | GTF_IND_TGTANYWHERE);
CORINFO_CLASS_HANDLE fldClass = info.compCompHnd->getFieldClass(tree->gtField.gtFldHnd);
return width <= info.compCompHnd->getClassSize(fldClass);
}
+ else if (tree->OperGet() == GT_INDEX)
+ {
+ return width <= tree->gtIndex.gtIndElemSize;
+ }
else
{
return false;
case GT_LCL_FLD:
case GT_STORE_LCL_FLD:
+ // TODO-1stClassStructs: Eliminate this.
FixupIfSIMDLocal(comp, tree->AsLclVarCommon());
break;
GenTreeSIMD* simdTree = (*ppTree)->AsSIMD();
simdSize = simdTree->gtSIMDSize;
var_types simdType = comp->getSIMDTypeForSize(simdSize);
- // TODO-Cleanup: This is no-longer required once we plumb SIMD types thru front-end
+ // TODO-1stClassStructs: This should be handled more generally for enregistered or promoted
+ // structs that are passed or returned in a different register type than their enregistered
+ // type(s).
if (simdTree->gtType == TYP_I_IMPL && simdTree->gtSIMDSize == TARGET_POINTER_SIZE)
{
// This happens when it is consumed by a GT_RET_EXPR.
// Return Value:
// None.
//
-// TODO-Cleanup: Once SIMD types are plumbed through the frontend, this will no longer
-// be required.
+// TODO-1stClassStructs: This is now only here to preserve existing behavior. It is actually not
+// desirable to change the lclFld nodes back to TYP_SIMD (it will cause them to be loaded
+// into a vector register, and then moved to an int register).
void Rationalizer::FixupIfSIMDLocal(Compiler* comp, GenTreeLclVarCommon* tree)
{
{
GenTreePtr lhs = tree->gtOp.gtOp1->gtEffectiveVal(/*commaOnly*/ true);
GenTreePtr trueLhs = lhs->gtEffectiveVal(/*commaOnly*/ true);
- if (trueLhs->OperGet() == GT_IND)
+ if (trueLhs->OperIsIndir())
{
trueLhs->gtFlags |= GTF_IND_ASG_LHS;
}
ValueNum excVN,
FieldSeqNode* fldSeq)
{
- assert(tree == nullptr || tree->OperGet() == GT_IND);
+ assert(tree == nullptr || tree->OperIsIndir());
// The VN inputs are required to be non-exceptional values.
assert(arrVN == vnStore->VNNormVal(arrVN));
void Compiler::fgValueNumberBlockAssignment(GenTreePtr tree, bool evalAsgLhsInd)
{
+ GenTree* lhsAddr = tree->gtGetOp1()->gtGetOp1();
+ GenTree* rhsAddrOrVal = tree->gtGetOp1()->gtGetOp2();
+
#ifdef DEBUG
// Sometimes we query the heap ssa map, and need a dummy location for the ignored result.
unsigned heapSsaNum;
#endif
- if (tree->OperGet() == GT_INITBLK)
+ if (tree->OperIsInitBlkOp())
{
GenTreeLclVarCommon* lclVarTree;
bool isEntire;
unsigned lclDefSsaNum = GetSsaNumForLocalVarDef(lclVarTree);
ValueNum initBlkVN = ValueNumStore::NoVN;
- GenTreePtr initConst = tree->gtGetOp1()->gtGetOp2();
+ GenTreePtr initConst = rhsAddrOrVal;
if (isEntire && initConst->OperGet() == GT_CNS_INT)
{
unsigned initVal = 0xFF & (unsigned)initConst->AsIntConCommon()->IconValue();
// Should not have been recorded as updating the heap.
assert(!GetHeapSsaMap()->Lookup(tree, &heapSsaNum));
- unsigned lhsLclNum = lclVarTree->GetLclNum();
- LclVarDsc* rhsVarDsc = &lvaTable[lhsLclNum];
+ unsigned lhsLclNum = lclVarTree->GetLclNum();
// If it's excluded from SSA, don't need to do anything.
if (!fgExcludeFromSsa(lhsLclNum))
{
unsigned lclDefSsaNum = GetSsaNumForLocalVarDef(lclVarTree);
// For addr-of-local expressions, lib/cons shouldn't matter.
- assert(tree->gtOp.gtOp1->gtOp.gtOp1->gtVNPair.BothEqual());
- ValueNum lhsAddrVN = tree->gtOp.gtOp1->gtOp.gtOp1->GetVN(VNK_Liberal);
+ assert(lhsAddr->gtVNPair.BothEqual());
+ ValueNum lhsAddrVN = lhsAddr->GetVN(VNK_Liberal);
// Unpack the PtrToLoc value number of the address.
assert(vnStore->IsVNFunc(lhsAddrVN));
FieldSeqNode* lhsFldSeq = vnStore->FieldSeqVNToFieldSeq(lhsAddrFuncApp.m_args[1]);
// Now we need to get the proper RHS.
- GenTreePtr srcAddr = tree->gtOp.gtOp1->gtOp.gtOp2;
+ GenTreePtr srcAddr = rhsAddrOrVal;
VNFuncApp srcAddrFuncApp;
GenTreeLclVarCommon* rhsLclVarTree = nullptr;
FieldSeqNode* rhsFldSeq = nullptr;
var_types typ = tree->TypeGet();
if (GenTree::OperIsConst(oper))
{
- fgValueNumberTreeConst(tree);
+ // If this is a struct assignment, with a constant rhs, it is an initBlk, and it is not
+ // really useful to value number the constant.
+ if (!varTypeIsStruct(tree))
+ {
+ fgValueNumberTreeConst(tree);
+ }
}
else if (GenTree::OperIsLeaf(oper))
{
// But we didn't know that the parent was an op=. We do now, so go back and evaluate it.
// (We actually check if the effective val is the IND. We will have evaluated any non-last
// args of an LHS comma already -- including their heap effects.)
- GenTreePtr lhsVal = lhs->gtEffectiveVal(/*commaOnly*/ true);
- if ((lhsVal->OperGet() == GT_IND) || (lhsVal->OperGet() == GT_CLS_VAR))
+ GenTreePtr lhsVal = lhs->gtEffectiveVal(/*commaOnly*/true);
+ if (lhsVal->OperIsIndir() || (lhsVal->OperGet() == GT_CLS_VAR))
{
fgValueNumberTree(lhsVal, /*evalAsgLhsInd*/ true);
}
{
// They just cancel, so fetch the ValueNumber from the op1 of the GT_IND node.
//
- tree->gtVNPair = arg->gtOp.gtOp1->gtVNPair;
+ GenTree* addr = arg->AsIndir()->Addr();
+ tree->gtVNPair = addr->gtVNPair;
- // For the CSE phase mark the GT_IND child as GTF_DONT_CSE
+ // For the CSE phase mark the address as GTF_DONT_CSE
// because it will end up with the same value number as tree (the GT_ADDR).
- arg->gtOp.gtOp1->gtFlags |= GTF_DONT_CSE;
+ addr->gtFlags |= GTF_DONT_CSE;
}
}
else
{
// So far, we handle cases in which the address is a ptr-to-local, or if it's
// a pointer to an object field.
- GenTreePtr addr = tree->gtOp.gtOp1;
+ GenTreePtr addr = tree->AsIndir()->Addr();
GenTreeLclVarCommon* lclVarTree = nullptr;
FieldSeqNode* fldSeq1 = nullptr;
FieldSeqNode* fldSeq2 = nullptr;
ValueNumPair op2vnp;
ValueNumPair op2Xvnp = ValueNumStore::VNPForEmptyExcSet();
- if ((tree->gtOp.gtOp2->OperGet() == GT_IND) && (tree->gtOp.gtOp2->gtFlags & GTF_IND_ASG_LHS))
+ GenTree* op2 = tree->gtGetOp2();
+ if (op2->OperIsIndir() && ((op2->gtFlags & GTF_IND_ASG_LHS) != 0))
{
// If op2 represents the lhs of an assignment then we give a VNForVoid for the lhs
op2vnp = ValueNumPair(ValueNumStore::VNForVoid(), ValueNumStore::VNForVoid());
}
- else if ((tree->gtOp.gtOp2->OperGet() == GT_CLS_VAR) &&
- (tree->gtOp.gtOp2->gtFlags & GTF_CLS_VAR_ASG_LHS))
+ else if ((op2->OperGet() == GT_CLS_VAR) && (op2->gtFlags & GTF_CLS_VAR_ASG_LHS))
{
// If op2 represents the lhs of an assignment then we give a VNForVoid for the lhs
op2vnp = ValueNumPair(ValueNumStore::VNForVoid(), ValueNumStore::VNForVoid());
}
else
{
- vnStore->VNPUnpackExc(tree->gtOp.gtOp2->gtVNPair, &op2vnp, &op2Xvnp);
+ vnStore->VNPUnpackExc(op2->gtVNPair, &op2vnp, &op2Xvnp);
}
tree->gtVNPair = vnStore->VNPWithExc(op2vnp, vnStore->VNPExcSetUnion(op1Xvnp, op2Xvnp));