break;
}
-#if FEATURE_PREVENT_BAD_BYREFS
-
// Don't split up a byref calculation and create a new byref. E.g.,
// [byref]+ (ref, [int]+ (int, int)) => [byref]+ ([byref]+ (ref, int), int).
// Doing this transformation could create a situation where the first
break;
}
-#endif // FEATURE_PREVENT_BAD_BYREFS
-
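// A minimal sketch of the hazard (using gtNewOperNode as elsewhere in this
// file; 'arrRef' is a TYP_REF value, 'i' and 'c' are TYP_INT operands, and
// all of these names are illustrative only):
//
//     // Safe: a single byref formed from the ref plus the complete offset.
//     GenTree* offs = gtNewOperNode(GT_ADD, TYP_INT, i, c);
//     GenTree* addr = gtNewOperNode(GT_ADD, TYP_BYREF, arrRef, offs);
//
//     // After the reassociation, an intermediate byref appears:
//     GenTree* part  = gtNewOperNode(GT_ADD, TYP_BYREF, arrRef, i); // may point outside the object
//     GenTree* addr2 = gtNewOperNode(GT_ADD, TYP_BYREF, part, c);
//
// If a GC occurs while only 'part' is live, it may not be updated correctly,
// since a byref is only reported reliably while it points within its object.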
/* Change "(x op (y op z))" to "(x op y) op z" */
/* ie. "(op1 op (ad1 op ad2))" to "(op1 op ad1) op ad2" */
addr = index;
}
-#if FEATURE_PREVENT_BAD_BYREFS
-
// Be careful to only create the byref pointer when the full index expression is added to the array reference.
// We don't want to create a partial byref address expression that doesn't include the full index offset:
// a byref must point within the containing object. It is dangerous (especially when optimizations come into
// play) to create a "partial" byref that doesn't point exactly to the correct object; there is risk that
// the partial byref will not point within the object, and thus not get updated correctly during a GC.
// This is mostly a risk in fully-interruptible code regions.
+ //
+ // NOTE: the tree form created here is pattern matched by optExtractArrIndex(), so changes here must
+ // be reflected there.
/* Add the first element's offset */
addr = gtNewOperNode(GT_ADD, TYP_BYREF, arrRef, addr);
-#else // !FEATURE_PREVENT_BAD_BYREFS
-
- /* Add the object ref to the element's offset */
-
- addr = gtNewOperNode(GT_ADD, TYP_BYREF, arrRef, addr);
-
- /* Add the first element's offset */
-
- GenTree* cns = gtNewIconNode(elemOffs, TYP_I_IMPL);
-
- addr = gtNewOperNode(GT_ADD, TYP_BYREF, addr, cns);
-
-#endif // !FEATURE_PREVENT_BAD_BYREFS
-
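// For contrast, a sketch of the two address shapes ('arrRef' and 'elemOffs'
// as in the code above; 'index' stands for the scaled index expression):
//
//     Old shape (removed above): a "partial" byref is formed before the
//     first-element offset is added:
//         ADD(byref)(ADD(byref)(arrRef, index), elemOffs)
//
//     New shape: elemOffs is folded into the index computation beforehand,
//     so the single byref created already carries the full element offset:
//         ADD(byref)(arrRef, ADD(index, elemOffs))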
#if SMALL_TREE_NODES
assert((tree->gtDebugFlags & GTF_DEBUG_NODE_LARGE) || GenTree::s_gtNodeSizes[GT_IND] == TREE_NODE_SZ_SMALL);
#endif
GenTree* cnsOff = nullptr;
if (addr->OperGet() == GT_ADD)
{
-
-#if FEATURE_PREVENT_BAD_BYREFS
-
assert(addr->TypeGet() == TYP_BYREF);
assert(addr->gtOp.gtOp1->TypeGet() == TYP_REF);
// Label any constant array index contributions with #ConstantIndex and any LclVars with GTF_VAR_ARR_INDEX
addr->LabelIndex(this);
}
-
-#else // !FEATURE_PREVENT_BAD_BYREFS
-
- if (addr->gtOp.gtOp2->gtOper == GT_CNS_INT)
- {
- cnsOff = addr->gtOp.gtOp2;
- addr = addr->gtOp.gtOp1;
- }
-
- while ((addr->OperGet() == GT_ADD) || (addr->OperGet() == GT_SUB))
- {
- assert(addr->TypeGet() == TYP_BYREF);
- GenTree* index = addr->gtOp.gtOp2;
-
- // Label any constant array index contributions with #ConstantIndex and any LclVars with GTF_VAR_ARR_INDEX
- index->LabelIndex(this);
-
- addr = addr->gtOp.gtOp1;
- }
- assert(addr->TypeGet() == TYP_REF);
-
-#endif // !FEATURE_PREVENT_BAD_BYREFS
}
else if (addr->OperGet() == GT_CNS_INT)
{
/* Fold "((x+icon1)+icon2) to (x+(icon1+icon2))" */
CLANG_FORMAT_COMMENT_ANCHOR;
-#if FEATURE_PREVENT_BAD_BYREFS
-
if (op1->gtOper == GT_ADD && //
!gtIsActiveCSE_Candidate(op1) && //
!op1->gtOverflow() && //
(op1->gtOp.gtOp2->OperGet() == op2->OperGet()) && //
(op1->gtOp.gtOp2->TypeGet() != TYP_REF) && // Don't fold REFs
(op2->TypeGet() != TYP_REF)) // Don't fold REFs
-
-#else // !FEATURE_PREVENT_BAD_BYREFS
-
- if (op1->gtOper == GT_ADD && !gtIsActiveCSE_Candidate(op1) && op1->gtOp.gtOp2->IsCnsIntOrI() &&
- !op1->gtOverflow() && op1->gtOp.gtOp2->OperGet() == op2->OperGet())
-
-#endif // !FEATURE_PREVENT_BAD_BYREFS
-
{
cns1 = op1->gtOp.gtOp2;
op2->gtIntConCommon.SetIconValue(cns1->gtIntConCommon.IconValue() +
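// A sketch of the fold itself, under the assumption (from the condition
// above) that op1 = ADD(x, cns1) and op2 is an integer constant; the two
// constants are merged so the tree becomes ADD(x, cns1 + cns2):
//
//     op2->gtIntConCommon.SetIconValue(cns1->gtIntConCommon.IconValue() +
//                                      op2->gtIntConCommon.IconValue());
//     // ... op1 is then replaced by its non-constant operand x.
//
// The new TYP_REF tests matter because a GT_CNS_INT node can itself be typed
// TYP_REF (e.g. null or an object handle); per the "Don't fold REFs"
// comments, such constants must not be merged into a plain integer offset.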
//
// TODO-CQ: CLONE: After morph make sure this method extracts values before morph.
//
-// [000000001AF828D8] ---XG------- indir int
-// [000000001AF872C8] ------------ const long 16 Fseq[#FirstElem]
-// [000000001AF87340] ------------ + byref
-// [000000001AF87160] -------N---- const long 2
-// [000000001AF871D8] ------------ << long
-// [000000001AF870C0] ------------ cast long <- int
-// [000000001AF86F30] i----------- lclVar int V04 loc0
-// [000000001AF87250] ------------ + byref
-// [000000001AF86EB8] ------------ lclVar ref V01 arg1
-// [000000001AF87468] ---XG------- comma int
-// [000000001AF87020] ---X-------- arrBndsChk void
-// [000000001AF86FA8] ---X-------- arrLen int
-// [000000001AF827E8] ------------ lclVar ref V01 arg1
-// [000000001AF82860] ------------ lclVar int V04 loc0
-// [000000001AF829F0] -A-XG------- = int
-// [000000001AF82978] D------N---- lclVar int V06 tmp0
-//
+// [000024] ------------ * STMT void(IL 0x007...0x00C)
+// [000021] a--XG+------ | /--* IND int
+// [000045] -----+------ | | | /--* CNS_INT long 16 Fseq[#FirstElem]
+// [000046] -----+------ | | | /--* ADD long
+// [000043] -----+-N---- | | | | | /--* CNS_INT long 2
+// [000044] -----+------ | | | | \--* LSH long
+// [000042] -----+------ | | | | \--* CAST long <- int
+// [000039] i----+------ | | | | \--* LCL_VAR int V04 loc0
+// [000047] -----+------ | | \--* ADD byref
+// [000038] -----+------ | | \--* LCL_VAR ref V00 arg0
+// [000048] ---XG+------ | /--* COMMA int
+// [000041] ---X-+------ | | \--* ARR_BOUNDS_CHECK_Rng void
+// [000020] -----+------ | | +--* LCL_VAR int V04 loc0
+// [000040] ---X-+------ | | \--* ARR_LENGTH int
+// [000019] -----+------ | | \--* LCL_VAR ref V00 arg0
+// [000023] -A-XG+------ \--* ASG int
+// [000022] D----+-N---- \--* LCL_VAR int V06 tmp1
+
bool Compiler::optExtractArrIndex(GenTree* tree, ArrIndex* result, unsigned lhsNum)
{
if (tree->gtOper != GT_COMMA)
return false;
}
- GenTree* sibo = after->gtGetOp1();
+ GenTree* sibo = after->gtGetOp1(); // sibo = scale*index + base + offset
if (sibo->gtOper != GT_ADD)
{
return false;
}
- GenTree* sib = sibo->gtGetOp1();
- GenTree* ofs = sibo->gtGetOp2();
- if (ofs->gtOper != GT_CNS_INT)
+ GenTree* base = sibo->gtGetOp1();
+ GenTree* sio = sibo->gtGetOp2(); // sio = scale*index + offset
+ if (base->OperGet() != GT_LCL_VAR || base->gtLclVarCommon.gtLclNum != arrLcl)
{
return false;
}
- if (sib->gtOper != GT_ADD)
+ if (sio->gtOper != GT_ADD)
{
return false;
}
- GenTree* si = sib->gtGetOp2();
- GenTree* base = sib->gtGetOp1();
- if (si->gtOper != GT_LSH)
+ GenTree* ofs = sio->gtGetOp2();
+ GenTree* si = sio->gtGetOp1(); // si = scale*index
+ if (ofs->gtOper != GT_CNS_INT)
{
return false;
}
- if (base->OperGet() != GT_LCL_VAR || base->gtLclVarCommon.gtLclNum != arrLcl)
+ if (si->gtOper != GT_LSH)
{
return false;
}
{
return false;
}
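// A sketch of the shape the rewritten checks walk, tying the locals above to
// the dump before optExtractArrIndex (for each ADD in that dump style, op2
// is the subtree printed above the node):
//
//     sibo: ADD(byref)
//       op1 -> base: LCL_VAR ref             (must be 'arrLcl')
//       op2 -> sio:  ADD(long)
//                op1 -> si:  LSH(CAST(long <- int) index, CNS_INT 2)
//                op2 -> ofs: CNS_INT 16 Fseq[#FirstElem]
//
// That is, base + (scale*index + offset), where the old code instead matched
// (base + scale*index) + offset.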
-#ifdef _TARGET_AMD64_
+#ifdef _TARGET_AMD64_ // TODO-ARM64: should this be _TARGET_64BIT_? If not, add comment why not.
if (index->gtOper != GT_CAST)
{
return false;
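// On 64-bit targets the int-typed index is widened before scaling (the
// [000042] CAST long <- int node in the dump above), hence the GT_CAST
// expected here. A sketch of unwrapping it ('indexVal' is illustrative;
// gtGetOp1 is used the same way earlier in this function):
//
//     GenTree* indexVal = index->gtGetOp1(); // index expression below the widening cast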
// #define PSEUDORANDOM_NOP_INSERTION
// #endif
-// TODO-Cleanup: FEATURE_PREVENT_BAD_BYREFS guards code that prevents creating byref pointers to array elements
-// that are not "complete". That is, it only allows byref pointers to the exact array element, not to a portion
-// of the address expression leading to the full addressing expression. This prevents the possibility of creating
-// an illegal byref, which is an expression that points outside of the "host" object. Such bad byrefs won't get
-// updated properly during a GC, leaving them to point to garbage. This led to a GC hole on ARM due to ARM's
-// limited addressing modes (found in GCStress). The change is applicable and possibly desirable for other platforms,
-// but was put under ifdef to avoid introducing potential destabilizing change to those platforms at the end of the
-// .NET Core 2.1 ship cycle. More detail here: https://github.com/dotnet/coreclr/pull/17524. Consider making this
-// all-platform and removing these #ifdefs.
-#if defined(_TARGET_ARM_)
-#define FEATURE_PREVENT_BAD_BYREFS 1
-#else
-#define FEATURE_PREVENT_BAD_BYREFS 0
-#endif
-
/*****************************************************************************/
// clang-format off