From: SingleAccretion <62474226+SingleAccretion@users.noreply.github.com>
Date: Sat, 6 May 2023 00:45:26 +0000 (+0300)
Subject: Address `TODO-FIELD`s and delete quirks (#85735)
X-Git-Tag: accepted/tizen/unified/riscv/20231226.055536~2411
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=e61e022b3c3aabab29dc3a8d0fa2cb19563f9bb6;p=platform%2Fupstream%2Fdotnet%2Fruntime.git

Address `TODO-FIELD`s and delete quirks (#85735)

* Adjust a comment

* Simplify CreateAddressNodeForSimdHWIntrinsicCreate

No diffs.

* Limit the usages of gtNewFieldIndirNode

One tiny regression in a Regex method due to us not clearing GLOB_REF where not needed.

* Delete the side effects quirk

About neutral in improvements and regressions. Both come from the differences in the handling of unused indirs and NULLCHECK nodes.

* Add a convenience gtNewFieldAddrNode overload
---
diff --git a/src/coreclr/jit/compiler.h b/src/coreclr/jit/compiler.h
index ee1ed8d..2e83e36 100644
--- a/src/coreclr/jit/compiler.h
+++ b/src/coreclr/jit/compiler.h
@@ -2815,6 +2815,11 @@ public:
                                          GenTree*             obj    = nullptr,
                                          DWORD                offset = 0);
 
+    GenTreeFieldAddr* gtNewFieldAddrNode(CORINFO_FIELD_HANDLE fldHnd, GenTree* obj, unsigned offset)
+    {
+        return gtNewFieldAddrNode(varTypeIsGC(obj) ? TYP_BYREF : TYP_I_IMPL, fldHnd, obj, offset);
+    }
+
     GenTreeIndir* gtNewFieldIndirNode(var_types type, ClassLayout* layout, GenTreeFieldAddr* addr);
 
     GenTreeIndexAddr* gtNewIndexAddr(GenTree* arrayOp,
diff --git a/src/coreclr/jit/gentree.cpp b/src/coreclr/jit/gentree.cpp
index dded592..b852f15 100644
--- a/src/coreclr/jit/gentree.cpp
+++ b/src/coreclr/jit/gentree.cpp
@@ -16308,17 +16308,6 @@ bool Compiler::gtNodeHasSideEffects(GenTree* tree, GenTreeFlags flags)
         {
             return true;
         }
-
-        // TODO-FIELD: delete this zero-diff quirk.
-        if (tree->OperIsIndir() && tree->AsIndir()->Addr()->OperIs(GT_FIELD_ADDR))
-        {
-            GenTreeFieldAddr* addr = tree->AsIndir()->Addr()->AsFieldAddr();
-            if (addr->IsInstance() && ((addr->gtFlags & GTF_FLD_DEREFERENCED) != 0) &&
-                fgAddrCouldBeNull(addr->GetFldObj()))
-            {
-                return true;
-            }
-        }
     }
 
     // Expressions declared as CSE by (e.g.) hoisting code are considered to have relevant side
diff --git a/src/coreclr/jit/importer.cpp b/src/coreclr/jit/importer.cpp
index 09b8ba4..1c3fb16 100644
--- a/src/coreclr/jit/importer.cpp
+++ b/src/coreclr/jit/importer.cpp
@@ -1115,14 +1115,7 @@ GenTree* Compiler::impGetStructAddr(GenTree* structVal, unsigned curLevel, bool
         case GT_IND:
             if (willDeref)
             {
-                GenTree* addr = structVal->AsIndir()->Addr();
-                if (addr->OperIs(GT_FIELD_ADDR))
-                {
-                    // TODO-FIELD: delete this zero-diff quirk.
-                    addr->gtFlags &= ~GTF_FLD_DEREFERENCED;
-                }
-
-                return addr;
+                return structVal->AsIndir()->Addr();
             }
             break;
 
@@ -9157,8 +9150,7 @@ void Compiler::impImportBlockCode(BasicBlock* block)
                     obj = impGetStructAddr(obj, CHECK_SPILL_ALL, true);
                 }
 
-                op1 = gtNewFieldAddrNode(varTypeIsGC(obj) ? TYP_BYREF : TYP_I_IMPL, resolvedToken.hField, obj,
-                                         fieldInfo.offset);
+                op1 = gtNewFieldAddrNode(resolvedToken.hField, obj, fieldInfo.offset);
 
 #ifdef FEATURE_READYTORUN
                 if (fieldInfo.fieldAccessor == CORINFO_FIELD_INSTANCE_WITH_BASE)
@@ -9464,8 +9456,7 @@ void Compiler::impImportBlockCode(BasicBlock* block)
                 case CORINFO_FIELD_INSTANCE_WITH_BASE:
 #endif
                 {
-                    op1 = gtNewFieldAddrNode(varTypeIsGC(obj) ? TYP_BYREF : TYP_I_IMPL, resolvedToken.hField, obj,
-                                             fieldInfo.offset);
+                    op1 = gtNewFieldAddrNode(resolvedToken.hField, obj, fieldInfo.offset);
 
 #ifdef FEATURE_READYTORUN
                     if (fieldInfo.fieldAccessor == CORINFO_FIELD_INSTANCE_WITH_BASE)
diff --git a/src/coreclr/jit/importercalls.cpp b/src/coreclr/jit/importercalls.cpp
index b91d1a1..e80f210 100644
--- a/src/coreclr/jit/importercalls.cpp
+++ b/src/coreclr/jit/importercalls.cpp
@@ -2897,10 +2897,9 @@ GenTree* Compiler::impIntrinsic(GenTree* newobjThis,
                 CORINFO_FIELD_HANDLE lengthHnd    = info.compCompHnd->getFieldInClass(clsHnd, 1);
                 const unsigned       lengthOffset = info.compCompHnd->getFieldOffset(lengthHnd);
 
-                GenTreeFieldAddr* lengthFieldAddr =
-                    gtNewFieldAddrNode(genActualType(ptrToSpan), lengthHnd, ptrToSpan, lengthOffset);
+                GenTreeFieldAddr* lengthFieldAddr = gtNewFieldAddrNode(lengthHnd, ptrToSpan, lengthOffset);
+                GenTree*          length          = gtNewIndir(TYP_INT, lengthFieldAddr);
                 lengthFieldAddr->SetIsSpanLength(true);
-                GenTree* length = gtNewFieldIndirNode(TYP_INT, nullptr, lengthFieldAddr);
 
                 GenTree* boundsCheck = new (this, GT_BOUNDS_CHECK) GenTreeBoundsChk(index, length, SCK_RNGCHK_FAIL);
 
@@ -2914,12 +2913,11 @@ GenTree* Compiler::impIntrinsic(GenTree* newobjThis,
                     index = gtNewOperNode(GT_MUL, TYP_I_IMPL, index, sizeofNode);
                 }
 
-                CORINFO_FIELD_HANDLE ptrHnd    = info.compCompHnd->getFieldInClass(clsHnd, 0);
-                const unsigned       ptrOffset = info.compCompHnd->getFieldOffset(ptrHnd);
-                GenTreeFieldAddr* dataFieldAddr =
-                    gtNewFieldAddrNode(genActualType(ptrToSpanClone), ptrHnd, ptrToSpanClone, ptrOffset);
-                GenTree* data   = gtNewFieldIndirNode(TYP_BYREF, nullptr, dataFieldAddr);
-                GenTree* result = gtNewOperNode(GT_ADD, TYP_BYREF, data, index);
+                CORINFO_FIELD_HANDLE ptrHnd        = info.compCompHnd->getFieldInClass(clsHnd, 0);
+                const unsigned       ptrOffset     = info.compCompHnd->getFieldOffset(ptrHnd);
+                GenTreeFieldAddr*    dataFieldAddr = gtNewFieldAddrNode(ptrHnd, ptrToSpanClone, ptrOffset);
+                GenTree*             data          = gtNewIndir(TYP_BYREF, dataFieldAddr);
+                GenTree*             result        = gtNewOperNode(GT_ADD, TYP_BYREF, data, index);
 
                 // Prepare result
                 var_types resultType = JITtype2varType(sig->retType);
@@ -2962,10 +2960,9 @@ GenTree* Compiler::impIntrinsic(GenTree* newobjThis,
                 CORINFO_FIELD_HANDLE lengthHnd    = info.compCompHnd->getFieldInClass(clsHnd, 1);
                 const unsigned       lengthOffset = info.compCompHnd->getFieldOffset(lengthHnd);
 
-                GenTreeFieldAddr* lengthFieldAddr =
-                    gtNewFieldAddrNode(genActualType(ptrToSpan), lengthHnd, ptrToSpan, lengthOffset);
+                GenTreeFieldAddr* lengthFieldAddr = gtNewFieldAddrNode(lengthHnd, ptrToSpan, lengthOffset);
+                GenTree*          lengthField     = gtNewIndir(TYP_INT, lengthFieldAddr);
                 lengthFieldAddr->SetIsSpanLength(true);
-                GenTree* lengthField = gtNewFieldIndirNode(TYP_INT, nullptr, lengthFieldAddr);
 
                 return lengthField;
             }
diff --git a/src/coreclr/jit/importervectorization.cpp b/src/coreclr/jit/importervectorization.cpp
index 1d230cc..141073b 100644
--- a/src/coreclr/jit/importervectorization.cpp
+++ b/src/coreclr/jit/importervectorization.cpp
@@ -787,10 +787,10 @@ GenTree* Compiler::impSpanEqualsOrStartsWith(bool startsWith, CORINFO_SIG_INFO*
     GenTreeLclVar* spanObjRefLcl  = gtNewLclvNode(spanObjRef, TYP_BYREF);
     GenTreeLclVar* spanDataTmpLcl = gtNewLclvNode(spanDataTmp, TYP_BYREF);
 
-    GenTreeFieldAddr* spanLengthAddr = gtNewFieldAddrNode(TYP_BYREF, lengthHnd, gtClone(spanObjRefLcl), lengthOffset);
-    GenTree* spanLength = gtNewFieldIndirNode(TYP_INT, nullptr, spanLengthAddr);
-    GenTreeFieldAddr* spanDataAddr = gtNewFieldAddrNode(TYP_BYREF, pointerHnd, spanObjRefLcl);
-    GenTree* spanData = gtNewFieldIndirNode(TYP_BYREF, nullptr, spanDataAddr);
+    GenTreeFieldAddr* spanLengthAddr = gtNewFieldAddrNode(lengthHnd, gtClone(spanObjRefLcl), lengthOffset);
+    GenTree*          spanLength     = gtNewIndir(TYP_INT, spanLengthAddr);
+    GenTreeFieldAddr* spanDataAddr   = gtNewFieldAddrNode(pointerHnd, spanObjRefLcl, 0);
+    GenTree*          spanData       = gtNewIndir(TYP_BYREF, spanDataAddr);
 
     GenTree* unrolled = impExpandHalfConstEquals(spanDataTmpLcl, spanLength, false, startsWith, (WCHAR*)str,
                                                  cnsLength, 0, cmpMode);
diff --git a/src/coreclr/jit/morph.cpp b/src/coreclr/jit/morph.cpp
index 5203e1c..e6c276d 100644
--- a/src/coreclr/jit/morph.cpp
+++ b/src/coreclr/jit/morph.cpp
@@ -4436,20 +4436,12 @@ GenTree* Compiler::fgMorphIndexAddr(GenTreeIndexAddr* indexAddr)
     // If the arrRef or index expressions involves an assignment, a call, or reads from global memory,
     // then we *must* allocate a temporary in which to "localize" those values, to ensure that the
     // same values are used in the bounds check and the actual dereference.
-    // Also we allocate the temporary when the expression is sufficiently complex/expensive.
+    // Also we allocate the temporary when the expression is sufficiently complex/expensive. We special
+    // case some simple nodes for which CQ analysis shows it is a little better to do that here than
+    // leaving them to CSE.
     //
-    // TODO-FIELD: review below comment.
+    // TODO-Bug: GLOB_REF is not yet set for all trees in pre-order morph.
     //
-    // Note that if the expression is a GT_FIELD, it has not yet been morphed so its true complexity is
-    // not exposed. Without that condition there are cases of local struct fields that were previously,
-    // needlessly, marked as GTF_GLOB_REF, and when that was fixed, there were some regressions that
-    // were mostly ameliorated by adding this condition.
-    //
-    // Likewise, allocate a temporary if the expression is a GT_LCL_FLD node. These used to be created
-    // after fgMorphIndexAddr from GT_FIELD trees so this preserves the existing behavior. This is
-    // perhaps a decision that should be left to CSE but FX diffs show that it is slightly better to
-    // do this here. Likewise for implicit byrefs.
-
     if (((arrRef->gtFlags & (GTF_ASG | GTF_CALL | GTF_GLOB_REF)) != 0) ||
         gtComplexityExceeds(arrRef, MAX_ARR_COMPLEXITY) || arrRef->OperIs(GT_LCL_FLD) ||
         (arrRef->OperIs(GT_LCL_VAR) && lvaIsLocalImplicitlyAccessedByRef(arrRef->AsLclVar()->GetLclNum())))
diff --git a/src/coreclr/jit/simd.cpp b/src/coreclr/jit/simd.cpp
index 28768fe..88e1dd6 100644
--- a/src/coreclr/jit/simd.cpp
+++ b/src/coreclr/jit/simd.cpp
@@ -689,69 +689,43 @@ bool Compiler::areArgumentsContiguous(GenTree* op1, GenTree* op2)
 GenTree* Compiler::CreateAddressNodeForSimdHWIntrinsicCreate(GenTree* tree, var_types simdBaseType, unsigned simdSize)
 {
     assert(tree->OperIs(GT_IND));
-    GenTree*  addr      = tree->AsIndir()->Addr();
-    GenTree*  byrefNode = nullptr;
-    unsigned  offset    = 0;
-    var_types baseType  = tree->gtType;
+    GenTree* addr = tree->AsIndir()->Addr();
 
     if (addr->OperIs(GT_FIELD_ADDR))
     {
         assert(addr->AsFieldAddr()->IsInstance());
 
+        // If the field is directly from a struct, then in this case, we should set this
+        // struct's lvUsedInSIMDIntrinsic as true, so that this struct won't be promoted.
         GenTree* objRef = addr->AsFieldAddr()->GetFldObj();
-        if (objRef->IsLclVarAddr())
+        if (objRef->IsLclVarAddr() && varTypeIsSIMD(lvaGetDesc(objRef->AsLclFld())))
        {
-            // If the field is directly from a struct, then in this case,
-            // we should set this struct's lvUsedInSIMDIntrinsic as true,
-            // so that this sturct won't be promoted.
-            // e.g. s.x x is a field, and s is a struct, then we should set the s's lvUsedInSIMDIntrinsic as true.
-            // so that s won't be promoted.
-            // Notice that if we have a case like s1.s2.x. s1 s2 are struct, and x is a field, then it is possible that
-            // s1 can be promoted, so that s2 can be promoted. The reason for that is if we don't allow s1 to be
-            // promoted, then this will affect the other optimizations which are depend on s1's struct promotion.
-            // TODO-CQ:
-            // In future, we should optimize this case so that if there is a nested field like s1.s2.x and s1.s2.x's
-            // address is used for initializing the vector, then s1 can be promoted but s2 can't.
-            if (varTypeIsSIMD(lvaGetDesc(objRef->AsLclFld())))
-            {
-                setLclRelatedToSIMDIntrinsic(objRef);
-            }
+            setLclRelatedToSIMDIntrinsic(objRef);
         }
 
-        // TODO-FIELD: this seems unnecessary. Simply "return addr;"?
-        byrefNode = gtCloneExpr(objRef);
-        assert(byrefNode != nullptr);
-        offset = addr->AsFieldAddr()->gtFldOffset;
-    }
-    else
-    {
-        GenTree* arrayRef = addr->AsIndexAddr()->Arr();
-        GenTree* index    = addr->AsIndexAddr()->Index();
-        assert(index->IsCnsIntOrI());
-
-        GenTree* checkIndexExpr = nullptr;
-        unsigned indexVal       = (unsigned)index->AsIntCon()->gtIconVal;
-        offset                  = indexVal * genTypeSize(tree->TypeGet());
-
-        // Generate the boundary check exception.
-        // The length for boundary check should be the maximum index number which should be
-        // (first argument's index number) + (how many array arguments we have) - 1
-        // = indexVal + arrayElementsCount - 1
-        unsigned arrayElementsCount = simdSize / genTypeSize(simdBaseType);
-        checkIndexExpr              = gtNewIconNode(indexVal + arrayElementsCount - 1);
-        GenTreeArrLen* arrLen = gtNewArrLen(TYP_INT, arrayRef, (int)OFFSETOF__CORINFO_Array__length, compCurBB);
-        GenTreeBoundsChk* arrBndsChk =
-            new (this, GT_BOUNDS_CHECK) GenTreeBoundsChk(checkIndexExpr, arrLen, SCK_ARG_RNG_EXCPN);
-
-        offset += OFFSETOF__CORINFO_Array__data;
-        byrefNode = gtNewOperNode(GT_COMMA, arrayRef->TypeGet(), arrBndsChk, gtCloneExpr(arrayRef));
+        return addr;
     }
 
-    GenTree* address = byrefNode;
-    if (offset != 0)
-    {
-        address = gtNewOperNode(GT_ADD, TYP_BYREF, address, gtNewIconNode(offset, TYP_I_IMPL));
-    }
+    GenTree* arrayRef = addr->AsIndexAddr()->Arr();
+    GenTree* index    = addr->AsIndexAddr()->Index();
+    assert(index->IsCnsIntOrI());
+
+    unsigned indexVal = (unsigned)index->AsIntCon()->gtIconVal;
+    unsigned offset   = indexVal * genTypeSize(tree->TypeGet());
+
+    // Generate the boundary check exception.
+    // The length for boundary check should be the maximum index number which should be
+    // (first argument's index number) + (how many array arguments we have) - 1 = indexVal + arrayElementsCount - 1
+    //
+    unsigned arrayElementsCount = simdSize / genTypeSize(simdBaseType);
+    GenTree* checkIndexExpr     = gtNewIconNode(indexVal + arrayElementsCount - 1);
+    GenTreeArrLen* arrLen = gtNewArrLen(TYP_INT, arrayRef, (int)OFFSETOF__CORINFO_Array__length, compCurBB);
+    GenTreeBoundsChk* arrBndsChk =
+        new (this, GT_BOUNDS_CHECK) GenTreeBoundsChk(checkIndexExpr, arrLen, SCK_ARG_RNG_EXCPN);
+
+    offset += OFFSETOF__CORINFO_Array__data;
+    GenTree* address = gtNewOperNode(GT_COMMA, arrayRef->TypeGet(), arrBndsChk, gtCloneExpr(arrayRef));
+    address          = gtNewOperNode(GT_ADD, TYP_BYREF, address, gtNewIconNode(offset, TYP_I_IMPL));
 
     return address;
 }
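For readers who want to see the new convenience overload in isolation, here is a minimal, self-contained C++ sketch of what it does: it derives the field address type from the object operand, TYP_BYREF when that operand is a GC-tracked pointer and TYP_I_IMPL otherwise, so call sites no longer spell out the ternary themselves. The types and factories below (the reduced var_types enum, the mock GenTree and GenTreeFieldAddr, and both gtNewFieldAddrNode functions) are illustrative stand-ins for this sketch, not the real declarations from compiler.h.

// Illustration only: mock stand-ins for the JIT's GenTree types, not the real compiler.h declarations.
#include <cstdio>

enum var_types
{
    TYP_INT,
    TYP_REF,    // object reference, tracked by the GC
    TYP_BYREF,  // managed pointer, tracked by the GC
    TYP_I_IMPL  // native int / unmanaged pointer
};

struct GenTree
{
    var_types gtType;
};

// Mock of varTypeIsGC: GC-tracked types are object refs and byrefs.
static bool varTypeIsGC(const GenTree* tree)
{
    return (tree != nullptr) && ((tree->gtType == TYP_REF) || (tree->gtType == TYP_BYREF));
}

struct GenTreeFieldAddr : GenTree
{
    const void* fldHnd;
    GenTree*    obj;
    unsigned    offset;
};

// Full-form factory: the caller chooses the address type explicitly.
static GenTreeFieldAddr* gtNewFieldAddrNode(var_types type, const void* fldHnd, GenTree* obj, unsigned offset)
{
    GenTreeFieldAddr* addr = new GenTreeFieldAddr();
    addr->gtType = type;
    addr->fldHnd = fldHnd;
    addr->obj    = obj;
    addr->offset = offset;
    return addr;
}

// Convenience overload in the spirit of the one added to compiler.h above:
// derive the address type from the object operand instead of repeating the
// "varTypeIsGC(obj) ? TYP_BYREF : TYP_I_IMPL" ternary at every call site.
static GenTreeFieldAddr* gtNewFieldAddrNode(const void* fldHnd, GenTree* obj, unsigned offset)
{
    return gtNewFieldAddrNode(varTypeIsGC(obj) ? TYP_BYREF : TYP_I_IMPL, fldHnd, obj, offset);
}

int main()
{
    GenTree heapObj{TYP_REF};      // field of an object on the GC heap
    GenTree nativePtr{TYP_I_IMPL}; // field accessed through a native pointer

    GenTreeFieldAddr* a = gtNewFieldAddrNode(nullptr, &heapObj, 8);
    GenTreeFieldAddr* b = gtNewFieldAddrNode(nullptr, &nativePtr, 8);

    std::printf("%s\n", (a->gtType == TYP_BYREF) ? "TYP_BYREF" : "TYP_I_IMPL"); // prints TYP_BYREF
    std::printf("%s\n", (b->gtType == TYP_BYREF) ? "TYP_BYREF" : "TYP_I_IMPL"); // prints TYP_I_IMPL

    delete a;
    delete b;
    return 0;
}

With the real overload in place, the importer call sites in the diff shrink to gtNewFieldAddrNode(resolvedToken.hField, obj, fieldInfo.offset), and the resulting non-struct field loads are created with the generic gtNewIndir rather than gtNewFieldIndirNode.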