GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
- unsigned simdSize,
- bool isSimdAsHWIntrinsic = false);
+ unsigned simdSize);
GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type,
GenTree* op1,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
- unsigned simdSize,
- bool isSimdAsHWIntrinsic = false);
+ unsigned simdSize);
GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type,
GenTree* op1,
GenTree* op2,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
- unsigned simdSize,
- bool isSimdAsHWIntrinsic = false);
+ unsigned simdSize);
GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type,
GenTree* op1,
GenTree* op2,
GenTree* op3,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
- unsigned simdSize,
- bool isSimdAsHWIntrinsic = false);
+ unsigned simdSize);
GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type,
GenTree* op1,
GenTree* op2,
GenTree* op4,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
- unsigned simdSize,
- bool isSimdAsHWIntrinsic = false);
+ unsigned simdSize);
GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type,
GenTree** operands,
size_t operandCount,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
- unsigned simdSize,
- bool isSimdAsHWIntrinsic = false);
+ unsigned simdSize);
GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type,
IntrinsicNodeBuilder&& nodeBuilder,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
- unsigned simdSize,
- bool isSimdAsHWIntrinsic = false);
+ unsigned simdSize);
// Zero-operand "Vector2/3/4 & friends as HW intrinsic" convenience overload.
// NOTE(review): with the isSimdAsHWIntrinsic flag removed from gtNewSimdHWIntrinsicNode
// (see the '-'/'+' lines below), this wrapper degenerates to a plain forwarder; consider
// removing it entirely once callers are migrated — confirm against remaining call sites.
GenTreeHWIntrinsic* gtNewSimdAsHWIntrinsicNode(var_types type,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize)
{
- bool isSimdAsHWIntrinsic = true;
- return gtNewSimdHWIntrinsicNode(type, hwIntrinsicID, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
+ return gtNewSimdHWIntrinsicNode(type, hwIntrinsicID, simdBaseJitType, simdSize);
}
// One-operand counterpart of the wrapper above; after this change it simply forwards
// to gtNewSimdHWIntrinsicNode with no extra flag.
GenTreeHWIntrinsic* gtNewSimdAsHWIntrinsicNode(
var_types type, GenTree* op1, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize)
{
- bool isSimdAsHWIntrinsic = true;
- return gtNewSimdHWIntrinsicNode(type, op1, hwIntrinsicID, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
+ return gtNewSimdHWIntrinsicNode(type, op1, hwIntrinsicID, simdBaseJitType, simdSize);
}
GenTreeHWIntrinsic* gtNewSimdAsHWIntrinsicNode(var_types type,
CorInfoType simdBaseJitType,
unsigned simdSize)
{
- bool isSimdAsHWIntrinsic = true;
- return gtNewSimdHWIntrinsicNode(type, op1, op2, hwIntrinsicID, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
+ return gtNewSimdHWIntrinsicNode(type, op1, op2, hwIntrinsicID, simdBaseJitType, simdSize);
}
GenTreeHWIntrinsic* gtNewSimdAsHWIntrinsicNode(var_types type,
CorInfoType simdBaseJitType,
unsigned simdSize)
{
- bool isSimdAsHWIntrinsic = true;
- return gtNewSimdHWIntrinsicNode(type, op1, op2, op3, hwIntrinsicID, simdBaseJitType, simdSize,
- isSimdAsHWIntrinsic);
+ return gtNewSimdHWIntrinsicNode(type, op1, op2, op3, hwIntrinsicID, simdBaseJitType, simdSize);
}
GenTree* gtNewSimdAbsNode(
- var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic);
+ var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize);
GenTree* gtNewSimdBinOpNode(genTreeOps op,
var_types type,
GenTree* op1,
GenTree* op2,
CorInfoType simdBaseJitType,
- unsigned simdSize,
- bool isSimdAsHWIntrinsic);
+ unsigned simdSize);
GenTree* gtNewSimdCeilNode(
- var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic);
+ var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize);
GenTree* gtNewSimdCmpOpNode(genTreeOps op,
var_types type,
GenTree* op1,
GenTree* op2,
CorInfoType simdBaseJitType,
- unsigned simdSize,
- bool isSimdAsHWIntrinsic);
+ unsigned simdSize);
GenTree* gtNewSimdCmpOpAllNode(genTreeOps op,
var_types type,
GenTree* op1,
GenTree* op2,
CorInfoType simdBaseJitType,
- unsigned simdSize,
- bool isSimdAsHWIntrinsic);
+ unsigned simdSize);
GenTree* gtNewSimdCmpOpAnyNode(genTreeOps op,
var_types type,
GenTree* op1,
GenTree* op2,
CorInfoType simdBaseJitType,
- unsigned simdSize,
- bool isSimdAsHWIntrinsic);
+ unsigned simdSize);
GenTree* gtNewSimdCndSelNode(var_types type,
GenTree* op1,
GenTree* op2,
GenTree* op3,
CorInfoType simdBaseJitType,
- unsigned simdSize,
- bool isSimdAsHWIntrinsic);
+ unsigned simdSize);
GenTree* gtNewSimdCreateBroadcastNode(
- var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic = false);
+ var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize);
GenTree* gtNewSimdCreateScalarNode(
- var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic = false);
+ var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize);
GenTree* gtNewSimdCreateScalarUnsafeNode(
- var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic = false);
+ var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize);
GenTree* gtNewSimdDotProdNode(var_types type,
GenTree* op1,
GenTree* op2,
CorInfoType simdBaseJitType,
- unsigned simdSize,
- bool isSimdAsHWIntrinsic);
+ unsigned simdSize);
GenTree* gtNewSimdFloorNode(
- var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic);
+ var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize);
GenTree* gtNewSimdGetElementNode(var_types type,
GenTree* op1,
GenTree* op2,
CorInfoType simdBaseJitType,
- unsigned simdSize,
- bool isSimdAsHWIntrinsic);
+ unsigned simdSize);
GenTree* gtNewSimdGetLowerNode(var_types type,
GenTree* op1,
CorInfoType simdBaseJitType,
- unsigned simdSize,
- bool isSimdAsHWIntrinsic);
+ unsigned simdSize);
GenTree* gtNewSimdGetUpperNode(var_types type,
GenTree* op1,
CorInfoType simdBaseJitType,
- unsigned simdSize,
- bool isSimdAsHWIntrinsic);
+ unsigned simdSize);
GenTree* gtNewSimdLoadNode(
- var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic);
+ var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize);
GenTree* gtNewSimdLoadAlignedNode(
- var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic);
+ var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize);
GenTree* gtNewSimdLoadNonTemporalNode(
- var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic);
+ var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize);
GenTree* gtNewSimdMaxNode(var_types type,
GenTree* op1,
GenTree* op2,
CorInfoType simdBaseJitType,
- unsigned simdSize,
- bool isSimdAsHWIntrinsic);
+ unsigned simdSize);
GenTree* gtNewSimdMinNode(var_types type,
GenTree* op1,
GenTree* op2,
CorInfoType simdBaseJitType,
- unsigned simdSize,
- bool isSimdAsHWIntrinsic);
+ unsigned simdSize);
GenTree* gtNewSimdNarrowNode(var_types type,
GenTree* op1,
GenTree* op2,
CorInfoType simdBaseJitType,
- unsigned simdSize,
- bool isSimdAsHWIntrinsic);
+ unsigned simdSize);
GenTree* gtNewSimdShuffleNode(var_types type,
GenTree* op1,
GenTree* op2,
CorInfoType simdBaseJitType,
- unsigned simdSize,
- bool isSimdAsHWIntrinsic);
+ unsigned simdSize);
GenTree* gtNewSimdSqrtNode(
- var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic);
+ var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize);
GenTree* gtNewSimdStoreNode(
- GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic);
+ GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize);
GenTree* gtNewSimdStoreAlignedNode(
- GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic);
+ GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize);
GenTree* gtNewSimdStoreNonTemporalNode(
- GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic);
+ GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize);
GenTree* gtNewSimdSumNode(
- var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic);
+ var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize);
GenTree* gtNewSimdUnOpNode(genTreeOps op,
var_types type,
GenTree* op1,
CorInfoType simdBaseJitType,
- unsigned simdSize,
- bool isSimdAsHWIntrinsic);
+ unsigned simdSize);
GenTree* gtNewSimdWidenLowerNode(
- var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic);
+ var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize);
GenTree* gtNewSimdWidenUpperNode(
- var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic);
+ var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize);
GenTree* gtNewSimdWithElementNode(var_types type,
GenTree* op1,
GenTree* op2,
GenTree* op3,
CorInfoType simdBaseJitType,
- unsigned simdSize,
- bool isSimdAsHWIntrinsic);
+ unsigned simdSize);
GenTree* gtNewSimdWithLowerNode(var_types type,
GenTree* op1,
GenTree* op2,
CorInfoType simdBaseJitType,
- unsigned simdSize,
- bool isSimdAsHWIntrinsic);
+ unsigned simdSize);
GenTree* gtNewSimdWithUpperNode(var_types type,
GenTree* op1,
GenTree* op2,
CorInfoType simdBaseJitType,
- unsigned simdSize,
- bool isSimdAsHWIntrinsic);
+ unsigned simdSize);
GenTreeHWIntrinsic* gtNewScalarHWIntrinsicNode(var_types type, NamedIntrinsic hwIntrinsicID);
GenTreeHWIntrinsic* gtNewScalarHWIntrinsicNode(var_types type, GenTree* op1, NamedIntrinsic hwIntrinsicID);
GenTree* gtOptimizeEnumHasFlag(GenTree* thisOp, GenTree* flagOp);
//-------------------------------------------------------------------------
- // Get the handle, if any.
- CORINFO_CLASS_HANDLE gtGetStructHandleIfPresent(GenTree* tree);
- // Get the handle, and assert if not found.
- CORINFO_CLASS_HANDLE gtGetStructHandle(GenTree* tree);
// Get the handle for a ref type.
CORINFO_CLASS_HANDLE gtGetClassHandle(GenTree* tree, bool* pIsExact, bool* pIsNonNull);
// Get the class handle for an helper call
// Create a new temporary variable to hold the result of *ppTree,
// and transform the graph accordingly.
- GenTree* fgInsertCommaFormTemp(GenTree** ppTree, CORINFO_CLASS_HANDLE structType = nullptr);
- TempInfo fgMakeTemp(GenTree* rhs, CORINFO_CLASS_HANDLE structType = nullptr);
- GenTree* fgMakeMultiUse(GenTree** ppTree, CORINFO_CLASS_HANDLE structType = nullptr);
+ GenTree* fgInsertCommaFormTemp(GenTree** ppTree);
+ TempInfo fgMakeTemp(GenTree* rhs);
+ GenTree* fgMakeMultiUse(GenTree** ppTree);
// Recognize a bitwise rotation pattern and convert into a GT_ROL or a GT_ROR node.
GenTree* fgRecognizeAndMorphBitwiseRotation(GenTree* tree);
// Per-compilation cache of CORINFO_CLASS_HANDLEs for the fixed System.Numerics vector
// struct types. This change deletes the per-element-type handle tables (Vector<T> /
// Vector64/128/256/512<T>) and their SupportedTypeCount sizing constant, leaving only
// the named-type handles below.
// NOTE(review): the removed helpers checked m_simdHandleCache for nullptr, implying the
// cache is created on demand elsewhere (getBaseJitTypeAndSizeOfSIMDType) — confirm.
struct SIMDHandlesCache
{
- // BYTE, UBYTE, SHORT, USHORT, INT, UINT, LONG, ULONG
- // NATIVEINT, NATIVEUINT, FLOAT, and DOUBLE
- static const uint32_t SupportedTypeCount = 12;
-
- // SIMD Types
- CORINFO_CLASS_HANDLE VectorTHandles[SupportedTypeCount];
-
CORINFO_CLASS_HANDLE PlaneHandle;
CORINFO_CLASS_HANDLE QuaternionHandle;
CORINFO_CLASS_HANDLE Vector2Handle;
CORINFO_CLASS_HANDLE Vector4Handle;
CORINFO_CLASS_HANDLE VectorHandle;
-#ifdef FEATURE_HW_INTRINSICS
-#if defined(TARGET_ARM64)
- CORINFO_CLASS_HANDLE Vector64THandles[SupportedTypeCount];
-#endif // defined(TARGET_ARM64)
- CORINFO_CLASS_HANDLE Vector128THandles[SupportedTypeCount];
-#if defined(TARGET_XARCH)
- CORINFO_CLASS_HANDLE Vector256THandles[SupportedTypeCount];
- CORINFO_CLASS_HANDLE Vector512THandles[SupportedTypeCount];
-#endif // defined(TARGET_XARCH)
-#endif // FEATURE_HW_INTRINSICS
-
- CORINFO_CLASS_HANDLE CanonicalSimd8Handle;
- CORINFO_CLASS_HANDLE CanonicalSimd16Handle;
- CORINFO_CLASS_HANDLE CanonicalSimd32Handle;
- CORINFO_CLASS_HANDLE CanonicalSimd64Handle;
-
SIMDHandlesCache()
{
- assert(SupportedTypeCount == static_cast<uint32_t>(CORINFO_TYPE_DOUBLE - CORINFO_TYPE_BYTE + 1));
// Zero-initialize every cached handle in one shot.
memset(this, 0, sizeof(*this));
}
};
SIMDHandlesCache* m_simdHandleCache;
-#if defined(FEATURE_HW_INTRINSICS)
// NOTE(review): removed wholesale by this change (every line below is a '-' deletion),
// along with the other SIMD struct-handle lookup helpers. Kept here only as patch context.
- CORINFO_CLASS_HANDLE gtGetStructHandleForSIMD(var_types simdType, CorInfoType simdBaseJitType)
- {
- assert(varTypeIsSIMD(simdType));
- assert((simdBaseJitType >= CORINFO_TYPE_BYTE) && (simdBaseJitType <= CORINFO_TYPE_DOUBLE));
-
- // We should only be called from gtGetStructHandleForSimdOrHW and this should've been checked already
- assert(m_simdHandleCache != nullptr);
-
- if (simdBaseJitType == CORINFO_TYPE_FLOAT)
- {
- switch (simdType)
- {
- case TYP_SIMD8:
- {
- return m_simdHandleCache->Vector2Handle;
- }
-
- case TYP_SIMD12:
- {
- return m_simdHandleCache->Vector3Handle;
- }
-
- case TYP_SIMD16:
- {
- // We order the checks roughly by expected hit count so early exits are possible
-
- if (m_simdHandleCache->Vector4Handle != NO_CLASS_HANDLE)
- {
- return m_simdHandleCache->Vector4Handle;
- }
-
- if (m_simdHandleCache->QuaternionHandle != NO_CLASS_HANDLE)
- {
- return m_simdHandleCache->QuaternionHandle;
- }
-
- if (m_simdHandleCache->PlaneHandle != NO_CLASS_HANDLE)
- {
- return m_simdHandleCache->PlaneHandle;
- }
-
- break;
- }
-
-#if defined(TARGET_XARCH)
- case TYP_SIMD32:
- case TYP_SIMD64:
- {
- // This should be handled by the Vector<T> path below
- break;
- }
-#endif // TARGET_XARCH
-
- default:
- {
- unreached();
- }
- }
- }
-
- if (emitTypeSize(simdType) != getSIMDVectorRegisterByteLength())
- {
- // We have scenarios, such as shifting Vector<T> by a non-constant
- // which may introduce different sized vectors that are marked as
- // isSimdAsHWIntrinsic.
-
- return NO_CLASS_HANDLE;
- }
-
- uint32_t handleIndex = static_cast<uint32_t>(simdBaseJitType - CORINFO_TYPE_BYTE);
- assert(handleIndex < SIMDHandlesCache::SupportedTypeCount);
-
- return m_simdHandleCache->VectorTHandles[handleIndex];
- }
-
// NOTE(review): removed wholesale by this change (pure '-' deletion hunk); it depended on
// the VectorNNNTHandles tables deleted from SIMDHandlesCache.
- CORINFO_CLASS_HANDLE gtGetStructHandleForHWSIMD(var_types simdType, CorInfoType simdBaseJitType)
- {
- assert(varTypeIsSIMD(simdType));
- assert((simdBaseJitType >= CORINFO_TYPE_BYTE) && (simdBaseJitType <= CORINFO_TYPE_DOUBLE));
-
- // We should only be called from gtGetStructHandleForSimdOrHW and this should've been checked already
- assert(m_simdHandleCache != nullptr);
-
- uint32_t handleIndex = static_cast<uint32_t>(simdBaseJitType - CORINFO_TYPE_BYTE);
- assert(handleIndex < SIMDHandlesCache::SupportedTypeCount);
-
- switch (simdType)
- {
- case TYP_SIMD8:
- {
-#if defined(TARGET_ARM64)
- return m_simdHandleCache->Vector64THandles[handleIndex];
-#else
- // This can only be Vector2 and should've been handled by gtGetStructHandleForSIMD
- return NO_CLASS_HANDLE;
-#endif
- }
-
- case TYP_SIMD12:
- {
- // This can only be Vector3 and should've been handled by gtGetStructHandleForSIMD
- return NO_CLASS_HANDLE;
- }
-
- case TYP_SIMD16:
- {
- return m_simdHandleCache->Vector128THandles[handleIndex];
- }
-
-#if defined(TARGET_XARCH)
- case TYP_SIMD32:
- {
- return m_simdHandleCache->Vector256THandles[handleIndex];
- }
-
- case TYP_SIMD64:
- {
- return m_simdHandleCache->Vector512THandles[handleIndex];
- }
-#endif // TARGET_XARCH
-
- default:
- {
- unreached();
- }
- }
- }
-
// NOTE(review): removed wholesale by this change (pure '-' deletion hunk); this was the
// only caller of the two helpers above and the last consumer of isSimdAsHWIntrinsic.
- CORINFO_CLASS_HANDLE gtGetStructHandleForSimdOrHW(var_types simdType,
- CorInfoType simdBaseJitType,
- bool isSimdAsHWIntrinsic = false)
- {
- assert(varTypeIsSIMD(simdType));
- assert((simdBaseJitType >= CORINFO_TYPE_BYTE) && (simdBaseJitType <= CORINFO_TYPE_DOUBLE));
-
- if (m_simdHandleCache == nullptr)
- {
- // This may happen if the JIT generates SIMD node on its own, without importing them.
- // Otherwise getBaseJitTypeAndSizeOfSIMDType should have created the cache.
- return NO_CLASS_HANDLE;
- }
-
- CORINFO_CLASS_HANDLE clsHnd = NO_CLASS_HANDLE;
-
- if (isSimdAsHWIntrinsic)
- {
- clsHnd = gtGetStructHandleForSIMD(simdType, simdBaseJitType);
- }
-
- if (clsHnd == NO_CLASS_HANDLE)
- {
- clsHnd = gtGetStructHandleForHWSIMD(simdType, simdBaseJitType);
- }
-
- if (clsHnd == NO_CLASS_HANDLE)
- {
- // TODO-cleanup: We can probably just always use the canonical handle.
- clsHnd = gtGetCanonicalStructHandleForSIMD(simdType);
- }
-
- return clsHnd;
- }
-#endif // FEATURE_HW_INTRINSICS
-
// NOTE(review): removed wholesale by this change (pure '-' deletion hunk) together with
// the CanonicalSimdNHandle fields it read from SIMDHandlesCache.
- //------------------------------------------------------------------------
- // gtGetCanonicalStructHandleForSIMD: Get the "canonical" SIMD type handle.
- //
- // Some SIMD-typed trees do not carry struct handles with them (and in
- // some cases, they cannot, due to being created by the compiler itself).
- // To enable CSEing of these trees, we use "canonical" handles. These are
- // captured during importation, and can represent any type normalized to
- // be TYP_SIMD.
- //
- // Arguments:
- // simdType - The SIMD type
- //
- // Return Value:
- // The "canonical" type handle for "simdType", if one was available.
- // "NO_CLASS_HANDLE" otherwise.
- //
- CORINFO_CLASS_HANDLE gtGetCanonicalStructHandleForSIMD(var_types simdType)
- {
- if (m_simdHandleCache == nullptr)
- {
- return NO_CLASS_HANDLE;
- }
-
- switch (simdType)
- {
- case TYP_SIMD8:
- return m_simdHandleCache->CanonicalSimd8Handle;
- case TYP_SIMD12:
- return m_simdHandleCache->Vector3Handle;
- case TYP_SIMD16:
- return m_simdHandleCache->CanonicalSimd16Handle;
-#if defined(TARGET_XARCH)
- case TYP_SIMD32:
- return m_simdHandleCache->CanonicalSimd32Handle;
- case TYP_SIMD64:
- return m_simdHandleCache->CanonicalSimd64Handle;
-#endif // TARGET_XARCH
-
- default:
- unreached();
- }
- }
-
// Returns true if this is a SIMD type that should be considered an opaque
// vector type (i.e. do not analyze or promote its fields).
// Note that all but the fixed vector types are opaque, even though they may
if (varTypeIsStruct(argType))
{
- structHnd = gtGetStructHandleIfPresent(argNode);
- noway_assert((structHnd != NO_CLASS_HANDLE) || (argType != TYP_STRUCT));
+ structHnd = lclVarInfo[argNum].lclVerTypeInfo.GetClassHandleForValueClass();
+ assert(structHnd != NO_CLASS_HANDLE);
}
- // Unsafe value cls check is not needed for
- // argTmpNum here since in-linee compiler instance
- // would have iterated over these and marked them
- // accordingly.
+ // Unsafe value cls check is not needed for argTmpNum here since in-linee compiler instance
+ // would have iterated over these and marked them accordingly.
impAssignTempGen(tmpNum, argNode, structHnd, CHECK_SPILL_NONE, &afterStmt, callDI, block);
// We used to refine the temp type here based on
// ret(...) ->
// ret(comma(comma(tmp=...,call mon_exit), tmp))
//
- //
- // Before morph stage, it is possible to have a case of GT_RETURN(TYP_LONG, op1) where op1's type is
- // TYP_STRUCT (of 8-bytes) and op1 is call node. See the big comment block in impReturnInstruction()
- // for details for the case where info.compRetType is not the same as info.compRetNativeType. For
- // this reason pass compMethodInfo->args.retTypeClass which is guaranteed to be a valid class handle
- // if the return type is a value class. Note that fgInsertCommFormTemp() in turn uses this class handle
- // if the type of op1 is TYP_STRUCT to perform lvaSetStruct() on the new temp that is created, which
- // in turn passes it to VM to know the size of value type.
- GenTree* temp = fgInsertCommaFormTemp(&retNode->AsOp()->gtOp1, info.compMethodInfo->args.retTypeClass);
-
+ GenTree* temp = fgInsertCommaFormTemp(&retNode->AsOp()->gtOp1);
GenTree* lclVar = retNode->AsOp()->gtOp1->AsOp()->gtOp2;
// The return can't handle all of the trees that could be on the right-hand-side of an assignment,
if (varTypeIsStruct(SignatureType))
{
assert(SignatureClsHnd != NO_CLASS_HANDLE);
+ assert(SignatureType == Node->TypeGet());
- Compiler* comp = JitTls::GetCompiler();
- CORINFO_CLASS_HANDLE clsHnd = comp->gtGetStructHandleIfPresent(Node);
- assert((clsHnd == nullptr) || (SignatureClsHnd == clsHnd) ||
- (comp->info.compCompHnd->getClassSize(SignatureClsHnd) == comp->info.compCompHnd->getClassSize(clsHnd)));
+ if (SignatureType == TYP_STRUCT)
+ {
+ Compiler* comp = JitTls::GetCompiler();
+ assert(ClassLayout::AreCompatible(comp->typGetObjLayout(SignatureClsHnd), Node->GetLayout(comp)));
+ }
}
}
#endif
copy = new (this, GT_HWINTRINSIC)
GenTreeHWIntrinsic(tree->TypeGet(), IntrinsicNodeBuilder(getAllocator(CMK_ASTNode), tree->AsMultiOp()),
tree->AsHWIntrinsic()->GetHWIntrinsicId(),
- tree->AsHWIntrinsic()->GetSimdBaseJitType(), tree->AsHWIntrinsic()->GetSimdSize(),
- tree->AsHWIntrinsic()->IsSimdAsHWIntrinsic());
+ tree->AsHWIntrinsic()->GetSimdBaseJitType(), tree->AsHWIntrinsic()->GetSimdSize());
copy->AsHWIntrinsic()->SetAuxiliaryJitType(tree->AsHWIntrinsic()->GetAuxiliaryJitType());
goto CLONE_MULTIOP_OPERANDS;
#endif
return false;
}
// NOTE(review): removed wholesale by this change (pure '-' deletion hunk). Call sites in
// this patch switch to layout/signature-based handles instead (e.g. the inliner hunk above
// uses lclVerTypeInfo.GetClassHandleForValueClass()).
-CORINFO_CLASS_HANDLE Compiler::gtGetStructHandleIfPresent(GenTree* tree)
-{
- CORINFO_CLASS_HANDLE structHnd = NO_CLASS_HANDLE;
- tree = tree->gtEffectiveVal();
- if (varTypeIsStruct(tree->gtType))
- {
- switch (tree->gtOper)
- {
- case GT_MKREFANY:
- structHnd = impGetRefAnyClass();
- break;
- case GT_OBJ:
- structHnd = tree->AsObj()->GetLayout()->GetClassHandle();
- break;
- case GT_BLK:
- structHnd = tree->AsBlk()->GetLayout()->GetClassHandle();
- break;
- case GT_CALL:
- structHnd = tree->AsCall()->gtRetClsHnd;
- break;
- case GT_RET_EXPR:
- structHnd = tree->AsRetExpr()->gtInlineCandidate->gtRetClsHnd;
- break;
- case GT_FIELD:
- info.compCompHnd->getFieldType(tree->AsField()->gtFldHnd, &structHnd);
- break;
- case GT_ASG:
- structHnd = gtGetStructHandleIfPresent(tree->gtGetOp1());
- break;
- case GT_LCL_FLD:
-#ifdef FEATURE_SIMD
- if (varTypeIsSIMD(tree))
- {
- structHnd = gtGetCanonicalStructHandleForSIMD(tree->TypeGet());
- }
- else
-#endif // FEATURE_SIMD
- {
- structHnd = tree->AsLclFld()->GetLayout()->GetClassHandle();
- }
- break;
- case GT_LCL_VAR:
- {
- LclVarDsc* dsc = lvaGetDesc(tree->AsLclVar());
- if ((dsc->GetLayout() != nullptr) && !dsc->GetLayout()->IsBlockLayout())
- {
- structHnd = dsc->GetLayout()->GetClassHandle();
- }
- break;
- }
- case GT_RETURN:
- structHnd = gtGetStructHandleIfPresent(tree->AsOp()->gtOp1);
- break;
-#ifdef FEATURE_SIMD
- case GT_IND:
- if (varTypeIsSIMD(tree))
- {
- structHnd = gtGetCanonicalStructHandleForSIMD(tree->TypeGet());
- }
- break;
- case GT_CNS_VEC:
- structHnd = gtGetCanonicalStructHandleForSIMD(tree->TypeGet());
- break;
-#endif // FEATURE_SIMD
-#ifdef FEATURE_HW_INTRINSICS
- case GT_HWINTRINSIC:
- if (varTypeIsSIMD(tree))
- {
- structHnd =
- gtGetStructHandleForSimdOrHW(tree->TypeGet(), tree->AsHWIntrinsic()->GetSimdBaseJitType(),
- tree->AsHWIntrinsic()->IsSimdAsHWIntrinsic());
- }
- break;
-#endif
- default:
- break;
- }
- }
-
- return structHnd;
-}
-
// NOTE(review): removed with its sibling above — it was only an asserting wrapper over
// gtGetStructHandleIfPresent.
-CORINFO_CLASS_HANDLE Compiler::gtGetStructHandle(GenTree* tree)
-{
- CORINFO_CLASS_HANDLE structHnd = gtGetStructHandleIfPresent(tree);
- assert(structHnd != NO_CLASS_HANDLE);
- return structHnd;
-}
-
//------------------------------------------------------------------------
// gtGetClassHandle: find class handle for a ref type
//
// Allocates a zero-operand GT_HWINTRINSIC node. The '+' lines show the post-change form:
// the GenTreeHWIntrinsic constructor no longer takes an isSimdAsHWIntrinsic flag.
GenTreeHWIntrinsic* Compiler::gtNewSimdHWIntrinsicNode(var_types type,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
- unsigned simdSize,
- bool isSimdAsHWIntrinsic)
+ unsigned simdSize)
{
- return new (this, GT_HWINTRINSIC) GenTreeHWIntrinsic(type, getAllocator(CMK_ASTNode), hwIntrinsicID,
- simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
+ return new (this, GT_HWINTRINSIC)
+ GenTreeHWIntrinsic(type, getAllocator(CMK_ASTNode), hwIntrinsicID, simdBaseJitType, simdSize);
}
// Allocates a one-operand GT_HWINTRINSIC node; marks op1's local (if any) as
// SIMD-intrinsic-related before constructing the node. Post-change form drops the
// isSimdAsHWIntrinsic flag, matching the constructor change above.
-GenTreeHWIntrinsic* Compiler::gtNewSimdHWIntrinsicNode(var_types type,
- GenTree* op1,
- NamedIntrinsic hwIntrinsicID,
- CorInfoType simdBaseJitType,
- unsigned simdSize,
- bool isSimdAsHWIntrinsic)
+GenTreeHWIntrinsic* Compiler::gtNewSimdHWIntrinsicNode(
+ var_types type, GenTree* op1, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize)
{
SetOpLclRelatedToSIMDIntrinsic(op1);
- return new (this, GT_HWINTRINSIC) GenTreeHWIntrinsic(type, getAllocator(CMK_ASTNode), hwIntrinsicID,
- simdBaseJitType, simdSize, isSimdAsHWIntrinsic, op1);
+ return new (this, GT_HWINTRINSIC)
+ GenTreeHWIntrinsic(type, getAllocator(CMK_ASTNode), hwIntrinsicID, simdBaseJitType, simdSize, op1);
}
GenTreeHWIntrinsic* Compiler::gtNewSimdHWIntrinsicNode(var_types type,
GenTree* op2,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
- unsigned simdSize,
- bool isSimdAsHWIntrinsic)
+ unsigned simdSize)
{
SetOpLclRelatedToSIMDIntrinsic(op1);
SetOpLclRelatedToSIMDIntrinsic(op2);
- return new (this, GT_HWINTRINSIC) GenTreeHWIntrinsic(type, getAllocator(CMK_ASTNode), hwIntrinsicID,
- simdBaseJitType, simdSize, isSimdAsHWIntrinsic, op1, op2);
+ return new (this, GT_HWINTRINSIC)
+ GenTreeHWIntrinsic(type, getAllocator(CMK_ASTNode), hwIntrinsicID, simdBaseJitType, simdSize, op1, op2);
}
GenTreeHWIntrinsic* Compiler::gtNewSimdHWIntrinsicNode(var_types type,
GenTree* op3,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
- unsigned simdSize,
- bool isSimdAsHWIntrinsic)
+ unsigned simdSize)
{
SetOpLclRelatedToSIMDIntrinsic(op1);
SetOpLclRelatedToSIMDIntrinsic(op2);
SetOpLclRelatedToSIMDIntrinsic(op3);
- return new (this, GT_HWINTRINSIC) GenTreeHWIntrinsic(type, getAllocator(CMK_ASTNode), hwIntrinsicID,
- simdBaseJitType, simdSize, isSimdAsHWIntrinsic, op1, op2, op3);
+ return new (this, GT_HWINTRINSIC)
+ GenTreeHWIntrinsic(type, getAllocator(CMK_ASTNode), hwIntrinsicID, simdBaseJitType, simdSize, op1, op2, op3);
}
GenTreeHWIntrinsic* Compiler::gtNewSimdHWIntrinsicNode(var_types type,
GenTree* op4,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
- unsigned simdSize,
- bool isSimdAsHWIntrinsic)
+ unsigned simdSize)
{
SetOpLclRelatedToSIMDIntrinsic(op1);
SetOpLclRelatedToSIMDIntrinsic(op2);
SetOpLclRelatedToSIMDIntrinsic(op3);
SetOpLclRelatedToSIMDIntrinsic(op4);
- return new (this, GT_HWINTRINSIC)
- GenTreeHWIntrinsic(type, getAllocator(CMK_ASTNode), hwIntrinsicID, simdBaseJitType, simdSize,
- isSimdAsHWIntrinsic, op1, op2, op3, op4);
+ return new (this, GT_HWINTRINSIC) GenTreeHWIntrinsic(type, getAllocator(CMK_ASTNode), hwIntrinsicID,
+ simdBaseJitType, simdSize, op1, op2, op3, op4);
}
GenTreeHWIntrinsic* Compiler::gtNewSimdHWIntrinsicNode(var_types type,
size_t operandCount,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
- unsigned simdSize,
- bool isSimdAsHWIntrinsic)
+ unsigned simdSize)
{
IntrinsicNodeBuilder nodeBuilder(getAllocator(CMK_ASTNode), operandCount);
for (size_t i = 0; i < operandCount; i++)
}
return new (this, GT_HWINTRINSIC)
- GenTreeHWIntrinsic(type, std::move(nodeBuilder), hwIntrinsicID, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
+ GenTreeHWIntrinsic(type, std::move(nodeBuilder), hwIntrinsicID, simdBaseJitType, simdSize);
}
GenTreeHWIntrinsic* Compiler::gtNewSimdHWIntrinsicNode(var_types type,
IntrinsicNodeBuilder&& nodeBuilder,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
- unsigned simdSize,
- bool isSimdAsHWIntrinsic)
+ unsigned simdSize)
{
for (size_t i = 0; i < nodeBuilder.GetOperandCount(); i++)
{
}
return new (this, GT_HWINTRINSIC)
- GenTreeHWIntrinsic(type, std::move(nodeBuilder), hwIntrinsicID, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
+ GenTreeHWIntrinsic(type, std::move(nodeBuilder), hwIntrinsicID, simdBaseJitType, simdSize);
}
-GenTree* Compiler::gtNewSimdAbsNode(
- var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic)
+GenTree* Compiler::gtNewSimdAbsNode(var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
}
}
- return gtNewSimdBinOpNode(GT_AND_NOT, type, op1, bitMask, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
+ return gtNewSimdBinOpNode(GT_AND_NOT, type, op1, bitMask, simdBaseJitType, simdSize);
}
assert((simdSize != 32) || compIsaSupportedDebugOnly(InstructionSet_AVX2));
if ((simdBaseType != TYP_LONG) && ((simdSize == 32) || compOpportunisticallyDependsOn(InstructionSet_SSSE3)))
{
NamedIntrinsic intrinsic = (simdSize == 32) ? NI_AVX2_Abs : NI_SSSE3_Abs;
- return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
+ return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseJitType, simdSize);
}
else
{
- GenTree* tmp;
- CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSimdOrHW(type, simdBaseJitType, isSimdAsHWIntrinsic);
-
- GenTree* op1Dup1 = fgMakeMultiUse(&op1, clsHnd);
- GenTree* op1Dup2 = fgMakeMultiUse(&op1Dup1, clsHnd);
+ GenTree* tmp;
+ GenTree* op1Dup1 = fgMakeMultiUse(&op1);
+ GenTree* op1Dup2 = fgMakeMultiUse(&op1Dup1);
// op1 = op1 < Zero
tmp = gtNewZeroConNode(type);
- op1 = gtNewSimdCmpOpNode(GT_LT, type, op1, tmp, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
+ op1 = gtNewSimdCmpOpNode(GT_LT, type, op1, tmp, simdBaseJitType, simdSize);
// tmp = Zero - op1Dup1
tmp = gtNewZeroConNode(type);
- tmp = gtNewSimdBinOpNode(GT_SUB, type, tmp, op1Dup1, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
+ tmp = gtNewSimdBinOpNode(GT_SUB, type, tmp, op1Dup1, simdBaseJitType, simdSize);
// result = ConditionalSelect(op1, tmp, op1Dup2)
- return gtNewSimdCndSelNode(type, op1, tmp, op1Dup2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
+ return gtNewSimdCndSelNode(type, op1, tmp, op1Dup2, simdBaseJitType, simdSize);
}
#elif defined(TARGET_ARM64)
NamedIntrinsic intrinsic = NI_AdvSimd_Abs;
}
assert(intrinsic != NI_Illegal);
- return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
+ return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseJitType, simdSize);
#else
#error Unsupported platform
#endif
}
-GenTree* Compiler::gtNewSimdBinOpNode(genTreeOps op,
- var_types type,
- GenTree* op1,
- GenTree* op2,
- CorInfoType simdBaseJitType,
- unsigned simdSize,
- bool isSimdAsHWIntrinsic)
+GenTree* Compiler::gtNewSimdBinOpNode(
+ genTreeOps op, var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
(op2->TypeIs(TYP_SIMD12) && type == TYP_SIMD16));
}
- NamedIntrinsic intrinsic = NI_Illegal;
- CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSimdOrHW(type, simdBaseJitType, isSimdAsHWIntrinsic);
+ NamedIntrinsic intrinsic = NI_Illegal;
switch (op)
{
if (varTypeIsArithmetic(op2))
{
- op2 = gtNewSimdCreateBroadcastNode(type, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
+ op2 = gtNewSimdCreateBroadcastNode(type, op2, simdBaseJitType, simdSize);
}
if (simdSize == 32)
{
op2 = gtNewOperNode(GT_AND, TYP_INT, op2, gtNewIconNode(shiftCountMask));
op2 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op2, NI_SSE2_ConvertScalarToVector128Int32, CORINFO_TYPE_INT,
- 16, isSimdAsHWIntrinsic);
+ 16);
}
if (simdSize == 32)
if (broadcastOp != nullptr)
{
- *broadcastOp =
- gtNewSimdCreateBroadcastNode(type, *broadcastOp, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
+ *broadcastOp = gtNewSimdCreateBroadcastNode(type, *broadcastOp, simdBaseJitType, simdSize);
}
switch (simdBaseType)
else
{
// op1Dup = op1
- GenTree* op1Dup = fgMakeMultiUse(&op1, clsHnd);
+ GenTree* op1Dup = fgMakeMultiUse(&op1);
// op2Dup = op2
- GenTree* op2Dup = fgMakeMultiUse(&op2, clsHnd);
+ GenTree* op2Dup = fgMakeMultiUse(&op2);
// op1Dup = Sse2.ShiftRightLogical128BitLane(op1Dup, 4)
- op1Dup = gtNewSimdHWIntrinsicNode(type, op1Dup, gtNewIconNode(4, TYP_INT),
- NI_SSE2_ShiftRightLogical128BitLane, simdBaseJitType,
- simdSize, isSimdAsHWIntrinsic);
+ op1Dup =
+ gtNewSimdHWIntrinsicNode(type, op1Dup, gtNewIconNode(4, TYP_INT),
+ NI_SSE2_ShiftRightLogical128BitLane, simdBaseJitType, simdSize);
// op2Dup = Sse2.ShiftRightLogical128BitLane(op2Dup, 4)
- op2Dup = gtNewSimdHWIntrinsicNode(type, op2Dup, gtNewIconNode(4, TYP_INT),
- NI_SSE2_ShiftRightLogical128BitLane, simdBaseJitType,
- simdSize, isSimdAsHWIntrinsic);
+ op2Dup =
+ gtNewSimdHWIntrinsicNode(type, op2Dup, gtNewIconNode(4, TYP_INT),
+ NI_SSE2_ShiftRightLogical128BitLane, simdBaseJitType, simdSize);
// op2Dup = Sse2.Multiply(op1Dup.AsUInt32(), op2Dup.AsUInt32()).AsInt32()
op2Dup = gtNewSimdHWIntrinsicNode(type, op1Dup, op2Dup, NI_SSE2_Multiply, CORINFO_TYPE_ULONG,
- simdSize, isSimdAsHWIntrinsic);
+ simdSize);
// op2Dup = Sse2.Shuffle(op2Dup, (0, 0, 2, 0))
- op2Dup =
- gtNewSimdHWIntrinsicNode(type, op2Dup, gtNewIconNode(SHUFFLE_XXZX, TYP_INT),
- NI_SSE2_Shuffle, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
+ op2Dup = gtNewSimdHWIntrinsicNode(type, op2Dup, gtNewIconNode(SHUFFLE_XXZX, TYP_INT),
+ NI_SSE2_Shuffle, simdBaseJitType, simdSize);
// op1 = Sse2.Multiply(op1.AsUInt32(), op2.AsUInt32()).AsInt32()
- op1 = gtNewSimdHWIntrinsicNode(type, op1, op2, NI_SSE2_Multiply, CORINFO_TYPE_ULONG, simdSize,
- isSimdAsHWIntrinsic);
+ op1 = gtNewSimdHWIntrinsicNode(type, op1, op2, NI_SSE2_Multiply, CORINFO_TYPE_ULONG, simdSize);
// op1 = Sse2.Shuffle(op1, (0, 0, 2, 0))
op1 = gtNewSimdHWIntrinsicNode(type, op1, gtNewIconNode(SHUFFLE_XXZX, TYP_INT), NI_SSE2_Shuffle,
- simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
+ simdBaseJitType, simdSize);
// op2 = op2Dup;
op2 = op2Dup;
if (varTypeIsArithmetic(op2))
{
- op2 = gtNewSimdCreateBroadcastNode(type, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
+ op2 = gtNewSimdCreateBroadcastNode(type, op2, simdBaseJitType, simdSize);
}
if ((simdSize == 8) && (simdBaseType == TYP_DOUBLE))
op2 = gtNewOperNode(GT_NEG, TYP_INT, op2);
}
- op2 = gtNewSimdCreateBroadcastNode(type, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
+ op2 = gtNewSimdCreateBroadcastNode(type, op2, simdBaseJitType, simdSize);
if ((simdSize == 8) && varTypeIsLong(simdBaseType))
{
{
if (scalarOp != nullptr)
{
- *scalarOp = gtNewSimdCreateBroadcastNode(type, *scalarOp, simdBaseJitType, simdSize,
- isSimdAsHWIntrinsic);
+ *scalarOp = gtNewSimdCreateBroadcastNode(type, *scalarOp, simdBaseJitType, simdSize);
}
intrinsic = NI_AdvSimd_Multiply;
break;
if (scalarOp != nullptr)
{
intrinsic = NI_AdvSimd_MultiplyByScalar;
- *scalarOp = gtNewSimdCreateScalarUnsafeNode(TYP_SIMD8, *scalarOp, simdBaseJitType, 8,
- isSimdAsHWIntrinsic);
+ *scalarOp = gtNewSimdCreateScalarUnsafeNode(TYP_SIMD8, *scalarOp, simdBaseJitType, 8);
}
else
{
if (scalarOp != nullptr)
{
intrinsic = NI_AdvSimd_Arm64_MultiplyByScalar;
- *scalarOp = gtNewSimdCreateScalarUnsafeNode(TYP_SIMD8, *scalarOp, simdBaseJitType, 8,
- isSimdAsHWIntrinsic);
+ *scalarOp = gtNewSimdCreateScalarUnsafeNode(TYP_SIMD8, *scalarOp, simdBaseJitType, 8);
}
else
{
}
assert(intrinsic != NI_Illegal);
- return gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
+ return gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsic, simdBaseJitType, simdSize);
}
-GenTree* Compiler::gtNewSimdCeilNode(
- var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic)
+GenTree* Compiler::gtNewSimdCeilNode(var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
#endif // !TARGET_XARCH && !TARGET_ARM64
assert(intrinsic != NI_Illegal);
- return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
+ return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseJitType, simdSize);
}
-GenTree* Compiler::gtNewSimdCmpOpNode(genTreeOps op,
- var_types type,
- GenTree* op1,
- GenTree* op2,
- CorInfoType simdBaseJitType,
- unsigned simdSize,
- bool isSimdAsHWIntrinsic)
+GenTree* Compiler::gtNewSimdCmpOpNode(
+ genTreeOps op, var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsArithmetic(simdBaseType));
- NamedIntrinsic intrinsic = NI_Illegal;
- CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSimdOrHW(type, simdBaseJitType, isSimdAsHWIntrinsic);
+ NamedIntrinsic intrinsic = NI_Illegal;
switch (op)
{
// Shuffle is meant to swap the comparison results of low-32-bits and high 32-bits of
// respective long elements.
- GenTree* tmp =
- gtNewSimdCmpOpNode(op, type, op1, op2, CORINFO_TYPE_INT, simdSize, isSimdAsHWIntrinsic);
+ GenTree* tmp = gtNewSimdCmpOpNode(op, type, op1, op2, CORINFO_TYPE_INT, simdSize);
- op1 = fgMakeMultiUse(&tmp, clsHnd);
+ op1 = fgMakeMultiUse(&tmp);
op2 = gtNewSimdHWIntrinsicNode(type, op1, gtNewIconNode(SHUFFLE_ZWXY), NI_SSE2_Shuffle,
- CORINFO_TYPE_INT, simdSize, isSimdAsHWIntrinsic);
+ CORINFO_TYPE_INT, simdSize);
- return gtNewSimdBinOpNode(GT_AND, type, tmp, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
+ return gtNewSimdBinOpNode(GT_AND, type, tmp, op2, simdBaseJitType, simdSize);
}
}
else
if (!varTypeIsLong(simdBaseType))
{
assert(!varTypeIsFloating(simdBaseType));
- GenTree* op1Dup = fgMakeMultiUse(&op1, clsHnd);
+ GenTree* op1Dup = fgMakeMultiUse(&op1);
// EQ(Max(op1, op2), op1)
- GenTree* maxNode =
- gtNewSimdMaxNode(type, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
- return gtNewSimdCmpOpNode(GT_EQ, type, maxNode, op1Dup, simdBaseJitType, simdSize,
- isSimdAsHWIntrinsic);
+ GenTree* maxNode = gtNewSimdMaxNode(type, op1, op2, simdBaseJitType, simdSize);
+ return gtNewSimdCmpOpNode(GT_EQ, type, maxNode, op1Dup, simdBaseJitType, simdSize);
}
}
//
// result = BitwiseOr(op1, op2)
- GenTree* op1Dup = fgMakeMultiUse(&op1, clsHnd);
- GenTree* op2Dup = fgMakeMultiUse(&op2, clsHnd);
+ GenTree* op1Dup = fgMakeMultiUse(&op1);
+ GenTree* op2Dup = fgMakeMultiUse(&op2);
- op1 = gtNewSimdCmpOpNode(GT_GT, type, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
- op2 = gtNewSimdCmpOpNode(GT_EQ, type, op1Dup, op2Dup, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
+ op1 = gtNewSimdCmpOpNode(GT_GT, type, op1, op2, simdBaseJitType, simdSize);
+ op2 = gtNewSimdCmpOpNode(GT_EQ, type, op1Dup, op2Dup, simdBaseJitType, simdSize);
- return gtNewSimdBinOpNode(GT_OR, type, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
+ return gtNewSimdBinOpNode(GT_OR, type, op1, op2, simdBaseJitType, simdSize);
}
break;
}
GenTree* vecCon2 = gtCloneExpr(vecCon1);
// op1 = op1 - constVector
- op1 = gtNewSimdBinOpNode(GT_SUB, type, op1, vecCon1, opJitType, simdSize, isSimdAsHWIntrinsic);
+ op1 = gtNewSimdBinOpNode(GT_SUB, type, op1, vecCon1, opJitType, simdSize);
// op2 = op2 - constVector
- op2 = gtNewSimdBinOpNode(GT_SUB, type, op2, vecCon2, opJitType, simdSize, isSimdAsHWIntrinsic);
+ op2 = gtNewSimdBinOpNode(GT_SUB, type, op2, vecCon2, opJitType, simdSize);
}
// This should have been mutated by the above path
//
// result = BitwiseOr(op1, op2)
- GenTree* op1Dup1 = fgMakeMultiUse(&op1, clsHnd);
- GenTree* op1Dup2 = fgMakeMultiUse(&op1Dup1, clsHnd);
+ GenTree* op1Dup1 = fgMakeMultiUse(&op1);
+ GenTree* op1Dup2 = fgMakeMultiUse(&op1Dup1);
- GenTree* op2Dup1 = fgMakeMultiUse(&op2, clsHnd);
- GenTree* op2Dup2 = fgMakeMultiUse(&op2Dup1, clsHnd);
+ GenTree* op2Dup1 = fgMakeMultiUse(&op2);
+ GenTree* op2Dup2 = fgMakeMultiUse(&op2Dup1);
- GenTree* t =
- gtNewSimdCmpOpNode(op, type, op1, op2, CORINFO_TYPE_INT, simdSize, isSimdAsHWIntrinsic);
- GenTree* u = gtNewSimdCmpOpNode(GT_EQ, type, op1Dup1, op2Dup1, CORINFO_TYPE_INT, simdSize,
- isSimdAsHWIntrinsic);
- GenTree* v = gtNewSimdCmpOpNode(op, type, op1Dup2, op2Dup2, CORINFO_TYPE_UINT, simdSize,
- isSimdAsHWIntrinsic);
+ GenTree* t = gtNewSimdCmpOpNode(op, type, op1, op2, CORINFO_TYPE_INT, simdSize);
+ GenTree* u = gtNewSimdCmpOpNode(GT_EQ, type, op1Dup1, op2Dup1, CORINFO_TYPE_INT, simdSize);
+ GenTree* v = gtNewSimdCmpOpNode(op, type, op1Dup2, op2Dup2, CORINFO_TYPE_UINT, simdSize);
op1 = gtNewSimdHWIntrinsicNode(type, t, gtNewIconNode(SHUFFLE_WWYY, TYP_INT), NI_SSE2_Shuffle,
- CORINFO_TYPE_INT, simdSize, isSimdAsHWIntrinsic);
+ CORINFO_TYPE_INT, simdSize);
u = gtNewSimdHWIntrinsicNode(type, u, gtNewIconNode(SHUFFLE_WWYY, TYP_INT), NI_SSE2_Shuffle,
- CORINFO_TYPE_INT, simdSize, isSimdAsHWIntrinsic);
+ CORINFO_TYPE_INT, simdSize);
v = gtNewSimdHWIntrinsicNode(type, v, gtNewIconNode(SHUFFLE_ZZXX, TYP_INT), NI_SSE2_Shuffle,
- CORINFO_TYPE_INT, simdSize, isSimdAsHWIntrinsic);
+ CORINFO_TYPE_INT, simdSize);
- op2 = gtNewSimdBinOpNode(GT_AND, type, u, v, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
- return gtNewSimdBinOpNode(GT_OR, type, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
+ op2 = gtNewSimdBinOpNode(GT_AND, type, u, v, simdBaseJitType, simdSize);
+ return gtNewSimdBinOpNode(GT_OR, type, op1, op2, simdBaseJitType, simdSize);
}
}
else
if (!varTypeIsLong(simdBaseType))
{
assert(!varTypeIsFloating(simdBaseType));
- GenTree* op1Dup = fgMakeMultiUse(&op1, clsHnd);
+ GenTree* op1Dup = fgMakeMultiUse(&op1);
// EQ(Min(op1, op2), op1)
- GenTree* minNode =
- gtNewSimdMinNode(type, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
- return gtNewSimdCmpOpNode(GT_EQ, type, minNode, op1Dup, simdBaseJitType, simdSize,
- isSimdAsHWIntrinsic);
+ GenTree* minNode = gtNewSimdMinNode(type, op1, op2, simdBaseJitType, simdSize);
+ return gtNewSimdCmpOpNode(GT_EQ, type, minNode, op1Dup, simdBaseJitType, simdSize);
}
}
//
// result = BitwiseOr(op1, op2)
- GenTree* op1Dup = fgMakeMultiUse(&op1, clsHnd);
- GenTree* op2Dup = fgMakeMultiUse(&op2, clsHnd);
+ GenTree* op1Dup = fgMakeMultiUse(&op1);
+ GenTree* op2Dup = fgMakeMultiUse(&op2);
- op1 = gtNewSimdCmpOpNode(GT_LT, type, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
- op2 = gtNewSimdCmpOpNode(GT_EQ, type, op1Dup, op2Dup, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
+ op1 = gtNewSimdCmpOpNode(GT_LT, type, op1, op2, simdBaseJitType, simdSize);
+ op2 = gtNewSimdCmpOpNode(GT_EQ, type, op1Dup, op2Dup, simdBaseJitType, simdSize);
- return gtNewSimdBinOpNode(GT_OR, type, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
+ return gtNewSimdBinOpNode(GT_OR, type, op1, op2, simdBaseJitType, simdSize);
}
break;
}
GenTree* vecCon2 = gtCloneExpr(vecCon1);
// op1 = op1 - constVector
- op1 = gtNewSimdBinOpNode(GT_SUB, type, op1, vecCon1, opJitType, simdSize, isSimdAsHWIntrinsic);
+ op1 = gtNewSimdBinOpNode(GT_SUB, type, op1, vecCon1, opJitType, simdSize);
// op2 = op2 - constVector
- op2 = gtNewSimdBinOpNode(GT_SUB, type, op2, vecCon2, opJitType, simdSize, isSimdAsHWIntrinsic);
+ op2 = gtNewSimdBinOpNode(GT_SUB, type, op2, vecCon2, opJitType, simdSize);
}
// This should have been mutated by the above path
//
// result = BitwiseOr(op1, op2)
- GenTree* op1Dup1 = fgMakeMultiUse(&op1, clsHnd);
- GenTree* op1Dup2 = fgMakeMultiUse(&op1Dup1, clsHnd);
+ GenTree* op1Dup1 = fgMakeMultiUse(&op1);
+ GenTree* op1Dup2 = fgMakeMultiUse(&op1Dup1);
- GenTree* op2Dup1 = fgMakeMultiUse(&op2, clsHnd);
- GenTree* op2Dup2 = fgMakeMultiUse(&op2Dup1, clsHnd);
+ GenTree* op2Dup1 = fgMakeMultiUse(&op2);
+ GenTree* op2Dup2 = fgMakeMultiUse(&op2Dup1);
- GenTree* t =
- gtNewSimdCmpOpNode(op, type, op1, op2, CORINFO_TYPE_INT, simdSize, isSimdAsHWIntrinsic);
- GenTree* u = gtNewSimdCmpOpNode(GT_EQ, type, op1Dup1, op2Dup1, CORINFO_TYPE_INT, simdSize,
- isSimdAsHWIntrinsic);
- GenTree* v = gtNewSimdCmpOpNode(op, type, op1Dup2, op2Dup2, CORINFO_TYPE_UINT, simdSize,
- isSimdAsHWIntrinsic);
+ GenTree* t = gtNewSimdCmpOpNode(op, type, op1, op2, CORINFO_TYPE_INT, simdSize);
+ GenTree* u = gtNewSimdCmpOpNode(GT_EQ, type, op1Dup1, op2Dup1, CORINFO_TYPE_INT, simdSize);
+ GenTree* v = gtNewSimdCmpOpNode(op, type, op1Dup2, op2Dup2, CORINFO_TYPE_UINT, simdSize);
op1 = gtNewSimdHWIntrinsicNode(type, t, gtNewIconNode(SHUFFLE_WWYY, TYP_INT), NI_SSE2_Shuffle,
- CORINFO_TYPE_INT, simdSize, isSimdAsHWIntrinsic);
+ CORINFO_TYPE_INT, simdSize);
u = gtNewSimdHWIntrinsicNode(type, u, gtNewIconNode(SHUFFLE_WWYY, TYP_INT), NI_SSE2_Shuffle,
- CORINFO_TYPE_INT, simdSize, isSimdAsHWIntrinsic);
+ CORINFO_TYPE_INT, simdSize);
v = gtNewSimdHWIntrinsicNode(type, v, gtNewIconNode(SHUFFLE_ZZXX, TYP_INT), NI_SSE2_Shuffle,
- CORINFO_TYPE_INT, simdSize, isSimdAsHWIntrinsic);
+ CORINFO_TYPE_INT, simdSize);
- op2 = gtNewSimdBinOpNode(GT_AND, type, u, v, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
- return gtNewSimdBinOpNode(GT_OR, type, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
+ op2 = gtNewSimdBinOpNode(GT_AND, type, u, v, simdBaseJitType, simdSize);
+ return gtNewSimdBinOpNode(GT_OR, type, op1, op2, simdBaseJitType, simdSize);
}
}
else
}
assert(intrinsic != NI_Illegal);
- return gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
+ return gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsic, simdBaseJitType, simdSize);
}
-GenTree* Compiler::gtNewSimdCmpOpAllNode(genTreeOps op,
- var_types type,
- GenTree* op1,
- GenTree* op2,
- CorInfoType simdBaseJitType,
- unsigned simdSize,
- bool isSimdAsHWIntrinsic)
+GenTree* Compiler::gtNewSimdCmpOpAllNode(
+ genTreeOps op, var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
assert(type == TYP_BOOL);
intrinsic = NI_Vector128_op_Equality;
}
- op1 = gtNewSimdCmpOpNode(op, simdType, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
+ op1 = gtNewSimdCmpOpNode(op, simdType, op1, op2, simdBaseJitType, simdSize);
op2 = gtNewAllBitsSetConNode(simdType);
if (simdBaseType == TYP_FLOAT)
intrinsic = NI_Vector128_op_Equality;
}
- op1 = gtNewSimdCmpOpNode(op, simdType, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
+ op1 = gtNewSimdCmpOpNode(op, simdType, op1, op2, simdBaseJitType, simdSize);
op2 = gtNewAllBitsSetConNode(simdType);
if (simdBaseType == TYP_FLOAT)
}
assert(intrinsic != NI_Illegal);
- return gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
+ return gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsic, simdBaseJitType, simdSize);
}
-GenTree* Compiler::gtNewSimdCmpOpAnyNode(genTreeOps op,
- var_types type,
- GenTree* op1,
- GenTree* op2,
- CorInfoType simdBaseJitType,
- unsigned simdSize,
- bool isSimdAsHWIntrinsic)
+GenTree* Compiler::gtNewSimdCmpOpAnyNode(
+ genTreeOps op, var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
assert(type == TYP_BOOL);
intrinsic = NI_Vector128_op_Inequality;
}
- op1 = gtNewSimdCmpOpNode(op, simdType, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
+ op1 = gtNewSimdCmpOpNode(op, simdType, op1, op2, simdBaseJitType, simdSize);
op2 = gtNewZeroConNode(simdType);
if (simdBaseType == TYP_FLOAT)
intrinsic = (simdSize == 8) ? NI_Vector64_op_Inequality : NI_Vector128_op_Inequality;
- op1 = gtNewSimdCmpOpNode(op, simdType, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
+ op1 = gtNewSimdCmpOpNode(op, simdType, op1, op2, simdBaseJitType, simdSize);
op2 = gtNewZeroConNode(simdType);
if (simdBaseType == TYP_FLOAT)
}
assert(intrinsic != NI_Illegal);
- return gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
+ return gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsic, simdBaseJitType, simdSize);
}
-GenTree* Compiler::gtNewSimdCndSelNode(var_types type,
- GenTree* op1,
- GenTree* op2,
- GenTree* op3,
- CorInfoType simdBaseJitType,
- unsigned simdSize,
- bool isSimdAsHWIntrinsic)
+GenTree* Compiler::gtNewSimdCndSelNode(
+ var_types type, GenTree* op1, GenTree* op2, GenTree* op3, CorInfoType simdBaseJitType, unsigned simdSize)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
#if defined(TARGET_XARCH)
assert((simdSize != 32) || compIsaSupportedDebugOnly(InstructionSet_AVX));
intrinsic = (simdSize == 32) ? NI_Vector256_ConditionalSelect : NI_Vector128_ConditionalSelect;
- return gtNewSimdHWIntrinsicNode(type, op1, op2, op3, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
+ return gtNewSimdHWIntrinsicNode(type, op1, op2, op3, intrinsic, simdBaseJitType, simdSize);
#elif defined(TARGET_ARM64)
- return gtNewSimdHWIntrinsicNode(type, op1, op2, op3, NI_AdvSimd_BitwiseSelect, simdBaseJitType, simdSize,
- isSimdAsHWIntrinsic);
+ return gtNewSimdHWIntrinsicNode(type, op1, op2, op3, NI_AdvSimd_BitwiseSelect, simdBaseJitType, simdSize);
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
// op1 - The value of broadcast to every element of the simd value
// simdBaseJitType - The base JIT type of SIMD type of the intrinsic
// simdSize - The size of the SIMD type of the intrinsic
-// isSimdAsHWIntrinsic - true if this is a SimdAsHWIntrinsic node; otherwise, false
//
// Returns:
// The created CreateBroadcast node
//
-GenTree* Compiler::gtNewSimdCreateBroadcastNode(
- var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic)
+GenTree* Compiler::gtNewSimdCreateBroadcastNode(var_types type,
+ GenTree* op1,
+ CorInfoType simdBaseJitType,
+ unsigned simdSize)
{
NamedIntrinsic hwIntrinsicID = NI_Vector128_Create;
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
- return gtNewSimdHWIntrinsicNode(type, op1, hwIntrinsicID, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
+ return gtNewSimdHWIntrinsicNode(type, op1, hwIntrinsicID, simdBaseJitType, simdSize);
}
//----------------------------------------------------------------------------------------------
// op1 - The value of element 0 of the simd value
// simdBaseJitType - The base JIT type of SIMD type of the intrinsic
// simdSize - The size of the SIMD type of the intrinsic
-// isSimdAsHWIntrinsic - true if this is a SimdAsHWIntrinsic node; otherwise, false
//
// Returns:
// The created CreateScalar node
//
-GenTree* Compiler::gtNewSimdCreateScalarNode(
- var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic)
+GenTree* Compiler::gtNewSimdCreateScalarNode(var_types type,
+ GenTree* op1,
+ CorInfoType simdBaseJitType,
+ unsigned simdSize)
{
NamedIntrinsic hwIntrinsicID = NI_Vector128_CreateScalar;
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
- return gtNewSimdHWIntrinsicNode(type, op1, hwIntrinsicID, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
+ return gtNewSimdHWIntrinsicNode(type, op1, hwIntrinsicID, simdBaseJitType, simdSize);
}
//----------------------------------------------------------------------------------------------
// op1 - The value of element 0 of the simd value
// simdBaseJitType - The base JIT type of SIMD type of the intrinsic
// simdSize - The size of the SIMD type of the intrinsic
-// isSimdAsHWIntrinsic - true if this is a SimdAsHWIntrinsic node; otherwise, false
//
// Returns:
// The created CreateScalarUnsafe node
// Remarks:
// This API is unsafe as it leaves the upper-bits of the vector undefined
//
-GenTree* Compiler::gtNewSimdCreateScalarUnsafeNode(
- var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic)
+GenTree* Compiler::gtNewSimdCreateScalarUnsafeNode(var_types type,
+ GenTree* op1,
+ CorInfoType simdBaseJitType,
+ unsigned simdSize)
{
NamedIntrinsic hwIntrinsicID = NI_Vector128_CreateScalarUnsafe;
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
- return gtNewSimdHWIntrinsicNode(type, op1, hwIntrinsicID, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
+ return gtNewSimdHWIntrinsicNode(type, op1, hwIntrinsicID, simdBaseJitType, simdSize);
}
-GenTree* Compiler::gtNewSimdDotProdNode(var_types type,
- GenTree* op1,
- GenTree* op2,
- CorInfoType simdBaseJitType,
- unsigned simdSize,
- bool isSimdAsHWIntrinsic)
+GenTree* Compiler::gtNewSimdDotProdNode(
+ var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
#endif // !TARGET_XARCH && !TARGET_ARM64
assert(intrinsic != NI_Illegal);
- return gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
+ return gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsic, simdBaseJitType, simdSize);
}
-GenTree* Compiler::gtNewSimdFloorNode(
- var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic)
+GenTree* Compiler::gtNewSimdFloorNode(var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
#endif // !TARGET_XARCH && !TARGET_ARM64
assert(intrinsic != NI_Illegal);
- return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
+ return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseJitType, simdSize);
}
-GenTree* Compiler::gtNewSimdGetElementNode(var_types type,
- GenTree* op1,
- GenTree* op2,
- CorInfoType simdBaseJitType,
- unsigned simdSize,
- bool isSimdAsHWIntrinsic)
+GenTree* Compiler::gtNewSimdGetElementNode(
+ var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize)
{
NamedIntrinsic intrinsicId = NI_Vector128_GetElement;
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
op2 = addRangeCheckForHWIntrinsic(op2, 0, immUpperBound);
}
- return gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsicId, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
+ return gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsicId, simdBaseJitType, simdSize);
}
-GenTree* Compiler::gtNewSimdGetLowerNode(
- var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic)
+GenTree* Compiler::gtNewSimdGetLowerNode(var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize)
{
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsArithmetic(simdBaseType));
#endif // !TARGET_XARCH && !TARGET_ARM64
assert(intrinsicId != NI_Illegal);
- return gtNewSimdHWIntrinsicNode(type, op1, intrinsicId, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
+ return gtNewSimdHWIntrinsicNode(type, op1, intrinsicId, simdBaseJitType, simdSize);
}
-GenTree* Compiler::gtNewSimdGetUpperNode(
- var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic)
+GenTree* Compiler::gtNewSimdGetUpperNode(var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize)
{
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsArithmetic(simdBaseType));
#endif // !TARGET_XARCH && !TARGET_ARM64
assert(intrinsicId != NI_Illegal);
- return gtNewSimdHWIntrinsicNode(type, op1, intrinsicId, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
+ return gtNewSimdHWIntrinsicNode(type, op1, intrinsicId, simdBaseJitType, simdSize);
}
//----------------------------------------------------------------------------------------------
// op1 - The address of the value to be loaded
// simdBaseJitType - The base JIT type of SIMD type of the intrinsic
// simdSize - The size of the SIMD type of the intrinsic
-// isSimdAsHWIntrinsic - true if this is a SimdAsHWIntrinsic node; otherwise, false
//
// Returns:
// The created Load node
//
-GenTree* Compiler::gtNewSimdLoadNode(
- var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic)
+GenTree* Compiler::gtNewSimdLoadNode(var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize)
{
assert(varTypeIsSIMD(type));
assert(getSIMDTypeForSize(simdSize) == type);
// op1 - The address of the value to be loaded
// simdBaseJitType - The base JIT type of SIMD type of the intrinsic
// simdSize - The size of the SIMD type of the intrinsic
-// isSimdAsHWIntrinsic - true if this is a SimdAsHWIntrinsic node; otherwise, false
//
// Returns:
// The created LoadAligned node
//
-GenTree* Compiler::gtNewSimdLoadAlignedNode(
- var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic)
+GenTree* Compiler::gtNewSimdLoadAlignedNode(var_types type,
+ GenTree* op1,
+ CorInfoType simdBaseJitType,
+ unsigned simdSize)
{
#if defined(TARGET_XARCH)
assert(IsBaselineSimdIsaSupportedDebugOnly());
}
assert(intrinsic != NI_Illegal);
- return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
+ return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseJitType, simdSize);
#elif defined(TARGET_ARM64)
// ARM64 doesn't have aligned loads, but aligned loads are only validated to be
// aligned when optimizations are disable, so only skip the intrinsic handling
// if optimizations are enabled
assert(opts.OptimizationEnabled());
- return gtNewSimdLoadNode(type, op1, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
+ return gtNewSimdLoadNode(type, op1, simdBaseJitType, simdSize);
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
// op1 - The address of the value to be loaded
// simdBaseJitType - The base JIT type of SIMD type of the intrinsic
// simdSize - The size of the SIMD type of the intrinsic
-// isSimdAsHWIntrinsic - true if this is a SimdAsHWIntrinsic node; otherwise, false
//
// Returns:
// The created LoadNonTemporal node
//
-GenTree* Compiler::gtNewSimdLoadNonTemporalNode(
- var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic)
+GenTree* Compiler::gtNewSimdLoadNonTemporalNode(var_types type,
+ GenTree* op1,
+ CorInfoType simdBaseJitType,
+ unsigned simdSize)
{
#if defined(TARGET_XARCH)
assert(IsBaselineSimdIsaSupportedDebugOnly());
}
assert(intrinsic != NI_Illegal);
- return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
+ return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseJitType, simdSize);
#elif defined(TARGET_ARM64)
// ARM64 doesn't have aligned loads, but aligned loads are only validated to be
// aligned when optimizations are disable, so only skip the intrinsic handling
// if optimizations are enabled
assert(opts.OptimizationEnabled());
- return gtNewSimdLoadNode(type, op1, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
+ return gtNewSimdLoadNode(type, op1, simdBaseJitType, simdSize);
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
}
-GenTree* Compiler::gtNewSimdMaxNode(var_types type,
- GenTree* op1,
- GenTree* op2,
- CorInfoType simdBaseJitType,
- unsigned simdSize,
- bool isSimdAsHWIntrinsic)
+GenTree* Compiler::gtNewSimdMaxNode(
+ var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsArithmetic(simdBaseType));
- NamedIntrinsic intrinsic = NI_Illegal;
- CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSimdOrHW(type, simdBaseJitType, isSimdAsHWIntrinsic);
+ NamedIntrinsic intrinsic = NI_Illegal;
#if defined(TARGET_XARCH)
if (simdSize == 32)
// op1 = op1 - constVector
// -or-
// op1 = op1 + constVector
- op1 = gtNewSimdBinOpNode(fixupOp1, type, op1, vecCon1, opJitType, simdSize, isSimdAsHWIntrinsic);
+ op1 = gtNewSimdBinOpNode(fixupOp1, type, op1, vecCon1, opJitType, simdSize);
// op2 = op2 - constVector
// -or-
// op2 = op2 + constVector
- op2 = gtNewSimdBinOpNode(fixupOp1, type, op2, vecCon2, opJitType, simdSize, isSimdAsHWIntrinsic);
+ op2 = gtNewSimdBinOpNode(fixupOp1, type, op2, vecCon2, opJitType, simdSize);
// op1 = Max(op1, op2)
- op1 = gtNewSimdMaxNode(type, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
+ op1 = gtNewSimdMaxNode(type, op1, op2, simdBaseJitType, simdSize);
// result = op1 + constVector
// -or-
// result = op1 - constVector
- return gtNewSimdBinOpNode(fixupOp2, type, op1, vecCon3, opJitType, simdSize, isSimdAsHWIntrinsic);
+ return gtNewSimdBinOpNode(fixupOp2, type, op1, vecCon3, opJitType, simdSize);
}
case TYP_INT:
if (intrinsic != NI_Illegal)
{
- return gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
+ return gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsic, simdBaseJitType, simdSize);
}
- GenTree* op1Dup = fgMakeMultiUse(&op1, clsHnd);
- GenTree* op2Dup = fgMakeMultiUse(&op2, clsHnd);
+ GenTree* op1Dup = fgMakeMultiUse(&op1);
+ GenTree* op2Dup = fgMakeMultiUse(&op2);
// op1 = op1 > op2
- op1 = gtNewSimdCmpOpNode(GT_GT, type, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
+ op1 = gtNewSimdCmpOpNode(GT_GT, type, op1, op2, simdBaseJitType, simdSize);
// result = ConditionalSelect(op1, op1Dup, op2Dup)
- return gtNewSimdCndSelNode(type, op1, op1Dup, op2Dup, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
+ return gtNewSimdCndSelNode(type, op1, op1Dup, op2Dup, simdBaseJitType, simdSize);
}
-GenTree* Compiler::gtNewSimdMinNode(var_types type,
- GenTree* op1,
- GenTree* op2,
- CorInfoType simdBaseJitType,
- unsigned simdSize,
- bool isSimdAsHWIntrinsic)
+GenTree* Compiler::gtNewSimdMinNode(
+ var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsArithmetic(simdBaseType));
- NamedIntrinsic intrinsic = NI_Illegal;
- CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSimdOrHW(type, simdBaseJitType, isSimdAsHWIntrinsic);
+ NamedIntrinsic intrinsic = NI_Illegal;
#if defined(TARGET_XARCH)
if (simdSize == 32)
assert(opJitType != simdBaseJitType);
assert(opType != simdBaseType);
- GenTree* constVector =
- gtNewSimdCreateBroadcastNode(type, constVal, CORINFO_TYPE_INT, simdSize, isSimdAsHWIntrinsic);
+ GenTree* constVector = gtNewSimdCreateBroadcastNode(type, constVal, CORINFO_TYPE_INT, simdSize);
- GenTree* constVectorDup1 = fgMakeMultiUse(&constVector, clsHnd);
- GenTree* constVectorDup2 = fgMakeMultiUse(&constVectorDup1, clsHnd);
+ GenTree* constVectorDup1 = fgMakeMultiUse(&constVector);
+ GenTree* constVectorDup2 = fgMakeMultiUse(&constVectorDup1);
// op1 = op1 - constVector
// -or-
// op1 = op1 + constVector
- op1 = gtNewSimdBinOpNode(fixupOp1, type, op1, constVector, opJitType, simdSize, isSimdAsHWIntrinsic);
+ op1 = gtNewSimdBinOpNode(fixupOp1, type, op1, constVector, opJitType, simdSize);
// op2 = op2 - constVectorDup1
// -or-
// op2 = op2 + constVectorDup1
- op2 =
- gtNewSimdBinOpNode(fixupOp1, type, op2, constVectorDup1, opJitType, simdSize, isSimdAsHWIntrinsic);
+ op2 = gtNewSimdBinOpNode(fixupOp1, type, op2, constVectorDup1, opJitType, simdSize);
// op1 = Min(op1, op2)
- op1 = gtNewSimdMinNode(type, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
+ op1 = gtNewSimdMinNode(type, op1, op2, simdBaseJitType, simdSize);
// result = op1 + constVectorDup2
// -or-
// result = op1 - constVectorDup2
- return gtNewSimdBinOpNode(fixupOp2, type, op1, constVectorDup2, opJitType, simdSize,
- isSimdAsHWIntrinsic);
+ return gtNewSimdBinOpNode(fixupOp2, type, op1, constVectorDup2, opJitType, simdSize);
}
case TYP_INT:
if (intrinsic != NI_Illegal)
{
- return gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
+ return gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsic, simdBaseJitType, simdSize);
}
- GenTree* op1Dup = fgMakeMultiUse(&op1, clsHnd);
- GenTree* op2Dup = fgMakeMultiUse(&op2, clsHnd);
+ GenTree* op1Dup = fgMakeMultiUse(&op1);
+ GenTree* op2Dup = fgMakeMultiUse(&op2);
// op1 = op1 < op2
- op1 = gtNewSimdCmpOpNode(GT_LT, type, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
+ op1 = gtNewSimdCmpOpNode(GT_LT, type, op1, op2, simdBaseJitType, simdSize);
// result = ConditionalSelect(op1, op1Dup, op2Dup)
- return gtNewSimdCndSelNode(type, op1, op1Dup, op2Dup, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
+ return gtNewSimdCndSelNode(type, op1, op1Dup, op2Dup, simdBaseJitType, simdSize);
}
-GenTree* Compiler::gtNewSimdNarrowNode(var_types type,
- GenTree* op1,
- GenTree* op2,
- CorInfoType simdBaseJitType,
- unsigned simdSize,
- bool isSimdAsHWIntrinsic)
+GenTree* Compiler::gtNewSimdNarrowNode(
+ var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
}
}
- tmp1 = gtNewSimdHWIntrinsicNode(tmpSimdType, op1, intrinsicId, opBaseJitType, simdSize, isSimdAsHWIntrinsic);
- tmp2 = gtNewSimdHWIntrinsicNode(tmpSimdType, op2, intrinsicId, opBaseJitType, simdSize, isSimdAsHWIntrinsic);
+ tmp1 = gtNewSimdHWIntrinsicNode(tmpSimdType, op1, intrinsicId, opBaseJitType, simdSize);
+ tmp2 = gtNewSimdHWIntrinsicNode(tmpSimdType, op2, intrinsicId, opBaseJitType, simdSize);
if (simdSize == 16)
{
- return gtNewSimdHWIntrinsicNode(type, tmp1, tmp2, NI_SSE_MoveLowToHigh, CORINFO_TYPE_FLOAT, simdSize,
- isSimdAsHWIntrinsic);
+ return gtNewSimdHWIntrinsicNode(type, tmp1, tmp2, NI_SSE_MoveLowToHigh, CORINFO_TYPE_FLOAT, simdSize);
}
intrinsicId = (simdSize == 64) ? NI_Vector256_ToVector512Unsafe : NI_Vector128_ToVector256Unsafe;
- tmp1 = gtNewSimdHWIntrinsicNode(type, tmp1, intrinsicId, simdBaseJitType, simdSize / 2, isSimdAsHWIntrinsic);
- return gtNewSimdWithUpperNode(type, tmp1, tmp2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
+ tmp1 = gtNewSimdHWIntrinsicNode(type, tmp1, intrinsicId, simdBaseJitType, simdSize / 2);
+ return gtNewSimdWithUpperNode(type, tmp1, tmp2, simdBaseJitType, simdSize);
}
else if (simdSize == 32)
{
// This is the same in principle to the other comments below, however due to
// code formatting, it's too long to reasonably display here.
-
- CorInfoType opBaseJitType = (simdBaseType == TYP_BYTE) ? CORINFO_TYPE_SHORT : CORINFO_TYPE_USHORT;
- CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSimdOrHW(type, opBaseJitType, isSimdAsHWIntrinsic);
-
GenTreeVecCon* vecCon1 = gtNewVconNode(type);
for (unsigned i = 0; i < (simdSize / 8); i++)
GenTree* vecCon2 = gtCloneExpr(vecCon1);
- tmp1 = gtNewSimdBinOpNode(GT_AND, type, op1, vecCon1, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
- tmp2 = gtNewSimdBinOpNode(GT_AND, type, op2, vecCon2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
+ tmp1 = gtNewSimdBinOpNode(GT_AND, type, op1, vecCon1, simdBaseJitType, simdSize);
+ tmp2 = gtNewSimdBinOpNode(GT_AND, type, op2, vecCon2, simdBaseJitType, simdSize);
tmp3 = gtNewSimdHWIntrinsicNode(type, tmp1, tmp2, NI_AVX2_PackUnsignedSaturate, CORINFO_TYPE_UBYTE,
- simdSize, isSimdAsHWIntrinsic);
+ simdSize);
CorInfoType permuteBaseJitType = (simdBaseType == TYP_BYTE) ? CORINFO_TYPE_LONG : CORINFO_TYPE_ULONG;
return gtNewSimdHWIntrinsicNode(type, tmp3, gtNewIconNode(SHUFFLE_WYZX), NI_AVX2_Permute4x64,
- permuteBaseJitType, simdSize, isSimdAsHWIntrinsic);
+ permuteBaseJitType, simdSize);
}
case TYP_SHORT:
// var tmp3 = Avx2.PackUnsignedSaturate(tmp1, tmp2);
// return Avx2.Permute4x64(tmp3.AsUInt64(), SHUFFLE_WYZX).As<T>();
- CorInfoType opBaseJitType = (simdBaseType == TYP_SHORT) ? CORINFO_TYPE_INT : CORINFO_TYPE_UINT;
- CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSimdOrHW(type, opBaseJitType, isSimdAsHWIntrinsic);
-
GenTreeVecCon* vecCon1 = gtNewVconNode(type);
for (unsigned i = 0; i < (simdSize / 8); i++)
GenTree* vecCon2 = gtCloneExpr(vecCon1);
- tmp1 = gtNewSimdBinOpNode(GT_AND, type, op1, vecCon1, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
- tmp2 = gtNewSimdBinOpNode(GT_AND, type, op2, vecCon2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
+ tmp1 = gtNewSimdBinOpNode(GT_AND, type, op1, vecCon1, simdBaseJitType, simdSize);
+ tmp2 = gtNewSimdBinOpNode(GT_AND, type, op2, vecCon2, simdBaseJitType, simdSize);
tmp3 = gtNewSimdHWIntrinsicNode(type, tmp1, tmp2, NI_AVX2_PackUnsignedSaturate, CORINFO_TYPE_USHORT,
- simdSize, isSimdAsHWIntrinsic);
+ simdSize);
CorInfoType permuteBaseJitType = (simdBaseType == TYP_BYTE) ? CORINFO_TYPE_LONG : CORINFO_TYPE_ULONG;
return gtNewSimdHWIntrinsicNode(type, tmp3, gtNewIconNode(SHUFFLE_WYZX), NI_AVX2_Permute4x64,
- permuteBaseJitType, simdSize, isSimdAsHWIntrinsic);
+ permuteBaseJitType, simdSize);
}
case TYP_INT:
// var tmp3 = Avx2.UnpackLow(tmp1, tmp2);
// return Avx2.Permute4x64(tmp3.AsUInt64(), SHUFFLE_WYZX).AsUInt32();
- CorInfoType opBaseJitType = (simdBaseType == TYP_INT) ? CORINFO_TYPE_LONG : CORINFO_TYPE_ULONG;
- CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSimdOrHW(type, opBaseJitType, isSimdAsHWIntrinsic);
+ CorInfoType opBaseJitType = (simdBaseType == TYP_INT) ? CORINFO_TYPE_LONG : CORINFO_TYPE_ULONG;
- GenTree* op1Dup = fgMakeMultiUse(&op1, clsHnd);
- GenTree* op2Dup = fgMakeMultiUse(&op2, clsHnd);
+ GenTree* op1Dup = fgMakeMultiUse(&op1);
+ GenTree* op2Dup = fgMakeMultiUse(&op2);
- tmp1 = gtNewSimdHWIntrinsicNode(type, op1, op2, NI_AVX2_UnpackLow, simdBaseJitType, simdSize,
- isSimdAsHWIntrinsic);
- tmp2 = gtNewSimdHWIntrinsicNode(type, op1Dup, op2Dup, NI_AVX2_UnpackHigh, simdBaseJitType, simdSize,
- isSimdAsHWIntrinsic);
- tmp3 = gtNewSimdHWIntrinsicNode(type, tmp1, tmp2, NI_AVX2_UnpackLow, simdBaseJitType, simdSize,
- isSimdAsHWIntrinsic);
+ tmp1 = gtNewSimdHWIntrinsicNode(type, op1, op2, NI_AVX2_UnpackLow, simdBaseJitType, simdSize);
+ tmp2 = gtNewSimdHWIntrinsicNode(type, op1Dup, op2Dup, NI_AVX2_UnpackHigh, simdBaseJitType, simdSize);
+ tmp3 = gtNewSimdHWIntrinsicNode(type, tmp1, tmp2, NI_AVX2_UnpackLow, simdBaseJitType, simdSize);
return gtNewSimdHWIntrinsicNode(type, tmp3, gtNewIconNode(SHUFFLE_WYZX), NI_AVX2_Permute4x64,
- opBaseJitType, simdSize, isSimdAsHWIntrinsic);
+ opBaseJitType, simdSize);
}
case TYP_FLOAT:
CorInfoType opBaseJitType = CORINFO_TYPE_DOUBLE;
- tmp1 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, NI_AVX_ConvertToVector128Single, opBaseJitType,
- simdSize, isSimdAsHWIntrinsic);
- tmp2 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op2, NI_AVX_ConvertToVector128Single, opBaseJitType,
- simdSize, isSimdAsHWIntrinsic);
+ tmp1 =
+ gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, NI_AVX_ConvertToVector128Single, opBaseJitType, simdSize);
+ tmp2 =
+ gtNewSimdHWIntrinsicNode(TYP_SIMD16, op2, NI_AVX_ConvertToVector128Single, opBaseJitType, simdSize);
- tmp1 = gtNewSimdHWIntrinsicNode(type, tmp1, NI_Vector128_ToVector256Unsafe, simdBaseJitType, 16,
- isSimdAsHWIntrinsic);
- return gtNewSimdWithUpperNode(type, tmp1, tmp2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
+ tmp1 = gtNewSimdHWIntrinsicNode(type, tmp1, NI_Vector128_ToVector256Unsafe, simdBaseJitType, 16);
+ return gtNewSimdWithUpperNode(type, tmp1, tmp2, simdBaseJitType, simdSize);
}
default:
// var tmp2 = Sse2.And(op2.AsSByte(), vcns);
// return Sse2.PackUnsignedSaturate(tmp1, tmp2).As<T>();
- CorInfoType opBaseJitType = (simdBaseType == TYP_BYTE) ? CORINFO_TYPE_SHORT : CORINFO_TYPE_USHORT;
- CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSimdOrHW(type, opBaseJitType, isSimdAsHWIntrinsic);
-
GenTreeVecCon* vecCon1 = gtNewVconNode(type);
for (unsigned i = 0; i < (simdSize / 8); i++)
GenTree* vecCon2 = gtCloneExpr(vecCon1);
- tmp1 = gtNewSimdBinOpNode(GT_AND, type, op1, vecCon1, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
- tmp2 = gtNewSimdBinOpNode(GT_AND, type, op2, vecCon2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
+ tmp1 = gtNewSimdBinOpNode(GT_AND, type, op1, vecCon1, simdBaseJitType, simdSize);
+ tmp2 = gtNewSimdBinOpNode(GT_AND, type, op2, vecCon2, simdBaseJitType, simdSize);
return gtNewSimdHWIntrinsicNode(type, tmp1, tmp2, NI_SSE2_PackUnsignedSaturate, CORINFO_TYPE_UBYTE,
- simdSize, isSimdAsHWIntrinsic);
+ simdSize);
}
case TYP_SHORT:
// op2 = Elements 4, 5, 6, 7; 4L, 4U, 5L, 5U, 6L, 6U, 7L, 7U
//
// ...
-
- CorInfoType opBaseJitType = (simdBaseType == TYP_SHORT) ? CORINFO_TYPE_INT : CORINFO_TYPE_UINT;
- CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSimdOrHW(type, opBaseJitType, isSimdAsHWIntrinsic);
-
if (compOpportunisticallyDependsOn(InstructionSet_SSE41))
{
// ...
GenTree* vecCon2 = gtCloneExpr(vecCon1);
- tmp1 =
- gtNewSimdBinOpNode(GT_AND, type, op1, vecCon1, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
- tmp2 =
- gtNewSimdBinOpNode(GT_AND, type, op2, vecCon2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
+ tmp1 = gtNewSimdBinOpNode(GT_AND, type, op1, vecCon1, simdBaseJitType, simdSize);
+ tmp2 = gtNewSimdBinOpNode(GT_AND, type, op2, vecCon2, simdBaseJitType, simdSize);
return gtNewSimdHWIntrinsicNode(type, tmp1, tmp2, NI_SSE41_PackUnsignedSaturate,
- CORINFO_TYPE_USHORT, simdSize, isSimdAsHWIntrinsic);
+ CORINFO_TYPE_USHORT, simdSize);
}
else
{
// var tmp4 = Sse2.UnpackHigh(tmp1, tmp2);
// return Sse2.UnpackLow(tmp3, tmp4).As<T>();
- GenTree* op1Dup = fgMakeMultiUse(&op1, clsHnd);
- GenTree* op2Dup = fgMakeMultiUse(&op2, clsHnd);
-
- tmp1 = gtNewSimdHWIntrinsicNode(type, op1, op2, NI_SSE2_UnpackLow, simdBaseJitType, simdSize,
- isSimdAsHWIntrinsic);
- tmp2 = gtNewSimdHWIntrinsicNode(type, op1Dup, op2Dup, NI_SSE2_UnpackHigh, simdBaseJitType, simdSize,
- isSimdAsHWIntrinsic);
+ GenTree* op1Dup = fgMakeMultiUse(&op1);
+ GenTree* op2Dup = fgMakeMultiUse(&op2);
- clsHnd = gtGetStructHandleForSimdOrHW(type, simdBaseJitType, isSimdAsHWIntrinsic);
+ tmp1 = gtNewSimdHWIntrinsicNode(type, op1, op2, NI_SSE2_UnpackLow, simdBaseJitType, simdSize);
+ tmp2 =
+ gtNewSimdHWIntrinsicNode(type, op1Dup, op2Dup, NI_SSE2_UnpackHigh, simdBaseJitType, simdSize);
- GenTree* tmp1Dup = fgMakeMultiUse(&tmp1, clsHnd);
- GenTree* tmp2Dup = fgMakeMultiUse(&tmp2, clsHnd);
+ GenTree* tmp1Dup = fgMakeMultiUse(&tmp1);
+ GenTree* tmp2Dup = fgMakeMultiUse(&tmp2);
- tmp3 = gtNewSimdHWIntrinsicNode(type, tmp1, tmp2, NI_SSE2_UnpackLow, simdBaseJitType, simdSize,
- isSimdAsHWIntrinsic);
- tmp4 = gtNewSimdHWIntrinsicNode(type, tmp1Dup, tmp2Dup, NI_SSE2_UnpackHigh, simdBaseJitType,
- simdSize, isSimdAsHWIntrinsic);
+ tmp3 = gtNewSimdHWIntrinsicNode(type, tmp1, tmp2, NI_SSE2_UnpackLow, simdBaseJitType, simdSize);
+ tmp4 =
+ gtNewSimdHWIntrinsicNode(type, tmp1Dup, tmp2Dup, NI_SSE2_UnpackHigh, simdBaseJitType, simdSize);
- return gtNewSimdHWIntrinsicNode(type, tmp3, tmp4, NI_SSE2_UnpackLow, simdBaseJitType, simdSize,
- isSimdAsHWIntrinsic);
+ return gtNewSimdHWIntrinsicNode(type, tmp3, tmp4, NI_SSE2_UnpackLow, simdBaseJitType, simdSize);
}
}
// var tmp2 = Sse2.UnpackHigh(op1.AsUInt32(), op2.AsUInt32());
// return Sse2.UnpackLow(tmp1, tmp2).As<T>();
- CorInfoType opBaseJitType = (simdBaseType == TYP_INT) ? CORINFO_TYPE_LONG : CORINFO_TYPE_ULONG;
- CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSimdOrHW(type, opBaseJitType, isSimdAsHWIntrinsic);
+ GenTree* op1Dup = fgMakeMultiUse(&op1);
+ GenTree* op2Dup = fgMakeMultiUse(&op2);
- GenTree* op1Dup = fgMakeMultiUse(&op1, clsHnd);
- GenTree* op2Dup = fgMakeMultiUse(&op2, clsHnd);
+ tmp1 = gtNewSimdHWIntrinsicNode(type, op1, op2, NI_SSE2_UnpackLow, simdBaseJitType, simdSize);
+ tmp2 = gtNewSimdHWIntrinsicNode(type, op1Dup, op2Dup, NI_SSE2_UnpackHigh, simdBaseJitType, simdSize);
- tmp1 = gtNewSimdHWIntrinsicNode(type, op1, op2, NI_SSE2_UnpackLow, simdBaseJitType, simdSize,
- isSimdAsHWIntrinsic);
- tmp2 = gtNewSimdHWIntrinsicNode(type, op1Dup, op2Dup, NI_SSE2_UnpackHigh, simdBaseJitType, simdSize,
- isSimdAsHWIntrinsic);
-
- return gtNewSimdHWIntrinsicNode(type, tmp1, tmp2, NI_SSE2_UnpackLow, simdBaseJitType, simdSize,
- isSimdAsHWIntrinsic);
+ return gtNewSimdHWIntrinsicNode(type, tmp1, tmp2, NI_SSE2_UnpackLow, simdBaseJitType, simdSize);
}
case TYP_FLOAT:
CorInfoType opBaseJitType = CORINFO_TYPE_DOUBLE;
- tmp1 = gtNewSimdHWIntrinsicNode(type, op1, NI_SSE2_ConvertToVector128Single, opBaseJitType, simdSize,
- isSimdAsHWIntrinsic);
- tmp2 = gtNewSimdHWIntrinsicNode(type, op2, NI_SSE2_ConvertToVector128Single, opBaseJitType, simdSize,
- isSimdAsHWIntrinsic);
+ tmp1 = gtNewSimdHWIntrinsicNode(type, op1, NI_SSE2_ConvertToVector128Single, opBaseJitType, simdSize);
+ tmp2 = gtNewSimdHWIntrinsicNode(type, op2, NI_SSE2_ConvertToVector128Single, opBaseJitType, simdSize);
- return gtNewSimdHWIntrinsicNode(type, tmp1, tmp2, NI_SSE_MoveLowToHigh, simdBaseJitType, simdSize,
- isSimdAsHWIntrinsic);
+ return gtNewSimdHWIntrinsicNode(type, tmp1, tmp2, NI_SSE_MoveLowToHigh, simdBaseJitType, simdSize);
}
default:
// var tmp1 = AdvSimd.Arm64.ConvertToSingleLower(op1);
// return AdvSimd.Arm64.ConvertToSingleUpper(tmp1, op2);
- tmp1 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, op1, NI_AdvSimd_Arm64_ConvertToSingleLower, simdBaseJitType, 8,
- isSimdAsHWIntrinsic);
+ tmp1 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, op1, NI_AdvSimd_Arm64_ConvertToSingleLower, simdBaseJitType, 8);
return gtNewSimdHWIntrinsicNode(type, tmp1, op2, NI_AdvSimd_Arm64_ConvertToSingleUpper, simdBaseJitType,
- simdSize, isSimdAsHWIntrinsic);
+ simdSize);
}
else
{
// var tmp1 = AdvSimd.ExtractNarrowingLower(op1);
// return AdvSimd.ExtractNarrowingUpper(tmp1, op2);
- tmp1 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, op1, NI_AdvSimd_ExtractNarrowingLower, simdBaseJitType, 8,
- isSimdAsHWIntrinsic);
+ tmp1 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, op1, NI_AdvSimd_ExtractNarrowingLower, simdBaseJitType, 8);
return gtNewSimdHWIntrinsicNode(type, tmp1, op2, NI_AdvSimd_ExtractNarrowingUpper, simdBaseJitType,
- simdSize, isSimdAsHWIntrinsic);
+ simdSize);
}
}
else if (varTypeIsFloating(simdBaseType))
CorInfoType tmp2BaseJitType = CORINFO_TYPE_DOUBLE;
- tmp1 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, NI_Vector64_ToVector128Unsafe, simdBaseJitType, simdSize,
- isSimdAsHWIntrinsic);
- tmp2 = gtNewSimdWithUpperNode(TYP_SIMD16, tmp1, op2, tmp2BaseJitType, 16, isSimdAsHWIntrinsic);
+ tmp1 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, NI_Vector64_ToVector128Unsafe, simdBaseJitType, simdSize);
+ tmp2 = gtNewSimdWithUpperNode(TYP_SIMD16, tmp1, op2, tmp2BaseJitType, 16);
- return gtNewSimdHWIntrinsicNode(type, tmp2, NI_AdvSimd_Arm64_ConvertToSingleLower, simdBaseJitType, simdSize,
- isSimdAsHWIntrinsic);
+ return gtNewSimdHWIntrinsicNode(type, tmp2, NI_AdvSimd_Arm64_ConvertToSingleLower, simdBaseJitType, simdSize);
}
else
{
CorInfoType tmp2BaseJitType = varTypeIsSigned(simdBaseType) ? CORINFO_TYPE_LONG : CORINFO_TYPE_ULONG;
- tmp1 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, NI_Vector64_ToVector128Unsafe, simdBaseJitType, simdSize,
- isSimdAsHWIntrinsic);
- tmp2 = gtNewSimdWithUpperNode(TYP_SIMD16, tmp1, op2, tmp2BaseJitType, 16, isSimdAsHWIntrinsic);
+ tmp1 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, NI_Vector64_ToVector128Unsafe, simdBaseJitType, simdSize);
+ tmp2 = gtNewSimdWithUpperNode(TYP_SIMD16, tmp1, op2, tmp2BaseJitType, 16);
- return gtNewSimdHWIntrinsicNode(type, tmp2, NI_AdvSimd_ExtractNarrowingLower, simdBaseJitType, simdSize,
- isSimdAsHWIntrinsic);
+ return gtNewSimdHWIntrinsicNode(type, tmp2, NI_AdvSimd_ExtractNarrowingLower, simdBaseJitType, simdSize);
}
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
}
-GenTree* Compiler::gtNewSimdShuffleNode(var_types type,
- GenTree* op1,
- GenTree* op2,
- CorInfoType simdBaseJitType,
- unsigned simdSize,
- bool isSimdAsHWIntrinsic)
+GenTree* Compiler::gtNewSimdShuffleNode(
+ var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
// If we aren't crossing lanes, then we can decompose the byte/sbyte
// and short/ushort operations into 2x 128-bit operations
- CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSimdOrHW(type, simdBaseJitType, isSimdAsHWIntrinsic);
-
// We want to build what is essentially the following managed code:
// var op1Lower = op1.GetLower();
// op1Lower = Ssse3.Shuffle(op1Lower, Vector128.Create(...));
simdBaseJitType = varTypeIsUnsigned(simdBaseType) ? CORINFO_TYPE_UBYTE : CORINFO_TYPE_BYTE;
- GenTree* op1Dup = fgMakeMultiUse(&op1, clsHnd);
- GenTree* op1Lower = gtNewSimdGetLowerNode(TYP_SIMD16, op1, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
+ GenTree* op1Dup = fgMakeMultiUse(&op1);
+ GenTree* op1Lower = gtNewSimdGetLowerNode(TYP_SIMD16, op1, simdBaseJitType, simdSize);
op2 = gtNewVconNode(TYP_SIMD16);
op2->AsVecCon()->gtSimd16Val = vecCns.v128[0];
- op1Lower = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1Lower, op2, NI_SSSE3_Shuffle, simdBaseJitType, 16,
- isSimdAsHWIntrinsic);
+ op1Lower = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1Lower, op2, NI_SSSE3_Shuffle, simdBaseJitType, 16);
- GenTree* op1Upper =
- gtNewSimdGetUpperNode(TYP_SIMD16, op1Dup, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
+ GenTree* op1Upper = gtNewSimdGetUpperNode(TYP_SIMD16, op1Dup, simdBaseJitType, simdSize);
op2 = gtNewVconNode(TYP_SIMD16);
op2->AsVecCon()->gtSimd16Val = vecCns.v128[1];
- op1Upper = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1Upper, op2, NI_SSSE3_Shuffle, simdBaseJitType, 16,
- isSimdAsHWIntrinsic);
+ op1Upper = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1Upper, op2, NI_SSSE3_Shuffle, simdBaseJitType, 16);
- return gtNewSimdWithUpperNode(type, op1Lower, op1Upper, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
+ return gtNewSimdWithUpperNode(type, op1Lower, op1Upper, simdBaseJitType, simdSize);
}
if (elementSize == 4)
op2->AsVecCon()->gtSimdVal = vecCns;
// swap the operands to match the encoding requirements
- retNode = gtNewSimdHWIntrinsicNode(type, op2, op1, NI_AVX2_PermuteVar8x32, simdBaseJitType, simdSize,
- isSimdAsHWIntrinsic);
+ retNode = gtNewSimdHWIntrinsicNode(type, op2, op1, NI_AVX2_PermuteVar8x32, simdBaseJitType, simdSize);
}
else
{
assert(elementSize == 8);
cnsNode = gtNewIconNode(control);
- retNode = gtNewSimdHWIntrinsicNode(type, op1, cnsNode, NI_AVX2_Permute4x64, simdBaseJitType, simdSize,
- isSimdAsHWIntrinsic);
+ retNode = gtNewSimdHWIntrinsicNode(type, op1, cnsNode, NI_AVX2_Permute4x64, simdBaseJitType, simdSize);
}
}
else
op2 = gtNewVconNode(type);
op2->AsVecCon()->gtSimd16Val = vecCns.v128[0];
- return gtNewSimdHWIntrinsicNode(type, op1, op2, NI_SSSE3_Shuffle, simdBaseJitType, simdSize,
- isSimdAsHWIntrinsic);
+ return gtNewSimdHWIntrinsicNode(type, op1, op2, NI_SSSE3_Shuffle, simdBaseJitType, simdSize);
}
if (varTypeIsLong(simdBaseType))
if (varTypeIsIntegral(simdBaseType))
{
- retNode = gtNewSimdHWIntrinsicNode(type, op1, cnsNode, NI_SSE2_Shuffle, simdBaseJitType, simdSize,
- isSimdAsHWIntrinsic);
+ retNode = gtNewSimdHWIntrinsicNode(type, op1, cnsNode, NI_SSE2_Shuffle, simdBaseJitType, simdSize);
}
else if (compOpportunisticallyDependsOn(InstructionSet_AVX))
{
- retNode = gtNewSimdHWIntrinsicNode(type, op1, cnsNode, NI_AVX_Permute, simdBaseJitType, simdSize,
- isSimdAsHWIntrinsic);
+ retNode = gtNewSimdHWIntrinsicNode(type, op1, cnsNode, NI_AVX_Permute, simdBaseJitType, simdSize);
}
else
{
- CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSimdOrHW(type, simdBaseJitType, isSimdAsHWIntrinsic);
// for double we need SSE2, but we can't use the integral path ^ because we still need op1Dup here
NamedIntrinsic ni = simdBaseType == TYP_DOUBLE ? NI_SSE2_Shuffle : NI_SSE_Shuffle;
- GenTree* op1Dup = fgMakeMultiUse(&op1, clsHnd);
- retNode = gtNewSimdHWIntrinsicNode(type, op1, op1Dup, cnsNode, ni, simdBaseJitType, simdSize,
- isSimdAsHWIntrinsic);
+ GenTree* op1Dup = fgMakeMultiUse(&op1);
+ retNode = gtNewSimdHWIntrinsicNode(type, op1, op1Dup, cnsNode, ni, simdBaseJitType, simdSize);
}
}
op2->AsVecCon()->gtSimd16Val = mskCns.v128[0];
GenTree* zero = gtNewZeroConNode(type);
- retNode = gtNewSimdCndSelNode(type, op2, retNode, zero, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
+ retNode = gtNewSimdCndSelNode(type, op2, retNode, zero, simdBaseJitType, simdSize);
}
return retNode;
{
lookupIntrinsic = NI_AdvSimd_Arm64_VectorTableLookup;
- op1 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, NI_Vector64_ToVector128, simdBaseJitType, simdSize,
- isSimdAsHWIntrinsic);
+ op1 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, NI_Vector64_ToVector128, simdBaseJitType, simdSize);
}
// VectorTableLookup is only valid on byte/sbyte
op2 = gtNewVconNode(type);
op2->AsVecCon()->gtSimdVal = vecCns;
- return gtNewSimdHWIntrinsicNode(type, op1, op2, lookupIntrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
-
+ return gtNewSimdHWIntrinsicNode(type, op1, op2, lookupIntrinsic, simdBaseJitType, simdSize);
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
}
-GenTree* Compiler::gtNewSimdSqrtNode(
- var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic)
+GenTree* Compiler::gtNewSimdSqrtNode(var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
#endif // !TARGET_XARCH && !TARGET_ARM64
assert(intrinsic != NI_Illegal);
- return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
+ return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseJitType, simdSize);
}
//----------------------------------------------------------------------------------------------
// op2 - The SIMD value to be stored at op1
// simdBaseJitType - The base JIT type of SIMD type of the intrinsic
// simdSize - The size of the SIMD type of the intrinsic
-// isSimdAsHWIntrinsic - true if this is a SimdAsHWIntrinsic node; otherwise, false
//
// Returns:
// The created Store node
//
-GenTree* Compiler::gtNewSimdStoreNode(
- GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic)
+GenTree* Compiler::gtNewSimdStoreNode(GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize)
{
assert(op1 != nullptr);
assert(op2 != nullptr);
// op2 - The SIMD value to be stored at op1
// simdBaseJitType - The base JIT type of SIMD type of the intrinsic
// simdSize - The size of the SIMD type of the intrinsic
-// isSimdAsHWIntrinsic - true if this is a SimdAsHWIntrinsic node; otherwise, false
//
// Returns:
// The created StoreAligned node
//
-GenTree* Compiler::gtNewSimdStoreAlignedNode(
- GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic)
+GenTree* Compiler::gtNewSimdStoreAlignedNode(GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize)
{
#if defined(TARGET_XARCH)
assert(IsBaselineSimdIsaSupportedDebugOnly());
intrinsic = NI_SSE_StoreAligned;
}
- return gtNewSimdHWIntrinsicNode(TYP_VOID, op1, op2, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
+ return gtNewSimdHWIntrinsicNode(TYP_VOID, op1, op2, intrinsic, simdBaseJitType, simdSize);
#elif defined(TARGET_ARM64)
// ARM64 doesn't have aligned stores, but aligned stores are only validated to be
// aligned when optimizations are disabled, so only skip the intrinsic handling
// if optimizations are enabled
assert(opts.OptimizationEnabled());
- return gtNewSimdStoreNode(op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
+ return gtNewSimdStoreNode(op1, op2, simdBaseJitType, simdSize);
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
// op2 - The SIMD value to be stored at op1
// simdBaseJitType - The base JIT type of SIMD type of the intrinsic
// simdSize - The size of the SIMD type of the intrinsic
-// isSimdAsHWIntrinsic - true if this is a SimdAsHWIntrinsic node; otherwise, false
//
// Returns:
// The created StoreNonTemporal node
//
-GenTree* Compiler::gtNewSimdStoreNonTemporalNode(
- GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic)
+GenTree* Compiler::gtNewSimdStoreNonTemporalNode(GenTree* op1,
+ GenTree* op2,
+ CorInfoType simdBaseJitType,
+ unsigned simdSize)
{
#if defined(TARGET_XARCH)
assert(IsBaselineSimdIsaSupportedDebugOnly());
intrinsic = NI_SSE_StoreAlignedNonTemporal;
}
- return gtNewSimdHWIntrinsicNode(TYP_VOID, op1, op2, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
+ return gtNewSimdHWIntrinsicNode(TYP_VOID, op1, op2, intrinsic, simdBaseJitType, simdSize);
#elif defined(TARGET_ARM64)
// ARM64 doesn't have aligned stores, but aligned stores are only validated to be
// aligned when optimizations are disabled, so only skip the intrinsic handling
// if optimizations are enabled
assert(opts.OptimizationEnabled());
- return gtNewSimdStoreNode(op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
+ return gtNewSimdStoreNode(op1, op2, simdBaseJitType, simdSize);
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
}
-GenTree* Compiler::gtNewSimdSumNode(
- var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic)
+GenTree* Compiler::gtNewSimdSumNode(var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsArithmetic(simdBaseType));
- NamedIntrinsic intrinsic = NI_Illegal;
- GenTree* tmp = nullptr;
- CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSimdOrHW(simdType, simdBaseJitType, isSimdAsHWIntrinsic);
+ NamedIntrinsic intrinsic = NI_Illegal;
+ GenTree* tmp = nullptr;
#if defined(TARGET_XARCH)
assert(!varTypeIsByte(simdBaseType) && !varTypeIsLong(simdBaseType));
for (int i = 0; i < haddCount; i++)
{
- tmp = fgMakeMultiUse(&op1, clsHnd);
- op1 = gtNewSimdHWIntrinsicNode(simdType, op1, tmp, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
+ tmp = fgMakeMultiUse(&op1);
+ op1 = gtNewSimdHWIntrinsicNode(simdType, op1, tmp, intrinsic, simdBaseJitType, simdSize);
}
if (simdSize == 32)
{
intrinsic = (simdBaseType == TYP_FLOAT) ? NI_SSE_Add : NI_SSE2_Add;
- tmp = fgMakeMultiUse(&op1, clsHnd);
- op1 = gtNewSimdGetUpperNode(TYP_SIMD16, op1, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
+ tmp = fgMakeMultiUse(&op1);
+ op1 = gtNewSimdGetUpperNode(TYP_SIMD16, op1, simdBaseJitType, simdSize);
- tmp = gtNewSimdGetLowerNode(TYP_SIMD16, tmp, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
- op1 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, tmp, intrinsic, simdBaseJitType, 16, isSimdAsHWIntrinsic);
+ tmp = gtNewSimdGetLowerNode(TYP_SIMD16, tmp, simdBaseJitType, simdSize);
+ op1 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, tmp, intrinsic, simdBaseJitType, 16);
}
- return gtNewSimdHWIntrinsicNode(type, op1, NI_Vector128_ToScalar, simdBaseJitType, 16, isSimdAsHWIntrinsic);
+ return gtNewSimdHWIntrinsicNode(type, op1, NI_Vector128_ToScalar, simdBaseJitType, 16);
#elif defined(TARGET_ARM64)
switch (simdBaseType)
{
case TYP_SHORT:
case TYP_USHORT:
{
- tmp = gtNewSimdHWIntrinsicNode(simdType, op1, NI_AdvSimd_Arm64_AddAcross, simdBaseJitType, simdSize,
- isSimdAsHWIntrinsic);
- return gtNewSimdHWIntrinsicNode(type, tmp, NI_Vector64_ToScalar, simdBaseJitType, 8, isSimdAsHWIntrinsic);
+ tmp = gtNewSimdHWIntrinsicNode(simdType, op1, NI_AdvSimd_Arm64_AddAcross, simdBaseJitType, simdSize);
+ return gtNewSimdHWIntrinsicNode(type, tmp, NI_Vector64_ToScalar, simdBaseJitType, 8);
}
case TYP_INT:
{
if (simdSize == 8)
{
- tmp = fgMakeMultiUse(&op1, clsHnd);
- tmp = gtNewSimdHWIntrinsicNode(simdType, op1, tmp, NI_AdvSimd_AddPairwise, simdBaseJitType, simdSize,
- isSimdAsHWIntrinsic);
+ tmp = fgMakeMultiUse(&op1);
+ tmp = gtNewSimdHWIntrinsicNode(simdType, op1, tmp, NI_AdvSimd_AddPairwise, simdBaseJitType, simdSize);
}
else
{
- tmp = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, NI_AdvSimd_Arm64_AddAcross, simdBaseJitType, 16,
- isSimdAsHWIntrinsic);
+ tmp = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, NI_AdvSimd_Arm64_AddAcross, simdBaseJitType, 16);
}
- return gtNewSimdHWIntrinsicNode(type, tmp, NI_Vector64_ToScalar, simdBaseJitType, 8, isSimdAsHWIntrinsic);
+ return gtNewSimdHWIntrinsicNode(type, tmp, NI_Vector64_ToScalar, simdBaseJitType, 8);
}
case TYP_FLOAT:
if (simdSize == 8)
{
op1 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, op1, NI_AdvSimd_Arm64_AddPairwiseScalar, simdBaseJitType,
- simdSize, isSimdAsHWIntrinsic);
+ simdSize);
}
else
{
for (int i = 0; i < haddCount; i++)
{
- tmp = fgMakeMultiUse(&op1, clsHnd);
+ tmp = fgMakeMultiUse(&op1);
op1 = gtNewSimdHWIntrinsicNode(simdType, op1, tmp, NI_AdvSimd_Arm64_AddPairwise, simdBaseJitType,
- simdSize, isSimdAsHWIntrinsic);
+ simdSize);
}
}
- return gtNewSimdHWIntrinsicNode(type, op1, NI_Vector128_ToScalar, simdBaseJitType, simdSize,
- isSimdAsHWIntrinsic);
+ return gtNewSimdHWIntrinsicNode(type, op1, NI_Vector128_ToScalar, simdBaseJitType, simdSize);
}
case TYP_DOUBLE:
if (simdSize == 16)
{
op1 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, op1, NI_AdvSimd_Arm64_AddPairwiseScalar, simdBaseJitType,
- simdSize, isSimdAsHWIntrinsic);
+ simdSize);
}
- return gtNewSimdHWIntrinsicNode(type, op1, NI_Vector64_ToScalar, simdBaseJitType, 8, isSimdAsHWIntrinsic);
+ return gtNewSimdHWIntrinsicNode(type, op1, NI_Vector64_ToScalar, simdBaseJitType, 8);
}
default:
{
#endif // !TARGET_XARCH && !TARGET_ARM64
}
-GenTree* Compiler::gtNewSimdUnOpNode(genTreeOps op,
- var_types type,
- GenTree* op1,
- CorInfoType simdBaseJitType,
- unsigned simdSize,
- bool isSimdAsHWIntrinsic)
+GenTree* Compiler::gtNewSimdUnOpNode(
+ genTreeOps op, var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
op2 = gtNewZeroConNode(type);
// Zero - op1
- return gtNewSimdBinOpNode(GT_SUB, type, op2, op1, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
+ return gtNewSimdBinOpNode(GT_SUB, type, op2, op1, simdBaseJitType, simdSize);
}
case GT_NOT:
}
op2 = gtNewAllBitsSetConNode(type);
- return gtNewSimdBinOpNode(GT_XOR, type, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
+ return gtNewSimdBinOpNode(GT_XOR, type, op1, op2, simdBaseJitType, simdSize);
}
#elif defined(TARGET_ARM64)
case GT_NEG:
intrinsic = NI_AdvSimd_Negate;
}
- return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
+ return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseJitType, simdSize);
}
else
{
// Zero - op1
op2 = gtNewZeroConNode(type);
- return gtNewSimdBinOpNode(GT_SUB, type, op2, op1, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
+ return gtNewSimdBinOpNode(GT_SUB, type, op2, op1, simdBaseJitType, simdSize);
}
}
case GT_NOT:
{
- return gtNewSimdHWIntrinsicNode(type, op1, NI_AdvSimd_Not, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
+ return gtNewSimdHWIntrinsicNode(type, op1, NI_AdvSimd_Not, simdBaseJitType, simdSize);
}
#else
#error Unsupported platform
}
}
-GenTree* Compiler::gtNewSimdWidenLowerNode(
- var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic)
+GenTree* Compiler::gtNewSimdWidenLowerNode(var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
{
assert(IsBaselineVector512IsaSupportedDebugOnly());
- tmp1 = gtNewSimdGetLowerNode(TYP_SIMD32, op1, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
+ tmp1 = gtNewSimdGetLowerNode(TYP_SIMD32, op1, simdBaseJitType, simdSize);
switch (simdBaseType)
{
}
assert(intrinsic != NI_Illegal);
- return gtNewSimdHWIntrinsicNode(type, tmp1, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
+ return gtNewSimdHWIntrinsicNode(type, tmp1, intrinsic, simdBaseJitType, simdSize);
}
else if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
assert(!varTypeIsIntegral(simdBaseType) || compIsaSupportedDebugOnly(InstructionSet_AVX2));
- tmp1 = gtNewSimdGetLowerNode(TYP_SIMD16, op1, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
+ tmp1 = gtNewSimdGetLowerNode(TYP_SIMD16, op1, simdBaseJitType, simdSize);
switch (simdBaseType)
{
}
assert(intrinsic != NI_Illegal);
- return gtNewSimdHWIntrinsicNode(type, tmp1, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
+ return gtNewSimdHWIntrinsicNode(type, tmp1, intrinsic, simdBaseJitType, simdSize);
}
else if ((simdBaseType == TYP_FLOAT) || compOpportunisticallyDependsOn(InstructionSet_SSE41))
{
}
assert(intrinsic != NI_Illegal);
- return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
+ return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseJitType, simdSize);
}
else
{
if (varTypeIsSigned(simdBaseType))
{
- CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSimdOrHW(type, simdBaseJitType, isSimdAsHWIntrinsic);
-
- GenTree* op1Dup = fgMakeMultiUse(&op1, clsHnd);
+ GenTree* op1Dup = fgMakeMultiUse(&op1);
- tmp1 = gtNewSimdHWIntrinsicNode(type, op1Dup, tmp1, NI_SSE2_CompareLessThan, simdBaseJitType, simdSize,
- isSimdAsHWIntrinsic);
+ tmp1 = gtNewSimdHWIntrinsicNode(type, op1Dup, tmp1, NI_SSE2_CompareLessThan, simdBaseJitType, simdSize);
}
- return gtNewSimdHWIntrinsicNode(type, op1, tmp1, NI_SSE2_UnpackLow, simdBaseJitType, simdSize,
- isSimdAsHWIntrinsic);
+ return gtNewSimdHWIntrinsicNode(type, op1, tmp1, NI_SSE2_UnpackLow, simdBaseJitType, simdSize);
}
#elif defined(TARGET_ARM64)
if (simdSize == 16)
{
- tmp1 = gtNewSimdGetLowerNode(TYP_SIMD8, op1, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
+ tmp1 = gtNewSimdGetLowerNode(TYP_SIMD8, op1, simdBaseJitType, simdSize);
}
else
{
}
assert(intrinsic != NI_Illegal);
- tmp1 = gtNewSimdHWIntrinsicNode(type, tmp1, intrinsic, simdBaseJitType, 8, isSimdAsHWIntrinsic);
+ tmp1 = gtNewSimdHWIntrinsicNode(type, tmp1, intrinsic, simdBaseJitType, 8);
if (simdSize == 8)
{
- tmp1 = gtNewSimdGetLowerNode(TYP_SIMD8, tmp1, simdBaseJitType, 16, isSimdAsHWIntrinsic);
+ tmp1 = gtNewSimdGetLowerNode(TYP_SIMD8, tmp1, simdBaseJitType, 16);
}
return tmp1;
#endif // !TARGET_XARCH && !TARGET_ARM64
}
-GenTree* Compiler::gtNewSimdWidenUpperNode(
- var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic)
+GenTree* Compiler::gtNewSimdWidenUpperNode(var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
{
assert(IsBaselineVector512IsaSupportedDebugOnly());
- tmp1 = gtNewSimdGetUpperNode(TYP_SIMD32, op1, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
+ tmp1 = gtNewSimdGetUpperNode(TYP_SIMD32, op1, simdBaseJitType, simdSize);
switch (simdBaseType)
{
}
assert(intrinsic != NI_Illegal);
- return gtNewSimdHWIntrinsicNode(type, tmp1, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
+ return gtNewSimdHWIntrinsicNode(type, tmp1, intrinsic, simdBaseJitType, simdSize);
}
else if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
assert(!varTypeIsIntegral(simdBaseType) || compIsaSupportedDebugOnly(InstructionSet_AVX2));
- tmp1 = gtNewSimdGetUpperNode(TYP_SIMD16, op1, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
+ tmp1 = gtNewSimdGetUpperNode(TYP_SIMD16, op1, simdBaseJitType, simdSize);
switch (simdBaseType)
{
}
assert(intrinsic != NI_Illegal);
- return gtNewSimdHWIntrinsicNode(type, tmp1, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
+ return gtNewSimdHWIntrinsicNode(type, tmp1, intrinsic, simdBaseJitType, simdSize);
}
else if (varTypeIsFloating(simdBaseType))
{
assert(simdBaseType == TYP_FLOAT);
- CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSimdOrHW(type, simdBaseJitType, isSimdAsHWIntrinsic);
- GenTree* op1Dup = fgMakeMultiUse(&op1, clsHnd);
+ GenTree* op1Dup = fgMakeMultiUse(&op1);
- tmp1 = gtNewSimdHWIntrinsicNode(type, op1, op1Dup, NI_SSE_MoveHighToLow, simdBaseJitType, simdSize,
- isSimdAsHWIntrinsic);
- return gtNewSimdHWIntrinsicNode(type, tmp1, NI_SSE2_ConvertToVector128Double, simdBaseJitType, simdSize,
- isSimdAsHWIntrinsic);
+ tmp1 = gtNewSimdHWIntrinsicNode(type, op1, op1Dup, NI_SSE_MoveHighToLow, simdBaseJitType, simdSize);
+ return gtNewSimdHWIntrinsicNode(type, tmp1, NI_SSE2_ConvertToVector128Double, simdBaseJitType, simdSize);
}
else if (compOpportunisticallyDependsOn(InstructionSet_SSE41))
{
tmp1 = gtNewSimdHWIntrinsicNode(type, op1, gtNewIconNode(8), NI_SSE2_ShiftRightLogical128BitLane,
- simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
+ simdBaseJitType, simdSize);
switch (simdBaseType)
{
}
assert(intrinsic != NI_Illegal);
- return gtNewSimdHWIntrinsicNode(type, tmp1, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
+ return gtNewSimdHWIntrinsicNode(type, tmp1, intrinsic, simdBaseJitType, simdSize);
}
else
{
if (varTypeIsSigned(simdBaseType))
{
- CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSimdOrHW(type, simdBaseJitType, isSimdAsHWIntrinsic);
-
- GenTree* op1Dup = fgMakeMultiUse(&op1, clsHnd);
+ GenTree* op1Dup = fgMakeMultiUse(&op1);
- tmp1 = gtNewSimdHWIntrinsicNode(type, op1Dup, tmp1, NI_SSE2_CompareLessThan, simdBaseJitType, simdSize,
- isSimdAsHWIntrinsic);
+ tmp1 = gtNewSimdHWIntrinsicNode(type, op1Dup, tmp1, NI_SSE2_CompareLessThan, simdBaseJitType, simdSize);
}
- return gtNewSimdHWIntrinsicNode(type, op1, tmp1, NI_SSE2_UnpackHigh, simdBaseJitType, simdSize,
- isSimdAsHWIntrinsic);
+ return gtNewSimdHWIntrinsicNode(type, op1, tmp1, NI_SSE2_UnpackHigh, simdBaseJitType, simdSize);
}
#elif defined(TARGET_ARM64)
GenTree* zero;
}
assert(intrinsic != NI_Illegal);
- return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
+ return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseJitType, simdSize);
}
else
{
assert(intrinsic != NI_Illegal);
- tmp1 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
+ tmp1 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, intrinsic, simdBaseJitType, simdSize);
zero = gtNewZeroConNode(TYP_SIMD16);
- return gtNewSimdGetUpperNode(TYP_SIMD8, tmp1, simdBaseJitType, 16, isSimdAsHWIntrinsic);
+ return gtNewSimdGetUpperNode(TYP_SIMD8, tmp1, simdBaseJitType, 16);
}
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
}
-GenTree* Compiler::gtNewSimdWithElementNode(var_types type,
- GenTree* op1,
- GenTree* op2,
- GenTree* op3,
- CorInfoType simdBaseJitType,
- unsigned simdSize,
- bool isSimdAsHWIntrinsic)
+GenTree* Compiler::gtNewSimdWithElementNode(
+ var_types type, GenTree* op1, GenTree* op2, GenTree* op3, CorInfoType simdBaseJitType, unsigned simdSize)
{
NamedIntrinsic hwIntrinsicID = NI_Vector128_WithElement;
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
case TYP_DOUBLE:
if (simdSize == 8)
{
- return gtNewSimdHWIntrinsicNode(type, op3, NI_Vector64_Create, simdBaseJitType, simdSize,
- isSimdAsHWIntrinsic);
+ return gtNewSimdHWIntrinsicNode(type, op3, NI_Vector64_Create, simdBaseJitType, simdSize);
}
break;
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
- return gtNewSimdHWIntrinsicNode(type, op1, op2, op3, hwIntrinsicID, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
+ return gtNewSimdHWIntrinsicNode(type, op1, op2, op3, hwIntrinsicID, simdBaseJitType, simdSize);
}
#ifdef TARGET_ARM64
}
#endif // TARGET_ARM64
-GenTree* Compiler::gtNewSimdWithLowerNode(var_types type,
- GenTree* op1,
- GenTree* op2,
- CorInfoType simdBaseJitType,
- unsigned simdSize,
- bool isSimdAsHWIntrinsic)
+GenTree* Compiler::gtNewSimdWithLowerNode(
+ var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize)
{
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsArithmetic(simdBaseType));
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
- return gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsicId, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
+ return gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsicId, simdBaseJitType, simdSize);
}
-GenTree* Compiler::gtNewSimdWithUpperNode(var_types type,
- GenTree* op1,
- GenTree* op2,
- CorInfoType simdBaseJitType,
- unsigned simdSize,
- bool isSimdAsHWIntrinsic)
+GenTree* Compiler::gtNewSimdWithUpperNode(
+ var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize)
{
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsArithmetic(simdBaseType));
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
- return gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsicId, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
+ return gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsicId, simdBaseJitType, simdSize);
}
GenTreeHWIntrinsic* Compiler::gtNewScalarHWIntrinsicNode(var_types type, NamedIntrinsic hwIntrinsicID)
{
- return new (this, GT_HWINTRINSIC) GenTreeHWIntrinsic(type, getAllocator(CMK_ASTNode), hwIntrinsicID,
- CORINFO_TYPE_UNDEF, 0, /* isSimdAsHWIntrinsic */ false);
+ return new (this, GT_HWINTRINSIC)
+ GenTreeHWIntrinsic(type, getAllocator(CMK_ASTNode), hwIntrinsicID, CORINFO_TYPE_UNDEF, 0);
}
GenTreeHWIntrinsic* Compiler::gtNewScalarHWIntrinsicNode(var_types type, GenTree* op1, NamedIntrinsic hwIntrinsicID)
{
SetOpLclRelatedToSIMDIntrinsic(op1);
- return new (this, GT_HWINTRINSIC) GenTreeHWIntrinsic(type, getAllocator(CMK_ASTNode), hwIntrinsicID,
- CORINFO_TYPE_UNDEF, 0, /* isSimdAsHWIntrinsic */ false, op1);
+ return new (this, GT_HWINTRINSIC)
+ GenTreeHWIntrinsic(type, getAllocator(CMK_ASTNode), hwIntrinsicID, CORINFO_TYPE_UNDEF, 0, op1);
}
GenTreeHWIntrinsic* Compiler::gtNewScalarHWIntrinsicNode(var_types type,
SetOpLclRelatedToSIMDIntrinsic(op2);
return new (this, GT_HWINTRINSIC)
- GenTreeHWIntrinsic(type, getAllocator(CMK_ASTNode), hwIntrinsicID, CORINFO_TYPE_UNDEF, 0,
- /* isSimdAsHWIntrinsic */ false, op1, op2);
+ GenTreeHWIntrinsic(type, getAllocator(CMK_ASTNode), hwIntrinsicID, CORINFO_TYPE_UNDEF, 0, op1, op2);
}
GenTreeHWIntrinsic* Compiler::gtNewScalarHWIntrinsicNode(
SetOpLclRelatedToSIMDIntrinsic(op3);
return new (this, GT_HWINTRINSIC)
- GenTreeHWIntrinsic(type, getAllocator(CMK_ASTNode), hwIntrinsicID, CORINFO_TYPE_UNDEF, 0,
- /* isSimdAsHWIntrinsic */ false, op1, op2, op3);
+ GenTreeHWIntrinsic(type, getAllocator(CMK_ASTNode), hwIntrinsicID, CORINFO_TYPE_UNDEF, 0, op1, op2, op3);
}
//------------------------------------------------------------------------
GTF_MDARRLEN_NONFAULTING = 0x20000000, // GT_MDARR_LENGTH -- An MD array length operation that cannot fault. Same as GT_IND_NONFAULTING.
GTF_MDARRLOWERBOUND_NONFAULTING = 0x20000000, // GT_MDARR_LOWER_BOUND -- An MD array lower bound operation that cannot fault. Same as GT_IND_NONFAULTING.
-
- GTF_SIMDASHW_OP = 0x80000000, // GT_HWINTRINSIC -- Indicates that the structHandle should be gotten from gtGetStructHandleForSIMD
- // rather than from gtGetStructHandleForHWSIMD.
};
inline constexpr GenTreeFlags operator ~(GenTreeFlags a)
IntrinsicNodeBuilder&& nodeBuilder,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
- unsigned simdSize,
- bool isSimdAsHWIntrinsic)
+ unsigned simdSize)
: GenTreeJitIntrinsic(GT_HWINTRINSIC, type, std::move(nodeBuilder), simdBaseJitType, simdSize)
{
- Initialize(hwIntrinsicID, isSimdAsHWIntrinsic);
+ Initialize(hwIntrinsicID);
}
template <typename... Operands>
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize,
- bool isSimdAsHWIntrinsic,
Operands... operands)
: GenTreeJitIntrinsic(GT_HWINTRINSIC, type, allocator, simdBaseJitType, simdSize, operands...)
{
- Initialize(hwIntrinsicID, isSimdAsHWIntrinsic);
+ Initialize(hwIntrinsicID);
}
#if DEBUGGABLE_GENTREE
bool OperIsMemoryStore(GenTree** pAddr = nullptr) const;
bool OperIsMemoryLoadOrStore() const;
- bool IsSimdAsHWIntrinsic() const
- {
- return (gtFlags & GTF_SIMDASHW_OP) != 0;
- }
-
unsigned GetResultOpNumForFMA(GenTree* use, GenTree* op1, GenTree* op2, GenTree* op3);
ClassLayout* GetLayout(Compiler* compiler) const;
private:
void SetHWIntrinsicId(NamedIntrinsic intrinsicId);
- void Initialize(NamedIntrinsic intrinsicId, bool isSimdAsHWIntrinsic)
+ void Initialize(NamedIntrinsic intrinsicId)
{
SetHWIntrinsicId(intrinsicId);
gtFlags |= GTF_ASG;
}
}
-
- if (isSimdAsHWIntrinsic)
- {
- gtFlags |= GTF_SIMDASHW_OP;
- }
}
};
#endif // FEATURE_HW_INTRINSICS
{
assert(sig->numArgs == 1);
op1 = impSIMDPopStack(retType);
- retNode = gtNewSimdAbsNode(retType, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdAbsNode(retType, op1, simdBaseJitType, simdSize);
break;
}
op2 = impSIMDPopStack(retType);
op1 = impSIMDPopStack(retType);
- retNode = gtNewSimdBinOpNode(GT_ADD, retType, op1, op2, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdBinOpNode(GT_ADD, retType, op1, op2, simdBaseJitType, simdSize);
break;
}
op2 = impSIMDPopStack(retType);
op1 = impSIMDPopStack(retType);
- retNode = gtNewSimdBinOpNode(GT_AND_NOT, retType, op1, op2, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdBinOpNode(GT_AND_NOT, retType, op1, op2, simdBaseJitType, simdSize);
break;
}
assert(retType == TYP_SIMD8);
op1 = impSIMDPopStack(TYP_SIMD16);
- retNode = gtNewSimdGetLowerNode(TYP_SIMD8, op1, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdGetLowerNode(TYP_SIMD8, op1, simdBaseJitType, simdSize);
break;
}
GenTree* idx = gtNewIconNode(2, TYP_INT);
GenTree* zero = gtNewZeroConNode(TYP_FLOAT);
- op1 = gtNewSimdWithElementNode(retType, op1, idx, zero, simdBaseJitType, 16,
- /* isSimdAsHWIntrinsic */ false);
+ op1 = gtNewSimdWithElementNode(retType, op1, idx, zero, simdBaseJitType, 16);
idx = gtNewIconNode(3, TYP_INT);
zero = gtNewZeroConNode(TYP_FLOAT);
- retNode = gtNewSimdWithElementNode(retType, op1, idx, zero, simdBaseJitType, 16,
- /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdWithElementNode(retType, op1, idx, zero, simdBaseJitType, 16);
break;
}
GenTree* idx = gtNewIconNode(3, TYP_INT);
GenTree* zero = gtNewZeroConNode(TYP_FLOAT);
- retNode = gtNewSimdWithElementNode(retType, op1, idx, zero, simdBaseJitType, 16,
- /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdWithElementNode(retType, op1, idx, zero, simdBaseJitType, 16);
break;
}
op2 = impSIMDPopStack(retType);
op1 = impSIMDPopStack(retType);
- retNode = gtNewSimdBinOpNode(GT_AND, retType, op1, op2, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdBinOpNode(GT_AND, retType, op1, op2, simdBaseJitType, simdSize);
break;
}
op2 = impSIMDPopStack(retType);
op1 = impSIMDPopStack(retType);
- retNode = gtNewSimdBinOpNode(GT_OR, retType, op1, op2, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdBinOpNode(GT_OR, retType, op1, op2, simdBaseJitType, simdSize);
break;
}
assert(varTypeIsFloating(simdBaseType));
op1 = impSIMDPopStack(retType);
- retNode = gtNewSimdCeilNode(retType, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdCeilNode(retType, op1, simdBaseJitType, simdSize);
break;
}
op2 = impSIMDPopStack(retType);
op1 = impSIMDPopStack(retType);
- retNode =
- gtNewSimdCndSelNode(retType, op1, op2, op3, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdCndSelNode(retType, op1, op2, op3, simdBaseJitType, simdSize);
break;
}
argType = JITtype2varType(strip(info.compCompHnd->getArgType(sig, arg1, &argClass)));
op1 = getArgForHWIntrinsic(argType, argClass);
- retNode = gtNewSimdBinOpNode(GT_DIV, retType, op1, op2, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdBinOpNode(GT_DIV, retType, op1, op2, simdBaseJitType, simdSize);
break;
}
op2 = impSIMDPopStack(simdType);
op1 = impSIMDPopStack(simdType);
- retNode = gtNewSimdDotProdNode(retType, op1, op2, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdDotProdNode(retType, op1, op2, simdBaseJitType, simdSize);
}
break;
}
op2 = impSIMDPopStack(retType);
op1 = impSIMDPopStack(retType);
- retNode = gtNewSimdCmpOpNode(GT_EQ, retType, op1, op2, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdCmpOpNode(GT_EQ, retType, op1, op2, simdBaseJitType, simdSize);
break;
}
op2 = impSIMDPopStack(simdType);
op1 = impSIMDPopStack(simdType);
- retNode = gtNewSimdCmpOpAllNode(GT_EQ, retType, op1, op2, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdCmpOpAllNode(GT_EQ, retType, op1, op2, simdBaseJitType, simdSize);
break;
}
op2 = impSIMDPopStack(simdType);
op1 = impSIMDPopStack(simdType);
- retNode = gtNewSimdCmpOpAnyNode(GT_EQ, retType, op1, op2, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdCmpOpAnyNode(GT_EQ, retType, op1, op2, simdBaseJitType, simdSize);
break;
}
op3 = vecCon3;
op2 = vecCon2;
- op1 = gtNewSimdHWIntrinsicNode(simdType, op1, op2, NI_AdvSimd_And, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ false);
+ op1 = gtNewSimdHWIntrinsicNode(simdType, op1, op2, NI_AdvSimd_And, simdBaseJitType, simdSize);
NamedIntrinsic shiftIntrinsic = NI_AdvSimd_ShiftLogical;
shiftIntrinsic = NI_AdvSimd_ShiftLogicalScalar;
}
- op1 = gtNewSimdHWIntrinsicNode(simdType, op1, op3, shiftIntrinsic, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ false);
+ op1 = gtNewSimdHWIntrinsicNode(simdType, op1, op3, shiftIntrinsic, simdBaseJitType, simdSize);
if (varTypeIsByte(simdBaseType) && (simdSize == 16))
{
- CORINFO_CLASS_HANDLE simdClsHnd = gtGetStructHandleForSimdOrHW(TYP_SIMD16, simdBaseJitType);
-
- op1 = impCloneExpr(op1, &op2, simdClsHnd, CHECK_SPILL_ALL,
+ op1 = impCloneExpr(op1, &op2, NO_CLASS_HANDLE, CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector extractmostsignificantbits"));
- op1 = gtNewSimdGetLowerNode(TYP_SIMD8, op1, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ false);
- op1 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, op1, NI_AdvSimd_Arm64_AddAcross, simdBaseJitType, 8,
- /* isSimdAsHWIntrinsic */ false);
- op1 = gtNewSimdHWIntrinsicNode(simdBaseType, op1, NI_Vector64_ToScalar, simdBaseJitType, 8,
- /* isSimdAsHWIntrinsic */ false);
+ op1 = gtNewSimdGetLowerNode(TYP_SIMD8, op1, simdBaseJitType, simdSize);
+ op1 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, op1, NI_AdvSimd_Arm64_AddAcross, simdBaseJitType, 8);
+ op1 = gtNewSimdHWIntrinsicNode(simdBaseType, op1, NI_Vector64_ToScalar, simdBaseJitType, 8);
op1 = gtNewCastNode(TYP_INT, op1, /* isUnsigned */ true, TYP_INT);
GenTree* zero = gtNewZeroConNode(TYP_SIMD16);
ssize_t index = 8 / genTypeSize(simdBaseType);
- op2 = gtNewSimdGetUpperNode(TYP_SIMD8, op2, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ false);
- op2 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, op2, NI_AdvSimd_Arm64_AddAcross, simdBaseJitType, 8,
- /* isSimdAsHWIntrinsic */ false);
- op2 = gtNewSimdHWIntrinsicNode(simdBaseType, op2, NI_Vector64_ToScalar, simdBaseJitType, 8,
- /* isSimdAsHWIntrinsic */ false);
+ op2 = gtNewSimdGetUpperNode(TYP_SIMD8, op2, simdBaseJitType, simdSize);
+ op2 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, op2, NI_AdvSimd_Arm64_AddAcross, simdBaseJitType, 8);
+ op2 = gtNewSimdHWIntrinsicNode(simdBaseType, op2, NI_Vector64_ToScalar, simdBaseJitType, 8);
op2 = gtNewCastNode(TYP_INT, op2, /* isUnsigned */ true, TYP_INT);
op2 = gtNewOperNode(GT_LSH, TYP_INT, op2, gtNewIconNode(8));
{
if ((simdSize == 8) && ((simdBaseType == TYP_INT) || (simdBaseType == TYP_UINT)))
{
- CORINFO_CLASS_HANDLE simdClsHnd = gtGetStructHandleForSimdOrHW(simdType, simdBaseJitType);
-
- op1 = impCloneExpr(op1, &op2, simdClsHnd, CHECK_SPILL_ALL,
+ op1 = impCloneExpr(op1, &op2, NO_CLASS_HANDLE, CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector extractmostsignificantbits"));
op1 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, op1, op2, NI_AdvSimd_AddPairwise, simdBaseJitType,
- simdSize, /* isSimdAsHWIntrinsic */ false);
+ simdSize);
}
else
{
op1 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, op1, NI_AdvSimd_Arm64_AddAcross, simdBaseJitType,
- simdSize, /* isSimdAsHWIntrinsic */ false);
+ simdSize);
}
}
else if (simdSize == 16)
{
op1 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, op1, NI_AdvSimd_Arm64_AddPairwiseScalar, simdBaseJitType,
- simdSize, /* isSimdAsHWIntrinsic */ false);
+ simdSize);
}
- retNode = gtNewSimdHWIntrinsicNode(simdBaseType, op1, NI_Vector64_ToScalar, simdBaseJitType, 8,
- /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdHWIntrinsicNode(simdBaseType, op1, NI_Vector64_ToScalar, simdBaseJitType, 8);
if ((simdBaseType != TYP_INT) && (simdBaseType != TYP_UINT))
{
assert(varTypeIsFloating(simdBaseType));
op1 = impSIMDPopStack(retType);
- retNode = gtNewSimdFloorNode(retType, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdFloorNode(retType, op1, simdBaseJitType, simdSize);
break;
}
op2 = impPopStack().val;
op1 = impSIMDPopStack(getSIMDTypeForSize(simdSize));
- const bool isSimdAsHWIntrinsic = true;
- retNode = gtNewSimdGetElementNode(retType, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
+ retNode = gtNewSimdGetElementNode(retType, op1, op2, simdBaseJitType, simdSize);
break;
}
assert(sig->numArgs == 1);
op1 = impSIMDPopStack(getSIMDTypeForSize(simdSize));
- retNode = gtNewSimdGetLowerNode(retType, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdGetLowerNode(retType, op1, simdBaseJitType, simdSize);
break;
}
assert(sig->numArgs == 1);
op1 = impSIMDPopStack(getSIMDTypeForSize(simdSize));
- retNode = gtNewSimdGetUpperNode(retType, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdGetUpperNode(retType, op1, simdBaseJitType, simdSize);
break;
}
op2 = impSIMDPopStack(retType);
op1 = impSIMDPopStack(retType);
- retNode = gtNewSimdCmpOpNode(GT_GT, retType, op1, op2, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdCmpOpNode(GT_GT, retType, op1, op2, simdBaseJitType, simdSize);
break;
}
op2 = impSIMDPopStack(simdType);
op1 = impSIMDPopStack(simdType);
- retNode = gtNewSimdCmpOpAllNode(GT_GT, retType, op1, op2, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdCmpOpAllNode(GT_GT, retType, op1, op2, simdBaseJitType, simdSize);
break;
}
op2 = impSIMDPopStack(simdType);
op1 = impSIMDPopStack(simdType);
- retNode = gtNewSimdCmpOpAnyNode(GT_GT, retType, op1, op2, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdCmpOpAnyNode(GT_GT, retType, op1, op2, simdBaseJitType, simdSize);
break;
}
op2 = impSIMDPopStack(retType);
op1 = impSIMDPopStack(retType);
- retNode = gtNewSimdCmpOpNode(GT_GE, retType, op1, op2, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdCmpOpNode(GT_GE, retType, op1, op2, simdBaseJitType, simdSize);
break;
}
op2 = impSIMDPopStack(simdType);
op1 = impSIMDPopStack(simdType);
- retNode = gtNewSimdCmpOpAllNode(GT_GE, retType, op1, op2, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdCmpOpAllNode(GT_GE, retType, op1, op2, simdBaseJitType, simdSize);
break;
}
op2 = impSIMDPopStack(simdType);
op1 = impSIMDPopStack(simdType);
- retNode = gtNewSimdCmpOpAnyNode(GT_GE, retType, op1, op2, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdCmpOpAnyNode(GT_GE, retType, op1, op2, simdBaseJitType, simdSize);
break;
}
op2 = impSIMDPopStack(retType);
op1 = impSIMDPopStack(retType);
- retNode = gtNewSimdCmpOpNode(GT_LT, retType, op1, op2, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdCmpOpNode(GT_LT, retType, op1, op2, simdBaseJitType, simdSize);
break;
}
op2 = impSIMDPopStack(simdType);
op1 = impSIMDPopStack(simdType);
- retNode = gtNewSimdCmpOpAllNode(GT_LT, retType, op1, op2, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdCmpOpAllNode(GT_LT, retType, op1, op2, simdBaseJitType, simdSize);
break;
}
op2 = impSIMDPopStack(simdType);
op1 = impSIMDPopStack(simdType);
- retNode = gtNewSimdCmpOpAnyNode(GT_LT, retType, op1, op2, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdCmpOpAnyNode(GT_LT, retType, op1, op2, simdBaseJitType, simdSize);
break;
}
op2 = impSIMDPopStack(retType);
op1 = impSIMDPopStack(retType);
- retNode = gtNewSimdCmpOpNode(GT_LE, retType, op1, op2, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdCmpOpNode(GT_LE, retType, op1, op2, simdBaseJitType, simdSize);
break;
}
op2 = impSIMDPopStack(simdType);
op1 = impSIMDPopStack(simdType);
- retNode = gtNewSimdCmpOpAllNode(GT_LE, retType, op1, op2, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdCmpOpAllNode(GT_LE, retType, op1, op2, simdBaseJitType, simdSize);
break;
}
op2 = impSIMDPopStack(simdType);
op1 = impSIMDPopStack(simdType);
- retNode = gtNewSimdCmpOpAnyNode(GT_LE, retType, op1, op2, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdCmpOpAnyNode(GT_LE, retType, op1, op2, simdBaseJitType, simdSize);
break;
}
op1 = gtNewOperNode(GT_ADD, op1->TypeGet(), op1, op2);
}
- retNode = gtNewSimdLoadNode(retType, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdLoadNode(retType, op1, simdBaseJitType, simdSize);
break;
}
op1 = op1->gtGetOp1();
}
- retNode = gtNewSimdLoadAlignedNode(retType, op1, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdLoadAlignedNode(retType, op1, simdBaseJitType, simdSize);
break;
}
op1 = op1->gtGetOp1();
}
- retNode = gtNewSimdLoadNonTemporalNode(retType, op1, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdLoadNonTemporalNode(retType, op1, simdBaseJitType, simdSize);
break;
}
op2 = impSIMDPopStack(retType);
op1 = impSIMDPopStack(retType);
- retNode = gtNewSimdMaxNode(retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdMaxNode(retType, op1, op2, simdBaseJitType, simdSize);
break;
}
op2 = impSIMDPopStack(retType);
op1 = impSIMDPopStack(retType);
- retNode = gtNewSimdMinNode(retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdMinNode(retType, op1, op2, simdBaseJitType, simdSize);
break;
}
argType = JITtype2varType(strip(info.compCompHnd->getArgType(sig, arg1, &argClass)));
op1 = getArgForHWIntrinsic(argType, argClass);
- retNode = gtNewSimdBinOpNode(GT_MUL, retType, op1, op2, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdBinOpNode(GT_MUL, retType, op1, op2, simdBaseJitType, simdSize);
break;
}
op2 = impSIMDPopStack(retType);
op1 = impSIMDPopStack(retType);
- retNode =
- gtNewSimdNarrowNode(retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdNarrowNode(retType, op1, op2, simdBaseJitType, simdSize);
break;
}
case NI_Vector128_op_UnaryNegation:
{
assert(sig->numArgs == 1);
- op1 = impSIMDPopStack(retType);
- retNode =
- gtNewSimdUnOpNode(GT_NEG, retType, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false);
+ op1 = impSIMDPopStack(retType);
+ retNode = gtNewSimdUnOpNode(GT_NEG, retType, op1, simdBaseJitType, simdSize);
break;
}
case NI_Vector128_op_OnesComplement:
{
assert(sig->numArgs == 1);
- op1 = impSIMDPopStack(retType);
- retNode =
- gtNewSimdUnOpNode(GT_NOT, retType, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false);
+ op1 = impSIMDPopStack(retType);
+ retNode = gtNewSimdUnOpNode(GT_NOT, retType, op1, simdBaseJitType, simdSize);
break;
}
op2 = impSIMDPopStack(simdType);
op1 = impSIMDPopStack(simdType);
- retNode = gtNewSimdCmpOpAnyNode(GT_NE, retType, op1, op2, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdCmpOpAnyNode(GT_NE, retType, op1, op2, simdBaseJitType, simdSize);
break;
}
op2 = impSIMDPopStack(retType);
op1 = impSIMDPopStack(retType);
- retNode = gtNewSimdBinOpNode(GT_SUB, retType, op1, op2, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdBinOpNode(GT_SUB, retType, op1, op2, simdBaseJitType, simdSize);
break;
}
op2 = impPopStack().val;
op1 = impSIMDPopStack(retType);
- retNode = gtNewSimdBinOpNode(GT_LSH, retType, op1, op2, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdBinOpNode(GT_LSH, retType, op1, op2, simdBaseJitType, simdSize);
break;
}
op2 = impPopStack().val;
op1 = impSIMDPopStack(retType);
- retNode = gtNewSimdBinOpNode(op, retType, op1, op2, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdBinOpNode(op, retType, op1, op2, simdBaseJitType, simdSize);
break;
}
op2 = impPopStack().val;
op1 = impSIMDPopStack(retType);
- retNode = gtNewSimdBinOpNode(GT_RSZ, retType, op1, op2, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdBinOpNode(GT_RSZ, retType, op1, op2, simdBaseJitType, simdSize);
break;
}
op2 = impSIMDPopStack(retType);
op1 = impSIMDPopStack(retType);
- retNode = gtNewSimdShuffleNode(retType, op1, op2, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdShuffleNode(retType, op1, op2, simdBaseJitType, simdSize);
}
break;
}
if (varTypeIsFloating(simdBaseType))
{
op1 = impSIMDPopStack(retType);
- retNode = gtNewSimdSqrtNode(retType, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdSqrtNode(retType, op1, simdBaseJitType, simdSize);
}
break;
}
op1 = op1->gtGetOp1();
}
- retNode = gtNewSimdStoreNode(op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdStoreNode(op1, op2, simdBaseJitType, simdSize);
break;
}
op1 = impSIMDPopStack(simdType);
- retNode = gtNewSimdStoreNode(op2, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdStoreNode(op2, op1, simdBaseJitType, simdSize);
break;
}
op1 = impSIMDPopStack(simdType);
- retNode = gtNewSimdStoreAlignedNode(op2, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdStoreAlignedNode(op2, op1, simdBaseJitType, simdSize);
break;
}
op1 = impSIMDPopStack(simdType);
- retNode = gtNewSimdStoreNonTemporalNode(op2, op1, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdStoreNonTemporalNode(op2, op1, simdBaseJitType, simdSize);
break;
}
var_types simdType = getSIMDTypeForSize(simdSize);
op1 = impSIMDPopStack(simdType);
- retNode = gtNewSimdSumNode(retType, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdSumNode(retType, op1, simdBaseJitType, simdSize);
break;
}
op1 = impSIMDPopStack(retType);
- retNode = gtNewSimdWidenLowerNode(retType, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdWidenLowerNode(retType, op1, simdBaseJitType, simdSize);
break;
}
op1 = impSIMDPopStack(retType);
- retNode = gtNewSimdWidenUpperNode(retType, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdWidenUpperNode(retType, op1, simdBaseJitType, simdSize);
break;
}
impPopStack(); // pop the indexOp that we already have.
GenTree* vectorOp = impSIMDPopStack(getSIMDTypeForSize(simdSize));
- retNode = gtNewSimdWithElementNode(retType, vectorOp, indexOp, valueOp, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdWithElementNode(retType, vectorOp, indexOp, valueOp, simdBaseJitType, simdSize);
break;
}
op2 = impSIMDPopStack(TYP_SIMD8);
op1 = impSIMDPopStack(TYP_SIMD16);
- retNode = gtNewSimdWithLowerNode(retType, op1, op2, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdWithLowerNode(retType, op1, op2, simdBaseJitType, simdSize);
break;
}
op2 = impSIMDPopStack(TYP_SIMD8);
op1 = impSIMDPopStack(TYP_SIMD16);
- retNode = gtNewSimdWithUpperNode(retType, op1, op2, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdWithUpperNode(retType, op1, op2, simdBaseJitType, simdSize);
break;
}
op2 = impSIMDPopStack(retType);
op1 = impSIMDPopStack(retType);
- retNode = gtNewSimdBinOpNode(GT_XOR, retType, op1, op2, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdBinOpNode(GT_XOR, retType, op1, op2, simdBaseJitType, simdSize);
break;
}
compExactlyDependsOn(InstructionSet_AVX2))
{
op1 = impSIMDPopStack(retType);
- retNode = gtNewSimdAbsNode(retType, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdAbsNode(retType, op1, simdBaseJitType, simdSize);
}
break;
}
op2 = impSIMDPopStack(retType);
op1 = impSIMDPopStack(retType);
- retNode = gtNewSimdBinOpNode(GT_ADD, retType, op1, op2, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdBinOpNode(GT_ADD, retType, op1, op2, simdBaseJitType, simdSize);
}
break;
}
op2 = impSIMDPopStack(retType);
op1 = impSIMDPopStack(retType);
- retNode = gtNewSimdBinOpNode(GT_AND_NOT, retType, op1, op2, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdBinOpNode(GT_AND_NOT, retType, op1, op2, simdBaseJitType, simdSize);
break;
}
GenTree* idx = gtNewIconNode(2, TYP_INT);
GenTree* zero = gtNewZeroConNode(TYP_FLOAT);
- op1 = gtNewSimdWithElementNode(retType, op1, idx, zero, simdBaseJitType, 16,
- /* isSimdAsHWIntrinsic */ false);
+ op1 = gtNewSimdWithElementNode(retType, op1, idx, zero, simdBaseJitType, 16);
idx = gtNewIconNode(3, TYP_INT);
zero = gtNewZeroConNode(TYP_FLOAT);
- retNode = gtNewSimdWithElementNode(retType, op1, idx, zero, simdBaseJitType, 16,
- /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdWithElementNode(retType, op1, idx, zero, simdBaseJitType, 16);
break;
}
GenTree* idx = gtNewIconNode(3, TYP_INT);
GenTree* zero = gtNewZeroConNode(TYP_FLOAT);
- retNode = gtNewSimdWithElementNode(retType, op1, idx, zero, simdBaseJitType, 16,
- /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdWithElementNode(retType, op1, idx, zero, simdBaseJitType, 16);
break;
}
op2 = impSIMDPopStack(retType);
op1 = impSIMDPopStack(retType);
- retNode = gtNewSimdBinOpNode(GT_AND, retType, op1, op2, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdBinOpNode(GT_AND, retType, op1, op2, simdBaseJitType, simdSize);
break;
}
op2 = impSIMDPopStack(retType);
op1 = impSIMDPopStack(retType);
- retNode = gtNewSimdBinOpNode(GT_OR, retType, op1, op2, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdBinOpNode(GT_OR, retType, op1, op2, simdBaseJitType, simdSize);
break;
}
}
op1 = impSIMDPopStack(retType);
- retNode = gtNewSimdCeilNode(retType, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdCeilNode(retType, op1, simdBaseJitType, simdSize);
break;
}
op2 = impSIMDPopStack(retType);
op1 = impSIMDPopStack(retType);
- retNode =
- gtNewSimdCndSelNode(retType, op1, op2, op3, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdCndSelNode(retType, op1, op2, op3, simdBaseJitType, simdSize);
break;
}
argType = JITtype2varType(strip(info.compCompHnd->getArgType(sig, arg1, &argClass)));
op1 = getArgForHWIntrinsic(argType, argClass);
- retNode = gtNewSimdBinOpNode(GT_DIV, retType, op1, op2, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdBinOpNode(GT_DIV, retType, op1, op2, simdBaseJitType, simdSize);
break;
}
op2 = impSIMDPopStack(simdType);
op1 = impSIMDPopStack(simdType);
- retNode =
- gtNewSimdDotProdNode(retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdDotProdNode(retType, op1, op2, simdBaseJitType, simdSize);
break;
}
op2 = impSIMDPopStack(retType);
op1 = impSIMDPopStack(retType);
- retNode = gtNewSimdCmpOpNode(GT_EQ, retType, op1, op2, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdCmpOpNode(GT_EQ, retType, op1, op2, simdBaseJitType, simdSize);
}
break;
}
op2 = impSIMDPopStack(simdType);
op1 = impSIMDPopStack(simdType);
- retNode = gtNewSimdCmpOpAllNode(GT_EQ, retType, op1, op2, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdCmpOpAllNode(GT_EQ, retType, op1, op2, simdBaseJitType, simdSize);
}
break;
}
op2 = impSIMDPopStack(simdType);
op1 = impSIMDPopStack(simdType);
- retNode = gtNewSimdCmpOpAnyNode(GT_EQ, retType, op1, op2, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdCmpOpAnyNode(GT_EQ, retType, op1, op2, simdBaseJitType, simdSize);
}
break;
}
op1 = impSIMDPopStack(simdType);
- retNode = gtNewSimdHWIntrinsicNode(retType, op1, NI_AVX512F_MoveMaskSpecial, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdHWIntrinsicNode(retType, op1, NI_AVX512F_MoveMaskSpecial, simdBaseJitType, simdSize);
}
break;
}
simdType = TYP_SIMD16;
- op1 = gtNewSimdGetLowerNode(simdType, op1, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ false);
+ op1 = gtNewSimdGetLowerNode(simdType, op1, simdBaseJitType, simdSize);
simdSize = 16;
}
assert(moveMaskIntrinsic != NI_Illegal);
assert(op1 != nullptr);
- retNode = gtNewSimdHWIntrinsicNode(retType, op1, moveMaskIntrinsic, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdHWIntrinsicNode(retType, op1, moveMaskIntrinsic, simdBaseJitType, simdSize);
}
break;
}
}
op1 = impSIMDPopStack(retType);
- retNode = gtNewSimdFloorNode(retType, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdFloorNode(retType, op1, simdBaseJitType, simdSize);
break;
}
GenTree* op2 = impPopStack().val;
GenTree* op1 = impSIMDPopStack(getSIMDTypeForSize(simdSize));
- retNode = gtNewSimdGetElementNode(retType, op1, op2, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdGetElementNode(retType, op1, op2, simdBaseJitType, simdSize);
break;
}
op2 = impSIMDPopStack(retType);
op1 = impSIMDPopStack(retType);
- retNode = gtNewSimdCmpOpNode(GT_GT, retType, op1, op2, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdCmpOpNode(GT_GT, retType, op1, op2, simdBaseJitType, simdSize);
}
break;
}
op2 = impSIMDPopStack(simdType);
op1 = impSIMDPopStack(simdType);
- retNode = gtNewSimdCmpOpAllNode(GT_GT, retType, op1, op2, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdCmpOpAllNode(GT_GT, retType, op1, op2, simdBaseJitType, simdSize);
}
break;
}
op2 = impSIMDPopStack(simdType);
op1 = impSIMDPopStack(simdType);
- retNode = gtNewSimdCmpOpAnyNode(GT_GT, retType, op1, op2, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdCmpOpAnyNode(GT_GT, retType, op1, op2, simdBaseJitType, simdSize);
}
break;
}
op2 = impSIMDPopStack(retType);
op1 = impSIMDPopStack(retType);
- retNode = gtNewSimdCmpOpNode(GT_GE, retType, op1, op2, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdCmpOpNode(GT_GE, retType, op1, op2, simdBaseJitType, simdSize);
}
break;
}
op2 = impSIMDPopStack(simdType);
op1 = impSIMDPopStack(simdType);
- retNode = gtNewSimdCmpOpAllNode(GT_GE, retType, op1, op2, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdCmpOpAllNode(GT_GE, retType, op1, op2, simdBaseJitType, simdSize);
}
break;
}
op2 = impSIMDPopStack(simdType);
op1 = impSIMDPopStack(simdType);
- retNode = gtNewSimdCmpOpAnyNode(GT_GE, retType, op1, op2, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdCmpOpAnyNode(GT_GE, retType, op1, op2, simdBaseJitType, simdSize);
}
break;
}
op2 = impSIMDPopStack(retType);
op1 = impSIMDPopStack(retType);
- retNode = gtNewSimdCmpOpNode(GT_LT, retType, op1, op2, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdCmpOpNode(GT_LT, retType, op1, op2, simdBaseJitType, simdSize);
}
break;
}
op2 = impSIMDPopStack(simdType);
op1 = impSIMDPopStack(simdType);
- retNode = gtNewSimdCmpOpAllNode(GT_LT, retType, op1, op2, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdCmpOpAllNode(GT_LT, retType, op1, op2, simdBaseJitType, simdSize);
}
break;
}
op2 = impSIMDPopStack(simdType);
op1 = impSIMDPopStack(simdType);
- retNode = gtNewSimdCmpOpAnyNode(GT_LT, retType, op1, op2, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdCmpOpAnyNode(GT_LT, retType, op1, op2, simdBaseJitType, simdSize);
}
break;
}
op2 = impSIMDPopStack(retType);
op1 = impSIMDPopStack(retType);
- retNode = gtNewSimdCmpOpNode(GT_LE, retType, op1, op2, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdCmpOpNode(GT_LE, retType, op1, op2, simdBaseJitType, simdSize);
}
break;
}
op2 = impSIMDPopStack(simdType);
op1 = impSIMDPopStack(simdType);
- retNode = gtNewSimdCmpOpAllNode(GT_LE, retType, op1, op2, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdCmpOpAllNode(GT_LE, retType, op1, op2, simdBaseJitType, simdSize);
}
break;
}
op2 = impSIMDPopStack(simdType);
op1 = impSIMDPopStack(simdType);
- retNode = gtNewSimdCmpOpAnyNode(GT_LE, retType, op1, op2, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdCmpOpAnyNode(GT_LE, retType, op1, op2, simdBaseJitType, simdSize);
}
break;
}
op1 = gtNewOperNode(GT_ADD, op1->TypeGet(), op1, op2);
}
- retNode = gtNewSimdLoadNode(retType, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdLoadNode(retType, op1, simdBaseJitType, simdSize);
break;
}
op1 = op1->gtGetOp1();
}
- retNode = gtNewSimdLoadAlignedNode(retType, op1, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdLoadAlignedNode(retType, op1, simdBaseJitType, simdSize);
break;
}
op1 = op1->gtGetOp1();
}
- retNode = gtNewSimdLoadNonTemporalNode(retType, op1, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdLoadNonTemporalNode(retType, op1, simdBaseJitType, simdSize);
break;
}
op2 = impSIMDPopStack(retType);
op1 = impSIMDPopStack(retType);
- retNode =
- gtNewSimdMaxNode(retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdMaxNode(retType, op1, op2, simdBaseJitType, simdSize);
}
break;
}
op2 = impSIMDPopStack(retType);
op1 = impSIMDPopStack(retType);
- retNode =
- gtNewSimdMinNode(retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdMinNode(retType, op1, op2, simdBaseJitType, simdSize);
}
break;
}
argType = JITtype2varType(strip(info.compCompHnd->getArgType(sig, arg1, &argClass)));
op1 = getArgForHWIntrinsic(argType, argClass);
- retNode = gtNewSimdBinOpNode(GT_MUL, retType, op1, op2, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdBinOpNode(GT_MUL, retType, op1, op2, simdBaseJitType, simdSize);
break;
}
op2 = impSIMDPopStack(retType);
op1 = impSIMDPopStack(retType);
- retNode =
- gtNewSimdNarrowNode(retType, op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdNarrowNode(retType, op1, op2, simdBaseJitType, simdSize);
}
break;
}
if ((simdSize != 32) || varTypeIsFloating(simdBaseType) || compExactlyDependsOn(InstructionSet_AVX2))
{
- op1 = impSIMDPopStack(retType);
- retNode =
- gtNewSimdUnOpNode(GT_NEG, retType, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false);
+ op1 = impSIMDPopStack(retType);
+ retNode = gtNewSimdUnOpNode(GT_NEG, retType, op1, simdBaseJitType, simdSize);
}
break;
}
case NI_Vector512_op_OnesComplement:
{
assert(sig->numArgs == 1);
- op1 = impSIMDPopStack(retType);
- retNode =
- gtNewSimdUnOpNode(GT_NOT, retType, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false);
+ op1 = impSIMDPopStack(retType);
+ retNode = gtNewSimdUnOpNode(GT_NOT, retType, op1, simdBaseJitType, simdSize);
break;
}
op2 = impSIMDPopStack(simdType);
op1 = impSIMDPopStack(simdType);
- retNode = gtNewSimdCmpOpAnyNode(GT_NE, retType, op1, op2, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdCmpOpAnyNode(GT_NE, retType, op1, op2, simdBaseJitType, simdSize);
}
break;
}
op2 = impSIMDPopStack(retType);
op1 = impSIMDPopStack(retType);
- retNode = gtNewSimdBinOpNode(GT_SUB, retType, op1, op2, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdBinOpNode(GT_SUB, retType, op1, op2, simdBaseJitType, simdSize);
}
break;
}
op2 = impPopStack().val;
op1 = impSIMDPopStack(retType);
- retNode = gtNewSimdBinOpNode(GT_LSH, retType, op1, op2, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdBinOpNode(GT_LSH, retType, op1, op2, simdBaseJitType, simdSize);
}
break;
}
op2 = impPopStack().val;
op1 = impSIMDPopStack(retType);
- retNode = gtNewSimdBinOpNode(op, retType, op1, op2, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdBinOpNode(op, retType, op1, op2, simdBaseJitType, simdSize);
}
break;
}
op2 = impPopStack().val;
op1 = impSIMDPopStack(retType);
- retNode = gtNewSimdBinOpNode(GT_RSZ, retType, op1, op2, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdBinOpNode(GT_RSZ, retType, op1, op2, simdBaseJitType, simdSize);
}
break;
}
op2 = impSIMDPopStack(retType);
op1 = impSIMDPopStack(retType);
- retNode = gtNewSimdShuffleNode(retType, op1, op2, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdShuffleNode(retType, op1, op2, simdBaseJitType, simdSize);
}
break;
}
if (varTypeIsFloating(simdBaseType))
{
op1 = impSIMDPopStack(retType);
- retNode = gtNewSimdSqrtNode(retType, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdSqrtNode(retType, op1, simdBaseJitType, simdSize);
}
break;
}
op1 = op1->gtGetOp1();
}
- retNode = gtNewSimdStoreNode(op1, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdStoreNode(op1, op2, simdBaseJitType, simdSize);
break;
}
op1 = impSIMDPopStack(simdType);
- retNode = gtNewSimdStoreNode(op2, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdStoreNode(op2, op1, simdBaseJitType, simdSize);
break;
}
op1 = impSIMDPopStack(simdType);
- retNode = gtNewSimdStoreAlignedNode(op2, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdStoreAlignedNode(op2, op1, simdBaseJitType, simdSize);
break;
}
op1 = impSIMDPopStack(simdType);
- retNode = gtNewSimdStoreNonTemporalNode(op2, op1, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdStoreNonTemporalNode(op2, op1, simdBaseJitType, simdSize);
break;
}
}
op1 = impSIMDPopStack(simdType);
- retNode = gtNewSimdSumNode(retType, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdSumNode(retType, op1, simdBaseJitType, simdSize);
break;
}
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
op1 = impSIMDPopStack(getSIMDTypeForSize(simdSize));
- retNode = gtNewSimdGetLowerNode(retType, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdGetLowerNode(retType, op1, simdBaseJitType, simdSize);
break;
}
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
op1 = impSIMDPopStack(getSIMDTypeForSize(simdSize));
- retNode = gtNewSimdGetUpperNode(retType, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdGetUpperNode(retType, op1, simdBaseJitType, simdSize);
break;
}
assert(IsBaselineVector512IsaSupportedDebugOnly());
op1 = impSIMDPopStack(getSIMDTypeForSize(simdSize));
- retNode = gtNewSimdGetLowerNode(retType, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdGetLowerNode(retType, op1, simdBaseJitType, simdSize);
break;
}
assert(IsBaselineVector512IsaSupportedDebugOnly());
op1 = impSIMDPopStack(getSIMDTypeForSize(simdSize));
- retNode = gtNewSimdGetUpperNode(retType, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdGetUpperNode(retType, op1, simdBaseJitType, simdSize);
break;
}
op1 = impSIMDPopStack(retType);
- retNode =
- gtNewSimdWidenLowerNode(retType, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdWidenLowerNode(retType, op1, simdBaseJitType, simdSize);
}
break;
}
op1 = impSIMDPopStack(retType);
- retNode =
- gtNewSimdWidenUpperNode(retType, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdWidenUpperNode(retType, op1, simdBaseJitType, simdSize);
}
break;
}
                    impPopStack(); // Pop the indexOp now that we know it's valid
GenTree* vectorOp = impSIMDPopStack(getSIMDTypeForSize(simdSize));
- retNode = gtNewSimdWithElementNode(retType, vectorOp, indexOp, valueOp, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdWithElementNode(retType, vectorOp, indexOp, valueOp, simdBaseJitType, simdSize);
break;
}
op2 = impSIMDPopStack(TYP_SIMD16);
op1 = impSIMDPopStack(TYP_SIMD32);
- retNode = gtNewSimdWithLowerNode(retType, op1, op2, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdWithLowerNode(retType, op1, op2, simdBaseJitType, simdSize);
break;
}
op2 = impSIMDPopStack(TYP_SIMD16);
op1 = impSIMDPopStack(TYP_SIMD32);
- retNode = gtNewSimdWithUpperNode(retType, op1, op2, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdWithUpperNode(retType, op1, op2, simdBaseJitType, simdSize);
break;
}
op2 = impSIMDPopStack(TYP_SIMD32);
op1 = impSIMDPopStack(TYP_SIMD64);
- retNode = gtNewSimdWithLowerNode(retType, op1, op2, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdWithLowerNode(retType, op1, op2, simdBaseJitType, simdSize);
break;
}
op2 = impSIMDPopStack(TYP_SIMD32);
op1 = impSIMDPopStack(TYP_SIMD64);
- retNode = gtNewSimdWithUpperNode(retType, op1, op2, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdWithUpperNode(retType, op1, op2, simdBaseJitType, simdSize);
break;
}
op2 = impSIMDPopStack(retType);
op1 = impSIMDPopStack(retType);
- retNode = gtNewSimdBinOpNode(GT_XOR, retType, op1, op2, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ false);
+ retNode = gtNewSimdBinOpNode(GT_XOR, retType, op1, op2, simdBaseJitType, simdSize);
break;
}
op1 = getArgForHWIntrinsic(argType, argClass);
SetOpLclRelatedToSIMDIntrinsic(op1);
- const bool isSimdAsHWIntrinsic = false;
-
- retNode = new (this, GT_HWINTRINSIC)
- GenTreeHWIntrinsic(retType, getAllocator(CMK_ASTNode), intrinsic, simdBaseJitType, simdSize,
- isSimdAsHWIntrinsic, op1, op2, op3, op4, op5);
+ retNode = new (this, GT_HWINTRINSIC) GenTreeHWIntrinsic(retType, getAllocator(CMK_ASTNode), intrinsic,
+ simdBaseJitType, simdSize, op1, op2, op3, op4, op5);
retNode->AsHWIntrinsic()->SetAuxiliaryJitType(indexBaseJitType);
break;
}
{
        // Apply ASCII-only ToLowerCase mask (bitwise OR 0x20 for all A-Z chars)
assert((toLowerVec1 != nullptr) && (toLowerVec2 != nullptr));
- vec1 = gtNewSimdBinOpNode(GT_OR, simdType, vec1, toLowerVec1, baseType, simdSize, false);
- vec2 = gtNewSimdBinOpNode(GT_OR, simdType, vec2, toLowerVec2, baseType, simdSize, false);
+ vec1 = gtNewSimdBinOpNode(GT_OR, simdType, vec1, toLowerVec1, baseType, simdSize);
+ vec2 = gtNewSimdBinOpNode(GT_OR, simdType, vec2, toLowerVec2, baseType, simdSize);
}
// ((v1 ^ cns1) | (v2 ^ cns2)) == zero
- GenTree* xor1 = gtNewSimdBinOpNode(GT_XOR, simdType, vec1, cnsVec1, baseType, simdSize, false);
- GenTree* xor2 = gtNewSimdBinOpNode(GT_XOR, simdType, vec2, cnsVec2, baseType, simdSize, false);
- GenTree* orr = gtNewSimdBinOpNode(GT_OR, simdType, xor1, xor2, baseType, simdSize, false);
+ GenTree* xor1 = gtNewSimdBinOpNode(GT_XOR, simdType, vec1, cnsVec1, baseType, simdSize);
+ GenTree* xor2 = gtNewSimdBinOpNode(GT_XOR, simdType, vec2, cnsVec2, baseType, simdSize);
+ GenTree* orr = gtNewSimdBinOpNode(GT_OR, simdType, xor1, xor2, baseType, simdSize);
return gtNewSimdHWIntrinsicNode(TYP_BOOL, useSingleVector ? xor1 : orr, zero, niEquals, baseType, simdSize);
}
#endif // defined(FEATURE_HW_INTRINSICS)
JITDUMP("Trying to unroll MemoryExtensions.Equals|SequenceEqual|StartsWith(op1, \"%ws\")...\n", str)
}
- CORINFO_CLASS_HANDLE spanCls = gtGetStructHandle(spanObj);
+ CORINFO_CLASS_HANDLE spanCls;
+ info.compCompHnd->getArgType(sig, sig->args, &spanCls);
CORINFO_FIELD_HANDLE pointerHnd = info.compCompHnd->getFieldInClass(spanCls, 0);
CORINFO_FIELD_HANDLE lengthHnd = info.compCompHnd->getFieldInClass(spanCls, 1);
const unsigned lengthOffset = info.compCompHnd->getFieldOffset(lengthHnd);
{
GenTree* indexNode = m_compiler->gtNewIconNode(val.Offset() / genTypeSize(elementType));
hwiNode = m_compiler->gtNewSimdGetElementNode(elementType, lclNode, indexNode, CORINFO_TYPE_FLOAT,
- genTypeSize(varDsc),
- /* isSimdAsHWIntrinsic */ true);
+ genTypeSize(varDsc));
}
else
{
assert(elementType == TYP_SIMD12);
assert(genTypeSize(varDsc) == 16);
- hwiNode =
- m_compiler->gtNewSimdHWIntrinsicNode(elementType, lclNode, NI_Vector128_AsVector3,
- CORINFO_TYPE_FLOAT, 16, /* isSimdAsHWIntrinsic */ true);
+ hwiNode = m_compiler->gtNewSimdHWIntrinsicNode(elementType, lclNode, NI_Vector128_AsVector3,
+ CORINFO_TYPE_FLOAT, 16);
}
indir = hwiNode;
if (elementType == TYP_FLOAT)
{
GenTree* indexNode = m_compiler->gtNewIconNode(val.Offset() / genTypeSize(elementType));
- hwiNode = m_compiler->gtNewSimdWithElementNode(varDsc->TypeGet(), simdLclNode, indexNode,
- elementNode, CORINFO_TYPE_FLOAT, genTypeSize(varDsc),
- /* isSimdAsHWIntrinsic */ true);
+ hwiNode =
+ m_compiler->gtNewSimdWithElementNode(varDsc->TypeGet(), simdLclNode, indexNode, elementNode,
+ CORINFO_TYPE_FLOAT, genTypeSize(varDsc));
}
else
{
                    // We swap the operands here and take elementNode as the main value and simdLclNode[3] as the
                    // new value. This gives us a new TYP_SIMD16 with all elements in the right spots
GenTree* indexNode = m_compiler->gtNewIconNode(3, TYP_INT);
- hwiNode =
- m_compiler->gtNewSimdWithElementNode(TYP_SIMD16, elementNode, indexNode, simdLclNode,
- CORINFO_TYPE_FLOAT, 16, /* isSimdAsHWIntrinsic */ true);
+ hwiNode = m_compiler->gtNewSimdWithElementNode(TYP_SIMD16, elementNode, indexNode, simdLclNode,
+ CORINFO_TYPE_FLOAT, 16);
}
user->AsOp()->gtOp2 = hwiNode;
//
GenTree* Lowering::LowerHWIntrinsicCndSel(GenTreeHWIntrinsic* node)
{
- var_types simdType = node->gtType;
- CorInfoType simdBaseJitType = node->GetSimdBaseJitType();
- var_types simdBaseType = node->GetSimdBaseType();
- unsigned simdSize = node->GetSimdSize();
- bool isSimdAsHWIntrinsic = node->IsSimdAsHWIntrinsic();
+ var_types simdType = node->gtType;
+ CorInfoType simdBaseJitType = node->GetSimdBaseJitType();
+ var_types simdBaseType = node->GetSimdBaseType();
+ unsigned simdSize = node->GetSimdSize();
assert(varTypeIsSIMD(simdType));
assert(varTypeIsArithmetic(simdBaseType));
// ...
// tmp2 = op1 & op2
// ...
- tmp2 = comp->gtNewSimdBinOpNode(GT_AND, simdType, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
+ tmp2 = comp->gtNewSimdBinOpNode(GT_AND, simdType, op1, op2, simdBaseJitType, simdSize);
BlockRange().InsertAfter(op2, tmp2);
LowerNode(tmp2);
// ...
// tmp3 = op3 & ~tmp1
// ...
- tmp3 = comp->gtNewSimdBinOpNode(GT_AND_NOT, simdType, op3, tmp1, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
+ tmp3 = comp->gtNewSimdBinOpNode(GT_AND_NOT, simdType, op3, tmp1, simdBaseJitType, simdSize);
BlockRange().InsertAfter(op3, tmp3);
LowerNode(tmp3);
// ...
// tmp4 = tmp2 | tmp3
// ...
- tmp4 = comp->gtNewSimdBinOpNode(GT_OR, simdType, tmp2, tmp3, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
+ tmp4 = comp->gtNewSimdBinOpNode(GT_OR, simdType, tmp2, tmp3, simdBaseJitType, simdSize);
BlockRange().InsertBefore(node, tmp4);
LIR::Use use;
// var tmp3 = tmp2.ToVector256Unsafe();
// return tmp3.WithUpper(tmp1);
- tmp1 = comp->gtNewSimdCreateBroadcastNode(TYP_SIMD16, op1, simdBaseJitType, 16, false);
+ tmp1 = comp->gtNewSimdCreateBroadcastNode(TYP_SIMD16, op1, simdBaseJitType, 16);
BlockRange().InsertAfter(op1, tmp1);
node->Op(1) = tmp1;
imm8 -= count / 2;
- tmp1 = comp->gtNewSimdGetUpperNode(TYP_SIMD16, op1, simdBaseJitType, simdSize, false);
+ tmp1 = comp->gtNewSimdGetUpperNode(TYP_SIMD16, op1, simdBaseJitType, simdSize);
BlockRange().InsertBefore(node, tmp1);
LowerNode(tmp1);
}
// ...
// op1 = op1.GetLower();
- tmp1 = comp->gtNewSimdGetLowerNode(TYP_SIMD16, op1, simdBaseJitType, simdSize, false);
+ tmp1 = comp->gtNewSimdGetLowerNode(TYP_SIMD16, op1, simdBaseJitType, simdSize);
BlockRange().InsertBefore(node, tmp1);
LowerNode(tmp1);
}
imm8 -= count / 2;
- tmp1 = comp->gtNewSimdGetUpperNode(TYP_SIMD16, op1, simdBaseJitType, simdSize, false);
+ tmp1 = comp->gtNewSimdGetUpperNode(TYP_SIMD16, op1, simdBaseJitType, simdSize);
BlockRange().InsertAfter(op1, tmp1);
LowerNode(tmp1);
}
// ...
// op1 = op1.GetLower();
- tmp1 = comp->gtNewSimdGetLowerNode(TYP_SIMD16, op1, simdBaseJitType, simdSize, false);
+ tmp1 = comp->gtNewSimdGetLowerNode(TYP_SIMD16, op1, simdBaseJitType, simdSize);
BlockRange().InsertAfter(op1, tmp1);
LowerNode(tmp1);
}
tmp2 = comp->gtClone(tmp1);
BlockRange().InsertAfter(tmp1, tmp2);
- tmp3 = comp->gtNewSimdGetUpperNode(TYP_SIMD16, tmp2, simdBaseJitType, simdSize, false);
+ tmp3 = comp->gtNewSimdGetUpperNode(TYP_SIMD16, tmp2, simdBaseJitType, simdSize);
BlockRange().InsertAfter(tmp2, tmp3);
LowerNode(tmp3);
- tmp1 = comp->gtNewSimdGetLowerNode(TYP_SIMD16, tmp1, simdBaseJitType, simdSize, false);
+ tmp1 = comp->gtNewSimdGetLowerNode(TYP_SIMD16, tmp1, simdBaseJitType, simdSize);
BlockRange().InsertAfter(tmp3, tmp1);
LowerNode(tmp1);
- tmp2 = comp->gtNewSimdBinOpNode(GT_ADD, TYP_SIMD16, tmp3, tmp1, simdBaseJitType, 16, false);
+ tmp2 = comp->gtNewSimdBinOpNode(GT_ADD, TYP_SIMD16, tmp3, tmp1, simdBaseJitType, 16);
BlockRange().InsertAfter(tmp1, tmp2);
LowerNode(tmp2);
memcpy(&vecCon1->gtSimdVal, &simd16Val, sizeof(simd16_t));
BlockRange().InsertAfter(op1, vecCon1);
- op1 = comp->gtNewSimdBinOpNode(GT_AND, simdType, op1, vecCon1, simdBaseJitType, simdSize, false);
+ op1 = comp->gtNewSimdBinOpNode(GT_AND, simdType, op1, vecCon1, simdBaseJitType, simdSize);
BlockRange().InsertAfter(vecCon1, op1);
LowerNode(vecCon1);
memcpy(&vecCon2->gtSimdVal, &simd16Val, sizeof(simd16_t));
BlockRange().InsertAfter(op2, vecCon2);
- op2 = comp->gtNewSimdBinOpNode(GT_AND, simdType, op2, vecCon2, simdBaseJitType, simdSize, false);
+ op2 = comp->gtNewSimdBinOpNode(GT_AND, simdType, op2, vecCon2, simdBaseJitType, simdSize);
BlockRange().InsertAfter(vecCon2, op2);
LowerNode(vecCon2);
// var tmp1 = Isa.Multiply(op1, op2);
// ...
- tmp1 = comp->gtNewSimdBinOpNode(GT_MUL, simdType, op1, op2, simdBaseJitType, simdSize, false);
+ tmp1 = comp->gtNewSimdBinOpNode(GT_MUL, simdType, op1, op2, simdBaseJitType, simdSize);
BlockRange().InsertBefore(node, tmp1);
LowerNode(tmp1);
// tmp1 = Isa.Add(tmp1, tmp2);
// ...
- tmp1 = comp->gtNewSimdBinOpNode(GT_ADD, simdType, tmp1, tmp2, simdBaseJitType, simdSize, false);
+ tmp1 = comp->gtNewSimdBinOpNode(GT_ADD, simdType, tmp1, tmp2, simdBaseJitType, simdSize);
}
BlockRange().InsertAfter(tmp2, tmp1);
tmp2 = comp->gtClone(tmp1);
BlockRange().InsertAfter(tmp1, tmp2);
- tmp3 = comp->gtNewSimdGetUpperNode(TYP_SIMD16, tmp2, simdBaseJitType, simdSize, false);
+ tmp3 = comp->gtNewSimdGetUpperNode(TYP_SIMD16, tmp2, simdBaseJitType, simdSize);
BlockRange().InsertAfter(tmp2, tmp3);
LowerNode(tmp3);
- tmp1 = comp->gtNewSimdGetLowerNode(TYP_SIMD16, tmp1, simdBaseJitType, simdSize, false);
+ tmp1 = comp->gtNewSimdGetLowerNode(TYP_SIMD16, tmp1, simdBaseJitType, simdSize);
BlockRange().InsertAfter(tmp3, tmp1);
LowerNode(tmp1);
- tmp2 = comp->gtNewSimdBinOpNode(GT_ADD, TYP_SIMD16, tmp3, tmp1, simdBaseJitType, 16, false);
+ tmp2 = comp->gtNewSimdBinOpNode(GT_ADD, TYP_SIMD16, tmp3, tmp1, simdBaseJitType, 16);
BlockRange().InsertAfter(tmp1, tmp2);
LowerNode(tmp2);
// 'TempInfo' data that contains the GT_ASG and GT_LCL_VAR nodes for assignment
// and variable load respectively.
//
-TempInfo Compiler::fgMakeTemp(GenTree* rhs, CORINFO_CLASS_HANDLE structType /*= nullptr*/)
+TempInfo Compiler::fgMakeTemp(GenTree* rhs)
{
unsigned lclNum = lvaGrabTemp(true DEBUGARG("fgMakeTemp is creating a new local variable"));
-
- if (varTypeIsStruct(rhs))
- {
- assert(structType != nullptr);
- lvaSetStruct(lclNum, structType, false);
- }
-
- // If rhs->TypeGet() == TYP_STRUCT, gtNewTempAssign() will create a GT_COPYBLK tree.
- // The type of GT_COPYBLK is TYP_VOID. Therefore, we should use type of rhs for
- // setting type of lcl vars created.
- GenTree* asg = gtNewTempAssign(lclNum, rhs);
- GenTree* load = gtNewLclvNode(lclNum, genActualType(rhs));
+ GenTree* asg = gtNewTempAssign(lclNum, rhs);
+ GenTree* load = gtNewLclvNode(lclNum, genActualType(rhs));
TempInfo tempInfo{};
tempInfo.asg = asg;
// ppTree - a pointer to the child node we will be replacing with the comma expression that
// evaluates ppTree to a temp and returns the result
//
-// structType - value type handle if the temp created is of TYP_STRUCT.
-//
// Return Value:
// A fresh GT_LCL_VAR node referencing the temp which has not been used
//
// original use and new use is possible. Otherwise, fgInsertCommaFormTemp
// should be used directly.
//
-GenTree* Compiler::fgMakeMultiUse(GenTree** pOp, CORINFO_CLASS_HANDLE structType /*= nullptr*/)
+GenTree* Compiler::fgMakeMultiUse(GenTree** pOp)
{
GenTree* const tree = *pOp;
return gtCloneExpr(tree);
}
- return fgInsertCommaFormTemp(pOp, structType);
+ return fgInsertCommaFormTemp(pOp);
}
//------------------------------------------------------------------------------
// ppTree - a pointer to the child node we will be replacing with the comma expression that
// evaluates ppTree to a temp and returns the result
//
-// structType - value type handle if the temp created is of TYP_STRUCT.
-//
// Return Value:
// A fresh GT_LCL_VAR node referencing the temp which has not been used
//
-GenTree* Compiler::fgInsertCommaFormTemp(GenTree** ppTree, CORINFO_CLASS_HANDLE structType /*= nullptr*/)
+GenTree* Compiler::fgInsertCommaFormTemp(GenTree** ppTree)
{
GenTree* subTree = *ppTree;
- TempInfo tempInfo = fgMakeTemp(subTree, structType);
+ TempInfo tempInfo = fgMakeTemp(subTree);
GenTree* asg = tempInfo.asg;
GenTree* load = tempInfo.load;
if (sqrt != nullptr)
{
- CorInfoType simdBaseJitType = node->GetSimdBaseJitType();
- node = gtNewSimdSqrtNode(simdType, hwop1, simdBaseJitType, simdSize, node->IsSimdAsHWIntrinsic())
- ->AsHWIntrinsic();
+ node = gtNewSimdSqrtNode(simdType, hwop1, node->GetSimdBaseJitType(), simdSize)->AsHWIntrinsic();
DEBUG_DESTROY_NODE(sqrt);
}
else
JITDUMP(" Found Vector<%s>\n", varTypeName(JitType2PreciseVarType(simdBaseJitType)));
size = getSIMDVectorRegisterByteLength();
-
- uint32_t handleIndex = static_cast<uint32_t>(simdBaseJitType - CORINFO_TYPE_BYTE);
- assert(handleIndex < SIMDHandlesCache::SupportedTypeCount);
-
- m_simdHandleCache->VectorTHandles[handleIndex] = typeHnd;
break;
}
}
JITDUMP(" Found Vector64<%s>\n", varTypeName(JitType2PreciseVarType(simdBaseJitType)));
-
- uint32_t handleIndex = static_cast<uint32_t>(simdBaseJitType - CORINFO_TYPE_BYTE);
- assert(handleIndex < SIMDHandlesCache::SupportedTypeCount);
-
- m_simdHandleCache->Vector64THandles[handleIndex] = typeHnd;
break;
}
#endif // TARGET_ARM64
}
JITDUMP(" Found Vector128<%s>\n", varTypeName(JitType2PreciseVarType(simdBaseJitType)));
-
- uint32_t handleIndex = static_cast<uint32_t>(simdBaseJitType - CORINFO_TYPE_BYTE);
- assert(handleIndex < SIMDHandlesCache::SupportedTypeCount);
-
- m_simdHandleCache->Vector128THandles[handleIndex] = typeHnd;
break;
}
}
JITDUMP(" Found Vector256<%s>\n", varTypeName(JitType2PreciseVarType(simdBaseJitType)));
-
- uint32_t handleIndex = static_cast<uint32_t>(simdBaseJitType - CORINFO_TYPE_BYTE);
- assert(handleIndex < SIMDHandlesCache::SupportedTypeCount);
-
- m_simdHandleCache->Vector256THandles[handleIndex] = typeHnd;
break;
}
}
JITDUMP(" Found Vector512<%s>\n", varTypeName(JitType2PreciseVarType(simdBaseJitType)));
-
- uint32_t handleIndex = static_cast<uint32_t>(simdBaseJitType - CORINFO_TYPE_BYTE);
- assert(handleIndex < SIMDHandlesCache::SupportedTypeCount);
-
- m_simdHandleCache->Vector512THandles[handleIndex] = typeHnd;
break;
}
#endif // TARGET_XARCH
{
assert(size == info.compCompHnd->getClassSize(typeHnd));
setUsesSIMDTypes(true);
-
- CORINFO_CLASS_HANDLE* pCanonicalHnd = nullptr;
-
- switch (size)
- {
- case 8:
- pCanonicalHnd = &m_simdHandleCache->CanonicalSimd8Handle;
- break;
- case 12:
- // There is no need for a canonical SIMD12 handle because it is always Vector3.
- break;
- case 16:
- pCanonicalHnd = &m_simdHandleCache->CanonicalSimd16Handle;
- break;
- case 32:
- pCanonicalHnd = &m_simdHandleCache->CanonicalSimd32Handle;
- break;
- case 64:
- pCanonicalHnd = &m_simdHandleCache->CanonicalSimd64Handle;
- break;
- default:
- unreached();
- }
-
- if ((pCanonicalHnd != nullptr) && (*pCanonicalHnd == NO_CLASS_HANDLE))
- {
- *pCanonicalHnd = typeHnd;
- }
}
return simdBaseJitType;
case NI_VectorT256_Abs:
#endif // TARGET_XARCH
{
- return gtNewSimdAbsNode(retType, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true);
+ return gtNewSimdAbsNode(retType, op1, simdBaseJitType, simdSize);
}
case NI_VectorT128_Ceiling:
case NI_VectorT256_Ceiling:
#endif // TARGET_XARCH
{
- return gtNewSimdCeilNode(retType, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true);
+ return gtNewSimdCeilNode(retType, op1, simdBaseJitType, simdSize);
}
case NI_Quaternion_Conjugate:
vecCon->gtSimdVal.f32[2] = -1.0f;
vecCon->gtSimdVal.f32[3] = +1.0f;
- return gtNewSimdBinOpNode(GT_MUL, retType, op1, vecCon, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ true);
+ return gtNewSimdBinOpNode(GT_MUL, retType, op1, vecCon, simdBaseJitType, simdSize);
}
case NI_VectorT128_Floor:
case NI_VectorT256_Floor:
#endif // TARGET_XARCH
{
- return gtNewSimdFloorNode(retType, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true);
+ return gtNewSimdFloorNode(retType, op1, simdBaseJitType, simdSize);
}
case NI_Quaternion_Inverse:
vecCon->gtSimdVal.f32[2] = -1.0f;
vecCon->gtSimdVal.f32[3] = +1.0f;
- GenTree* conjugate = gtNewSimdBinOpNode(GT_MUL, retType, op1, vecCon, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ true);
+ GenTree* conjugate = gtNewSimdBinOpNode(GT_MUL, retType, op1, vecCon, simdBaseJitType, simdSize);
- op1 = gtNewSimdDotProdNode(retType, clonedOp1, clonedOp2, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ true);
+ op1 = gtNewSimdDotProdNode(retType, clonedOp1, clonedOp2, simdBaseJitType, simdSize);
- return gtNewSimdBinOpNode(GT_DIV, retType, conjugate, op1, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ true);
+ return gtNewSimdBinOpNode(GT_DIV, retType, conjugate, op1, simdBaseJitType, simdSize);
}
case NI_Quaternion_Length:
op1 = impCloneExpr(op1, &clonedOp1, NO_CLASS_HANDLE, CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector length"));
- op1 = gtNewSimdDotProdNode(retType, op1, clonedOp1, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ true);
+ op1 = gtNewSimdDotProdNode(retType, op1, clonedOp1, simdBaseJitType, simdSize);
return new (this, GT_INTRINSIC)
GenTreeIntrinsic(simdBaseType, op1, NI_System_Math_Sqrt, NO_METHOD_HANDLE);
op1 = impCloneExpr(op1, &clonedOp1, NO_CLASS_HANDLE, CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector length squared"));
- return gtNewSimdDotProdNode(retType, op1, clonedOp1, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ true);
+ return gtNewSimdDotProdNode(retType, op1, clonedOp1, simdBaseJitType, simdSize);
}
case NI_VectorT128_Load:
op1 = op1->gtGetOp1();
}
- return gtNewSimdLoadNode(retType, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true);
+ return gtNewSimdLoadNode(retType, op1, simdBaseJitType, simdSize);
}
case NI_VectorT128_LoadAligned:
op1 = op1->gtGetOp1();
}
- return gtNewSimdLoadAlignedNode(retType, op1, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ true);
+ return gtNewSimdLoadAlignedNode(retType, op1, simdBaseJitType, simdSize);
}
case NI_VectorT128_LoadAlignedNonTemporal:
op1 = op1->gtGetOp1();
}
- return gtNewSimdLoadNonTemporalNode(retType, op1, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ true);
+ return gtNewSimdLoadNonTemporalNode(retType, op1, simdBaseJitType, simdSize);
}
case NI_Quaternion_Negate:
case NI_VectorT256_op_UnaryNegation:
#endif // TARGET_XARCH
{
- return gtNewSimdUnOpNode(GT_NEG, retType, op1, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ true);
+ return gtNewSimdUnOpNode(GT_NEG, retType, op1, simdBaseJitType, simdSize);
}
case NI_Quaternion_Normalize:
clonedOp1 = impCloneExpr(clonedOp1, &clonedOp2, NO_CLASS_HANDLE, CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector normalize (2)"));
- op1 = gtNewSimdDotProdNode(retType, op1, clonedOp1, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ true);
+ op1 = gtNewSimdDotProdNode(retType, op1, clonedOp1, simdBaseJitType, simdSize);
- op1 = gtNewSimdSqrtNode(retType, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true);
+ op1 = gtNewSimdSqrtNode(retType, op1, simdBaseJitType, simdSize);
- return gtNewSimdBinOpNode(GT_DIV, retType, clonedOp2, op1, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ true);
+ return gtNewSimdBinOpNode(GT_DIV, retType, clonedOp2, op1, simdBaseJitType, simdSize);
}
case NI_VectorT128_OnesComplement:
case NI_VectorT256_op_OnesComplement:
#endif // TARGET_XARCH
{
- return gtNewSimdUnOpNode(GT_NOT, retType, op1, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ true);
+ return gtNewSimdUnOpNode(GT_NOT, retType, op1, simdBaseJitType, simdSize);
}
case NI_Vector2_Sqrt:
case NI_VectorT256_Sqrt:
#endif // TARGET_XARCH
{
- return gtNewSimdSqrtNode(retType, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true);
+ return gtNewSimdSqrtNode(retType, op1, simdBaseJitType, simdSize);
}
case NI_VectorT128_Sum:
case NI_VectorT256_Sum:
#endif // TARGET_XARCH
{
- return gtNewSimdSumNode(retType, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true);
+ return gtNewSimdSumNode(retType, op1, simdBaseJitType, simdSize);
}
case NI_VectorT128_ToScalar:
{
- return gtNewSimdHWIntrinsicNode(retType, op1, NI_Vector128_ToScalar, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ true);
+ return gtNewSimdHWIntrinsicNode(retType, op1, NI_Vector128_ToScalar, simdBaseJitType, simdSize);
}
case NI_VectorT128_op_UnaryPlus:
case NI_VectorT256_WidenLower:
#endif // TARGET_XARCH
{
- return gtNewSimdWidenLowerNode(retType, op1, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ true);
+ return gtNewSimdWidenLowerNode(retType, op1, simdBaseJitType, simdSize);
}
case NI_VectorT128_WidenUpper:
case NI_VectorT256_WidenUpper:
#endif // TARGET_XARCH
{
- return gtNewSimdWidenUpperNode(retType, op1, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ true);
+ return gtNewSimdWidenUpperNode(retType, op1, simdBaseJitType, simdSize);
}
#if defined(TARGET_XARCH)
assert(simdBaseType == TYP_FLOAT);
NamedIntrinsic convert = (simdSize == 32) ? NI_AVX_ConvertToVector256Int32WithTruncation
: NI_SSE2_ConvertToVector128Int32WithTruncation;
- return gtNewSimdHWIntrinsicNode(retType, op1, convert, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ true);
+ return gtNewSimdHWIntrinsicNode(retType, op1, convert, simdBaseJitType, simdSize);
}
case NI_VectorT128_ConvertToSingle:
assert(simdBaseType == TYP_INT);
NamedIntrinsic convert =
(simdSize == 32) ? NI_AVX_ConvertToVector256Single : NI_SSE2_ConvertToVector128Single;
- return gtNewSimdHWIntrinsicNode(retType, op1, convert, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ true);
+ return gtNewSimdHWIntrinsicNode(retType, op1, convert, simdBaseJitType, simdSize);
}
case NI_VectorT256_ToScalar:
{
- return gtNewSimdHWIntrinsicNode(retType, op1, NI_Vector256_ToScalar, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ true);
+ return gtNewSimdHWIntrinsicNode(retType, op1, NI_Vector256_ToScalar, simdBaseJitType, simdSize);
}
#elif defined(TARGET_ARM64)
case NI_VectorT128_ConvertToDouble:
{
assert((simdBaseType == TYP_LONG) || (simdBaseType == TYP_ULONG));
return gtNewSimdHWIntrinsicNode(retType, op1, NI_AdvSimd_Arm64_ConvertToDouble, simdBaseJitType,
- simdSize, /* isSimdAsHWIntrinsic */ true);
+ simdSize);
}
case NI_VectorT128_ConvertToInt32:
{
assert(simdBaseType == TYP_FLOAT);
return gtNewSimdHWIntrinsicNode(retType, op1, NI_AdvSimd_ConvertToInt32RoundToZero, simdBaseJitType,
- simdSize, /* isSimdAsHWIntrinsic */ true);
+ simdSize);
}
case NI_VectorT128_ConvertToInt64:
{
assert(simdBaseType == TYP_DOUBLE);
return gtNewSimdHWIntrinsicNode(retType, op1, NI_AdvSimd_Arm64_ConvertToInt64RoundToZero,
- simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true);
+ simdBaseJitType, simdSize);
}
case NI_VectorT128_ConvertToSingle:
{
assert((simdBaseType == TYP_INT) || (simdBaseType == TYP_UINT));
- return gtNewSimdHWIntrinsicNode(retType, op1, NI_AdvSimd_ConvertToSingle, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ true);
+ return gtNewSimdHWIntrinsicNode(retType, op1, NI_AdvSimd_ConvertToSingle, simdBaseJitType,
+ simdSize);
}
case NI_VectorT128_ConvertToUInt32:
{
assert(simdBaseType == TYP_FLOAT);
return gtNewSimdHWIntrinsicNode(retType, op1, NI_AdvSimd_ConvertToUInt32RoundToZero,
- simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true);
+ simdBaseJitType, simdSize);
}
case NI_VectorT128_ConvertToUInt64:
{
assert(simdBaseType == TYP_DOUBLE);
return gtNewSimdHWIntrinsicNode(retType, op1, NI_AdvSimd_Arm64_ConvertToUInt64RoundToZero,
- simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true);
+ simdBaseJitType, simdSize);
}
#else
#error Unsupported platform
case NI_VectorT256_op_Addition:
#endif // TARGET_XARCH
{
- return gtNewSimdBinOpNode(GT_ADD, retType, op1, op2, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ true);
+ return gtNewSimdBinOpNode(GT_ADD, retType, op1, op2, simdBaseJitType, simdSize);
}
case NI_VectorT128_AndNot:
case NI_VectorT256_AndNot:
#endif // TARGET_XARCH
{
- return gtNewSimdBinOpNode(GT_AND_NOT, retType, op1, op2, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ true);
+ return gtNewSimdBinOpNode(GT_AND_NOT, retType, op1, op2, simdBaseJitType, simdSize);
}
case NI_VectorT128_BitwiseAnd:
case NI_VectorT256_op_BitwiseAnd:
#endif // TARGET_XARCH
{
- return gtNewSimdBinOpNode(GT_AND, retType, op1, op2, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ true);
+ return gtNewSimdBinOpNode(GT_AND, retType, op1, op2, simdBaseJitType, simdSize);
}
case NI_VectorT128_BitwiseOr:
case NI_VectorT256_op_BitwiseOr:
#endif // TARGET_XARCH
{
- return gtNewSimdBinOpNode(GT_OR, retType, op1, op2, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ true);
+ return gtNewSimdBinOpNode(GT_OR, retType, op1, op2, simdBaseJitType, simdSize);
}
case NI_Vector2_CreateBroadcast:
assert(retType == TYP_VOID);
copyBlkDst = op1;
- copyBlkSrc = gtNewSimdCreateBroadcastNode(simdType, op2, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ true);
+ copyBlkSrc = gtNewSimdCreateBroadcastNode(simdType, op2, simdBaseJitType, simdSize);
break;
}
case NI_Vector3_Distance:
case NI_Vector4_Distance:
{
- op1 = gtNewSimdBinOpNode(GT_SUB, simdType, op1, op2, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ true);
+ op1 = gtNewSimdBinOpNode(GT_SUB, simdType, op1, op2, simdBaseJitType, simdSize);
GenTree* clonedOp1;
op1 = impCloneExpr(op1, &clonedOp1, NO_CLASS_HANDLE, CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone diff for vector distance"));
- op1 = gtNewSimdDotProdNode(retType, op1, clonedOp1, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ true);
+ op1 = gtNewSimdDotProdNode(retType, op1, clonedOp1, simdBaseJitType, simdSize);
return new (this, GT_INTRINSIC)
GenTreeIntrinsic(retType, op1, NI_System_Math_Sqrt, NO_METHOD_HANDLE);
case NI_Vector3_DistanceSquared:
case NI_Vector4_DistanceSquared:
{
- op1 = gtNewSimdBinOpNode(GT_SUB, simdType, op1, op2, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ true);
+ op1 = gtNewSimdBinOpNode(GT_SUB, simdType, op1, op2, simdBaseJitType, simdSize);
GenTree* clonedOp1;
op1 = impCloneExpr(op1, &clonedOp1, NO_CLASS_HANDLE, CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone diff for vector distance squared"));
- return gtNewSimdDotProdNode(retType, op1, clonedOp1, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ true);
+ return gtNewSimdDotProdNode(retType, op1, clonedOp1, simdBaseJitType, simdSize);
}
case NI_Quaternion_Divide:
case NI_VectorT256_op_Division:
#endif // TARGET_XARCH
{
- return gtNewSimdBinOpNode(GT_DIV, retType, op1, op2, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ true);
+ return gtNewSimdBinOpNode(GT_DIV, retType, op1, op2, simdBaseJitType, simdSize);
}
case NI_Plane_Dot:
case NI_VectorT256_Dot:
#endif // TARGET_XARCH
{
- return gtNewSimdDotProdNode(retType, op1, op2, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ true);
+ return gtNewSimdDotProdNode(retType, op1, op2, simdBaseJitType, simdSize);
}
case NI_VectorT128_Equals:
case NI_VectorT256_Equals:
#endif // TARGET_XARCH
{
- return gtNewSimdCmpOpNode(GT_EQ, retType, op1, op2, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ true);
+ return gtNewSimdCmpOpNode(GT_EQ, retType, op1, op2, simdBaseJitType, simdSize);
}
case NI_Plane_op_Equality:
case NI_VectorT256_op_Equality:
#endif // TARGET_XARCH
{
- return gtNewSimdCmpOpAllNode(GT_EQ, retType, op1, op2, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ true);
+ return gtNewSimdCmpOpAllNode(GT_EQ, retType, op1, op2, simdBaseJitType, simdSize);
}
case NI_VectorT128_EqualsAny:
case NI_VectorT256_EqualsAny:
#endif // TARGET_XARCH
{
- return gtNewSimdCmpOpAnyNode(GT_EQ, retType, op1, op2, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ true);
+ return gtNewSimdCmpOpAnyNode(GT_EQ, retType, op1, op2, simdBaseJitType, simdSize);
}
case NI_VectorT128_Xor:
case NI_VectorT256_op_ExclusiveOr:
#endif // TARGET_XARCH
{
- return gtNewSimdBinOpNode(GT_XOR, retType, op1, op2, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ true);
+ return gtNewSimdBinOpNode(GT_XOR, retType, op1, op2, simdBaseJitType, simdSize);
}
case NI_Quaternion_get_Item:
case NI_VectorT256_GetElement:
#endif // TARGET_XARCH
{
- return gtNewSimdGetElementNode(retType, op1, op2, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ true);
+ return gtNewSimdGetElementNode(retType, op1, op2, simdBaseJitType, simdSize);
}
case NI_VectorT128_GreaterThan:
case NI_VectorT256_GreaterThan:
#endif // TARGET_XARCH
{
- return gtNewSimdCmpOpNode(GT_GT, retType, op1, op2, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ true);
+ return gtNewSimdCmpOpNode(GT_GT, retType, op1, op2, simdBaseJitType, simdSize);
}
case NI_VectorT128_GreaterThanAll:
case NI_VectorT256_GreaterThanAll:
#endif // TARGET_XARCH
{
- return gtNewSimdCmpOpAllNode(GT_GT, retType, op1, op2, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ true);
+ return gtNewSimdCmpOpAllNode(GT_GT, retType, op1, op2, simdBaseJitType, simdSize);
}
case NI_VectorT128_GreaterThanAny:
case NI_VectorT256_GreaterThanAny:
#endif // TARGET_XARCH
{
- return gtNewSimdCmpOpAnyNode(GT_GT, retType, op1, op2, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ true);
+ return gtNewSimdCmpOpAnyNode(GT_GT, retType, op1, op2, simdBaseJitType, simdSize);
}
case NI_VectorT128_GreaterThanOrEqual:
case NI_VectorT256_GreaterThanOrEqual:
#endif // TARGET_XARCH
{
- return gtNewSimdCmpOpNode(GT_GE, retType, op1, op2, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ true);
+ return gtNewSimdCmpOpNode(GT_GE, retType, op1, op2, simdBaseJitType, simdSize);
}
case NI_VectorT128_GreaterThanOrEqualAll:
case NI_VectorT256_GreaterThanOrEqualAll:
#endif // TARGET_XARCH
{
- return gtNewSimdCmpOpAllNode(GT_GE, retType, op1, op2, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ true);
+ return gtNewSimdCmpOpAllNode(GT_GE, retType, op1, op2, simdBaseJitType, simdSize);
}
case NI_VectorT128_GreaterThanOrEqualAny:
case NI_VectorT256_GreaterThanOrEqualAny:
#endif // TARGET_XARCH
{
- return gtNewSimdCmpOpAnyNode(GT_GE, retType, op1, op2, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ true);
+ return gtNewSimdCmpOpAnyNode(GT_GE, retType, op1, op2, simdBaseJitType, simdSize);
}
case NI_Plane_op_Inequality:
case NI_VectorT256_op_Inequality:
#endif // TARGET_XARCH
{
- return gtNewSimdCmpOpAnyNode(GT_NE, retType, op1, op2, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ true);
+ return gtNewSimdCmpOpAnyNode(GT_NE, retType, op1, op2, simdBaseJitType, simdSize);
}
case NI_VectorT128_LessThan:
case NI_VectorT256_LessThan:
#endif // TARGET_XARCH
{
- return gtNewSimdCmpOpNode(GT_LT, retType, op1, op2, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ true);
+ return gtNewSimdCmpOpNode(GT_LT, retType, op1, op2, simdBaseJitType, simdSize);
}
case NI_VectorT128_LessThanAll:
case NI_VectorT256_LessThanAll:
#endif // TARGET_XARCH
{
- return gtNewSimdCmpOpAllNode(GT_LT, retType, op1, op2, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ true);
+ return gtNewSimdCmpOpAllNode(GT_LT, retType, op1, op2, simdBaseJitType, simdSize);
}
case NI_VectorT128_LessThanAny:
case NI_VectorT256_LessThanAny:
#endif // TARGET_XARCH
{
- return gtNewSimdCmpOpAnyNode(GT_LT, retType, op1, op2, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ true);
+ return gtNewSimdCmpOpAnyNode(GT_LT, retType, op1, op2, simdBaseJitType, simdSize);
}
case NI_VectorT128_LessThanOrEqual:
case NI_VectorT256_LessThanOrEqual:
#endif // TARGET_XARCH
{
- return gtNewSimdCmpOpNode(GT_LE, retType, op1, op2, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ true);
+ return gtNewSimdCmpOpNode(GT_LE, retType, op1, op2, simdBaseJitType, simdSize);
}
case NI_VectorT128_LessThanOrEqualAll:
case NI_VectorT256_LessThanOrEqualAll:
#endif // TARGET_XARCH
{
- return gtNewSimdCmpOpAllNode(GT_LE, retType, op1, op2, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ true);
+ return gtNewSimdCmpOpAllNode(GT_LE, retType, op1, op2, simdBaseJitType, simdSize);
}
case NI_VectorT128_LessThanOrEqualAny:
case NI_VectorT256_LessThanOrEqualAny:
#endif // TARGET_XARCH
{
- return gtNewSimdCmpOpAnyNode(GT_LE, retType, op1, op2, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ true);
+ return gtNewSimdCmpOpAnyNode(GT_LE, retType, op1, op2, simdBaseJitType, simdSize);
}
case NI_VectorT128_LoadUnsafeIndex:
op2 = gtNewOperNode(GT_MUL, op2->TypeGet(), op2, tmp);
op1 = gtNewOperNode(GT_ADD, op1->TypeGet(), op1, op2);
- return gtNewSimdLoadNode(retType, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true);
+ return gtNewSimdLoadNode(retType, op1, simdBaseJitType, simdSize);
}
case NI_Vector2_Max:
case NI_VectorT256_Max:
#endif // TARGET_XARCH
{
- return gtNewSimdMaxNode(retType, op1, op2, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ true);
+ return gtNewSimdMaxNode(retType, op1, op2, simdBaseJitType, simdSize);
}
case NI_Vector2_Min:
case NI_VectorT256_Min:
#endif // TARGET_XARCH
{
- return gtNewSimdMinNode(retType, op1, op2, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ true);
+ return gtNewSimdMinNode(retType, op1, op2, simdBaseJitType, simdSize);
}
case NI_Quaternion_Multiply:
case NI_VectorT256_op_Multiply:
#endif // TARGET_XARCH
{
- return gtNewSimdBinOpNode(GT_MUL, retType, op1, op2, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ true);
+ return gtNewSimdBinOpNode(GT_MUL, retType, op1, op2, simdBaseJitType, simdSize);
}
case NI_VectorT128_Narrow:
case NI_VectorT256_Narrow:
#endif // TARGET_XARCH
{
- return gtNewSimdNarrowNode(retType, op1, op2, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ true);
+ return gtNewSimdNarrowNode(retType, op1, op2, simdBaseJitType, simdSize);
}
case NI_VectorT128_ShiftLeft:
case NI_VectorT256_op_LeftShift:
#endif // TARGET_XARCH
{
- return gtNewSimdBinOpNode(GT_LSH, retType, op1, op2, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ true);
+ return gtNewSimdBinOpNode(GT_LSH, retType, op1, op2, simdBaseJitType, simdSize);
}
case NI_VectorT128_ShiftRightArithmetic:
#endif // TARGET_XARCH
{
genTreeOps op = varTypeIsUnsigned(simdBaseType) ? GT_RSZ : GT_RSH;
- return gtNewSimdBinOpNode(op, retType, op1, op2, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ true);
+ return gtNewSimdBinOpNode(op, retType, op1, op2, simdBaseJitType, simdSize);
}
case NI_VectorT128_ShiftRightLogical:
case NI_VectorT256_op_UnsignedRightShift:
#endif // TARGET_XARCH
{
- return gtNewSimdBinOpNode(GT_RSZ, retType, op1, op2, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ true);
+ return gtNewSimdBinOpNode(GT_RSZ, retType, op1, op2, simdBaseJitType, simdSize);
}
case NI_VectorT128_Store:
op2 = op2->gtGetOp1();
}
- return gtNewSimdStoreNode(op2, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true);
+ return gtNewSimdStoreNode(op2, op1, simdBaseJitType, simdSize);
}
case NI_VectorT128_StoreAligned:
op2 = op2->gtGetOp1();
}
- return gtNewSimdStoreAlignedNode(op2, op1, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ true);
+ return gtNewSimdStoreAlignedNode(op2, op1, simdBaseJitType, simdSize);
}
case NI_VectorT128_StoreAlignedNonTemporal:
op2 = op2->gtGetOp1();
}
- return gtNewSimdStoreNonTemporalNode(op2, op1, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ true);
+ return gtNewSimdStoreNonTemporalNode(op2, op1, simdBaseJitType, simdSize);
}
case NI_Quaternion_Subtract:
case NI_VectorT256_op_Subtraction:
#endif // TARGET_XARCH
{
- return gtNewSimdBinOpNode(GT_SUB, retType, op1, op2, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ true);
+ return gtNewSimdBinOpNode(GT_SUB, retType, op1, op2, simdBaseJitType, simdSize);
}
default:
case NI_Vector3_Clamp:
case NI_Vector4_Clamp:
{
- GenTree* maxNode = gtNewSimdMaxNode(retType, op1, op2, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ true);
- return gtNewSimdMinNode(retType, maxNode, op3, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ true);
+ GenTree* maxNode = gtNewSimdMaxNode(retType, op1, op2, simdBaseJitType, simdSize);
+ return gtNewSimdMinNode(retType, maxNode, op3, simdBaseJitType, simdSize);
}
case NI_VectorT128_ConditionalSelect:
case NI_VectorT256_ConditionalSelect:
#endif // TARGET_XARCH
{
- return gtNewSimdCndSelNode(retType, op1, op2, op3, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ true);
+ return gtNewSimdCndSelNode(retType, op1, op2, op3, simdBaseJitType, simdSize);
}
case NI_Vector2_Lerp:
#if defined(TARGET_XARCH)
// op3 = broadcast(op3)
- op3 = gtNewSimdCreateBroadcastNode(retType, op3, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ true);
+ op3 = gtNewSimdCreateBroadcastNode(retType, op3, simdBaseJitType, simdSize);
#endif // TARGET_XARCH
// clonedOp3 = op3
#if defined(TARGET_XARCH)
// op3 = 1.0f - op3
GenTree* oneCon = gtNewOneConNode(retType, simdBaseType);
- op3 = gtNewSimdBinOpNode(GT_SUB, retType, oneCon, op3, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ true);
+ op3 = gtNewSimdBinOpNode(GT_SUB, retType, oneCon, op3, simdBaseJitType, simdSize);
#elif defined(TARGET_ARM64)
// op3 = 1.0f - op3
GenTree* oneCon = gtNewOneConNode(simdBaseType);
#endif
// op1 *= op3
- op1 = gtNewSimdBinOpNode(GT_MUL, retType, op1, op3, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ true);
+ op1 = gtNewSimdBinOpNode(GT_MUL, retType, op1, op3, simdBaseJitType, simdSize);
// op2 *= clonedOp3
- op2 = gtNewSimdBinOpNode(GT_MUL, retType, op2, clonedOp3, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ true);
+ op2 = gtNewSimdBinOpNode(GT_MUL, retType, op2, clonedOp3, simdBaseJitType, simdSize);
// return op1 + op2
- return gtNewSimdBinOpNode(GT_ADD, retType, op1, op2, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ true);
+ return gtNewSimdBinOpNode(GT_ADD, retType, op1, op2, simdBaseJitType, simdSize);
}
case NI_VectorT128_StoreUnsafeIndex:
op3 = gtNewOperNode(GT_MUL, op3->TypeGet(), op3, tmp);
op2 = gtNewOperNode(GT_ADD, op2->TypeGet(), op2, op3);
- return gtNewSimdStoreNode(op2, op1, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true);
+ return gtNewSimdStoreNode(op2, op1, simdBaseJitType, simdSize);
}
case NI_Vector2_Create:
nodeBuilder.AddOperand(3, gtNewZeroConNode(TYP_FLOAT));
copyBlkSrc = gtNewSimdHWIntrinsicNode(TYP_SIMD8, std::move(nodeBuilder), NI_Vector128_Create,
- simdBaseJitType, 16, /* isSimdAsHWIntrinsic */ true);
+ simdBaseJitType, 16);
#elif defined(TARGET_ARM64)
- copyBlkSrc = gtNewSimdHWIntrinsicNode(TYP_SIMD8, op2, op3, NI_Vector64_Create, simdBaseJitType,
- 8, /* isSimdAsHWIntrinsic */ true);
+ copyBlkSrc =
+ gtNewSimdHWIntrinsicNode(TYP_SIMD8, op2, op3, NI_Vector64_Create, simdBaseJitType, 8);
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
else
{
GenTree* idx = gtNewIconNode((simdSize == 12) ? 2 : 3, TYP_INT);
- copyBlkSrc = gtNewSimdWithElementNode(simdType, op2, idx, op3, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ true);
+ copyBlkSrc = gtNewSimdWithElementNode(simdType, op2, idx, op3, simdBaseJitType, simdSize);
}
copyBlkDst = op1;
case NI_VectorT256_WithElement:
#endif // TARGET_XARCH
{
- return gtNewSimdWithElementNode(retType, op1, op2, op3, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ true);
+ return gtNewSimdWithElementNode(retType, op1, op2, op3, simdBaseJitType, simdSize);
}
default:
nodeBuilder.AddOperand(3, gtNewZeroConNode(TYP_FLOAT));
copyBlkSrc = gtNewSimdHWIntrinsicNode(TYP_SIMD12, std::move(nodeBuilder), NI_Vector128_Create,
- simdBaseJitType, 16, /* isSimdAsHWIntrinsic */ true);
+ simdBaseJitType, 16);
}
copyBlkDst = op1;
else
{
GenTree* idx = gtNewIconNode(2, TYP_INT);
- op2 = gtNewSimdWithElementNode(simdType, op2, idx, op3, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ true);
+ op2 = gtNewSimdWithElementNode(simdType, op2, idx, op3, simdBaseJitType, simdSize);
idx = gtNewIconNode(3, TYP_INT);
- copyBlkSrc = gtNewSimdWithElementNode(simdType, op2, idx, op4, simdBaseJitType, simdSize,
- /* isSimdAsHWIntrinsic */ true);
+ copyBlkSrc = gtNewSimdWithElementNode(simdType, op2, idx, op4, simdBaseJitType, simdSize);
}
copyBlkDst = op1;
nodeBuilder.AddOperand(3, op5);
copyBlkSrc = gtNewSimdHWIntrinsicNode(TYP_SIMD16, std::move(nodeBuilder), NI_Vector128_Create,
- simdBaseJitType, 16, /* isSimdAsHWIntrinsic */ true);
+ simdBaseJitType, 16);
}
copyBlkDst = op1;