Fix impIntrinsic to not raise asserts for the AltJit scenario (#76048)
author     Tanner Gooding <tagoo@outlook.com>
Sat, 24 Sep 2022 02:54:54 +0000 (19:54 -0700)
committer  GitHub <noreply@github.com>
Sat, 24 Sep 2022 02:54:54 +0000 (19:54 -0700)
* Fix impIntrinsic to not raise asserts for the AltJit scenario

* Respond to PR feedback

* Specially handle NI_Vector64/128/256_* intrinsics where not all overloads are valid

* Change the AltJit intrinsic handling so we get no asserts

* Apply suggestions from code review

Co-authored-by: Jan Kotas <jkotas@microsoft.com>
* Apply suggestions from code review

* Fixing a build error

* Applying formatting patch

Co-authored-by: Jan Kotas <jkotas@microsoft.com>
src/coreclr/jit/importer.cpp

index a428120..39ac0f5 100644
@@ -3615,15 +3615,14 @@ GenTree* Compiler::impIntrinsic(GenTree*                newobjThis,
                                 NamedIntrinsic*         pIntrinsicName,
                                 bool*                   isSpecialIntrinsic)
 {
-    bool           mustExpand  = false;
-    bool           isSpecial   = false;
-    bool           isIntrinsic = false;
-    NamedIntrinsic ni          = NI_Illegal;
+    bool       mustExpand  = false;
+    bool       isSpecial   = false;
+    const bool isIntrinsic = (methodFlags & CORINFO_FLG_INTRINSIC) != 0;
 
-    if ((methodFlags & CORINFO_FLG_INTRINSIC) != 0)
-    {
-        isIntrinsic = true;
+    NamedIntrinsic ni = lookupNamedIntrinsic(method);
 
+    if (isIntrinsic)
+    {
         // The recursive non-virtual calls to Jit intrinsics are must-expand by convention.
         mustExpand = gtIsRecursiveCall(method) && !(methodFlags & CORINFO_FLG_VIRTUAL);
     }
@@ -3634,37 +3633,62 @@ GenTree* Compiler::impIntrinsic(GenTree*                newobjThis,
         assert(!info.compMatchedVM);
     }
 
-    ni = lookupNamedIntrinsic(method);
-
     // We specially support the following on all platforms to allow for dead
     // code optimization and to more generally support recursive intrinsics.
 
-    if (ni == NI_IsSupported_True)
+    if (isIntrinsic)
     {
-        assert(sig->numArgs == 0);
-        return gtNewIconNode(true);
-    }
+        if (ni == NI_IsSupported_True)
+        {
+            assert(sig->numArgs == 0);
+            return gtNewIconNode(true);
+        }
 
-    if (ni == NI_IsSupported_False)
-    {
-        assert(sig->numArgs == 0);
-        return gtNewIconNode(false);
-    }
+        if (ni == NI_IsSupported_False)
+        {
+            assert(sig->numArgs == 0);
+            return gtNewIconNode(false);
+        }
 
-    if (ni == NI_Throw_PlatformNotSupportedException)
-    {
-        return impUnsupportedNamedIntrinsic(CORINFO_HELP_THROW_PLATFORM_NOT_SUPPORTED, method, sig, mustExpand);
-    }
+        if (ni == NI_Throw_PlatformNotSupportedException)
+        {
+            return impUnsupportedNamedIntrinsic(CORINFO_HELP_THROW_PLATFORM_NOT_SUPPORTED, method, sig, mustExpand);
+        }
 
-    if ((ni > NI_SRCS_UNSAFE_START) && (ni < NI_SRCS_UNSAFE_END))
-    {
-        assert(!mustExpand);
-        return impSRCSUnsafeIntrinsic(ni, clsHnd, method, sig);
+        if ((ni > NI_SRCS_UNSAFE_START) && (ni < NI_SRCS_UNSAFE_END))
+        {
+            assert(!mustExpand);
+            return impSRCSUnsafeIntrinsic(ni, clsHnd, method, sig);
+        }
     }
 
 #ifdef FEATURE_HW_INTRINSICS
     if ((ni > NI_HW_INTRINSIC_START) && (ni < NI_HW_INTRINSIC_END))
     {
+        if (!isIntrinsic)
+        {
+#if defined(TARGET_XARCH)
+            // We can't guarantee that all overloads for the xplat intrinsics can be
+            // handled by the AltJit, so only allow the platform-specific intrinsics
+            assert((NI_Vector256_Xor + 1) == NI_X86Base_BitScanForward);
+
+            if (ni < NI_Vector256_Xor)
+#elif defined(TARGET_ARM64)
+            // We can't guarantee that all overloads for the xplat intrinsics can be
+            // handled by the AltJit, so only allow the platform-specific intrinsics
+            assert((NI_Vector128_Xor + 1) == NI_AdvSimd_Abs);
+
+            if (ni < NI_Vector128_Xor)
+#else
+#error Unsupported platform
+#endif
+            {
+                // Several of the NI_Vector64/128/256 APIs do not have
+                // all overloads marked as intrinsic today, so they will assert
+                return nullptr;
+            }
+        }
+
         GenTree* hwintrinsic = impHWIntrinsic(ni, clsHnd, method, sig, mustExpand);
 
         if (mustExpand && (hwintrinsic == nullptr))
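
The bail-out above leans on NamedIntrinsic enum ordering: on each target the platform-specific IDs start immediately after the cross-platform NI_Vector* group, so one comparison (guarded by an adjacency assert) is enough to filter the cross-platform entries when the method was not flagged as intrinsic. Below is a minimal standalone sketch of that technique, separate from the patch, using made-up enum names rather than the real NamedIntrinsic values:

#include <cstdio>

// Hypothetical stand-ins for the real NamedIntrinsic values; only the ordering matters.
enum SketchIntrinsic
{
    SKETCH_Vector_Add,     // cross-platform helpers: not every overload is intrinsic
    SKETCH_Vector_Xor,     // last cross-platform entry
    SKETCH_BitScanForward, // first platform-specific entry
    SKETCH_Popcount,
};

// Mirrors the adjacency assert in the diff: if the enum were reordered, the cheap
// range check below would silently break, so guard the layout at compile time.
static_assert(SKETCH_Vector_Xor + 1 == SKETCH_BitScanForward,
              "platform-specific intrinsics must start right after the xplat group");

// Mirrors the AltJit guard: when the VM did not flag the method as intrinsic,
// skip the cross-platform entries instead of importing (and asserting on) them.
bool ShouldImportAsHWIntrinsic(SketchIntrinsic ni, bool isIntrinsic)
{
    if (!isIntrinsic && (ni < SKETCH_Vector_Xor))
    {
        return false;
    }
    return true;
}

int main()
{
    printf("%d\n", ShouldImportAsHWIntrinsic(SKETCH_Vector_Add, false)); // 0: bail out
    printf("%d\n", ShouldImportAsHWIntrinsic(SKETCH_Popcount, false));   // 1: platform-specific is fine
    return 0;
}
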
@@ -3675,7 +3699,7 @@ GenTree* Compiler::impIntrinsic(GenTree*                newobjThis,
         return hwintrinsic;
     }
 
-    if ((ni > NI_SIMD_AS_HWINTRINSIC_START) && (ni < NI_SIMD_AS_HWINTRINSIC_END))
+    if (isIntrinsic && (ni > NI_SIMD_AS_HWINTRINSIC_START) && (ni < NI_SIMD_AS_HWINTRINSIC_END))
     {
         // These intrinsics aren't defined recursively and so they will never be mustExpand
         // Instead, they provide software fallbacks that will be executed instead.
@@ -3685,6 +3709,15 @@ GenTree* Compiler::impIntrinsic(GenTree*                newobjThis,
     }
 #endif // FEATURE_HW_INTRINSICS
 
+    if (!isIntrinsic)
+    {
+        // Outside the cases above, there are many intrinsics which apply to only a
+        // subset of overloads and where simply matching by name may cause downstream
+        // asserts or other failures. Math.Min is one example: only its floating-point
+        // overloads are intrinsic.
+        return nullptr;
+    }
+
     *pIntrinsicName = ni;
 
     if (ni == NI_System_StubHelpers_GetStubContext)
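
The Math.Min comment above captures the core constraint: lookupNamedIntrinsic matches purely by name, so every overload of such a method maps to the same NamedIntrinsic value, while the VM only sets CORINFO_FLG_INTRINSIC on the overloads that actually have an intrinsic expansion (the floating-point ones). A small standalone sketch of why expansion is therefore gated on the flag rather than on the name match alone; the names below are illustrative, not JIT APIs:

#include <cstdio>
#include <string>

enum class SketchNI { Illegal, System_Math_Min };

// Name-based lookup: deliberately matches every overload named "Math.Min",
// just as lookupNamedIntrinsic matches by name alone.
SketchNI LookupByName(const std::string& methodName)
{
    return (methodName == "Math.Min") ? SketchNI::System_Math_Min : SketchNI::Illegal;
}

// Expansion is attempted only when the VM flagged this particular overload as
// intrinsic; otherwise report "no expansion" instead of asserting downstream.
bool TryExpandIntrinsic(const std::string& methodName, bool vmFlaggedIntrinsic)
{
    SketchNI ni = LookupByName(methodName);
    if (ni == SketchNI::Illegal || !vmFlaggedIntrinsic)
    {
        return false; // e.g. Math.Min(int, int): same name, but not an intrinsic overload
    }
    return true;
}

int main()
{
    printf("%d\n", TryExpandIntrinsic("Math.Min", true));  // 1: e.g. Math.Min(double, double)
    printf("%d\n", TryExpandIntrinsic("Math.Min", false)); // 0: e.g. Math.Min(int, int)
    return 0;
}
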
@@ -8889,9 +8922,12 @@ var_types Compiler::impImportCall(OPCODE                  opcode,
         }
 #endif // DEBUG
 
+        const bool isIntrinsic = (mflags & CORINFO_FLG_INTRINSIC) != 0;
+
         // <NICE> Factor this into getCallInfo </NICE>
         bool isSpecialIntrinsic = false;
-        if (((mflags & CORINFO_FLG_INTRINSIC) != 0) || !info.compMatchedVM)
+
+        if (isIntrinsic || !info.compMatchedVM)
         {
             // For mismatched VM (AltJit) we want to check all methods as intrinsic to ensure
             // we get more accurate codegen. This particularly applies to HWIntrinsic usage
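
This call-site condition is what makes the bail-outs above necessary: with a mismatched VM (AltJit), every method is routed through intrinsic recognition, not only those carrying CORINFO_FLG_INTRINSIC, so the recognition path must return gracefully for non-flagged methods. A tiny sketch of that gating, separate from the patch, with a made-up constant standing in for the real CORINFO_FLG_INTRINSIC bit:

#include <cstdio>

// Hypothetical stand-in for the real CORINFO_FLG_INTRINSIC bit.
const unsigned SKETCH_FLG_INTRINSIC = 0x1;

// Mirrors the impImportCall condition: intrinsic-flagged methods always go through
// recognition, and with a mismatched VM (AltJit) everything does, which is why
// impIntrinsic must tolerate methods the VM never flagged.
bool ShouldCheckForIntrinsic(unsigned methodFlags, bool compMatchedVM)
{
    const bool isIntrinsic = (methodFlags & SKETCH_FLG_INTRINSIC) != 0;
    return isIntrinsic || !compMatchedVM;
}

int main()
{
    printf("%d\n", ShouldCheckForIntrinsic(SKETCH_FLG_INTRINSIC, true)); // 1: flagged intrinsic
    printf("%d\n", ShouldCheckForIntrinsic(0, true));                    // 0: normal method, matched VM
    printf("%d\n", ShouldCheckForIntrinsic(0, false));                   // 1: AltJit checks everything
    return 0;
}
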