Use the common path for lowering single-reg SIMD returns (#72944)
author    SingleAccretion <62474226+SingleAccretion@users.noreply.github.com>
Thu, 28 Jul 2022 17:23:29 +0000 (20:23 +0300)
committer GitHub <noreply@github.com>
Thu, 28 Jul 2022 17:23:29 +0000 (10:23 -0700)
* Use the common path for lowering single-reg SIMD returns

This handles type mismatches that morph can create (see the standalone sketch after this list).

* Avoid regressions from RA copies

Lowering will now retype non-reg-candidate LCL_VAR nodes to LCL_FLD
ones in more circumstances, and this pessimizes LSRA a bit.

Apply the same fix as used by the LCL_VAR case. This turns out to
be a good CQ improvement on its own as well.

* Add a test

* Add more tests

* Fix CNS_INT handling

* Fix another similar issue

And remove the duplicative special handling of HWI/SIMD nodes.
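
Below is a minimal standalone C++ sketch (ordinary user code, not JIT code) of the kind of reinterpreting return the new tests exercise and that the first item above is about: the value being returned lives in integer-typed storage while the ABI return register is a floating-point one, so lowering has to move raw bits rather than perform a numeric conversion. The struct and helper names are invented here to mirror the C# test; memcpy stands in for the pointer cast the test uses.

#include <cstdint>
#include <cstring>
#include <cstdio>

struct StructWithLong   { int64_t Lng; };  // mirrors the C# test types
struct StructWithDouble { double  Dbl; };

// Hypothetical helper: return the long's storage reinterpreted as a double,
// as CallForStructWithLongAsStructWithDouble does with a pointer cast in C#.
static StructWithDouble ReinterpretAsDouble(StructWithLong s)
{
    StructWithDouble d;
    std::memcpy(&d, &s, sizeof(d));  // bit-for-bit copy, no numeric conversion
    return d;                        // returned in a floating-point register
}

int main()
{
    StructWithLong allBitsSet{-1};
    // An all-ones bit pattern is a (negative) NaN when viewed as a double.
    std::printf("%f\n", ReinterpretAsDouble(allBitsSet).Dbl);
    return 0;
}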

src/coreclr/jit/lower.cpp
src/coreclr/jit/lsrabuild.cpp
src/tests/JIT/Regression/JitBlue/Runtime_72926/Runtime_72926.cs [new file with mode: 0644]
src/tests/JIT/Regression/JitBlue/Runtime_72926/Runtime_72926.csproj [new file with mode: 0644]

diff --git a/src/coreclr/jit/lower.cpp b/src/coreclr/jit/lower.cpp
index c04ee61..db81ecb 100644 (file)
@@ -3599,13 +3599,6 @@ void Lowering::LowerRetStruct(GenTreeUnOp* ret)
             else
             {
                 assert(comp->info.compRetNativeType == ret->TypeGet());
-                GenTree* retVal = ret->gtGetOp1();
-                if (retVal->TypeGet() != ret->TypeGet())
-                {
-                    assert(retVal->OperIs(GT_LCL_VAR));
-                    LowerRetSingleRegStructLclVar(ret);
-                }
-                return;
             }
         }
     }
@@ -3626,19 +3619,22 @@ void Lowering::LowerRetStruct(GenTreeUnOp* ret)
 
     switch (retVal->OperGet())
     {
-        case GT_CALL:
-            assert(retVal->TypeIs(genActualType(nativeReturnType))); // Type should be changed during call processing.
-            break;
-
         case GT_CNS_INT:
-            // When we promote LCL_VAR single fields into return
-            // we could have all type of constans here.
+            // When we promote LCL_VAR single fields into return we could have all types of constants here.
             if (varTypeUsesFloatReg(nativeReturnType))
             {
-                // Do not expect `initblock` for SIMD* types,
-                // only 'initobj'.
-                assert(retVal->AsIntCon()->IconValue() == 0);
-                retVal->BashToConst(0.0, TYP_FLOAT);
+                // ZeroObj assertion propagation can create INT zeros for DOUBLE returns.
+                assert((genTypeSize(retVal) == genTypeSize(nativeReturnType)) || retVal->IsIntegralConst(0));
+                int64_t value = retVal->AsIntCon()->IconValue();
+
+                if (nativeReturnType == TYP_FLOAT)
+                {
+                    retVal->BashToConst(*reinterpret_cast<float*>(&value));
+                }
+                else
+                {
+                    retVal->BashToConst(*reinterpret_cast<double*>(&value));
+                }
             }
             break;
 
@@ -3658,26 +3654,6 @@ void Lowering::LowerRetStruct(GenTreeUnOp* ret)
             retVal->ChangeType(nativeReturnType);
             break;
 
-#if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
-#ifdef FEATURE_SIMD
-        case GT_SIMD:
-#endif // FEATURE_SIMD
-#ifdef FEATURE_HW_INTRINSICS
-        case GT_HWINTRINSIC:
-#endif // FEATURE_HW_INTRINSICS
-        {
-            assert(!retVal->TypeIs(TYP_STRUCT));
-            if (varTypeUsesFloatReg(ret) != varTypeUsesFloatReg(retVal))
-            {
-                GenTree* bitcast = comp->gtNewBitCastNode(ret->TypeGet(), retVal);
-                ret->gtOp1       = bitcast;
-                BlockRange().InsertBefore(ret, bitcast);
-                ContainCheckBitCast(bitcast);
-            }
-        }
-        break;
-#endif // FEATURE_SIMD || FEATURE_HW_INTRINSICS
-
         default:
             assert(varTypeIsEnregisterable(retVal));
             if (varTypeUsesFloatReg(ret) != varTypeUsesFloatReg(retVal))
@@ -3721,7 +3697,6 @@ void Lowering::LowerRetSingleRegStructLclVar(GenTreeUnOp* ret)
     if (varDsc->lvDoNotEnregister)
     {
         lclVar->ChangeOper(GT_LCL_FLD);
-        lclVar->AsLclFld()->SetLclOffs(0);
 
         // We are returning as a primitive type and the lcl is of struct type.
         assert(comp->info.compRetNativeType != TYP_STRUCT);
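
As a side note, here is a minimal standalone sketch (plain C++, not JIT code) of what the new GT_CNS_INT path above does with the constant's bit payload. It assumes a little-endian target and uses memcpy where the JIT reinterpret_casts its own int64_t local; the AsFloat/AsDouble helpers are invented for illustration.

#include <cstdint>
#include <cstring>
#include <cassert>

// View an integer constant's bit payload as a float (low 32 bits on a
// little-endian target) or as a double, the way the new code bashes a
// GT_CNS_INT into a floating-point constant.
static float AsFloat(int64_t value)
{
    float f;
    std::memcpy(&f, &value, sizeof(f));  // low 4 bytes on little-endian
    return f;
}

static double AsDouble(int64_t value)
{
    double d;
    std::memcpy(&d, &value, sizeof(d));
    return d;
}

int main()
{
    // An integer zero (e.g. created by ZeroObj assertion propagation)
    // must become positive floating-point zero.
    assert(AsFloat(0) == 0.0f && AsDouble(0) == 0.0);
    // 0x3FF0000000000000 is the IEEE-754 bit pattern of 1.0 as a double.
    assert(AsDouble(0x3FF0000000000000LL) == 1.0);
    return 0;
}
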
diff --git a/src/coreclr/jit/lsrabuild.cpp b/src/coreclr/jit/lsrabuild.cpp
index e656cf8..9f01df4 100644 (file)
@@ -3644,13 +3644,7 @@ int LinearScan::BuildReturn(GenTree* tree)
 #ifdef TARGET_ARM64
         if (varTypeIsSIMD(tree) && !op1->IsMultiRegLclVar())
         {
-            useCandidates = allSIMDRegs();
-            if (op1->OperGet() == GT_LCL_VAR)
-            {
-                assert(op1->TypeGet() != TYP_SIMD32);
-                useCandidates = RBM_DOUBLERET;
-            }
-            BuildUse(op1, useCandidates);
+            BuildUse(op1, RBM_DOUBLERET);
             return 1;
         }
 #endif // TARGET_ARM64
diff --git a/src/tests/JIT/Regression/JitBlue/Runtime_72926/Runtime_72926.cs b/src/tests/JIT/Regression/JitBlue/Runtime_72926/Runtime_72926.cs
new file mode 100644 (file)
index 0000000..99135b9
--- /dev/null
@@ -0,0 +1,86 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+using System;
+using System.Runtime.Intrinsics;
+using System.Runtime.CompilerServices;
+
+public unsafe class Runtime_72926
+{
+    public static int Main()
+    {
+        if (CallForLongAsVector64_Zero() != Vector64<double>.Zero)
+        {
+            return 101;
+        }
+        if (CallForLongAsVector64_AllBitsSet().AsInt64() != Vector64<long>.AllBitsSet)
+        {
+            return 102;
+        }
+        if (CallForDoubleAsVector64_Zero() != Vector64<double>.Zero)
+        {
+            return 103;
+        }
+        if (CallForDoubleAsVector64_AllBitsSet().AsInt64() != Vector64<long>.AllBitsSet)
+        {
+            return 104;
+        }
+        if (CallForStructWithLongAsStructWithDouble().Dbl != 0)
+        {
+            return 105;
+        }
+
+        return 100;
+    }
+
+    [MethodImpl(MethodImplOptions.NoInlining)]
+    private static Vector64<double> CallForLongAsVector64_Zero()
+    {
+        long value = 0;
+        return *(Vector64<double>*)&value;
+    }
+
+    [MethodImpl(MethodImplOptions.NoInlining)]
+    private static Vector64<double> CallForLongAsVector64_AllBitsSet()
+    {
+        long value = -1;
+        return *(Vector64<double>*)&value;
+    }
+
+    [MethodImpl(MethodImplOptions.NoInlining)]
+    private static Vector64<double> CallForDoubleAsVector64_Zero()
+    {
+        double value = 0;
+        return *(Vector64<double>*)&value;
+    }
+
+    [MethodImpl(MethodImplOptions.NoInlining)]
+    private static Vector64<double> CallForDoubleAsVector64_AllBitsSet()
+    {
+        double value = BitConverter.Int64BitsToDouble(-1);
+        return *(Vector64<double>*)&value;
+    }
+
+    [MethodImpl(MethodImplOptions.NoInlining)]
+    private static StructWithDouble CallForStructWithLongAsStructWithDouble()
+    {
+        StructWithLong lng = GetStructWithLong();
+        return *(StructWithDouble*)&lng;
+    }
+
+    [MethodImpl(MethodImplOptions.NoInlining)]
+    private static StructWithLong GetStructWithLong()
+    {
+        return default;
+    }
+
+    struct StructWithDouble
+    {
+        public double Dbl;
+    }
+
+    struct StructWithLong
+    {
+        public long Lng;
+    }
+}
diff --git a/src/tests/JIT/Regression/JitBlue/Runtime_72926/Runtime_72926.csproj b/src/tests/JIT/Regression/JitBlue/Runtime_72926/Runtime_72926.csproj
new file mode 100644 (file)
index 0000000..e66ad78
--- /dev/null
@@ -0,0 +1,18 @@
+<Project Sdk="Microsoft.NET.Sdk">
+  <PropertyGroup>
+    <OutputType>Exe</OutputType>
+    <Optimize>True</Optimize>
+    <AllowUnsafeBlocks>true</AllowUnsafeBlocks>
+    <CLRTestBatchPreCommands><![CDATA[
+$(CLRTestBatchPreCommands)
+set COMPlus_JitNoStructPromotion=1
+]]></CLRTestBatchPreCommands>
+    <BashCLRTestPreCommands><![CDATA[
+$(BashCLRTestPreCommands)
+export COMPlus_JitNoStructPromotion=1
+]]></BashCLRTestPreCommands>
+  </PropertyGroup>
+  <ItemGroup>
+    <Compile Include="$(MSBuildProjectName).cs" />
+  </ItemGroup>
+</Project>
\ No newline at end of file