Move int/long compare tweak to LowerCompare
authorMike Danes <onemihaid@hotmail.com>
Sun, 1 Jan 2017 11:31:31 +0000 (13:31 +0200)
committerMike Danes <onemihaid@hotmail.com>
Tue, 17 Jan 2017 18:26:28 +0000 (20:26 +0200)
Normally we should not see int/long compares, but the JIT does accidentally produce them in some cases.

Doing this in lowering requires adding a cast node, but since such compares are rare the cost of the extra node is not a concern.

An FX diff shows a 151-byte improvement with no regressions. All differences come from narrowing int/long compares to int/int when the long operand is a constant.

Such narrowing usually avoids the need for a REX prefix and, in some rare cases, also allows a memory operand to be contained.
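
For illustration (this example is not part of the diff): comparing an int32 value in ecx against a long constant 10 previously required widening followed by a 64-bit compare, while the narrowed form is a single 32-bit compare:

    movsxd rax, ecx   ; old: widen the int32 operand
    cmp    rax, 10    ; old: 64-bit compare, needs a REX.W prefix

    cmp    ecx, 10    ; new: int/int compare, no REX prefix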

Commit migrated from https://github.com/dotnet/coreclr/commit/b10faae7579c2f22c34ee1fe7c1995ec674828ed

src/coreclr/src/jit/codegenxarch.cpp
src/coreclr/src/jit/lower.cpp

index 4c632ff..37aa444 100644 (file)
@@ -6180,24 +6180,8 @@ void CodeGen::genCompareInt(GenTreePtr treeNode)
             //
             bool op1Is64Bit = (genTypeSize(op1Type) == 8);
             bool op2Is64Bit = (genTypeSize(op2Type) == 8);
-            if (op1Is64Bit)
-            {
-                cmpType = TYP_LONG;
-                if (!(tree->gtFlags & GTF_UNSIGNED) && !op2Is64Bit)
-                {
-                    assert(op2->gtRegNum != REG_NA);
-                    inst_RV_RV(INS_movsxd, op2->gtRegNum, op2->gtRegNum, op2Type);
-                }
-            }
-            else if (op2Is64Bit)
-            {
-                cmpType = TYP_LONG;
-                if (!(tree->gtFlags & GTF_UNSIGNED) && !op1Is64Bit)
-                {
-                    assert(op1->gtRegNum != REG_NA);
-                    inst_RV_RV(INS_movsxd, op1->gtRegNum, op1->gtRegNum, op1Type);
-                }
-            }
+
+            assert(op1Is64Bit == op2Is64Bit);
         }
 #endif // _TARGET_AMD64_
     }
index edab7ba..2ce3e24 100644 (file)
@@ -2002,6 +2002,18 @@ bool genTypeValueFitsIn(ssize_t value, var_types type)
             return FitsIn<UINT16>(value);
         case TYP_SHORT:
             return FitsIn<INT16>(value);
+        case TYP_UINT:
+            return FitsIn<UINT32>(value);
+        case TYP_INT:
+            return FitsIn<INT32>(value);
+        case TYP_ULONG:
+            return FitsIn<UINT64>(value);
+        case TYP_LONG:
+            return FitsIn<INT64>(value);
+        case TYP_REF:
+        case TYP_BYREF:
+        case TYP_ARRAY:
+            return FitsIn<UINT_PTR>(value);
         default:
             unreached();
     }
@@ -2133,6 +2145,49 @@ void Lowering::LowerCompare(GenTree* cmp)
             cmp->gtFlags |= GTF_UNSIGNED;
         }
     }
+    else
+    {
+#ifdef _TARGET_AMD64_
+        bool op1Is64Bit = (genTypeSize(cmp->gtGetOp1()->TypeGet()) == 8);
+        bool op2Is64Bit = (genTypeSize(cmp->gtGetOp2()->TypeGet()) == 8);
+
+        if (op1Is64Bit != op2Is64Bit)
+        {
+            //
+            // Normally this should not happen. IL allows comparing int32 to native int, but the importer
+            // automatically inserts a cast from int32 to long on 64-bit architectures. However, the JIT
+            // does accidentally generate int/long comparisons internally:
+            //   - loop cloning compares int32 index limits against long constants
+            //   - switch lowering compares a 64-bit switch value against an int32 constant
+            //
+            // TODO-Cleanup: Once the above issues are fixed, the code below can be replaced with an
+            // assert or at least simplified. The special casing of constants below is only needed to
+            // avoid worse code generation for switches and loop cloning.
+            //
+
+            GenTree*& longOp    = op1Is64Bit ? cmp->gtOp.gtOp1 : cmp->gtOp.gtOp2;
+            GenTree*& smallerOp = op2Is64Bit ? cmp->gtOp.gtOp1 : cmp->gtOp.gtOp2;
+
+            assert(genTypeSize(smallerOp->TypeGet()) < 8);
+
+            if (longOp->IsCnsIntOrI() && genTypeValueFitsIn(longOp->AsIntCon()->IconValue(), smallerOp->TypeGet()))
+            {
+                longOp->gtType = smallerOp->TypeGet();
+            }
+            else if (smallerOp->IsCnsIntOrI())
+            {
+                smallerOp->gtType = TYP_LONG;
+            }
+            else
+            {
+                GenTree* cast = comp->gtNewCastNode(TYP_LONG, smallerOp, TYP_LONG);
+                smallerOp     = cast;
+                BlockRange().InsertAfter(cast->gtGetOp1(), cast);
+            }
+        }
+#endif // _TARGET_AMD64_
+    }
+
 #endif // _TARGET_XARCH_
 
 #ifndef _TARGET_64BIT_
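
As an aside, the narrowing check above boils down to asking whether the long constant is representable in the smaller operand's type. Below is a minimal standalone sketch of that check (illustrative only; ValueFitsIn is a hypothetical stand-in for the FitsIn<> checks that genTypeValueFitsIn performs, with var_types replaced by plain C++ types):

    #include <cstdint>
    #include <cstdio>
    #include <limits>

    // Hypothetical stand-in for the JIT's value-fits-in-type check: can a
    // 64-bit constant be represented in the smaller type T?
    template <typename T>
    bool ValueFitsIn(int64_t value)
    {
        static_assert(sizeof(T) < sizeof(int64_t), "intended for types smaller than 64 bits");
        return value >= static_cast<int64_t>(std::numeric_limits<T>::min()) &&
               value <= static_cast<int64_t>(std::numeric_limits<T>::max());
    }

    int main()
    {
        // 1000 fits in int32: a long/int compare against it can be narrowed
        // to int/int, dropping the REX prefix from the generated cmp.
        printf("%d\n", ValueFitsIn<int32_t>(INT64_C(1000)));        // prints 1
        // 0x100000000 does not fit: the int32 operand must instead be
        // widened with a cast to long, as in the last branch above.
        printf("%d\n", ValueFitsIn<int32_t>(INT64_C(0x100000000))); // prints 0
    }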