Delete move to return spill in the implicit tail call in the inlinee with several...
author  Sergey Andreenko <seandree@microsoft.com>
Wed, 15 Nov 2017 18:52:27 +0000 (10:52 -0800)
committer  GitHub <noreply@github.com>
Wed, 15 Nov 2017 18:52:27 +0000 (10:52 -0800)
* add fgNeedReturnSpillTemp
* fix the issue
* add the repro without stress mode
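
The shape that triggers the failure, roughly: an aggressively inlined method with
more than one return point, where each return value comes from a call that ends up
in tail position, so the importer allocates a return spill temp and a move into
that temp lands after the implicit tail call candidate. A minimal illustrative
sketch (names are made up; the committed GitHub_14783.cs below is the actual repro):

    using System.Runtime.CompilerServices;

    class Sketch
    {
        [MethodImpl(MethodImplOptions.NoInlining)]
        static string Worker(string s) => s;

        // Two return points, each returning the result of a call in tail
        // position; inlining this method merges them through a spill temp.
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        static string Inlinee(string s) => s.Length > 3 ? Worker(s + "!") : Worker(s);

        // Inlinee is itself called in a return position here.
        static string Caller(string s) => Inlinee(s);

        static int Main() => Caller("Hello") != null ? 100 : -1;
    }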

src/jit/compiler.h
src/jit/flowgraph.cpp
src/jit/importer.cpp
src/jit/morph.cpp
tests/src/JIT/Regression/JitBlue/GitHub_14783/GitHub_14783.cs [new file with mode: 0644]
tests/src/JIT/Regression/JitBlue/GitHub_14783/GitHub_14783.csproj [new file with mode: 0644]

diff --git a/src/jit/compiler.h b/src/jit/compiler.h
index 6694b1b..65e0bf4 100644 (file)
@@ -2500,7 +2500,8 @@ public:
                          // we will redirect all "ldarg(a) 0" and "starg 0" to this temp.
 
     unsigned lvaInlineeReturnSpillTemp; // The temp to spill the non-VOID return expression
-                                        // in case there are multiple BBJ_RETURN blocks in the inlinee.
+                                        // in case there are multiple BBJ_RETURN blocks in the inlinee
+                                        // or if the inlinee has GC ref locals.
 
 #if FEATURE_FIXED_OUT_ARGS
     unsigned            lvaOutgoingArgSpaceVar;  // dummy TYP_LCLBLK var for fixed outgoing argument space
@@ -4832,6 +4833,7 @@ public:
 private:
     GenTreePtr fgMorphField(GenTreePtr tree, MorphAddrContext* mac);
     bool fgCanFastTailCall(GenTreeCall* call);
+    bool fgCheckStmtAfterTailCall();
     void fgMorphTailCall(GenTreeCall* call);
     void fgMorphRecursiveFastTailCallIntoLoop(BasicBlock* block, GenTreeCall* recursiveTailCall);
     GenTreePtr fgAssignRecursiveCallArgToCallerParam(GenTreePtr       arg,
@@ -5033,6 +5035,8 @@ private:
     bool fgIsSignedModOptimizable(GenTreePtr divisor);
     bool fgIsUnsignedModOptimizable(GenTreePtr divisor);
 
+    bool fgNeedReturnSpillTemp();
+
     /*
     XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
     XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
diff --git a/src/jit/flowgraph.cpp b/src/jit/flowgraph.cpp
index 4f9c23b..b6ae5c0 100644 (file)
@@ -25872,3 +25872,15 @@ void Compiler::fgCompDominatedByExceptionalEntryBlocks()
         }
     }
 }
+
+//------------------------------------------------------------------------
+// fgNeedReturnSpillTemp: Answers whether the inlinee has to spill all of its
+//  returns into a temp.
+//
+// Return Value:
+//    true if the inlinee has to spill return exprs.
+bool Compiler::fgNeedReturnSpillTemp()
+{
+    assert(compIsForInlining());
+    return (lvaInlineeReturnSpillTemp != BAD_VAR_NUM);
+}
diff --git a/src/jit/importer.cpp b/src/jit/importer.cpp
index d45de4b..ec723ef 100644 (file)
@@ -15768,7 +15768,7 @@ bool Compiler::impReturnInstruction(BasicBlock* block, int prefixFlags, OPCODE&
 
             // Below, we are going to set impInlineInfo->retExpr to the tree with the return
             // expression. At this point, retExpr could already be set if there are multiple
-            // return blocks (meaning lvaInlineeReturnSpillTemp != BAD_VAR_NUM) and one of
+            // return blocks (meaning fgNeedReturnSpillTemp() == true) and one of
             // the other blocks already set it. If there is only a single return block,
             // retExpr shouldn't be set. However, this is not true if we reimport a block
             // with a return. In that case, retExpr will be set, then the block will be
@@ -15800,7 +15800,7 @@ bool Compiler::impReturnInstruction(BasicBlock* block, int prefixFlags, OPCODE&
                     }
                 }
 
-                if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM)
+                if (fgNeedReturnSpillTemp())
                 {
                     assert(info.compRetNativeType != TYP_VOID &&
                            (fgMoreThanOneReturnBlock() || impInlineInfo->HasGcRefLocals()));
@@ -15809,7 +15809,7 @@ bool Compiler::impReturnInstruction(BasicBlock* block, int prefixFlags, OPCODE&
                     // If we are inlining a call that returns a struct, where the actual "native" return type is
                     // not a struct (for example, the struct is composed of exactly one int, and the native
                     // return type is thus an int), and the inlinee has multiple return blocks (thus,
-                    // lvaInlineeReturnSpillTemp is != BAD_VAR_NUM, and is the index of a local var that is set
+                    // fgNeedReturnSpillTemp() == true, with lvaInlineeReturnSpillTemp the index of a local var set
                     // to the *native* return type), and at least one of the return blocks is the result of
                     // a call, then we have a problem. The situation is like this (from a failed test case):
                     //
@@ -15922,7 +15922,7 @@ bool Compiler::impReturnInstruction(BasicBlock* block, int prefixFlags, OPCODE&
                     CLANG_FORMAT_COMMENT_ANCHOR;
 #endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
 
-                    if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM)
+                    if (fgNeedReturnSpillTemp())
                     {
                         if (!impInlineInfo->retExpr)
                         {
@@ -15950,7 +15950,7 @@ bool Compiler::impReturnInstruction(BasicBlock* block, int prefixFlags, OPCODE&
                 {
                     assert(!iciCall->HasRetBufArg());
                     assert(retRegCount >= 2);
-                    if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM)
+                    if (fgNeedReturnSpillTemp())
                     {
                         if (!impInlineInfo->retExpr)
                         {
@@ -15970,7 +15970,7 @@ bool Compiler::impReturnInstruction(BasicBlock* block, int prefixFlags, OPCODE&
                     assert(iciCall->HasRetBufArg());
                     GenTreePtr dest = gtCloneExpr(iciCall->gtCallArgs->gtOp.gtOp1);
                     // spill temp only exists if there are multiple return points
-                    if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM)
+                    if (fgNeedReturnSpillTemp())
                     {
                         // if this is the first return we have seen set the retExpr
                         if (!impInlineInfo->retExpr)
@@ -18519,7 +18519,7 @@ unsigned Compiler::impInlineFetchLocal(unsigned lclNum DEBUGARG(const char* reas
             // Since there are gc locals we should have seen them earlier
             // and if there was a return value, set up the spill temp.
             assert(impInlineInfo->HasGcRefLocals());
-            assert((info.compRetNativeType == TYP_VOID) || (lvaInlineeReturnSpillTemp != BAD_VAR_NUM));
+            assert((info.compRetNativeType == TYP_VOID) || fgNeedReturnSpillTemp());
         }
         else
         {
diff --git a/src/jit/morph.cpp b/src/jit/morph.cpp
index cb31f47..c8e4322 100644 (file)
@@ -8477,6 +8477,11 @@ GenTreePtr Compiler::fgMorphCall(GenTreeCall* call)
         }
 #endif // FEATURE_PAL
 
+        if (!fgCheckStmtAfterTailCall())
+        {
+            szFailReason = "Unexpected statements after the tail call";
+        }
+
         if (szFailReason != nullptr)
         {
 #ifdef DEBUG
@@ -8602,11 +8607,11 @@ GenTreePtr Compiler::fgMorphCall(GenTreeCall* call)
         genTreeOps stmtOper = stmtExpr->gtOper;
         if (stmtOper == GT_CALL)
         {
-            noway_assert(stmtExpr == call);
+            assert(stmtExpr == call);
         }
         else
         {
-            noway_assert(stmtOper == GT_RETURN || stmtOper == GT_ASG || stmtOper == GT_COMMA);
+            assert(stmtOper == GT_RETURN || stmtOper == GT_ASG || stmtOper == GT_COMMA);
             GenTreePtr treeWithCall;
             if (stmtOper == GT_RETURN)
             {
@@ -8615,7 +8620,7 @@ GenTreePtr Compiler::fgMorphCall(GenTreeCall* call)
             else if (stmtOper == GT_COMMA)
             {
                 // Second operation must be nop.
-                noway_assert(stmtExpr->gtGetOp2()->IsNothingNode());
+                assert(stmtExpr->gtGetOp2()->IsNothingNode());
                 treeWithCall = stmtExpr->gtGetOp1();
             }
             else
@@ -8626,115 +8631,20 @@ GenTreePtr Compiler::fgMorphCall(GenTreeCall* call)
             // Peel off casts
             while (treeWithCall->gtOper == GT_CAST)
             {
-                noway_assert(!treeWithCall->gtOverflow());
+                assert(!treeWithCall->gtOverflow());
                 treeWithCall = treeWithCall->gtGetOp1();
             }
 
-            noway_assert(treeWithCall == call);
+            assert(treeWithCall == call);
         }
 #endif
-
-        // For void calls, we would have created a GT_CALL in the stmt list.
-        // For non-void calls, we would have created a GT_RETURN(GT_CAST(GT_CALL)).
-        // For calls returning structs, we would have a void call, followed by a void return.
-        // For debuggable code, it would be an assignment of the call to a temp
-        // We want to get rid of any of this extra trees, and just leave
-        // the call.
         GenTreeStmt* nextMorphStmt = fgMorphStmt->gtNextStmt;
-
-#if !defined(FEATURE_CORECLR) && defined(_TARGET_AMD64_)
-        // Legacy Jit64 Compat:
-        // There could be any number of GT_NOPs between tail call and GT_RETURN.
-        // That is tail call pattern could be one of the following:
-        //  1) tail.call, nop*, ret
-        //  2) tail.call, nop*, pop, nop*, ret
-        //  3) var=tail.call, nop*, ret(var)
-        //  4) var=tail.call, nop*, pop, ret
-        //  5) comma(tail.call, nop), nop*, ret
-        //
-        // See impIsTailCallILPattern() for details on tail call IL patterns
-        // that are supported.
-        if (stmtExpr->gtOper != GT_RETURN)
+        // Remove all stmts after the call.
+        while (nextMorphStmt != nullptr)
         {
-            // First delete all GT_NOPs after the call
-            GenTreeStmt* morphStmtToRemove = nullptr;
-            while (nextMorphStmt != nullptr)
-            {
-                GenTreePtr nextStmtExpr = nextMorphStmt->gtStmtExpr;
-                if (!nextStmtExpr->IsNothingNode())
-                {
-                    break;
-                }
-
-                morphStmtToRemove = nextMorphStmt;
-                nextMorphStmt     = morphStmtToRemove->gtNextStmt;
-                fgRemoveStmt(compCurBB, morphStmtToRemove);
-            }
-
-            // Check to see if there is a pop.
-            // Since tail call is honored, we can get rid of the stmt corresponding to pop.
-            if (nextMorphStmt != nullptr && nextMorphStmt->gtStmtExpr->gtOper != GT_RETURN)
-            {
-                // Note that pop opcode may or may not result in a new stmt (for details see
-                // impImportBlockCode()). Hence, it is not possible to assert about the IR
-                // form generated by pop but pop tree must be side-effect free so that we can
-                // delete it safely.
-                GenTreeStmt* popStmt = nextMorphStmt;
-                nextMorphStmt        = nextMorphStmt->gtNextStmt;
-
-                // Side effect flags on a GT_COMMA may be overly pessimistic, so examine
-                // the constituent nodes.
-                GenTreePtr popExpr          = popStmt->gtStmtExpr;
-                bool       isSideEffectFree = (popExpr->gtFlags & GTF_ALL_EFFECT) == 0;
-                if (!isSideEffectFree && (popExpr->OperGet() == GT_COMMA))
-                {
-                    isSideEffectFree = ((popExpr->gtGetOp1()->gtFlags & GTF_ALL_EFFECT) == 0) &&
-                                       ((popExpr->gtGetOp2()->gtFlags & GTF_ALL_EFFECT) == 0);
-                }
-                noway_assert(isSideEffectFree);
-                fgRemoveStmt(compCurBB, popStmt);
-            }
-
-            // Next delete any GT_NOP nodes after pop
-            while (nextMorphStmt != nullptr)
-            {
-                GenTreePtr nextStmtExpr = nextMorphStmt->gtStmtExpr;
-                if (!nextStmtExpr->IsNothingNode())
-                {
-                    break;
-                }
-
-                morphStmtToRemove = nextMorphStmt;
-                nextMorphStmt     = morphStmtToRemove->gtNextStmt;
-                fgRemoveStmt(compCurBB, morphStmtToRemove);
-            }
-        }
-#endif // !FEATURE_CORECLR && _TARGET_AMD64_
-
-        // Delete GT_RETURN  if any
-        if (nextMorphStmt != nullptr)
-        {
-            GenTreePtr retExpr = nextMorphStmt->gtStmtExpr;
-            noway_assert(retExpr->gtOper == GT_RETURN);
-
-            // If var=call, then the next stmt must be a GT_RETURN(TYP_VOID) or GT_RETURN(var).
-            // This can occur if impSpillStackEnsure() has introduced an assignment to a temp.
-            if (stmtExpr->gtOper == GT_ASG && info.compRetType != TYP_VOID)
-            {
-                noway_assert(stmtExpr->gtGetOp1()->OperIsLocal());
-
-                GenTreePtr treeWithLcl = retExpr->gtGetOp1();
-                while (treeWithLcl->gtOper == GT_CAST)
-                {
-                    noway_assert(!treeWithLcl->gtOverflow());
-                    treeWithLcl = treeWithLcl->gtGetOp1();
-                }
-
-                noway_assert(stmtExpr->gtGetOp1()->AsLclVarCommon()->gtLclNum ==
-                             treeWithLcl->AsLclVarCommon()->gtLclNum);
-            }
-
-            fgRemoveStmt(compCurBB, nextMorphStmt);
+            GenTreeStmt* stmtToRemove = nextMorphStmt;
+            nextMorphStmt             = stmtToRemove->gtNextStmt;
+            fgRemoveStmt(compCurBB, stmtToRemove);
         }
 
         fgMorphStmt->gtStmtExpr = call;
@@ -19620,3 +19530,146 @@ bool Compiler::fgMorphCombineSIMDFieldAssignments(BasicBlock* block, GenTreePtr
 }
 
 #endif // FEATURE_SIMD
+
+#if !defined(FEATURE_CORECLR) && defined(_TARGET_AMD64_)
+//------------------------------------------------------------------------
+// skipNopStmts: skip a run of statements whose expressions are nothing nodes.
+//
+// Return Value:
+//    The first stmt whose expression is not a nothing node, or nullptr.
+GenTreeStmt* skipNopStmts(GenTreeStmt* stmt)
+{
+    while ((stmt != nullptr) && stmt->gtStmtExpr->IsNothingNode())
+    {
+        stmt = stmt->gtNextStmt;
+    }
+    return stmt;
+}
+
+#endif // !FEATURE_CORECLR && _TARGET_AMD64_
+
+//------------------------------------------------------------------------
+// fgCheckStmtAfterTailCall: check that the statements after the tail call stmt
+// candidate are in one of the expected forms described below.
+//
+// Return Value:
+//    'true' if stmts are in the expected form, else 'false'.
+//
+bool Compiler::fgCheckStmtAfterTailCall()
+{
+
+    // For void calls, we would have created a GT_CALL in the stmt list.
+    // For non-void calls, we would have created a GT_RETURN(GT_CAST(GT_CALL)).
+    // For calls returning structs, we would have a void call, followed by a void return.
+    // For debuggable code, it would be an assignment of the call to a temp.
+    // We want to get rid of any of these extra trees and just leave
+    // the call.
+    GenTreeStmt* callStmt = fgMorphStmt;
+
+    GenTreeStmt* nextMorphStmt = callStmt->gtNextStmt;
+
+#if !defined(FEATURE_CORECLR) && defined(_TARGET_AMD64_)
+    // Legacy Jit64 Compat:
+    // There could be any number of GT_NOPs between tail call and GT_RETURN.
+    // That is tail call pattern could be one of the following:
+    //  1) tail.call, nop*, ret
+    //  2) tail.call, nop*, pop, nop*, ret
+    //  3) var=tail.call, nop*, ret(var)
+    //  4) var=tail.call, nop*, pop, ret
+    //  5) comma(tail.call, nop), nop*, ret
+    //
+    // See impIsTailCallILPattern() for details on tail call IL patterns
+    // that are supported.
+    if (callStmt->gtStmtExpr->gtOper != GT_RETURN)
+    {
+        // First skip all GT_NOPs after the call
+        nextMorphStmt = skipNopStmts(nextMorphStmt);
+
+        // Check to see if there is a pop.
+        // Since tail call is honored, we can get rid of the stmt corresponding to pop.
+        if (nextMorphStmt != nullptr && nextMorphStmt->gtStmtExpr->gtOper != GT_RETURN)
+        {
+            // Note that pop opcode may or may not result in a new stmt (for details see
+            // impImportBlockCode()). Hence, it is not possible to assert about the IR
+            // form generated by pop but pop tree must be side-effect free so that we can
+            // delete it safely.
+            GenTreeStmt* popStmt = nextMorphStmt;
+
+            // Side effect flags on a GT_COMMA may be overly pessimistic, so examine
+            // the constituent nodes.
+            GenTreePtr popExpr          = popStmt->gtStmtExpr;
+            bool       isSideEffectFree = (popExpr->gtFlags & GTF_ALL_EFFECT) == 0;
+            if (!isSideEffectFree && (popExpr->OperGet() == GT_COMMA))
+            {
+                isSideEffectFree = ((popExpr->gtGetOp1()->gtFlags & GTF_ALL_EFFECT) == 0) &&
+                                   ((popExpr->gtGetOp2()->gtFlags & GTF_ALL_EFFECT) == 0);
+            }
+            noway_assert(isSideEffectFree);
+
+            nextMorphStmt = popStmt->gtNextStmt;
+        }
+
+        // Next skip any GT_NOP nodes after the pop
+        nextMorphStmt = skipNopStmts(nextMorphStmt);
+    }
+#endif // !FEATURE_CORECLR && _TARGET_AMD64_
+
+    // Check that the remaining stmts in the block are in one of the following patterns:
+    //  1) ret(void)
+    //  2) ret(cast*(callResultLclVar))
+    //  3) lclVar = callResultLclVar, with the actual ret(lclVar) in another block
+    if (nextMorphStmt != nullptr)
+    {
+        GenTree* callExpr = callStmt->gtStmtExpr;
+        if (callExpr->gtOper != GT_ASG)
+        {
+            // The next stmt can be GT_RETURN(TYP_VOID) or GT_RETURN(lclVar),
+            // where lclVar was the return buffer in the call for structs or SIMD.
+            GenTreeStmt* retStmt = nextMorphStmt;
+            GenTreePtr   retExpr = retStmt->gtStmtExpr;
+            noway_assert(retExpr->gtOper == GT_RETURN);
+
+            nextMorphStmt = retStmt->gtNextStmt;
+        }
+        else
+        {
+            noway_assert(callExpr->gtGetOp1()->OperIsLocal());
+            unsigned callResultLclNumber = callExpr->gtGetOp1()->AsLclVarCommon()->gtLclNum;
+
+#if FEATURE_TAILCALL_OPT_SHARED_RETURN
+
+            // We can have a move from the call result to the lvaInlineeReturnSpillTemp.
+            // However, we can't check that this assignment was created there.
+            if (nextMorphStmt->gtStmtExpr->gtOper == GT_ASG)
+            {
+                GenTreeStmt* moveStmt = nextMorphStmt;
+                GenTree*     moveExpr = nextMorphStmt->gtStmtExpr;
+                noway_assert(moveExpr->gtGetOp1()->OperIsLocal() && moveExpr->gtGetOp2()->OperIsLocal());
+
+                unsigned srcLclNum = moveExpr->gtGetOp2()->AsLclVarCommon()->gtLclNum;
+                noway_assert(srcLclNum == callResultLclNumber);
+                unsigned dstLclNum  = moveExpr->gtGetOp1()->AsLclVarCommon()->gtLclNum;
+                callResultLclNumber = dstLclNum;
+
+                nextMorphStmt = moveStmt->gtNextStmt;
+            }
+            if (nextMorphStmt != nullptr)
+#endif
+            {
+                GenTreeStmt* retStmt = nextMorphStmt;
+                GenTreePtr   retExpr = nextMorphStmt->gtStmtExpr;
+                noway_assert(retExpr->gtOper == GT_RETURN);
+
+                GenTreePtr treeWithLcl = retExpr->gtGetOp1();
+                while (treeWithLcl->gtOper == GT_CAST)
+                {
+                    noway_assert(!treeWithLcl->gtOverflow());
+                    treeWithLcl = treeWithLcl->gtGetOp1();
+                }
+
+                noway_assert(callResultLclNumber == treeWithLcl->AsLclVarCommon()->gtLclNum);
+
+                nextMorphStmt = retStmt->gtNextStmt;
+            }
+        }
+    }
+    return nextMorphStmt == nullptr;
+}
diff --git a/tests/src/JIT/Regression/JitBlue/GitHub_14783/GitHub_14783.cs b/tests/src/JIT/Regression/JitBlue/GitHub_14783/GitHub_14783.cs
new file mode 100644 (file)
index 0000000..2f38262
--- /dev/null
@@ -0,0 +1,78 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+
+using System;
+using System.Runtime.CompilerServices;
+
+// When the implicit tail call optimization was done inside an inlinee with
+// several returns, the compiler created a return spill temp to merge all
+// returns into one return expression, and this spill was not expected during
+// the tail call transformation.
+
+class GitHub_14783
+{
+    [MethodImpl(MethodImplOptions.NoInlining)]
+    public static string X(string s)
+    {
+        return s;
+    }
+
+    public static string X1(string s, string q)
+    {
+        for (int k = 0; k < 10; k++)
+        {
+            s = X(s);
+        }
+
+        return s;
+    }
+
+    public static string D(string x)
+    {
+        string a = x;
+        string b = x;
+        return X1(a, b);
+    }
+
+    [MethodImpl(MethodImplOptions.AggressiveInlining)]
+    public static string C(string x)
+    {
+        string s = x;
+
+        if (s.Length > 3)
+        {
+            return D(s);
+        }
+        else
+        {
+            return X(s);
+        }
+    }
+
+
+    [MethodImpl(MethodImplOptions.AggressiveInlining)]
+    public static string B(string x)
+    {
+        return C(x);
+    }
+
+    public static string A(string x)
+    {
+        string s = x;
+
+        for (int k = 0; k < 10; k++)
+        {
+            s = X(s);
+        }
+
+        return B(x);
+    }
+
+    public static int Main()
+    {
+        string v = A("Hello");
+        return v.Length + 95;
+    }
+}
diff --git a/tests/src/JIT/Regression/JitBlue/GitHub_14783/GitHub_14783.csproj b/tests/src/JIT/Regression/JitBlue/GitHub_14783/GitHub_14783.csproj
new file mode 100644 (file)
index 0000000..25f7cd7
--- /dev/null
@@ -0,0 +1,38 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project ToolsVersion="12.0" DefaultTargets="Build" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+  <Import Project="$([MSBuild]::GetDirectoryNameOfFileAbove($(MSBuildThisFileDirectory), dir.props))\dir.props" />
+  <PropertyGroup>
+    <Configuration Condition=" '$(Configuration)' == '' ">Debug</Configuration>
+    <Platform Condition=" '$(Platform)' == '' ">AnyCPU</Platform>
+    <AssemblyName>$(MSBuildProjectName)</AssemblyName>
+    <SchemaVersion>2.0</SchemaVersion>
+    <ProjectGuid>{95DFC527-4DC1-495E-97D7-E94EE1F7140D}</ProjectGuid>
+    <OutputType>Exe</OutputType>
+    <AppDesignerFolder>Properties</AppDesignerFolder>
+    <FileAlignment>512</FileAlignment>
+    <ProjectTypeGuids>{786C830F-07A1-408B-BD7F-6EE04809D6DB};{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}</ProjectTypeGuids>
+    <ReferencePath>$(ProgramFiles)\Common Files\microsoft shared\VSTT\11.0\UITestExtensionPackages</ReferencePath>
+    <SolutionDir Condition="$(SolutionDir) == '' Or $(SolutionDir) == '*Undefined*'">..\..\</SolutionDir>
+    <NuGetPackageImportStamp>7a9bfb7d</NuGetPackageImportStamp>
+  </PropertyGroup>
+  <!-- Default configurations to help VS understand the configurations -->
+  <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Debug|AnyCPU' "></PropertyGroup>
+  <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Release|AnyCPU' "></PropertyGroup>
+  <ItemGroup>
+    <CodeAnalysisDependentAssemblyPaths Condition=" '$(VS100COMNTOOLS)' != '' " Include="$(VS100COMNTOOLS)..\IDE\PrivateAssemblies">
+      <Visible>False</Visible>
+    </CodeAnalysisDependentAssemblyPaths>
+  </ItemGroup>
+  <PropertyGroup>
+    <DebugType></DebugType>
+    <Optimize>True</Optimize>
+  </PropertyGroup>
+  <ItemGroup>
+    <Compile Include="GitHub_14783.cs" />
+  </ItemGroup>
+  <ItemGroup>
+    <Service Include="{82A7F48D-3B50-4B1E-B82E-3ADA8210C358}" />
+  </ItemGroup>
+  <Import Project="$([MSBuild]::GetDirectoryNameOfFileAbove($(MSBuildThisFileDirectory), dir.targets))\dir.targets" />
+  <PropertyGroup Condition=" '$(MsBuildProjectDirOverride)' != '' "></PropertyGroup>
+</Project>