Merge pull request #11917 from JosephTremoulet/RationalCall
author Joseph Tremoulet <JCTremoulet@gmail.com>
Fri, 26 May 2017 05:41:38 +0000 (01:41 -0400)
committer GitHub <noreply@github.com>
Fri, 26 May 2017 05:41:38 +0000 (01:41 -0400)
Clear GTF_CALL from non-calls in rationalize

20 files changed:
netci.groovy
src/jit/codegenarm.cpp
src/jit/codegenxarch.cpp
src/jit/emitxarch.cpp
src/jit/lower.cpp
src/jit/lower.h
src/jit/lsraxarch.cpp
src/jit/simdcodegenxarch.cpp
src/mscorlib/src/System/Diagnostics/Eventing/EventPipe.cs
src/mscorlib/src/System/Diagnostics/Eventing/EventPipeEventProvider.cs
src/mscorlib/src/System/Diagnostics/Eventing/EventSource_CoreCLR.cs
src/vm/eventpipe.cpp
src/vm/eventpipe.h
src/vm/eventpipebuffer.cpp
src/vm/eventpipebuffer.h
src/vm/eventpipebuffermanager.cpp
src/vm/eventpipebuffermanager.h
src/vm/eventpipeconfiguration.cpp
src/vm/eventpipeeventinstance.cpp
src/vm/eventpipeeventinstance.h

netci.groovy
index 038df2f..b1d24b8 100755 (executable)
@@ -1504,9 +1504,7 @@ def static calculateBuildCommands(def newJob, def scenario, def branch, def isPR
                         }
 
                         if (!isBuildOnly) {
-                            if (architecture == 'x64' || !isPR) {
-                                Utilities.addXUnitDotNETResults(newJob, 'bin/**/TestRun*.xml')
-                            }
+                            Utilities.addXUnitDotNETResults(newJob, 'bin/**/TestRun*.xml', true)
                             setTestJobTimeOut(newJob, scenario)
                         }
                     }
src/jit/codegenarm.cpp
index fa9dcc2..124e994 100644 (file)
@@ -1597,7 +1597,8 @@ void CodeGen::genSetRegToCond(regNumber dstReg, GenTreePtr tree)
 {
     // Emit code like this:
     //   ...
-    //   bgt True
+    //   beq True
+    //   bvs True    ; this second branch is typically absent
     //   movs rD, #0
     //   b Next
     // True:
@@ -1605,11 +1606,17 @@ void CodeGen::genSetRegToCond(regNumber dstReg, GenTreePtr tree)
     // Next:
     //   ...
 
-    CompareKind  compareKind = ((tree->gtFlags & GTF_UNSIGNED) != 0) ? CK_UNSIGNED : CK_SIGNED;
-    emitJumpKind jmpKind     = genJumpKindForOper(tree->gtOper, compareKind);
+    emitJumpKind jumpKind[2];
+    bool         branchToTrueLabel[2];
+    genJumpKindsForTree(tree, jumpKind, branchToTrueLabel);
 
     BasicBlock* labelTrue = genCreateTempLabel();
-    getEmitter()->emitIns_J(emitter::emitJumpKindToIns(jmpKind), labelTrue);
+    getEmitter()->emitIns_J(emitter::emitJumpKindToIns(jumpKind[0]), labelTrue);
+
+    if (jumpKind[1] != EJ_NONE)
+    {
+        getEmitter()->emitIns_J(emitter::emitJumpKindToIns(jumpKind[1]), labelTrue);
+    }
 
     getEmitter()->emitIns_R_I(INS_mov, emitActualTypeSize(tree->gtType), dstReg, 0);
 
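Why two branches: on ARM, a floating-point compare (FCMP) sets the V flag
when either operand is NaN, so a relop with "unordered is true" semantics
needs a second conditional branch to the same label. A minimal sketch of
the selection the new genJumpKindsForTree call performs (illustrative
table, not the JIT's actual implementation):

    // Up to two conditional branches targeting the "true" label.
    struct CondBranches
    {
        const char* first;  // primary condition
        const char* second; // nullptr when one branch suffices
    };

    CondBranches branchesForFloatEq(bool unorderedIsTrue)
    {
        if (unorderedIsTrue)
        {
            return { "beq", "bvs" }; // equal, or unordered (V set by FCMP)
        }
        return { "beq", nullptr };   // the "typically absent" second branch
    }
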
src/jit/codegenxarch.cpp
index d039a8b..6bb1242 100644 (file)
@@ -714,12 +714,6 @@ void CodeGen::genCodeForDivMod(GenTreeOp* treeNode)
     genConsumeOperands(treeNode->AsOp());
     if (varTypeIsFloating(targetType))
     {
-        // Check that divisor is a valid operand.
-        // Note that a reg optional operand is a treated as a memory op
-        // if no register is allocated to it.
-        assert(divisor->isUsedFromReg() || divisor->isMemoryOp() || divisor->IsCnsFltOrDbl() ||
-               divisor->IsRegOptional());
-
         // Floating point div/rem operation
         assert(oper == GT_DIV || oper == GT_MOD);
 
@@ -829,7 +823,8 @@ void CodeGen::genCodeForBinary(GenTree* treeNode)
     if (!op1->isUsedFromReg())
     {
         assert(treeNode->OperIsCommutative());
-        assert(op1->isMemoryOp() || op1->IsCnsNonZeroFltOrDbl() || op1->IsIntCnsFitsInI32() || op1->IsRegOptional());
+        assert(op1->isMemoryOp() || op1->IsLocal() || op1->IsCnsNonZeroFltOrDbl() || op1->IsIntCnsFitsInI32() ||
+               op1->IsRegOptional());
 
         op1 = treeNode->gtGetOp2();
         op2 = treeNode->gtGetOp1();
src/jit/emitxarch.cpp
index 9f43202..f6e5690 100644 (file)
@@ -2878,12 +2878,12 @@ regNumber emitter::emitInsBinary(instruction ins, emitAttr attr, GenTree* dst, G
     GenTreeLclVar* lclVar = nullptr;
     if (src->isLclVarUsedFromMemory())
     {
-        assert(src->IsRegOptional());
+        assert(src->IsRegOptional() || !emitComp->lvaTable[src->gtLclVar.gtLclNum].lvIsRegCandidate());
         lclVar = src->AsLclVar();
     }
     if (dst->isLclVarUsedFromMemory())
     {
-        assert(dst->IsRegOptional());
+        assert(dst->IsRegOptional() || !emitComp->lvaTable[dst->gtLclVar.gtLclNum].lvIsRegCandidate());
         lclVar = dst->AsLclVar();
     }
 
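The relaxed asserts encode an invariant rather than new behavior: a local
read straight from memory is legitimate either because LSRA picked the
memory form of a reg-optional use, or because the local was never a
register candidate to begin with (an untracked local, for instance). As a
sketch, with names from the JIT sources:

    // usedFromMemory(lcl)  ==>  lcl->IsRegOptional()
    //                           || !lvaTable[lcl->gtLclNum].lvIsRegCandidate()
    //
    // A non-candidate local always lives on the frame, so a memory access
    // to it needs no reg-optional marking from the register allocator.
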
src/jit/lower.cpp
index f93a398..f98d15f 100644 (file)
@@ -104,6 +104,26 @@ bool Lowering::IsSafeToContainMem(GenTree* parentNode, GenTree* childNode)
 }
 
 //------------------------------------------------------------------------
+// IsContainableMemoryOp: Checks whether this is a memory op that can be contained.
+//
+// Arguments:
+//    node - the node of interest.
+//
+// Notes:
+//    This differs from the isMemoryOp() method on GenTree because it checks for
+//    the case of an untracked local. Note that this won't include locals that
+//    for some reason do not become register candidates, nor those that get
+//    spilled.
+//
+// Return value:
+//    True if this will definitely be a memory reference that could be contained.
+//
+bool Lowering::IsContainableMemoryOp(GenTree* node)
+{
+    return node->isMemoryOp() || (node->IsLocal() && !comp->lvaTable[node->AsLclVar()->gtLclNum].lvTracked);
+}
+
+//------------------------------------------------------------------------
 
 // This is the main entry point for Lowering.
 GenTree* Lowering::LowerNode(GenTree* node)
@@ -2427,7 +2447,7 @@ void Lowering::LowerCompare(GenTree* cmp)
         GenTreeIntCon* op2      = cmp->gtGetOp2()->AsIntCon();
         ssize_t        op2Value = op2->IconValue();
 
-        if (op1->isMemoryOp() && varTypeIsSmall(op1Type) && genTypeCanRepresentValue(op1Type, op2Value))
+        if (IsContainableMemoryOp(op1) && varTypeIsSmall(op1Type) && genTypeCanRepresentValue(op1Type, op2Value))
         {
             //
             // If op1's type is small then try to narrow op2 so it has the same type as op1.
@@ -2457,7 +2477,7 @@ void Lowering::LowerCompare(GenTree* cmp)
                 // the result of bool returning calls.
                 //
 
-                if (castOp->OperIs(GT_CALL, GT_LCL_VAR) || castOp->OperIsLogical() || castOp->isMemoryOp())
+                if (castOp->OperIs(GT_CALL, GT_LCL_VAR) || castOp->OperIsLogical() || IsContainableMemoryOp(castOp))
                 {
                     assert(!castOp->gtOverflowEx()); // Must not be an overflow checking operation
 
@@ -2502,7 +2522,7 @@ void Lowering::LowerCompare(GenTree* cmp)
                 cmp->gtOp.gtOp1 = andOp1;
                 cmp->gtOp.gtOp2 = andOp2;
 
-                if (andOp1->isMemoryOp() && andOp2->IsIntegralConst())
+                if (IsContainableMemoryOp(andOp1) && andOp2->IsIntegralConst())
                 {
                     //
                     // For "test" we only care about the bits that are set in the second operand (mask).
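Every containment site in Lowering now funnels through this predicate:
code that previously asked op->isMemoryOp() also admits untracked locals,
which are guaranteed to stay on the frame. The pattern repeated throughout
the lsraxarch.cpp hunks below, in sketch form (names from the diff):

    if (IsContainableMemoryOp(op2) && (op2->TypeGet() == tree->TypeGet()))
    {
        // Fold the memory access into the consuming instruction
        // (e.g. add reg, [mem]) instead of loading it into a register.
        MakeSrcContained(tree, op2);
    }
    else if (IsContainableMemoryOp(op1) && IsSafeToContainMem(tree, op1))
    {
        // Commutative operators may swap operands so the memory operand
        // lands in the position the encoding can actually contain.
        MakeSrcContained(tree, op1);
    }
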
src/jit/lower.h
index 4b78ee5..d9bb357 100644 (file)
@@ -278,6 +278,9 @@ private:
     //  for example small enough and non-relocatable
     bool IsContainableImmed(GenTree* parentNode, GenTree* childNode);
 
+    // Return true if 'node' is a containable memory op.
+    bool IsContainableMemoryOp(GenTree* node);
+
     // Makes 'childNode' contained in the 'parentNode'
     void MakeSrcContained(GenTreePtr parentNode, GenTreePtr childNode);
 
src/jit/lsraxarch.cpp
index e89e534..5bc0c9a 100644 (file)
@@ -400,12 +400,12 @@ void Lowering::TreeNodeInfoInit(GenTree* tree)
                 info->srcCount = 2;
                 info->dstCount = 1;
 
-                if (op2->isMemoryOp() || op2->IsCnsNonZeroFltOrDbl())
+                if (IsContainableMemoryOp(op2) || op2->IsCnsNonZeroFltOrDbl())
                 {
                     MakeSrcContained(tree, op2);
                 }
                 else if (tree->OperIsCommutative() &&
-                         (op1->IsCnsNonZeroFltOrDbl() || (op1->isMemoryOp() && IsSafeToContainMem(tree, op1))))
+                         (op1->IsCnsNonZeroFltOrDbl() || (IsContainableMemoryOp(op1) && IsSafeToContainMem(tree, op1))))
                 {
                     // Though we have GT_ADD(op1=memOp, op2=non-memOp), we try to reorder the operands
                     // as long as it is safe so that the following efficient code sequence is generated:
@@ -629,7 +629,7 @@ void Lowering::TreeNodeInfoInit(GenTree* tree)
             {
                 other = node->gtIndex;
             }
-            else if (node->gtIndex->isMemoryOp())
+            else if (IsContainableMemoryOp(node->gtIndex))
             {
                 other = node->gtIndex;
             }
@@ -640,7 +640,7 @@ void Lowering::TreeNodeInfoInit(GenTree* tree)
 
             if (node->gtIndex->TypeGet() == node->gtArrLen->TypeGet())
             {
-                if (other->isMemoryOp())
+                if (IsContainableMemoryOp(other))
                 {
                     MakeSrcContained(tree, other);
                 }
@@ -845,7 +845,7 @@ void Lowering::TreeNodeInfoInit(GenTree* tree)
                 delayUseSrc = op1;
             }
             else if ((op2 != nullptr) &&
-                     (!tree->OperIsCommutative() || (op2->isMemoryOp() && (op2->gtLsraInfo.srcCount == 0))))
+                     (!tree->OperIsCommutative() || (IsContainableMemoryOp(op2) && (op2->gtLsraInfo.srcCount == 0))))
             {
                 delayUseSrc = op2;
             }
@@ -2110,7 +2110,7 @@ void Lowering::TreeNodeInfoInitLogicalOp(GenTree* tree)
         binOpInRMW = IsBinOpInRMWStoreInd(tree);
         if (!binOpInRMW)
         {
-            if (op2->isMemoryOp() && tree->TypeGet() == op2->TypeGet())
+            if (IsContainableMemoryOp(op2) && tree->TypeGet() == op2->TypeGet())
             {
                 directlyEncodable = true;
                 operand           = op2;
@@ -2118,7 +2118,7 @@ void Lowering::TreeNodeInfoInitLogicalOp(GenTree* tree)
             else if (tree->OperIsCommutative())
             {
                 if (IsContainableImmed(tree, op1) ||
-                    (op1->isMemoryOp() && tree->TypeGet() == op1->TypeGet() && IsSafeToContainMem(tree, op1)))
+                    (IsContainableMemoryOp(op1) && tree->TypeGet() == op1->TypeGet() && IsSafeToContainMem(tree, op1)))
                 {
                     // If it is safe, we can reverse the order of operands of commutative operations for efficient
                     // codegen
@@ -2176,7 +2176,7 @@ void Lowering::TreeNodeInfoInitModDiv(GenTree* tree)
                 // everything is made explicit by adding casts.
                 assert(op1->TypeGet() == op2->TypeGet());
 
-                if (op2->isMemoryOp() || op2->IsCnsNonZeroFltOrDbl())
+                if (IsContainableMemoryOp(op2) || op2->IsCnsNonZeroFltOrDbl())
                 {
                     MakeSrcContained(tree, op2);
                 }
@@ -2241,7 +2241,7 @@ void Lowering::TreeNodeInfoInitModDiv(GenTree* tree)
     }
 
     // divisor can be an r/m, but the memory indirection must be of the same size as the divide
-    if (op2->isMemoryOp() && (op2->TypeGet() == tree->TypeGet()))
+    if (IsContainableMemoryOp(op2) && (op2->TypeGet() == tree->TypeGet()))
     {
         MakeSrcContained(tree, op2);
     }
@@ -2280,7 +2280,7 @@ void Lowering::TreeNodeInfoInitIntrinsic(GenTree* tree)
     switch (tree->gtIntrinsic.gtIntrinsicId)
     {
         case CORINFO_INTRINSIC_Sqrt:
-            if (op1->isMemoryOp() || op1->IsCnsNonZeroFltOrDbl())
+            if (IsContainableMemoryOp(op1) || op1->IsCnsNonZeroFltOrDbl())
             {
                 MakeSrcContained(tree, op1);
             }
@@ -2581,7 +2581,7 @@ void Lowering::TreeNodeInfoInitSIMD(GenTree* tree)
                 info->srcCount = 1;
             }
 
-            if (op1->isMemoryOp())
+            if (IsContainableMemoryOp(op1))
             {
                 MakeSrcContained(tree, op1);
 
@@ -2805,7 +2805,7 @@ void Lowering::TreeNodeInfoInitCast(GenTree* tree)
         // U8 -> R8 conversion requires that the operand be in a register.
         if (castOpType != TYP_ULONG)
         {
-            if (castOp->isMemoryOp() || castOp->IsCnsNonZeroFltOrDbl())
+            if (IsContainableMemoryOp(castOp) || castOp->IsCnsNonZeroFltOrDbl())
             {
                 MakeSrcContained(tree, castOp);
             }
@@ -3095,7 +3095,7 @@ void Lowering::TreeNodeInfoInitCmp(GenTreePtr tree)
         {
             MakeSrcContained(tree, otherOp);
         }
-        else if (otherOp->isMemoryOp() && ((otherOp == op2) || IsSafeToContainMem(tree, otherOp)))
+        else if (IsContainableMemoryOp(otherOp) && ((otherOp == op2) || IsSafeToContainMem(tree, otherOp)))
         {
             MakeSrcContained(tree, otherOp);
         }
@@ -3115,10 +3115,10 @@ void Lowering::TreeNodeInfoInitCmp(GenTreePtr tree)
     if (CheckImmedAndMakeContained(tree, op2))
     {
         // If the types are the same, or if the constant is of the correct size,
-        // we can treat the isMemoryOp as contained.
+        // we can treat the MemoryOp as contained.
         if (op1Type == op2Type)
         {
-            if (op1->isMemoryOp())
+            if (IsContainableMemoryOp(op1))
             {
                 MakeSrcContained(tree, op1);
             }
@@ -3167,11 +3167,11 @@ void Lowering::TreeNodeInfoInitCmp(GenTreePtr tree)
         // Note that TEST does not have a r,rm encoding like CMP has but we can still
         // contain the second operand because the emitter maps both r,rm and rm,r to
         // the same instruction code. This avoids the need to special case TEST here.
-        if (op2->isMemoryOp())
+        if (IsContainableMemoryOp(op2))
         {
             MakeSrcContained(tree, op2);
         }
-        else if (op1->isMemoryOp() && IsSafeToContainMem(tree, op1))
+        else if (IsContainableMemoryOp(op1) && IsSafeToContainMem(tree, op1))
         {
             MakeSrcContained(tree, op1);
         }
@@ -3260,7 +3260,7 @@ bool Lowering::TreeNodeInfoInitIfRMWMemOp(GenTreePtr storeInd)
     if (GenTree::OperIsBinary(oper))
     {
         // On Xarch RMW operations require that the source memory-op be in a register.
-        assert(!indirOpSource->isMemoryOp() || indirOpSource->gtLsraInfo.dstCount == 1);
+        assert(!IsContainableMemoryOp(indirOpSource) || indirOpSource->gtLsraInfo.dstCount == 1);
         JITDUMP("Lower successfully detected an assignment of the form: *addrMode BinOp= source\n");
         info->srcCount = indirOpSource->gtLsraInfo.dstCount;
     }
@@ -3367,11 +3367,11 @@ void Lowering::TreeNodeInfoInitMul(GenTreePtr tree)
     {
         assert(tree->OperGet() == GT_MUL);
 
-        if (op2->isMemoryOp() || op2->IsCnsNonZeroFltOrDbl())
+        if (IsContainableMemoryOp(op2) || op2->IsCnsNonZeroFltOrDbl())
         {
             MakeSrcContained(tree, op2);
         }
-        else if (op1->IsCnsNonZeroFltOrDbl() || (op1->isMemoryOp() && IsSafeToContainMem(tree, op1)))
+        else if (op1->IsCnsNonZeroFltOrDbl() || (IsContainableMemoryOp(op1) && IsSafeToContainMem(tree, op1)))
         {
             // Since  GT_MUL is commutative, we will try to re-order operands if it is safe to
             // generate more efficient code sequence for the case of GT_MUL(op1=memOp, op2=non-memOp)
@@ -3460,7 +3460,7 @@ void Lowering::TreeNodeInfoInitMul(GenTreePtr tree)
         }
 
         MakeSrcContained(tree, imm); // The imm is always contained
-        if (other->isMemoryOp())
+        if (IsContainableMemoryOp(other))
         {
             memOp = other; // memOp may be contained below
         }
@@ -3471,7 +3471,7 @@ void Lowering::TreeNodeInfoInitMul(GenTreePtr tree)
     // This is because during codegen we use 'tree' type to derive EmitTypeSize.
     // E.g op1 type = byte, op2 type = byte but GT_MUL tree type is int.
     //
-    if (memOp == nullptr && op2->isMemoryOp())
+    if (memOp == nullptr && IsContainableMemoryOp(op2))
     {
         memOp = op2;
     }
@@ -3610,7 +3610,7 @@ bool Lowering::ExcludeNonByteableRegisters(GenTree* tree)
                 GenTree*  op1      = simdNode->gtGetOp1();
                 GenTree*  op2      = simdNode->gtGetOp2();
                 var_types baseType = simdNode->gtSIMDBaseType;
-                if (!op1->isMemoryOp() && op2->IsCnsIntOrI() && varTypeIsSmallInt(baseType))
+                if (!IsContainableMemoryOp(op1) && op2->IsCnsIntOrI() && varTypeIsSmallInt(baseType))
                 {
                     bool     ZeroOrSignExtnReqd = true;
                     unsigned baseSize           = genTypeSize(baseType);
src/jit/simdcodegenxarch.cpp
index a28c652..ef50aae 100644 (file)
@@ -2500,7 +2500,7 @@ void CodeGen::genSIMDIntrinsicGetItem(GenTreeSIMD* simdNode)
     regNumber srcReg = op1->gtRegNum;
 
     // Optimize the case of op1 is in memory and trying to access ith element.
-    if (op1->isMemoryOp())
+    if (!op1->isUsedFromReg())
     {
         assert(op1->isContained());
 
@@ -2508,15 +2508,17 @@ void CodeGen::genSIMDIntrinsicGetItem(GenTreeSIMD* simdNode)
         regNumber indexReg;
         int       offset = 0;
 
-        if (op1->OperGet() == GT_LCL_FLD)
+        if (op1->OperIsLocal())
         {
             // There are three parts to the total offset here:
-            // {offset of local} + {offset of SIMD Vector field} + {offset of element within SIMD vector}.
+            // {offset of local} + {offset of SIMD Vector field (lclFld only)} + {offset of element within SIMD vector}.
             bool     isEBPbased;
             unsigned varNum = op1->gtLclVarCommon.gtLclNum;
             offset += compiler->lvaFrameAddress(varNum, &isEBPbased);
-            offset += op1->gtLclFld.gtLclOffs;
-
+            if (op1->OperGet() == GT_LCL_FLD)
+            {
+                offset += op1->gtLclFld.gtLclOffs;
+            }
             baseReg = (isEBPbased) ? REG_EBP : REG_ESP;
         }
         else
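The three-part offset is easiest to see with concrete numbers. A worked
example under an assumed frame layout (the offsets are illustrative):

    // Element i of a SIMD value stored in a local:
    //   base   = rbp or rsp                      // isEBPbased decides
    //   offset = lvaFrameAddress(varNum)         // e.g. 0x20
    //          + gtLclFld.gtLclOffs              // e.g. 0x08, GT_LCL_FLD only
    //          + i * genTypeSize(baseType)       // e.g. i * 4 for float
    //
    //   movss xmm0, [rbp + 0x28 + i*4]
    //
    // The middle term is now added conditionally, since a plain GT_LCL_VAR
    // (admitted by the new OperIsLocal() test) has no field offset.
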
src/mscorlib/src/System/Diagnostics/Eventing/EventPipe.cs
index 0d66c94..2f6fdf6 100644 (file)
@@ -170,6 +170,6 @@ namespace System.Diagnostics.Tracing
 
         [DllImport(JitHelpers.QCall, CharSet = CharSet.Unicode)]
         [SuppressUnmanagedCodeSecurity]
-        internal static extern unsafe void WriteEvent(IntPtr eventHandle, uint eventID, void* pData, uint length);
+        internal static extern unsafe void WriteEvent(IntPtr eventHandle, uint eventID, void* pData, uint length, Guid* activityId, Guid* relatedActivityId);
     }
 }
src/mscorlib/src/System/Diagnostics/Eventing/EventPipeEventProvider.cs
index cd9bd3c..d5bc4c2 100644 (file)
@@ -62,7 +62,7 @@ namespace System.Diagnostics.Tracing
             {
                 if (userDataCount == 0)
                 {
-                    EventPipeInternal.WriteEvent(eventHandle, eventID, null, 0);
+                    EventPipeInternal.WriteEvent(eventHandle, eventID, null, 0, activityId, relatedActivityId);
                     return 0;
                 }
 
@@ -82,7 +82,7 @@ namespace System.Diagnostics.Tracing
                         uint singleUserDataSize = userData[i].Size;
                         WriteToBuffer(pData, length, ref offset, singleUserDataPtr, singleUserDataSize);
                     }
-                    EventPipeInternal.WriteEvent(eventHandle, eventID, pData, length);
+                    EventPipeInternal.WriteEvent(eventHandle, eventID, pData, length, activityId, relatedActivityId);
                 }
             }
             return 0;
src/mscorlib/src/System/Diagnostics/Eventing/EventSource_CoreCLR.cs
index a41c069..01aac72 100644 (file)
@@ -11,6 +11,13 @@ namespace System.Diagnostics.Tracing
 {
     public partial class EventSource
     {
+#if FEATURE_MANAGED_ETW && FEATURE_PERFTRACING
+        // For non-Windows, we use a thread-local variable to hold the activity ID.
+        // On Windows, ETW has its own thread-local variable and we participate in its use.
+        [ThreadStatic]
+        private static Guid s_currentThreadActivityId;
+#endif // FEATURE_MANAGED_ETW && FEATURE_PERFTRACING
+
         // ActivityID support (see also WriteEventWithRelatedActivityIdCore)
         /// <summary>
         /// When a thread starts work that is on behalf of 'something else' (typically another 
@@ -33,16 +40,20 @@ namespace System.Diagnostics.Tracing
         {
             if (TplEtwProvider.Log != null)
                 TplEtwProvider.Log.SetActivityId(activityId);
-#if FEATURE_MANAGED_ETW && PLATFORM_WINDOWS
+#if FEATURE_MANAGED_ETW
 #if FEATURE_ACTIVITYSAMPLING
             Guid newId = activityId;
 #endif // FEATURE_ACTIVITYSAMPLING
             // We ignore errors to keep with the convention that EventSources do not throw errors.
             // Note we can't access m_throwOnWrites because this is a static method.  
 
+#if FEATURE_PERFTRACING
+            s_currentThreadActivityId = activityId;
+#elif PLATFORM_WINDOWS
             if (UnsafeNativeMethods.ManifestEtw.EventActivityIdControl(
                 UnsafeNativeMethods.ManifestEtw.ActivityControl.EVENT_ACTIVITY_CTRL_GET_SET_ID,
                 ref activityId) == 0)
+#endif // FEATURE_PERFTRACING
             {
 #if FEATURE_ACTIVITYSAMPLING
                 var activityDying = s_activityDying;
@@ -57,7 +68,7 @@ namespace System.Diagnostics.Tracing
                 }
 #endif // FEATURE_ACTIVITYSAMPLING
             }
-#endif // FEATURE_MANAGED_ETW && PLATFORM_WINDOWS
+#endif // FEATURE_MANAGED_ETW
         }
 
         /// <summary>
@@ -82,14 +93,19 @@ namespace System.Diagnostics.Tracing
         public static void SetCurrentThreadActivityId(Guid activityId, out Guid oldActivityThatWillContinue)
         {
             oldActivityThatWillContinue = activityId;
-#if FEATURE_MANAGED_ETW && PLATFORM_WINDOWS
+#if FEATURE_MANAGED_ETW
             // We ignore errors to keep with the convention that EventSources do not throw errors.
             // Note we can't access m_throwOnWrites because this is a static method.  
 
+#if FEATURE_PERFTRACING
+            oldActivityThatWillContinue = s_currentThreadActivityId;
+            s_currentThreadActivityId = activityId;
+#elif PLATFORM_WINDOWS
             UnsafeNativeMethods.ManifestEtw.EventActivityIdControl(
                 UnsafeNativeMethods.ManifestEtw.ActivityControl.EVENT_ACTIVITY_CTRL_GET_SET_ID,
                     ref oldActivityThatWillContinue);
-#endif // FEATURE_MANAGED_ETW && PLATFORM_WINDOWS
+#endif // FEATURE_PERFTRACING
+#endif // FEATURE_MANAGED_ETW
 
             // We don't call the activityDying callback here because the caller has declared that
             // it is not dying.  
@@ -107,11 +123,15 @@ namespace System.Diagnostics.Tracing
                 // We ignore errors to keep with the convention that EventSources do not throw 
                 // errors. Note we can't access m_throwOnWrites because this is a static method.
                 Guid retVal = new Guid();
-#if FEATURE_MANAGED_ETW && PLATFORM_WINDOWS
+#if FEATURE_MANAGED_ETW
+#if FEATURE_PERFTRACING
+                retVal = s_currentThreadActivityId;
+#elif PLATFORM_WINDOWS
                 UnsafeNativeMethods.ManifestEtw.EventActivityIdControl(
                     UnsafeNativeMethods.ManifestEtw.ActivityControl.EVENT_ACTIVITY_CTRL_GET_ID,
                     ref retVal);
-#endif // FEATURE_MANAGED_ETW && PLATFORM_WINDOWS
+#endif // FEATURE_PERFTRACING
+#endif // FEATURE_MANAGED_ETW
                 return retVal;
             }
         }
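Functionally, the restructured #if blocks give non-Windows builds a
thread-local GUID with get/set/swap semantics in place of ETW's per-thread
activity ID. A native sketch of the same behavior (hypothetical helpers,
not the runtime's code; GUID as defined by the platform headers):

    thread_local GUID t_currentActivityId = {};

    GUID GetCurrentThreadActivityId()
    {
        return t_currentActivityId;
    }

    GUID SetCurrentThreadActivityId(const GUID& newId)
    {
        GUID old = t_currentActivityId; // "oldActivityThatWillContinue"
        t_currentActivityId = newId;
        return old;
    }
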
src/vm/eventpipe.cpp
index b73c023..4eae6a8 100644 (file)
@@ -282,7 +282,7 @@ void EventPipe::DeleteProvider(EventPipeProvider *pProvider)
     }
 }
 
-void EventPipe::WriteEvent(EventPipeEvent &event, BYTE *pData, unsigned int length)
+void EventPipe::WriteEvent(EventPipeEvent &event, BYTE *pData, unsigned int length, LPCGUID pActivityId, LPCGUID pRelatedActivityId)
 {
     CONTRACTL
     {
@@ -309,7 +309,7 @@ void EventPipe::WriteEvent(EventPipeEvent &event, BYTE *pData, unsigned int leng
 
     if(!s_pConfig->RundownEnabled() && s_pBufferManager != NULL)
     {
-        if(!s_pBufferManager->WriteEvent(pThread, event, pData, length))
+        if(!s_pBufferManager->WriteEvent(pThread, event, pData, length, pActivityId, pRelatedActivityId))
         {
             // This is used in DEBUG to make sure that we don't log an event synchronously that we didn't log to the buffer.
             return;
@@ -323,7 +323,9 @@ void EventPipe::WriteEvent(EventPipeEvent &event, BYTE *pData, unsigned int leng
             event,
             pThread->GetOSThreadId(),
             pData,
-            length);
+            length,
+            pActivityId,
+            pRelatedActivityId);
 
         if(s_pFile != NULL)
         {
@@ -340,7 +342,9 @@ void EventPipe::WriteEvent(EventPipeEvent &event, BYTE *pData, unsigned int leng
             event,
             pThread->GetOSThreadId(),
             pData,
-            length);
+            length,
+            pActivityId,
+            pRelatedActivityId);
 
         // Write to the EventPipeFile if it exists.
         if(s_pSyncFile != NULL)
@@ -372,7 +376,7 @@ void EventPipe::WriteSampleProfileEvent(Thread *pSamplingThread, EventPipeEvent
     {
         // Specify the sampling thread as the "current thread", so that we select the right buffer.
         // Specify the target thread so that the event gets properly attributed.
-        if(!s_pBufferManager->WriteEvent(pSamplingThread, *pEvent, pData, length, pTargetThread, &stackContents))
+        if(!s_pBufferManager->WriteEvent(pSamplingThread, *pEvent, pData, length, NULL /* pActivityId */, NULL /* pRelatedActivityId */, pTargetThread, &stackContents))
         {
             // This is used in DEBUG to make sure that we don't log an event synchronously that we didn't log to the buffer.
             return;
@@ -577,14 +581,16 @@ void QCALLTYPE EventPipeInternal::WriteEvent(
     INT_PTR eventHandle,
     unsigned int eventID,
     void *pData,
-    unsigned int length)
+    unsigned int length,
+    LPCGUID pActivityId,
+    LPCGUID pRelatedActivityId)
 {
     QCALL_CONTRACT;
     BEGIN_QCALL;
 
     _ASSERTE(eventHandle != NULL);
     EventPipeEvent *pEvent = reinterpret_cast<EventPipeEvent *>(eventHandle);
-    EventPipe::WriteEvent(*pEvent, (BYTE *)pData, length);
+    EventPipe::WriteEvent(*pEvent, (BYTE *)pData, length, pActivityId, pRelatedActivityId);
 
     END_QCALL;
 }
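The activity IDs travel as optional pointers end to end: the managed QCall
passes them in, EventPipe::WriteEvent forwards them to the buffer manager,
and NULL means "no activity". A sketch of a native call site using the new
overload (the GUID values are placeholders):

    GUID activity        = { /* current activity for this thread */ };
    GUID relatedActivity = { /* parent/related activity, if any  */ };

    // Both pointers may be NULL; the defaulted parameters declared in
    // eventpipe.h keep existing call sites compiling unchanged.
    EventPipe::WriteEvent(*pEvent, (BYTE*)pData, length,
                          &activity, &relatedActivity);
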
src/vm/eventpipe.h
index 0860080..a69e1ba 100644 (file)
@@ -188,7 +188,7 @@ class EventPipe
 
         // Write out an event.
         // Data is written as a serialized blob matching the ETW serialization conventions.
-        static void WriteEvent(EventPipeEvent &event, BYTE *pData, unsigned int length);
+        static void WriteEvent(EventPipeEvent &event, BYTE *pData, unsigned int length, LPCGUID pActivityId = NULL, LPCGUID pRelatedActivityId = NULL);
 
         // Write out a sample profile event.
         static void WriteSampleProfileEvent(Thread *pSamplingThread, EventPipeEvent *pEvent, Thread *pTargetThread, StackContents &stackContents, BYTE *pData = NULL, unsigned int length = 0);
@@ -305,7 +305,8 @@ public:
         INT_PTR eventHandle,
         unsigned int eventID,
         void *pData,
-        unsigned int length);
+        unsigned int length,
+        LPCGUID pActivityId, LPCGUID pRelatedActivityId);
 };
 
 #endif // FEATURE_PERFTRACING
src/vm/eventpipebuffer.cpp
index ed1c547..00652c9 100644 (file)
@@ -46,7 +46,7 @@ EventPipeBuffer::~EventPipeBuffer()
     }
 }
 
-bool EventPipeBuffer::WriteEvent(Thread *pThread, EventPipeEvent &event, BYTE *pData, unsigned int dataLength, StackContents *pStack)
+bool EventPipeBuffer::WriteEvent(Thread *pThread, EventPipeEvent &event, BYTE *pData, unsigned int dataLength, LPCGUID pActivityId, LPCGUID pRelatedActivityId, StackContents *pStack)
 {
     CONTRACTL
     {
@@ -77,7 +77,9 @@ bool EventPipeBuffer::WriteEvent(Thread *pThread, EventPipeEvent &event, BYTE *p
             event,
             pThread->GetOSThreadId(),
             pDataDest,
-            dataLength);
+            dataLength,
+            pActivityId,
+            pRelatedActivityId);
 
         // Copy the stack if a separate stack trace was provided.
         if(pStack != NULL)
src/vm/eventpipebuffer.h
index 97b858d..f279a28 100644 (file)
@@ -81,7 +81,7 @@ public:
     // Returns:
     //  - true: The write succeeded.
     //  - false: The write failed.  In this case, the buffer should be considered full.
-    bool WriteEvent(Thread *pThread, EventPipeEvent &event, BYTE *pData, unsigned int dataLength, StackContents *pStack = NULL);
+    bool WriteEvent(Thread *pThread, EventPipeEvent &event, BYTE *pData, unsigned int dataLength, LPCGUID pActivityId, LPCGUID pRelatedActivityId, StackContents *pStack = NULL);
 
     // Get the timestamp of the most recent event in the buffer.
     LARGE_INTEGER GetMostRecentTimeStamp() const;
src/vm/eventpipebuffermanager.cpp
index e271fce..5edc462 100644 (file)
@@ -217,7 +217,7 @@ void EventPipeBufferManager::DeAllocateBuffer(EventPipeBuffer *pBuffer)
     }
 }
 
-bool EventPipeBufferManager::WriteEvent(Thread *pThread, EventPipeEvent &event, BYTE *pData, unsigned int length, Thread *pEventThread, StackContents *pStack)
+bool EventPipeBufferManager::WriteEvent(Thread *pThread, EventPipeEvent &event, BYTE *pData, unsigned int length, LPCGUID pActivityId, LPCGUID pRelatedActivityId, Thread *pEventThread, StackContents *pStack)
 {
     CONTRACTL
     {
@@ -276,7 +276,7 @@ bool EventPipeBufferManager::WriteEvent(Thread *pThread, EventPipeEvent &event,
         else
         {
             // Attempt to write the event to the buffer.  If this fails, we should allocate a new buffer.
-            allocNewBuffer = !pBuffer->WriteEvent(pEventThread, event, pData, length, pStack);
+            allocNewBuffer = !pBuffer->WriteEvent(pEventThread, event, pData, length, pActivityId, pRelatedActivityId, pStack);
         }
     }
 
@@ -294,7 +294,7 @@ bool EventPipeBufferManager::WriteEvent(Thread *pThread, EventPipeEvent &event,
     // This is the second time if this thread did have one or more buffers, but they were full.
     if(allocNewBuffer && pBuffer != NULL)
     {
-        allocNewBuffer = !pBuffer->WriteEvent(pEventThread, event, pData, length, pStack);
+        allocNewBuffer = !pBuffer->WriteEvent(pEventThread, event, pData, length, pActivityId, pRelatedActivityId, pStack);
     }
 
     // Mark that the thread is no longer writing an event.
src/vm/eventpipebuffermanager.h
index 74783d2..a53721b 100644 (file)
@@ -67,7 +67,7 @@ public:
     // This is because the thread that writes the events is not the same as the "event thread".
     // An optional stack trace can be provided for sample profiler events.
     // Otherwise, if a stack trace is needed, one will be automatically collected.
-    bool WriteEvent(Thread *pThread, EventPipeEvent &event, BYTE *pData, unsigned int length, Thread *pEventThread = NULL, StackContents *pStack = NULL);
+    bool WriteEvent(Thread *pThread, EventPipeEvent &event, BYTE *pData, unsigned int length, LPCGUID pActivityId, LPCGUID pRelatedActivityId, Thread *pEventThread = NULL, StackContents *pStack = NULL);
 
     // Write the contents of the managed buffers to the specified file.
     // The stopTimeStamp is used to determine when tracing was stopped to ensure that we
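The pThread/pEventThread split matters for the sample profiler, as the
eventpipe.cpp hunk above shows: the sampling thread selects the buffer
while the sampled thread receives the attribution. From that call site:

    s_pBufferManager->WriteEvent(
        pSamplingThread,     // "current thread": selects the write buffer
        *pEvent, pData, length,
        NULL,                // pActivityId: samples carry no activity
        NULL,                // pRelatedActivityId
        pTargetThread,       // thread the event is attributed to
        &stackContents);     // stack collected by the sampler itself
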
src/vm/eventpipeconfiguration.cpp
index 73379fe..42f9daf 100644 (file)
@@ -370,7 +370,9 @@ EventPipeEventInstance* EventPipeConfiguration::BuildEventMetadataEvent(EventPip
         *m_pMetadataEvent,
         GetCurrentThreadId(),
         pInstancePayload,
-        instancePayloadSize);
+        instancePayloadSize,
+        NULL /* pActivityId */,
+        NULL /* pRelatedActivityId */);
 
     // Set the timestamp to match the source event, because the metadata event
     // will be emitted right before the source event.
src/vm/eventpipeeventinstance.cpp
index 9372cec..afde2c0 100644 (file)
@@ -14,7 +14,9 @@ EventPipeEventInstance::EventPipeEventInstance(
     EventPipeEvent &event,
     DWORD threadID,
     BYTE *pData,
-    unsigned int length)
+    unsigned int length,
+    LPCGUID pActivityId,
+    LPCGUID pRelatedActivityId)
 {
     CONTRACTL
     {
@@ -30,6 +32,23 @@ EventPipeEventInstance::EventPipeEventInstance(
 #endif // _DEBUG
     m_pEvent = &event;
     m_threadID = threadID;
+    if(pActivityId != NULL)
+    {
+        m_activityId = *pActivityId;
+    }
+    else
+    {
+        m_activityId = {0};
+    }
+    if(pRelatedActivityId != NULL)
+    {
+        m_relatedActivityId = *pRelatedActivityId;
+    }
+    else
+    {
+        m_relatedActivityId = {0};
+    }
+
     m_pData = pData;
     m_dataLength = length;
     QueryPerformanceCounter(&m_timeStamp);
@@ -98,11 +117,14 @@ void EventPipeEventInstance::FastSerialize(FastSerializer *pSerializer, StreamLa
     // Calculate the size of the total payload so that it can be written to the file.
     unsigned int payloadLength =
         sizeof(metadataLabel) +
-        sizeof(m_threadID) +        // Thread ID
-        sizeof(m_timeStamp) +       // TimeStamp
-        m_dataLength +              // Event payload data length
-        sizeof(unsigned int) +      // Prepended stack payload size in bytes
-        m_stackContents.GetSize();  // Stack payload size
+        sizeof(m_threadID) +            // Thread ID
+        sizeof(m_timeStamp) +           // TimeStamp
+        sizeof(m_activityId) +          // Activity ID
+        sizeof(m_relatedActivityId) +   // Related Activity ID
+        sizeof(m_dataLength) +          // Data payload length
+        m_dataLength +                  // Event payload data
+        sizeof(unsigned int) +          // Prepended stack payload size in bytes
+        m_stackContents.GetSize();      // Stack payload size
 
     // Write the size of the event to the file.
     pSerializer->WriteBuffer((BYTE*)&payloadLength, sizeof(payloadLength));
@@ -116,6 +138,15 @@ void EventPipeEventInstance::FastSerialize(FastSerializer *pSerializer, StreamLa
     // Write the timestamp.
     pSerializer->WriteBuffer((BYTE*)&m_timeStamp, sizeof(m_timeStamp));
 
+    // Write the activity id.
+    pSerializer->WriteBuffer((BYTE*)&m_activityId, sizeof(m_activityId));
+
+    // Write the related activity id.
+    pSerializer->WriteBuffer((BYTE*)&m_relatedActivityId, sizeof(m_relatedActivityId));
+
+    // Write the data payload size.
+    pSerializer->WriteBuffer((BYTE*)&m_dataLength, sizeof(m_dataLength));
+
     // Write the event data payload.
     if(m_dataLength > 0)
     {
@@ -199,7 +230,7 @@ bool EventPipeEventInstance::EnsureConsistency()
 #endif // _DEBUG
 
 SampleProfilerEventInstance::SampleProfilerEventInstance(EventPipeEvent &event, Thread *pThread, BYTE *pData, unsigned int length)
-    :EventPipeEventInstance(event, pThread->GetOSThreadId(), pData, length)
+    :EventPipeEventInstance(event, pThread->GetOSThreadId(), pData, length, NULL /* pActivityId */, NULL /* pRelatedActivityId */)
 {
     LIMITED_METHOD_CONTRACT;
 }
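With the two GUIDs and the explicitly prepended data length, the record
FastSerialize writes now lays out as follows (read off the code above;
field widths assume the usual Windows type sizes, GUID = 16 bytes,
LARGE_INTEGER = 8):

    // uint32        payloadLength      // size of all fields below
    // StreamLabel   metadataLabel
    // uint32        threadID
    // int64         timeStamp          // QueryPerformanceCounter value
    // GUID          activityId         // zeroed when NULL was passed
    // GUID          relatedActivityId  // zeroed when NULL was passed
    // uint32        dataLength         // newly written before the payload
    // byte[]        data               // dataLength bytes, ETW conventions
    // uint32        stackSize
    // byte[]        stack              // stackSize bytes
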
src/vm/eventpipeeventinstance.h
index f54a894..4fcf95c 100644 (file)
@@ -19,7 +19,7 @@ class EventPipeEventInstance
 
 public:
 
-    EventPipeEventInstance(EventPipeEvent &event, DWORD threadID, BYTE *pData, unsigned int length);
+    EventPipeEventInstance(EventPipeEvent &event, DWORD threadID, BYTE *pData, unsigned int length, LPCGUID pActivityId, LPCGUID pRelatedActivityId);
 
     // Get the event associated with this instance.
     EventPipeEvent* GetEvent() const;
@@ -55,6 +55,8 @@ protected:
     EventPipeEvent *m_pEvent;
     DWORD m_threadID;
     LARGE_INTEGER m_timeStamp;
+    GUID m_activityId;
+    GUID m_relatedActivityId;
 
     BYTE *m_pData;
     unsigned int m_dataLength;