}
if (!isBuildOnly) {
- if (architecture == 'x64' || !isPR) {
- Utilities.addXUnitDotNETResults(newJob, 'bin/**/TestRun*.xml')
- }
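+ // The third argument is assumed to be dotnet-ci's skipIfNoTestFiles flag; passing true keeps configurations that produce no test results from failing.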
+ Utilities.addXUnitDotNETResults(newJob, 'bin/**/TestRun*.xml', true)
setTestJobTimeOut(newJob, scenario)
}
}
{
// Emit code like this:
// ...
- // bgt True
+ // beq True
+ // bvs True ; this second branch is typically absent
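+ // (The second branch covers floating-point compares where an unordered result, indicated by the V flag, must also count as true.)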
// movs rD, #0
// b Next
// True:
// Next:
// ...
- CompareKind compareKind = ((tree->gtFlags & GTF_UNSIGNED) != 0) ? CK_UNSIGNED : CK_SIGNED;
- emitJumpKind jmpKind = genJumpKindForOper(tree->gtOper, compareKind);
+ emitJumpKind jumpKind[2];
+ bool branchToTrueLabel[2];
+ genJumpKindsForTree(tree, jumpKind, branchToTrueLabel);
BasicBlock* labelTrue = genCreateTempLabel();
- getEmitter()->emitIns_J(emitter::emitJumpKindToIns(jmpKind), labelTrue);
+ getEmitter()->emitIns_J(emitter::emitJumpKindToIns(jumpKind[0]), labelTrue);
+
+ if (jumpKind[1] != EJ_NONE)
+ {
+ getEmitter()->emitIns_J(emitter::emitJumpKindToIns(jumpKind[1]), labelTrue);
+ }
getEmitter()->emitIns_R_I(INS_mov, emitActualTypeSize(tree->gtType), dstReg, 0);
genConsumeOperands(treeNode->AsOp());
if (varTypeIsFloating(targetType))
{
- // Check that divisor is a valid operand.
- // Note that a reg optional operand is a treated as a memory op
- // if no register is allocated to it.
- assert(divisor->isUsedFromReg() || divisor->isMemoryOp() || divisor->IsCnsFltOrDbl() ||
- divisor->IsRegOptional());
-
// Floating point div/rem operation
assert(oper == GT_DIV || oper == GT_MOD);
if (!op1->isUsedFromReg())
{
assert(treeNode->OperIsCommutative());
- assert(op1->isMemoryOp() || op1->IsCnsNonZeroFltOrDbl() || op1->IsIntCnsFitsInI32() || op1->IsRegOptional());
+ assert(op1->isMemoryOp() || op1->IsLocal() || op1->IsCnsNonZeroFltOrDbl() || op1->IsIntCnsFitsInI32() ||
+ op1->IsRegOptional());
op1 = treeNode->gtGetOp2();
op2 = treeNode->gtGetOp1();
GenTreeLclVar* lclVar = nullptr;
if (src->isLclVarUsedFromMemory())
{
- assert(src->IsRegOptional());
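+ // The lclVar can be used from memory either because it was marked regOptional or because it never became a register candidate.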
+ assert(src->IsRegOptional() || !emitComp->lvaTable[src->gtLclVar.gtLclNum].lvIsRegCandidate());
lclVar = src->AsLclVar();
}
if (dst->isLclVarUsedFromMemory())
{
- assert(dst->IsRegOptional());
+ assert(dst->IsRegOptional() || !emitComp->lvaTable[dst->gtLclVar.gtLclNum].lvIsRegCandidate());
lclVar = dst->AsLclVar();
}
}
//------------------------------------------------------------------------
+// IsContainableMemoryOp: Checks whether this is a memory op that can be contained.
+//
+// Arguments:
+// node - the node of interest.
+//
+// Notes:
+// This differs from the isMemoryOp() method on GenTree because it also checks
+// for the case of an untracked local. Note that this won't include locals that
+// fail to become register candidates for some other reason, nor register
+// candidates that get spilled.
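+// For example, an untracked local always lives in its stack home location,
+// and so can be treated as a containable memory operand.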
+//
+// Return Value:
+// True if this will definitely be a memory reference that could be contained.
+//
+bool Lowering::IsContainableMemoryOp(GenTree* node)
+{
+ return node->isMemoryOp() || (node->IsLocal() && !comp->lvaTable[node->AsLclVar()->gtLclNum].lvTracked);
+}
+
+//------------------------------------------------------------------------
// This is the main entry point for Lowering.
GenTree* Lowering::LowerNode(GenTree* node)
GenTreeIntCon* op2 = cmp->gtGetOp2()->AsIntCon();
ssize_t op2Value = op2->IconValue();
- if (op1->isMemoryOp() && varTypeIsSmall(op1Type) && genTypeCanRepresentValue(op1Type, op2Value))
+ if (IsContainableMemoryOp(op1) && varTypeIsSmall(op1Type) && genTypeCanRepresentValue(op1Type, op2Value))
{
//
// If op1's type is small then try to narrow op2 so it has the same type as op1.
// the result of bool-returning calls.
//
- if (castOp->OperIs(GT_CALL, GT_LCL_VAR) || castOp->OperIsLogical() || castOp->isMemoryOp())
+ if (castOp->OperIs(GT_CALL, GT_LCL_VAR) || castOp->OperIsLogical() || IsContainableMemoryOp(castOp))
{
assert(!castOp->gtOverflowEx()); // Must not be an overflow checking operation
cmp->gtOp.gtOp1 = andOp1;
cmp->gtOp.gtOp2 = andOp2;
- if (andOp1->isMemoryOp() && andOp2->IsIntegralConst())
+ if (IsContainableMemoryOp(andOp1) && andOp2->IsIntegralConst())
{
//
// For "test" we only care about the bits that are set in the second operand (mask).
// for example small enough and non-relocatable
bool IsContainableImmed(GenTree* parentNode, GenTree* childNode);
+ // Return true if 'node' is a containable memory op.
+ bool IsContainableMemoryOp(GenTree* node);
+
// Makes 'childNode' contained in the 'parentNode'
void MakeSrcContained(GenTreePtr parentNode, GenTreePtr childNode);
info->srcCount = 2;
info->dstCount = 1;
- if (op2->isMemoryOp() || op2->IsCnsNonZeroFltOrDbl())
+ if (IsContainableMemoryOp(op2) || op2->IsCnsNonZeroFltOrDbl())
{
MakeSrcContained(tree, op2);
}
else if (tree->OperIsCommutative() &&
- (op1->IsCnsNonZeroFltOrDbl() || (op1->isMemoryOp() && IsSafeToContainMem(tree, op1))))
+ (op1->IsCnsNonZeroFltOrDbl() || (IsContainableMemoryOp(op1) && IsSafeToContainMem(tree, op1))))
{
// Though we have GT_ADD(op1=memOp, op2=non-memOp), we try to reorder the operands
// as long as it is safe so that the following efficient code sequence is generated:
{
other = node->gtIndex;
}
- else if (node->gtIndex->isMemoryOp())
+ else if (IsContainableMemoryOp(node->gtIndex))
{
other = node->gtIndex;
}
if (node->gtIndex->TypeGet() == node->gtArrLen->TypeGet())
{
- if (other->isMemoryOp())
+ if (IsContainableMemoryOp(other))
{
MakeSrcContained(tree, other);
}
delayUseSrc = op1;
}
else if ((op2 != nullptr) &&
- (!tree->OperIsCommutative() || (op2->isMemoryOp() && (op2->gtLsraInfo.srcCount == 0))))
+ (!tree->OperIsCommutative() || (IsContainableMemoryOp(op2) && (op2->gtLsraInfo.srcCount == 0))))
{
delayUseSrc = op2;
}
binOpInRMW = IsBinOpInRMWStoreInd(tree);
if (!binOpInRMW)
{
- if (op2->isMemoryOp() && tree->TypeGet() == op2->TypeGet())
+ if (IsContainableMemoryOp(op2) && tree->TypeGet() == op2->TypeGet())
{
directlyEncodable = true;
operand = op2;
else if (tree->OperIsCommutative())
{
if (IsContainableImmed(tree, op1) ||
- (op1->isMemoryOp() && tree->TypeGet() == op1->TypeGet() && IsSafeToContainMem(tree, op1)))
+ (IsContainableMemoryOp(op1) && tree->TypeGet() == op1->TypeGet() && IsSafeToContainMem(tree, op1)))
{
// If it is safe, we can reverse the order of operands of commutative operations for efficient
// codegen
// everything is made explicit by adding casts.
assert(op1->TypeGet() == op2->TypeGet());
- if (op2->isMemoryOp() || op2->IsCnsNonZeroFltOrDbl())
+ if (IsContainableMemoryOp(op2) || op2->IsCnsNonZeroFltOrDbl())
{
MakeSrcContained(tree, op2);
}
}
// divisor can be an r/m, but the memory indirection must be of the same size as the divide
- if (op2->isMemoryOp() && (op2->TypeGet() == tree->TypeGet()))
+ if (IsContainableMemoryOp(op2) && (op2->TypeGet() == tree->TypeGet()))
{
MakeSrcContained(tree, op2);
}
switch (tree->gtIntrinsic.gtIntrinsicId)
{
case CORINFO_INTRINSIC_Sqrt:
- if (op1->isMemoryOp() || op1->IsCnsNonZeroFltOrDbl())
+ if (IsContainableMemoryOp(op1) || op1->IsCnsNonZeroFltOrDbl())
{
MakeSrcContained(tree, op1);
}
info->srcCount = 1;
}
- if (op1->isMemoryOp())
+ if (IsContainableMemoryOp(op1))
{
MakeSrcContained(tree, op1);
// U8 -> R8 conversion requires that the operand be in a register.
if (castOpType != TYP_ULONG)
{
- if (castOp->isMemoryOp() || castOp->IsCnsNonZeroFltOrDbl())
+ if (IsContainableMemoryOp(castOp) || castOp->IsCnsNonZeroFltOrDbl())
{
MakeSrcContained(tree, castOp);
}
{
MakeSrcContained(tree, otherOp);
}
- else if (otherOp->isMemoryOp() && ((otherOp == op2) || IsSafeToContainMem(tree, otherOp)))
+ else if (IsContainableMemoryOp(otherOp) && ((otherOp == op2) || IsSafeToContainMem(tree, otherOp)))
{
MakeSrcContained(tree, otherOp);
}
if (CheckImmedAndMakeContained(tree, op2))
{
// If the types are the same, or if the constant is of the correct size,
- // we can treat the isMemoryOp as contained.
+ // we can treat the memory op as contained.
if (op1Type == op2Type)
{
- if (op1->isMemoryOp())
+ if (IsContainableMemoryOp(op1))
{
MakeSrcContained(tree, op1);
}
// Note that TEST does not have a r,rm encoding like CMP has but we can still
// contain the second operand because the emitter maps both r,rm and rm,r to
// the same instruction code. This avoids the need to special case TEST here.
- if (op2->isMemoryOp())
+ if (IsContainableMemoryOp(op2))
{
MakeSrcContained(tree, op2);
}
- else if (op1->isMemoryOp() && IsSafeToContainMem(tree, op1))
+ else if (IsContainableMemoryOp(op1) && IsSafeToContainMem(tree, op1))
{
MakeSrcContained(tree, op1);
}
if (GenTree::OperIsBinary(oper))
{
// On Xarch, RMW operations require that the source memory-op be in a register.
- assert(!indirOpSource->isMemoryOp() || indirOpSource->gtLsraInfo.dstCount == 1);
+ assert(!IsContainableMemoryOp(indirOpSource) || indirOpSource->gtLsraInfo.dstCount == 1);
JITDUMP("Lower succesfully detected an assignment of the form: *addrMode BinOp= source\n");
info->srcCount = indirOpSource->gtLsraInfo.dstCount;
}
{
assert(tree->OperGet() == GT_MUL);
- if (op2->isMemoryOp() || op2->IsCnsNonZeroFltOrDbl())
+ if (IsContainableMemoryOp(op2) || op2->IsCnsNonZeroFltOrDbl())
{
MakeSrcContained(tree, op2);
}
- else if (op1->IsCnsNonZeroFltOrDbl() || (op1->isMemoryOp() && IsSafeToContainMem(tree, op1)))
+ else if (op1->IsCnsNonZeroFltOrDbl() || (IsContainableMemoryOp(op1) && IsSafeToContainMem(tree, op1)))
{
// Since GT_MUL is commutative, we will try to re-order the operands if it is safe to
// do so, to generate a more efficient code sequence for the case of GT_MUL(op1=memOp, op2=non-memOp).
}
MakeSrcContained(tree, imm); // The imm is always contained
- if (other->isMemoryOp())
+ if (IsContainableMemoryOp(other))
{
memOp = other; // memOp may be contained below
}
// This is because during codegen we use 'tree' type to derive EmitTypeSize.
// E.g., op1 type = byte, op2 type = byte, but GT_MUL tree type is int.
//
- if (memOp == nullptr && op2->isMemoryOp())
+ if (memOp == nullptr && IsContainableMemoryOp(op2))
{
memOp = op2;
}
GenTree* op1 = simdNode->gtGetOp1();
GenTree* op2 = simdNode->gtGetOp2();
var_types baseType = simdNode->gtSIMDBaseType;
- if (!op1->isMemoryOp() && op2->IsCnsIntOrI() && varTypeIsSmallInt(baseType))
+ if (!IsContainableMemoryOp(op1) && op2->IsCnsIntOrI() && varTypeIsSmallInt(baseType))
{
bool ZeroOrSignExtnReqd = true;
unsigned baseSize = genTypeSize(baseType);
regNumber srcReg = op1->gtRegNum;
// Optimize the case where op1 is in memory and we are trying to access the i'th element.
- if (op1->isMemoryOp())
+ if (!op1->isUsedFromReg())
{
assert(op1->isContained());
regNumber indexReg;
int offset = 0;
- if (op1->OperGet() == GT_LCL_FLD)
+ if (op1->OperIsLocal())
{
// There are three parts to the total offset here:
- // {offset of local} + {offset of SIMD Vector field} + {offset of element within SIMD vector}.
+ // {offset of local} + {offset of SIMD Vector field (lclFld only)} + {offset of element within SIMD vector}.
bool isEBPbased;
unsigned varNum = op1->gtLclVarCommon.gtLclNum;
offset += compiler->lvaFrameAddress(varNum, &isEBPbased);
- offset += op1->gtLclFld.gtLclOffs;
-
+ if (op1->OperGet() == GT_LCL_FLD)
+ {
+ offset += op1->gtLclFld.gtLclOffs;
+ }
baseReg = (isEBPbased) ? REG_EBP : REG_ESP;
}
else
[DllImport(JitHelpers.QCall, CharSet = CharSet.Unicode)]
[SuppressUnmanagedCodeSecurity]
- internal static extern unsafe void WriteEvent(IntPtr eventHandle, uint eventID, void* pData, uint length);
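+ // Note: this signature is expected to stay in sync with the native QCall implementation, which now also receives the activity ID pointers.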
+ internal static extern unsafe void WriteEvent(IntPtr eventHandle, uint eventID, void* pData, uint length, Guid* activityId, Guid* relatedActivityId);
}
}
{
if (userDataCount == 0)
{
- EventPipeInternal.WriteEvent(eventHandle, eventID, null, 0);
+ EventPipeInternal.WriteEvent(eventHandle, eventID, null, 0, activityId, relatedActivityId);
return 0;
}
uint singleUserDataSize = userData[i].Size;
WriteToBuffer(pData, length, ref offset, singleUserDataPtr, singleUserDataSize);
}
- EventPipeInternal.WriteEvent(eventHandle, eventID, pData, length);
+ EventPipeInternal.WriteEvent(eventHandle, eventID, pData, length, activityId, relatedActivityId);
}
}
return 0;
{
public partial class EventSource
{
+#if FEATURE_MANAGED_ETW && FEATURE_PERFTRACING
+ // For non-Windows, we use a thread-local variable to hold the activity ID.
+ // On Windows, ETW has its own thread-local variable and we participate in its use.
+ [ThreadStatic]
+ private static Guid s_currentThreadActivityId;
+#endif // FEATURE_MANAGED_ETW && FEATURE_PERFTRACING
+
// ActivityID support (see also WriteEventWithRelatedActivityIdCore)
/// <summary>
/// When a thread starts work that is on behalf of 'something else' (typically another
{
if (TplEtwProvider.Log != null)
TplEtwProvider.Log.SetActivityId(activityId);
-#if FEATURE_MANAGED_ETW && PLATFORM_WINDOWS
+#if FEATURE_MANAGED_ETW
#if FEATURE_ACTIVITYSAMPLING
Guid newId = activityId;
#endif // FEATURE_ACTIVITYSAMPLING
// We ignore errors to keep with the convention that EventSources do not throw errors.
// Note we can't access m_throwOnWrites because this is a static method.
+#if FEATURE_PERFTRACING
+ s_currentThreadActivityId = activityId;
+#elif PLATFORM_WINDOWS
if (UnsafeNativeMethods.ManifestEtw.EventActivityIdControl(
UnsafeNativeMethods.ManifestEtw.ActivityControl.EVENT_ACTIVITY_CTRL_GET_SET_ID,
ref activityId) == 0)
+#endif // FEATURE_PERFTRACING
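+ // Note: when FEATURE_PERFTRACING is defined, the block below executes unconditionally; under PLATFORM_WINDOWS it runs only when EventActivityIdControl succeeds.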
{
#if FEATURE_ACTIVITYSAMPLING
var activityDying = s_activityDying;
}
#endif // FEATURE_ACTIVITYSAMPLING
}
-#endif // FEATURE_MANAGED_ETW && PLATFORM_WINDOWS
+#endif // FEATURE_MANAGED_ETW
}
/// <summary>
public static void SetCurrentThreadActivityId(Guid activityId, out Guid oldActivityThatWillContinue)
{
oldActivityThatWillContinue = activityId;
-#if FEATURE_MANAGED_ETW && PLATFORM_WINDOWS
+#if FEATURE_MANAGED_ETW
// We ignore errors to keep with the convention that EventSources do not throw errors.
// Note we can't access m_throwOnWrites because this is a static method.
+#if FEATURE_PERFTRACING
+ oldActivityThatWillContinue = s_currentThreadActivityId;
+ s_currentThreadActivityId = activityId;
+#elif PLATFORM_WINDOWS
UnsafeNativeMethods.ManifestEtw.EventActivityIdControl(
UnsafeNativeMethods.ManifestEtw.ActivityControl.EVENT_ACTIVITY_CTRL_GET_SET_ID,
ref oldActivityThatWillContinue);
-#endif // FEATURE_MANAGED_ETW && PLATFORM_WINDOWS
+#endif // FEATURE_PERFTRACING
+#endif // FEATURE_MANAGED_ETW
// We don't call the activityDying callback here because the caller has declared that
// it is not dying.
// We ignore errors to keep with the convention that EventSources do not throw
// errors. Note we can't access m_throwOnWrites because this is a static method.
Guid retVal = new Guid();
-#if FEATURE_MANAGED_ETW && PLATFORM_WINDOWS
+#if FEATURE_MANAGED_ETW
+#if FEATURE_PERFTRACING
+ retVal = s_currentThreadActivityId;
+#elif PLATFORM_WINDOWS
UnsafeNativeMethods.ManifestEtw.EventActivityIdControl(
UnsafeNativeMethods.ManifestEtw.ActivityControl.EVENT_ACTIVITY_CTRL_GET_ID,
ref retVal);
-#endif // FEATURE_MANAGED_ETW && PLATFORM_WINDOWS
+#endif // FEATURE_PERFTRACING
+#endif // FEATURE_MANAGED_ETW
return retVal;
}
}
}
}
-void EventPipe::WriteEvent(EventPipeEvent &event, BYTE *pData, unsigned int length)
+void EventPipe::WriteEvent(EventPipeEvent &event, BYTE *pData, unsigned int length, LPCGUID pActivityId, LPCGUID pRelatedActivityId)
{
CONTRACTL
{
if(!s_pConfig->RundownEnabled() && s_pBufferManager != NULL)
{
- if(!s_pBufferManager->WriteEvent(pThread, event, pData, length))
+ if(!s_pBufferManager->WriteEvent(pThread, event, pData, length, pActivityId, pRelatedActivityId))
{
// This is used in DEBUG to make sure that we don't log an event synchronously that we didn't log to the buffer.
return;
event,
pThread->GetOSThreadId(),
pData,
- length);
+ length,
+ pActivityId,
+ pRelatedActivityId);
if(s_pFile != NULL)
{
event,
pThread->GetOSThreadId(),
pData,
- length);
+ length,
+ pActivityId,
+ pRelatedActivityId);
// Write to the EventPipeFile if it exists.
if(s_pSyncFile != NULL)
{
// Specify the sampling thread as the "current thread", so that we select the right buffer.
// Specify the target thread so that the event gets properly attributed.
- if(!s_pBufferManager->WriteEvent(pSamplingThread, *pEvent, pData, length, pTargetThread, &stackContents))
+ if(!s_pBufferManager->WriteEvent(pSamplingThread, *pEvent, pData, length, NULL /* pActivityId */, NULL /* pRelatedActivityId */, pTargetThread, &stackContents))
{
// This is used in DEBUG to make sure that we don't log an event synchronously that we didn't log to the buffer.
return;
INT_PTR eventHandle,
unsigned int eventID,
void *pData,
- unsigned int length)
+ unsigned int length,
+ LPCGUID pActivityId,
+ LPCGUID pRelatedActivityId)
{
QCALL_CONTRACT;
BEGIN_QCALL;
_ASSERTE(eventHandle != NULL);
EventPipeEvent *pEvent = reinterpret_cast<EventPipeEvent *>(eventHandle);
- EventPipe::WriteEvent(*pEvent, (BYTE *)pData, length);
+ EventPipe::WriteEvent(*pEvent, (BYTE *)pData, length, pActivityId, pRelatedActivityId);
END_QCALL;
}
// Write out an event.
// Data is written as a serialized blob matching the ETW serialization conventions.
- static void WriteEvent(EventPipeEvent &event, BYTE *pData, unsigned int length);
+ static void WriteEvent(EventPipeEvent &event, BYTE *pData, unsigned int length, LPCGUID pActivityId = NULL, LPCGUID pRelatedActivityId = NULL);
// Write out a sample profile event.
static void WriteSampleProfileEvent(Thread *pSamplingThread, EventPipeEvent *pEvent, Thread *pTargetThread, StackContents &stackContents, BYTE *pData = NULL, unsigned int length = 0);
INT_PTR eventHandle,
unsigned int eventID,
void *pData,
- unsigned int length);
+ unsigned int length,
+ LPCGUID pActivityId, LPCGUID pRelatedActivityId);
};
#endif // FEATURE_PERFTRACING
}
}
-bool EventPipeBuffer::WriteEvent(Thread *pThread, EventPipeEvent &event, BYTE *pData, unsigned int dataLength, StackContents *pStack)
+bool EventPipeBuffer::WriteEvent(Thread *pThread, EventPipeEvent &event, BYTE *pData, unsigned int dataLength, LPCGUID pActivityId, LPCGUID pRelatedActivityId, StackContents *pStack)
{
CONTRACTL
{
event,
pThread->GetOSThreadId(),
pDataDest,
- dataLength);
+ dataLength,
+ pActivityId,
+ pRelatedActivityId);
// Copy the stack if a separate stack trace was provided.
if(pStack != NULL)
// Returns:
// - true: The write succeeded.
// - false: The write failed. In this case, the buffer should be considered full.
- bool WriteEvent(Thread *pThread, EventPipeEvent &event, BYTE *pData, unsigned int dataLength, StackContents *pStack = NULL);
+ bool WriteEvent(Thread *pThread, EventPipeEvent &event, BYTE *pData, unsigned int dataLength, LPCGUID pActivityId, LPCGUID pRelatedActivityId, StackContents *pStack = NULL);
// Get the timestamp of the most recent event in the buffer.
LARGE_INTEGER GetMostRecentTimeStamp() const;
}
}
-bool EventPipeBufferManager::WriteEvent(Thread *pThread, EventPipeEvent &event, BYTE *pData, unsigned int length, Thread *pEventThread, StackContents *pStack)
+bool EventPipeBufferManager::WriteEvent(Thread *pThread, EventPipeEvent &event, BYTE *pData, unsigned int length, LPCGUID pActivityId, LPCGUID pRelatedActivityId, Thread *pEventThread, StackContents *pStack)
{
CONTRACTL
{
else
{
// Attempt to write the event to the buffer. If this fails, we should allocate a new buffer.
- allocNewBuffer = !pBuffer->WriteEvent(pEventThread, event, pData, length, pStack);
+ allocNewBuffer = !pBuffer->WriteEvent(pEventThread, event, pData, length, pActivityId, pRelatedActivityId, pStack);
}
}
// This is the second write attempt, reached if this thread did have one or more buffers, but they were full.
if(allocNewBuffer && pBuffer != NULL)
{
- allocNewBuffer = !pBuffer->WriteEvent(pEventThread, event, pData, length, pStack);
+ allocNewBuffer = !pBuffer->WriteEvent(pEventThread, event, pData, length, pActivityId, pRelatedActivityId, pStack);
}
// Mark that the thread is no longer writing an event.
// This is because the thread that writes the events is not the same as the "event thread".
// An optional stack trace can be provided for sample profiler events.
// Otherwise, if a stack trace is needed, one will be automatically collected.
- bool WriteEvent(Thread *pThread, EventPipeEvent &event, BYTE *pData, unsigned int length, Thread *pEventThread = NULL, StackContents *pStack = NULL);
+ bool WriteEvent(Thread *pThread, EventPipeEvent &event, BYTE *pData, unsigned int length, LPCGUID pActivityId, LPCGUID pRelatedActivityId, Thread *pEventThread = NULL, StackContents *pStack = NULL);
// Write the contents of the managed buffers to the specified file.
// The stopTimeStamp is used to determine when tracing was stopped to ensure that we
*m_pMetadataEvent,
GetCurrentThreadId(),
pInstancePayload,
- instancePayloadSize);
+ instancePayloadSize,
+ NULL /* pActivityId */,
+ NULL /* pRelatedActivityId */);
// Set the timestamp to match the source event, because the metadata event
// will be emitted right before the source event.
EventPipeEvent &event,
DWORD threadID,
BYTE *pData,
- unsigned int length)
+ unsigned int length,
+ LPCGUID pActivityId,
+ LPCGUID pRelatedActivityId)
{
CONTRACTL
{
#endif // _DEBUG
m_pEvent = &event;
m_threadID = threadID;
+ if(pActivityId != NULL)
+ {
+ m_activityId = *pActivityId;
+ }
+ else
+ {
+ m_activityId = {0};
+ }
+ if(pRelatedActivityId != NULL)
+ {
+ m_relatedActivityId = *pRelatedActivityId;
+ }
+ else
+ {
+ m_relatedActivityId = {0};
+ }
+
m_pData = pData;
m_dataLength = length;
QueryPerformanceCounter(&m_timeStamp);
// Calculate the size of the total payload so that it can be written to the file.
unsigned int payloadLength =
sizeof(metadataLabel) +
- sizeof(m_threadID) + // Thread ID
- sizeof(m_timeStamp) + // TimeStamp
- m_dataLength + // Event payload data length
- sizeof(unsigned int) + // Prepended stack payload size in bytes
- m_stackContents.GetSize(); // Stack payload size
+ sizeof(m_threadID) + // Thread ID
+ sizeof(m_timeStamp) + // TimeStamp
+ sizeof(m_activityId) + // Activity ID
+ sizeof(m_relatedActivityId) + // Related Activity ID
+ sizeof(m_dataLength) + // Data payload length
+ m_dataLength + // Event payload data
+ sizeof(unsigned int) + // Prepended stack payload size in bytes
+ m_stackContents.GetSize(); // Stack payload size
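+
+ // Sketch of the resulting record layout (the metadata label and thread ID
+ // writes are outside this hunk):
+ //   [payloadLength][metadataLabel][threadID][timeStamp][activityId]
+ //   [relatedActivityId][dataLength][data][stackSize][stackData]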
// Write the size of the event to the file.
pSerializer->WriteBuffer((BYTE*)&payloadLength, sizeof(payloadLength));
// Write the timestamp.
pSerializer->WriteBuffer((BYTE*)&m_timeStamp, sizeof(m_timeStamp));
+ // Write the activity id.
+ pSerializer->WriteBuffer((BYTE*)&m_activityId, sizeof(m_activityId));
+
+ // Write the related activity id.
+ pSerializer->WriteBuffer((BYTE*)&m_relatedActivityId, sizeof(m_relatedActivityId));
+
+ // Write the data payload size.
+ pSerializer->WriteBuffer((BYTE*)&m_dataLength, sizeof(m_dataLength));
+
// Write the event data payload.
if(m_dataLength > 0)
{
#endif // _DEBUG
SampleProfilerEventInstance::SampleProfilerEventInstance(EventPipeEvent &event, Thread *pThread, BYTE *pData, unsigned int length)
- :EventPipeEventInstance(event, pThread->GetOSThreadId(), pData, length)
+ :EventPipeEventInstance(event, pThread->GetOSThreadId(), pData, length, NULL /* pActivityId */, NULL /* pRelatedActivityId */)
{
LIMITED_METHOD_CONTRACT;
}
public:
- EventPipeEventInstance(EventPipeEvent &event, DWORD threadID, BYTE *pData, unsigned int length);
+ EventPipeEventInstance(EventPipeEvent &event, DWORD threadID, BYTE *pData, unsigned int length, LPCGUID pActivityId, LPCGUID pRelatedActivityId);
// Get the event associated with this instance.
EventPipeEvent* GetEvent() const;
EventPipeEvent *m_pEvent;
DWORD m_threadID;
LARGE_INTEGER m_timeStamp;
+ GUID m_activityId;
+ GUID m_relatedActivityId;
BYTE *m_pData;
unsigned int m_dataLength;