// Miscellaneous constants.
private const string DefaultAppName = "app";
private const string NetPerfFileExtension = ".netperf";
+ private const string NetTraceFileExtension = ".nettrace";
private const uint DefaultCircularBufferMB = 256; // MB (PerfView and dotnet-trace default)
private const char ProviderConfigDelimiter = ',';
private const char ConfigComponentDelimiter = ':';
private static string BuildTraceFileName()
{
- return GetAppName() + "." + Interop.GetCurrentProcessId().ToString() + NetPerfFileExtension;
+ // File name is "<app>.<pid>.nettrace" when the EventPipeNetTraceFormat
+ // switch is enabled (> 0), otherwise the legacy "<app>.<pid>.netperf".
+ return GetAppName() + "." + Interop.GetCurrentProcessId().ToString() +
+ ((Config_NetTraceFormat > 0) ? NetTraceFileExtension : NetPerfFileExtension);
}
private static string GetAppName()
}
}
+ // Reads the "EventPipeNetTraceFormat" compatibility switch.
+ // Returns the parsed integer, or -1 when the switch is unset or not a
+ // valid integer. Callers treat a value > 0 as "use the newer .nettrace
+ // file format" and anything else as the legacy .netperf format.
+ private static int Config_NetTraceFormat
+ {
+ get
+ {
+ string? stringValue = CompatibilitySwitch.GetValueInternal("EventPipeNetTraceFormat");
+ // TryParse fails on null or non-numeric text; both collapse to the sentinel.
+ if ((stringValue == null) || (!int.TryParse(stringValue, out int value)))
+ {
+ value = -1; // Indicates no value (or is illegal)
+ }
+
+ return value;
+ }
+ }
+
private static string? Config_EventPipeConfig => CompatibilitySwitch.GetValueInternal("EventPipeConfig");
private static uint Config_EventPipeCircularMB
// initialize our local copy from the marshaled target Thread instance
ZeroMemory (threadData, sizeof(DacpThreadData));
threadData->corThreadId = thread->m_ThreadId;
- threadData->osThreadId = thread->m_OSThreadId;
+ threadData->osThreadId = (DWORD)thread->m_OSThreadId;
threadData->state = thread->m_State;
threadData->preemptiveGCDisabled = thread->m_fPreemptiveGCDisabled;
threadData->allocContextPtr = TO_CDADDR(thread->m_alloc_context.alloc_ptr);
// EventPipe
//
RETAIL_CONFIG_DWORD_INFO(INTERNAL_EnableEventPipe, W("EnableEventPipe"), 0, "Enable/disable event pipe. Non-zero values enable tracing.")
+RETAIL_CONFIG_DWORD_INFO(INTERNAL_EventPipeNetTraceFormat, W("EventPipeNetTraceFormat"), 0, "Enable/disable using the newer nettrace file format.")
RETAIL_CONFIG_STRING_INFO(INTERNAL_EventPipeOutputPath, W("EventPipeOutputPath"), "The full path excluding file name for the trace file that will be written when COMPlus_EnableEventPipe=1")
RETAIL_CONFIG_STRING_INFO(INTERNAL_EventPipeConfig, W("EventPipeConfig"), "Configuration for EventPipe.")
RETAIL_CONFIG_DWORD_INFO(INTERNAL_EventPipeRundown, W("EventPipeRundown"), 1, "Enable/disable eventpipe rundown.")
OUT LPDWORD lpThreadId);
PALIMPORT
+HANDLE
+PALAPI
+PAL_CreateThread64(
+ IN LPSECURITY_ATTRIBUTES lpThreadAttributes,
+ IN DWORD dwStackSize,
+ IN LPTHREAD_START_ROUTINE lpStartAddress,
+ IN LPVOID lpParameter,
+ IN DWORD dwCreationFlags,
+ OUT SIZE_T* pThreadId);
+
+PALIMPORT
PAL_NORETURN
VOID
PALAPI
LPVOID lpParameter,
DWORD dwCreationFlags,
PalThreadType eThreadType,
- LPDWORD lpThreadId,
+ SIZE_T* pThreadId,
HANDLE *phThread
);
LPVOID,
DWORD,
PalThreadType,
- LPDWORD,
+ SIZE_T*,
HANDLE*
);
}
HANDLE hWorkerThread = NULL;
+ SIZE_T osThreadId = 0;
palErr = InternalCreateThread(pthrCurrent,
NULL,
0,
(PVOID)pSynchManager,
0,
PalWorkerThread,
- &pSynchManager->m_dwWorkerThreadTid,
+ &osThreadId,
&hWorkerThread);
if (NO_ERROR == palErr)
{
+ pSynchManager->m_dwWorkerThreadTid = (DWORD)osThreadId;
palErr = InternalGetThreadDataFromHandle(pthrCurrent,
hWorkerThread,
0,
PAL_ERROR pe = NO_ERROR;
BOOL ret;
UnambiguousProcessDescriptor unambiguousProcessDescriptor;
+ SIZE_T osThreadId = 0;
#ifdef __APPLE__
if (lpApplicationGroupId != NULL)
// Add a reference for the thread handler
AddRef();
-
pe = InternalCreateThread(
pThread,
NULL,
this,
0,
UserCreatedThread,
- &m_threadId,
+ &osThreadId,
&m_threadHandle);
if (NO_ERROR != pe)
Release();
goto exit;
}
-
+ m_threadId = (DWORD)osThreadId;
exit:
return pe;
}
dwCreationFlags, lpThreadId);
pThread = InternalGetCurrentThread();
-
+ SIZE_T osThreadId = 0;
palError = InternalCreateThread(
pThread,
lpThreadAttributes,
lpParameter,
dwCreationFlags,
UserCreatedThread,
- lpThreadId,
+ &osThreadId,
&hNewThread
);
{
pThread->SetLastError(palError);
}
-
+ if(lpThreadId != nullptr)
+ {
+ *lpThreadId = (DWORD)osThreadId;
+ }
LOGEXIT("CreateThread returns HANDLE %p\n", hNewThread);
PERF_EXIT(CreateThread);
return hNewThread;
}
+/*++
+Function:
+ PAL_CreateThread64
+ Similar to CreateThread but passes out a 64 bit thread id on platforms which use one.
+
+Note:
+ lpThreadAttributes could be ignored.
+
+See MSDN doc.
+
+--*/
+HANDLE
+PALAPI
+PAL_CreateThread64(
+ IN LPSECURITY_ATTRIBUTES lpThreadAttributes,
+ IN DWORD dwStackSize,
+ IN LPTHREAD_START_ROUTINE lpStartAddress,
+ IN LPVOID lpParameter,
+ IN DWORD dwCreationFlags,
+ OUT SIZE_T* pThreadId)
+{
+ PAL_ERROR palError;
+ CPalThread *pThread;
+ HANDLE hNewThread = NULL;
+
+ PERF_ENTRY(PAL_CreateThread64);
+ ENTRY("PAL_CreateThread64(lpThreadAttr=%p, dwStackSize=%u, lpStartAddress=%p, "
+ "lpParameter=%p, dwFlags=%#x, pThreadId=%p)\n",
+ lpThreadAttributes, dwStackSize, lpStartAddress, lpParameter,
+ dwCreationFlags, pThreadId);
+
+ pThread = InternalGetCurrentThread();
+ // InternalCreateThread tolerates a NULL pThreadId, so it is forwarded as-is.
+ palError = InternalCreateThread(
+ pThread,
+ lpThreadAttributes,
+ dwStackSize,
+ lpStartAddress,
+ lpParameter,
+ dwCreationFlags,
+ UserCreatedThread,
+ pThreadId,
+ &hNewThread
+ );
+
+ if (NO_ERROR != palError)
+ {
+ // Failure is reported via last-error; hNewThread stays NULL.
+ pThread->SetLastError(palError);
+ }
+
+ LOGEXIT("PAL_CreateThread64 returns HANDLE %p\n", hNewThread);
+ PERF_EXIT(PAL_CreateThread64);
+
+ return hNewThread;
+}
+
PAL_ERROR
CorUnix::InternalCreateThread(
CPalThread *pThread,
LPVOID lpParameter,
DWORD dwCreationFlags,
PalThreadType eThreadType,
- LPDWORD lpThreadId,
+ SIZE_T* pThreadId,
HANDLE *phThread
)
{
//
*phThread = hNewThread;
- if (NULL != lpThreadId)
+ if (NULL != pThreadId)
{
- *lpThreadId = pNewThread->GetThreadId();
+ *pThreadId = pNewThread->GetThreadId();
}
}
else
eventpipemetadatagenerator.cpp
eventpipeprotocolhelper.cpp
eventpipeprovider.cpp
+ eventpipethread.cpp
eventpipebuffer.cpp
eventpipebuffermanager.cpp
eventpipesession.cpp
const EventPipeProviderConfiguration *pProviders,
uint32_t numProviders,
EventPipeSessionType sessionType,
+ EventPipeSerializationFormat format,
IpcStream *const pStream)
{
CONTRACTL
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
+ PRECONDITION(format < EventPipeSerializationFormat::Count);
PRECONDITION(circularBufferSizeInMB > 0);
PRECONDITION(numProviders > 0 && pProviders != nullptr);
}
strOutputPath,
pStream,
sessionType,
+ format,
circularBufferSizeInMB,
pProviders,
numProviders);
return;
}
+ // Write a final sequence point to the file now that all events have
+ // been emitted
+ pSession->WriteSequencePointUnbuffered();
+
// Remove the session.
s_config.DeleteSession(pSession);
// as opposed a a buffer copy here
EventPipeEventInstance instance(
event,
- pThread->GetOSThreadId(),
+ pEventPipeThread->GetOSThreadId(),
pData,
payload.GetSize(),
pActivityId,
{
_ASSERTE(pRundownSession != nullptr);
if (pRundownSession != nullptr)
- pRundownSession->WriteEvent(instance);
+ pRundownSession->WriteEventUnbuffered(instance, pEventPipeThread);
}
EX_CATCH {}
EX_END_CATCH(SwallowAllExceptions);
// allowed to set s_pSessions[i] = NULL at any time and that may have occured in between
// the check and the load
if (pSession != nullptr)
- pSession->WriteEvent(
+ pSession->WriteEventBuffered(
pThread,
event,
payload,
class EventPipeSession;
class IpcStream;
enum class EventPipeSessionType;
+enum class EventPipeSerializationFormat;
enum class EventPipeEventLevel
{
friend class EventPipeProvider;
public:
+ static const uint32_t MaxNumberOfSessions = 64;
+
// Initialize the event pipe.
static void Initialize();
const EventPipeProviderConfiguration *pProviders,
uint32_t numProviders,
EventPipeSessionType sessionType,
+ EventPipeSerializationFormat format,
IpcStream *const pStream);
// Disable tracing via the event pipe.
static CrstStatic s_configCrst;
static Volatile<bool> s_tracingInitialized;
static EventPipeConfiguration s_config;
- static const uint32_t MaxNumberOfSessions = 64;
static VolatilePtr<EventPipeSession> s_pSessions[MaxNumberOfSessions];
static EventPipeEventSource *s_pEventSource;
static HANDLE s_fileSwitchTimerHandle;
#include "fastserializableobject.h"
#include "fastserializer.h"
+// Including limits.h was hitting missing-header errors on Linux. That might
+// be resolvable with more effort, but rather than chase it down we simply
+// define the two constants needed here:
+#ifndef LLONG_MIN
+#define LLONG_MIN 0x8000000000000000
+#endif
+#ifndef LLONG_MAX
+#define LLONG_MAX 0x7FFFFFFFFFFFFFFF
+#endif
+
#ifdef FEATURE_PERFTRACING
-EventPipeBlock::EventPipeBlock(unsigned int maxBlockSize) :
- FastSerializableObject(1, 0)
+
+
+// Maps a serialization format to the block version number stamped on
+// every serialized block of that format.
+DWORD GetBlockVersion(EventPipeSerializationFormat format)
+{
+ LIMITED_METHOD_CONTRACT;
+ if (format == EventPipeSerializationFormat::NetPerfV3)
+ {
+ return 1;
+ }
+ if (format == EventPipeSerializationFormat::NetTraceV4)
+ {
+ return 2;
+ }
+ _ASSERTE(!"Unrecognized EventPipeSerializationFormat");
+ return 0;
+}
+
+// Maps a serialization format to the minimum reader version required
+// to understand blocks written in that format.
+DWORD GetBlockMinVersion(EventPipeSerializationFormat format)
+{
+ LIMITED_METHOD_CONTRACT;
+ if (format == EventPipeSerializationFormat::NetPerfV3)
+ {
+ return 0;
+ }
+ if (format == EventPipeSerializationFormat::NetTraceV4)
+ {
+ return 2;
+ }
+ _ASSERTE(!"Unrecognized EventPipeSerializationFormat");
+ return 0;
+}
+
+EventPipeBlock::EventPipeBlock(unsigned int maxBlockSize, EventPipeSerializationFormat format) :
+ FastSerializableObject(GetBlockVersion(format), GetBlockMinVersion(format), format >= EventPipeSerializationFormat::NetTraceV4)
{
CONTRACTL
{
memset(m_pBlock, 0, maxBlockSize);
m_pWritePointer = m_pBlock;
m_pEndOfTheBuffer = m_pBlock + maxBlockSize;
+ m_format = format;
}
EventPipeBlock::~EventPipeBlock()
delete[] m_pBlock;
}
-bool EventPipeBlock::WriteEvent(EventPipeEventInstance &instance)
+void EventPipeBlock::Clear()
{
CONTRACTL
{
if (m_pBlock == NULL)
{
- return false;
+ return;
}
- unsigned int totalSize = instance.GetAlignedTotalSize();
- if (m_pWritePointer + totalSize >= m_pEndOfTheBuffer)
+ _ASSERTE(m_pWritePointer <= m_pEndOfTheBuffer);
+
+ memset(m_pBlock, 0, GetSize());
+ m_pWritePointer = m_pBlock;
+}
+
+// Base constructor for event-carrying blocks; sets up the underlying
+// buffer and resets the header-compression state via Clear().
+EventPipeEventBlockBase::EventPipeEventBlockBase(unsigned int maxBlockSize, EventPipeSerializationFormat format, bool fUseHeaderCompression) :
+ EventPipeBlock(maxBlockSize, format), m_fUseHeaderCompression(fUseHeaderCompression)
+{
+ // Use sizeof instead of repeating the literal array length declared in the
+ // header, so the two can never drift out of sync.
+ memset(m_compressedHeader, 0, sizeof(m_compressedHeader));
+ Clear();
+}
+
+// Resets the block to empty and re-initializes the delta-compression
+// reference header so the next event is encoded against a zeroed baseline.
+void EventPipeEventBlockBase::Clear()
+{
+ EventPipeBlock::Clear();
+ m_lastHeader.MetadataId = 0;
+ m_lastHeader.SequenceNumber = 0;
+ m_lastHeader.ThreadId = 0;
+ m_lastHeader.CaptureThreadId = 0;
+ m_lastHeader.StackId = 0;
+ m_lastHeader.TimeStamp.QuadPart = 0;
+ m_lastHeader.ActivityId = { 0 };
+ m_lastHeader.RelatedActivityId = { 0 };
+ m_lastHeader.DataLength = 0;
+
+ // Opposite extremes so the first event written establishes both bounds.
+ m_minTimeStamp.QuadPart = LLONG_MAX;
+ m_maxTimeStamp.QuadPart = LLONG_MIN;
+}
+
+// Emits 'value' in variable-length (LEB128-style) encoding: 7 bits per
+// byte, high bit set on every byte except the last. Advances the pointer
+// past the bytes written.
+void WriteVarUInt32(BYTE* & pWritePointer, unsigned int value)
+{
+ for (;;)
+ {
+ BYTE chunk = (BYTE)(value & 0x7F);
+ value >>= 7;
+ if (value != 0)
+ {
+ chunk |= 0x80; // more bytes follow
+ }
+ *pWritePointer++ = chunk;
+ if (value == 0)
+ {
+ break;
+ }
+ }
+}
+
+// 64-bit counterpart of WriteVarUInt32: variable-length (LEB128-style)
+// encoding, 7 bits per byte, high bit marks continuation. Advances the
+// pointer past the bytes written.
+void WriteVarUInt64(BYTE* & pWritePointer, ULONGLONG value)
+{
+ for (;;)
+ {
+ BYTE chunk = (BYTE)(value & 0x7F);
+ value >>= 7;
+ if (value != 0)
+ {
+ chunk |= 0x80; // more bytes follow
+ }
+ *pWritePointer++ = chunk;
+ if (value == 0)
+ {
+ break;
+ }
+ }
+}
+
+// Appends one event to the block. Returns true on success; false means the
+// block is full (or has no storage) and the caller should flush and retry.
+bool EventPipeEventBlockBase::WriteEvent(EventPipeEventInstance &instance,
+ ULONGLONG captureThreadId,
+ unsigned int sequenceNumber,
+ DWORD stackId,
+ BOOL isSortedEvent)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(isSortedEvent || m_format >= EventPipeSerializationFormat::NetTraceV4);
+ }
+ CONTRACTL_END;
+
+ if (m_pBlock == NULL)
{
return false;
}
- BYTE* alignedEnd = m_pWritePointer + totalSize + sizeof(totalSize);
+ unsigned int dataLength = 0;
+ BYTE* alignedEnd = NULL;
+
+ // Uncompressed path: every header field is written at a fixed size.
+ if (!m_fUseHeaderCompression)
+ {
+ unsigned int totalSize = instance.GetAlignedTotalSize(m_format);
+ if (m_pWritePointer + totalSize >= m_pEndOfTheBuffer)
+ {
+ return false;
+ }
+
+ alignedEnd = m_pWritePointer + totalSize + sizeof(totalSize);
+
+ memcpy(m_pWritePointer, &totalSize, sizeof(totalSize));
+ m_pWritePointer += sizeof(totalSize);
+
+ unsigned int metadataId = instance.GetMetadataId();
+ // Top bit of the metadata id carries the "unsorted" flag on the wire.
+ _ASSERTE((metadataId & (1 << 31)) == 0);
+ metadataId |= (!isSortedEvent ? 1 << 31 : 0);
+ memcpy(m_pWritePointer, &metadataId, sizeof(metadataId));
+ m_pWritePointer += sizeof(metadataId);
+
+ if (m_format == EventPipeSerializationFormat::NetPerfV3)
+ {
+ DWORD threadId = instance.GetThreadId32();
+ memcpy(m_pWritePointer, &threadId, sizeof(threadId));
+ m_pWritePointer += sizeof(threadId);
+ }
+ else if (m_format == EventPipeSerializationFormat::NetTraceV4)
+ {
+ memcpy(m_pWritePointer, &sequenceNumber, sizeof(sequenceNumber));
+ m_pWritePointer += sizeof(sequenceNumber);
+
+ ULONGLONG threadId = instance.GetThreadId64();
+ memcpy(m_pWritePointer, &threadId, sizeof(threadId));
+ m_pWritePointer += sizeof(threadId);
+
+ memcpy(m_pWritePointer, &captureThreadId, sizeof(captureThreadId));
+ m_pWritePointer += sizeof(captureThreadId);
+
+ memcpy(m_pWritePointer, &stackId, sizeof(stackId));
+ m_pWritePointer += sizeof(stackId);
+ }
+
+ const LARGE_INTEGER* timeStamp = instance.GetTimeStamp();
+ memcpy(m_pWritePointer, timeStamp, sizeof(*timeStamp));
+ m_pWritePointer += sizeof(*timeStamp);
+
+ const GUID* activityId = instance.GetActivityId();
+ memcpy(m_pWritePointer, activityId, sizeof(*activityId));
+ m_pWritePointer += sizeof(*activityId);
+
+ const GUID* relatedActivityId = instance.GetRelatedActivityId();
+ memcpy(m_pWritePointer, relatedActivityId, sizeof(*relatedActivityId));
+ m_pWritePointer += sizeof(*relatedActivityId);
+
+ dataLength = instance.GetDataLength();
+ memcpy(m_pWritePointer, &dataLength, sizeof(dataLength));
+ m_pWritePointer += sizeof(dataLength);
+ }
+ else // using header compression
+ {
+ // Each header field is written only when it differs from the previous
+ // event's header (m_lastHeader); 'flags' records which fields follow.
+ BYTE flags = 0;
+ BYTE* pWritePointer = m_compressedHeader;
+
+ if (instance.GetMetadataId() != m_lastHeader.MetadataId)
+ {
+ WriteVarUInt32(pWritePointer, instance.GetMetadataId());
+ flags |= 1;
+ }
+ if (isSortedEvent)
+ {
+ flags |= (1 << 6);
+ }
+
+ if (m_lastHeader.SequenceNumber + (instance.GetMetadataId() != 0 ? 1 : 0) != sequenceNumber ||
+ m_lastHeader.CaptureThreadId != captureThreadId)
+ {
+ WriteVarUInt32(pWritePointer, sequenceNumber - m_lastHeader.SequenceNumber - 1);
+ WriteVarUInt64(pWritePointer, captureThreadId);
+ flags |= (1 << 1);
+ }
- memcpy(m_pWritePointer, &totalSize, sizeof(totalSize));
- m_pWritePointer += sizeof(totalSize);
+ if (m_lastHeader.ThreadId != instance.GetThreadId64())
+ {
+ WriteVarUInt64(pWritePointer, instance.GetThreadId64());
+ flags |= (1 << 2);
+ }
- unsigned int metadataId = instance.GetMetadataId();
- memcpy(m_pWritePointer, &metadataId, sizeof(metadataId));
- m_pWritePointer += sizeof(metadataId);
+ if (m_lastHeader.StackId != stackId)
+ {
+ WriteVarUInt32(pWritePointer, stackId);
+ flags |= (1 << 3);
+ }
- DWORD threadId = instance.GetThreadId();
- memcpy(m_pWritePointer, &threadId, sizeof(threadId));
- m_pWritePointer += sizeof(threadId);
+ // Timestamp is always present, encoded as a delta from the last event.
+ const LARGE_INTEGER* timeStamp = instance.GetTimeStamp();
+ WriteVarUInt64(pWritePointer, timeStamp->QuadPart - m_lastHeader.TimeStamp.QuadPart);
- const LARGE_INTEGER* timeStamp = instance.GetTimeStamp();
- memcpy(m_pWritePointer, timeStamp, sizeof(*timeStamp));
- m_pWritePointer += sizeof(*timeStamp);
+ if (memcmp(&m_lastHeader.ActivityId, instance.GetActivityId(), sizeof(GUID)) != 0)
+ {
+ memcpy(pWritePointer, instance.GetActivityId(), sizeof(GUID));
+ pWritePointer += sizeof(GUID);
+ flags |= (1 << 4);
+ }
- const GUID* activityId = instance.GetActivityId();
- memcpy(m_pWritePointer, activityId, sizeof(*activityId));
- m_pWritePointer += sizeof(*activityId);
+ if (memcmp(&m_lastHeader.RelatedActivityId, instance.GetRelatedActivityId(), sizeof(GUID)) != 0)
+ {
+ memcpy(pWritePointer, instance.GetRelatedActivityId(), sizeof(GUID));
+ pWritePointer += sizeof(GUID);
+ flags |= (1 << 5);
+ }
- unsigned int dataLength = instance.GetDataLength();
- memcpy(m_pWritePointer, &dataLength, sizeof(dataLength));
- m_pWritePointer += sizeof(dataLength);
+ dataLength = instance.GetDataLength();
+ if (m_lastHeader.DataLength != dataLength)
+ {
+ WriteVarUInt32(pWritePointer, dataLength);
+ flags |= (1 << 7);
+ }
+ // Total on-wire size: 1 flags byte + compressed header + payload.
+ unsigned int bytesWritten = (unsigned int)(pWritePointer - m_compressedHeader);
+ unsigned int totalSize = 1 + bytesWritten + dataLength;
+ if (m_pWritePointer + totalSize >= m_pEndOfTheBuffer)
+ {
+ return false;
+ }
+
+ // Only commit the reference header once the capacity check has passed.
+ m_lastHeader.MetadataId = instance.GetMetadataId();
+ m_lastHeader.SequenceNumber = sequenceNumber;
+ m_lastHeader.ThreadId = instance.GetThreadId64();
+ m_lastHeader.CaptureThreadId = captureThreadId;
+ m_lastHeader.StackId = stackId;
+ m_lastHeader.TimeStamp.QuadPart = timeStamp->QuadPart;
+ memcpy(&m_lastHeader.ActivityId, instance.GetActivityId(), sizeof(GUID));
+ memcpy(&m_lastHeader.RelatedActivityId, instance.GetRelatedActivityId(), sizeof(GUID));
+ m_lastHeader.DataLength = dataLength;
+
+ alignedEnd = m_pWritePointer + totalSize;
+ *m_pWritePointer = flags;
+ m_pWritePointer++;
+ memcpy(m_pWritePointer, m_compressedHeader, bytesWritten);
+ m_pWritePointer += bytesWritten;
+ }
if (dataLength > 0)
{
m_pWritePointer += dataLength;
}
- unsigned int stackSize = instance.GetStackSize();
- memcpy(m_pWritePointer, &stackSize, sizeof(stackSize));
- m_pWritePointer += sizeof(stackSize);
-
- if (stackSize > 0)
+ if (m_format == EventPipeSerializationFormat::NetPerfV3)
{
- memcpy(m_pWritePointer, instance.GetStack(), stackSize);
- m_pWritePointer += stackSize;
+ unsigned int stackSize = instance.GetStackSize();
+ memcpy(m_pWritePointer, &stackSize, sizeof(stackSize));
+ m_pWritePointer += sizeof(stackSize);
+
+ if (stackSize > 0)
+ {
+ memcpy(m_pWritePointer, instance.GetStack(), stackSize);
+ m_pWritePointer += stackSize;
+ }
}
while (m_pWritePointer < alignedEnd)
{
*m_pWritePointer++ = (BYTE)0; // put padding at the end to get 4 bytes alignment of the payload
}
+ _ASSERTE(m_pWritePointer == alignedEnd);
+
+ // Track the min/max timestamps across the block; they are emitted in the
+ // block header by SerializeHeader.
+ if (m_minTimeStamp.QuadPart > instance.GetTimeStamp()->QuadPart)
+ {
+ m_minTimeStamp.QuadPart = instance.GetTimeStamp()->QuadPart;
+ }
+ if (m_maxTimeStamp.QuadPart < instance.GetTimeStamp()->QuadPart)
+ {
+ m_maxTimeStamp.QuadPart = instance.GetTimeStamp()->QuadPart;
+ }
return true;
}
-void EventPipeBlock::Clear()
+// EventBlock: header compression is enabled only for NetTraceV4 and later.
+EventPipeEventBlock::EventPipeEventBlock(unsigned int maxBlockSize, EventPipeSerializationFormat format) :
+ EventPipeEventBlockBase(maxBlockSize, format, format >= EventPipeSerializationFormat::NetTraceV4)
+{}
+
+
+// MetadataBlock is only emitted in the NetTraceV4 format; the base class's
+// default header-compression argument applies.
+EventPipeMetadataBlock::EventPipeMetadataBlock(unsigned int maxBlockSize) :
+ EventPipeEventBlockBase(maxBlockSize, EventPipeSerializationFormat::NetTraceV4)
+{}
+
+// Computes the exact serialized size of a sequence point: timestamp,
+// thread count, then one (thread id, sequence number) pair per thread.
+// Used to size the block buffer so the constructor's writes fit exactly.
+unsigned int GetSequencePointBlockSize(EventPipeSequencePoint* pSequencePoint)
+{
+ const unsigned int sizeOfSequenceNumber =
+ sizeof(ULONGLONG) + // thread id
+ sizeof(unsigned int); // sequence number
+ return sizeof(pSequencePoint->TimeStamp) +
+ sizeof(unsigned int) + // thread count
+ pSequencePoint->ThreadSequenceNumbers.GetCount() * sizeOfSequenceNumber;
+}
+
+// Serializes the sequence point into the block at construction time.
+// The buffer is sized by GetSequencePointBlockSize, so these writes
+// cannot overflow it.
+EventPipeSequencePointBlock::EventPipeSequencePointBlock(EventPipeSequencePoint* pSequencePoint) :
+ EventPipeBlock(GetSequencePointBlockSize(pSequencePoint))
+{
+ // Timestamp of the sequence point.
+ const LARGE_INTEGER timeStamp = pSequencePoint->TimeStamp;
+ memcpy(m_pWritePointer, &timeStamp, sizeof(timeStamp));
+ m_pWritePointer += sizeof(timeStamp);
+
+ // Number of (thread id, sequence number) pairs that follow.
+ const unsigned int threadCount = pSequencePoint->ThreadSequenceNumbers.GetCount();
+ memcpy(m_pWritePointer, &threadCount, sizeof(threadCount));
+ m_pWritePointer += sizeof(threadCount);
+
+ for (ThreadSequenceNumberMap::Iterator pCur = pSequencePoint->ThreadSequenceNumbers.Begin();
+ pCur != pSequencePoint->ThreadSequenceNumbers.End();
+ pCur++)
+ {
+ const ULONGLONG threadId = pCur->Key()->GetThread()->GetOSThreadId();
+ memcpy(m_pWritePointer, &threadId, sizeof(threadId));
+ m_pWritePointer += sizeof(threadId);
+
+ const unsigned int sequenceNumber = pCur->Value();
+ memcpy(m_pWritePointer, &sequenceNumber, sizeof(sequenceNumber));
+ m_pWritePointer += sizeof(sequenceNumber);
+ }
+}
+
+// Stack block: starts empty; Clear() resets the interning index state.
+EventPipeStackBlock::EventPipeStackBlock(unsigned int maxBlockSize) :
+ EventPipeBlock(maxBlockSize)
+{
+ Clear();
+}
+
+// Resets the stack-interning bookkeeping (first stack id seen and count)
+// and then empties the underlying block buffer.
+void EventPipeStackBlock::Clear()
+{
+ m_hasInitialIndex = false;
+ m_initialIndex = 0;
+ m_count = 0;
+ EventPipeBlock::Clear();
+}
+
+bool EventPipeStackBlock::WriteStack(DWORD stackId, StackContents* pStack)
{
CONTRACTL
{
if (m_pBlock == NULL)
{
- return;
+ return false;
}
- memset(m_pBlock, 0, GetSize());
- m_pWritePointer = m_pBlock;
+ unsigned int stackSize = pStack->GetSize();
+ unsigned int totalSize = sizeof(stackSize) + stackSize;
+ if (m_pWritePointer + totalSize >= m_pEndOfTheBuffer)
+ {
+ return false;
+ }
+
+ if (!m_hasInitialIndex)
+ {
+ m_hasInitialIndex = true;
+ m_initialIndex = stackId;
+ }
+ m_count++;
+
+ memcpy(m_pWritePointer, &stackSize, sizeof(stackSize));
+ m_pWritePointer += sizeof(stackSize);
+
+ if (stackSize > 0)
+ {
+ memcpy(m_pWritePointer, pStack->GetPointer(), stackSize);
+ m_pWritePointer += stackSize;
+ }
+
+ return true;
}
+
#endif // FEATURE_PERFTRACING
#include "fastserializableobject.h"
#include "fastserializer.h"
-class EventPipeBlock final : public FastSerializableObject
+struct EventPipeSequencePoint;
+
+// The base class for all file blocks in the Nettrace file format
+// This class handles memory management to buffer the block data,
+// bookkeeping, block version numbers, and serializing the data
+// to the file with correct alignment.
+// Sub-classes decide the format of the block contents and how
+// the blocks are named.
+class EventPipeBlock : public FastSerializableObject
{
public:
- EventPipeBlock(unsigned int maxBlockSize);
+ EventPipeBlock(unsigned int maxBlockSize, EventPipeSerializationFormat format = EventPipeSerializationFormat::NetTraceV4);
~EventPipeBlock();
- // Write an event to the block.
- // Returns:
- // - true: The write succeeded.
- // - false: The write failed. In this case, the block should be considered full.
- bool WriteEvent(EventPipeEventInstance &instance);
+ virtual void Clear();
+
+ unsigned int GetBytesWritten() const
+ {
+ return m_pBlock == nullptr ? 0 : (unsigned int)(m_pWritePointer - m_pBlock);
+ }
- void Clear();
+ // The size of the header for this block, if any
+ virtual unsigned int GetHeaderSize()
+ {
+ return 0;
+ }
- const char *GetTypeName() override
+ // Write the header to the stream
+ virtual void SerializeHeader(FastSerializer *pSerializer)
{
- LIMITED_METHOD_CONTRACT;
- return "EventBlock";
}
void FastSerialize(FastSerializer *pSerializer) override
if (m_pBlock == NULL)
return;
- unsigned int eventsSize = (unsigned int)(m_pWritePointer - m_pBlock);
- pSerializer->WriteBuffer((BYTE *)&eventsSize, sizeof(eventsSize));
-
- if (eventsSize == 0)
- return;
+ unsigned int dataSize = GetBytesWritten();
+ // We shouldn't attempt to write blocks that have no data
+ _ASSERTE(dataSize != 0);
+ unsigned int headerSize = GetHeaderSize();
+ unsigned int totalSize = dataSize + headerSize;
+ pSerializer->WriteBuffer((BYTE *)&totalSize, sizeof(totalSize));
unsigned int requiredPadding = pSerializer->GetRequiredPadding();
if (requiredPadding != 0)
_ASSERTE(pSerializer->HasWriteErrors() || (pSerializer->GetRequiredPadding() == 0));
}
- pSerializer->WriteBuffer(m_pBlock, eventsSize);
+ SerializeHeader(pSerializer);
+ pSerializer->WriteBuffer(m_pBlock, dataSize);
}
-private:
+protected:
BYTE *m_pBlock;
BYTE *m_pWritePointer;
BYTE *m_pEndOfTheBuffer;
+ EventPipeSerializationFormat m_format;
unsigned int GetSize() const
{
}
};
+// Snapshot of the per-event header fields. EventPipeEventBlockBase keeps the
+// previously written event's header here so the next event can be encoded as
+// a delta against it (header compression).
+struct EventPipeEventHeader
+{
+ DWORD MetadataId;
+ DWORD SequenceNumber;
+ ULONGLONG ThreadId; // id of the thread that generated the event
+ ULONGLONG CaptureThreadId; // id of the thread that captured/flushed it
+ DWORD StackId; // interned stack index (see EventPipeStackBlock)
+ LARGE_INTEGER TimeStamp;
+ GUID ActivityId;
+ GUID RelatedActivityId;
+ DWORD DataLength; // payload size in bytes
+};
+
+// The base type for blocks that contain events (EventBlock and EventMetadataBlock)
+class EventPipeEventBlockBase : public EventPipeBlock
+{
+public:
+ EventPipeEventBlockBase(unsigned int maxBlockSize, EventPipeSerializationFormat format, bool fUseHeaderCompression = true);
+
+ void Clear() override;
+
+ // NetPerfV3 blocks have no header; NetTraceV4 blocks carry a small fixed
+ // header describing flags and the timestamp range of the contained events.
+ unsigned int GetHeaderSize() override
+ {
+ if(m_format == EventPipeSerializationFormat::NetPerfV3)
+ {
+ return 0;
+ }
+ else
+ {
+ return sizeof(unsigned short) + // header size
+ sizeof(unsigned short) + // flags
+ sizeof(LARGE_INTEGER) + // min timestamp
+ sizeof(LARGE_INTEGER); // max timestamp
+ }
+ }
+
+ void SerializeHeader(FastSerializer* pSerializer) override
+ {
+ if(m_format == EventPipeSerializationFormat::NetPerfV3)
+ {
+ return;
+ }
+ else
+ {
+ // GetHeaderSize() is small (20 bytes here), so the narrowing to
+ // unsigned short is safe.
+ const unsigned short headerSize = GetHeaderSize();
+ pSerializer->WriteBuffer((BYTE *)&headerSize, sizeof(headerSize));
+ const unsigned short flags = m_fUseHeaderCompression ? 1 : 0;
+ pSerializer->WriteBuffer((BYTE *)&flags, sizeof(flags));
+ pSerializer->WriteBuffer((BYTE *)&m_minTimeStamp, sizeof(m_minTimeStamp));
+ pSerializer->WriteBuffer((BYTE *)&m_maxTimeStamp, sizeof(m_maxTimeStamp));
+ }
+ }
+
+ // Write an event to the block.
+ // Returns:
+ // - true: The write succeeded.
+ // - false: The write failed. In this case, the block should be considered full.
+ bool WriteEvent(EventPipeEventInstance &instance, ULONGLONG captureThreadId, unsigned int sequenceNumber, DWORD stackId, BOOL isSortedEvent);
+
+private:
+ // Header of the previously written event; the delta-compression reference.
+ EventPipeEventHeader m_lastHeader;
+ // Scratch space for building a compressed header before copying it into the
+ // block; sized well above the worst-case compressed header length.
+ BYTE m_compressedHeader[100];
+ bool m_fUseHeaderCompression;
+ // Timestamp bounds across all events in the block, emitted by SerializeHeader.
+ LARGE_INTEGER m_minTimeStamp;
+ LARGE_INTEGER m_maxTimeStamp;
+};
+
+// A block containing serialized event instances ("EventBlock" on the wire).
+class EventPipeEventBlock : public EventPipeEventBlockBase
+{
+public:
+ EventPipeEventBlock(unsigned int maxBlockSize, EventPipeSerializationFormat format);
+
+ const char *GetTypeName() override
+ {
+ LIMITED_METHOD_CONTRACT;
+ return "EventBlock";
+ }
+};
+
+// A block containing event metadata definitions ("MetadataBlock" on the wire).
+class EventPipeMetadataBlock : public EventPipeEventBlockBase
+{
+public:
+ EventPipeMetadataBlock(unsigned int maxBlockSize);
+
+ const char *GetTypeName() override
+ {
+ LIMITED_METHOD_CONTRACT;
+ return "MetadataBlock";
+ }
+};
+
+// A block containing one sequence point ("SPBlock" on the wire); the block
+// contents are written entirely in the constructor.
+class EventPipeSequencePointBlock : public EventPipeBlock
+{
+public:
+ EventPipeSequencePointBlock(EventPipeSequencePoint* sequencePoint);
+
+ const char *GetTypeName() override
+ {
+ LIMITED_METHOD_CONTRACT;
+ return "SPBlock";
+ }
+};
+
+// The block that contains interned stacks
+class EventPipeStackBlock : public EventPipeBlock
+{
+public:
+ EventPipeStackBlock(unsigned int maxBlockSize);
+
+ // Header: the id of the first interned stack plus how many follow, so
+ // readers can map block positions back to stack ids.
+ unsigned int GetHeaderSize() override
+ {
+ return sizeof(unsigned int) + // start index
+ sizeof(unsigned int); // count of indices
+ }
+
+ void SerializeHeader(FastSerializer* pSerializer) override
+ {
+ pSerializer->WriteBuffer((BYTE *)&m_initialIndex, sizeof(m_initialIndex));
+ pSerializer->WriteBuffer((BYTE *)&m_count, sizeof(m_count));
+ }
+
+ void Clear() override;
+
+ // Write a stack to the block
+ // Returns:
+ // - true: The write succeeded.
+ // - false: The write failed. In this case, the block should be considered full.
+ bool WriteStack(DWORD stackId, StackContents* pStack);
+
+ const char *GetTypeName() override
+ {
+ LIMITED_METHOD_CONTRACT;
+ return "StackBlock";
+ }
+
+private:
+ bool m_hasInitialIndex; // true once the first stack has set m_initialIndex
+ unsigned int m_initialIndex; // id of the first stack written to this block
+ unsigned int m_count; // number of stacks written to this block
+};
+
#endif // FEATURE_PERFTRACING
#endif // __EVENTPIPE_BLOCK_H__
#ifdef FEATURE_PERFTRACING
-EventPipeBuffer::EventPipeBuffer(unsigned int bufferSize DEBUG_ARG(EventPipeThread *pWriterThread))
+EventPipeBuffer::EventPipeBuffer(unsigned int bufferSize, EventPipeThread* pWriterThread, unsigned int eventSequenceNumber)
{
CONTRACTL
{
}
CONTRACTL_END;
m_state = EventPipeBufferState::WRITABLE;
-#ifdef DEBUG
m_pWriterThread = pWriterThread;
-#endif
+ m_eventSequenceNumber = eventSequenceNumber;
m_pBuffer = new BYTE[bufferSize];
memset(m_pBuffer, 0, bufferSize);
m_pLimit = m_pBuffer + bufferSize;
QueryPerformanceCounter(&m_creationTimeStamp);
_ASSERTE(m_creationTimeStamp.QuadPart > 0);
- m_pLastPoppedEvent = NULL;
+ m_pCurrentReadEvent = NULL;
m_pPrevBuffer = NULL;
m_pNextBuffer = NULL;
}
EventPipeEventInstance *pInstance = new (m_pCurrent) EventPipeEventInstance(
event,
- (pThread == NULL) ? ::GetCurrentThreadId() : pThread->GetOSThreadId(),
+ (pThread == NULL) ?
+#ifdef FEATURE_PAL
+ ::PAL_GetCurrentOSThreadId()
+#else
+ ::GetCurrentThreadId()
+#endif
+ : pThread->GetOSThreadId64(),
pDataDest,
payload.GetSize(),
(pThread == NULL) ? NULL : pActivityId,
return m_creationTimeStamp;
}
-EventPipeEventInstance *EventPipeBuffer::GetNext(EventPipeEventInstance *pEvent, LARGE_INTEGER beforeTimeStamp)
+void EventPipeBuffer::MoveNextReadEvent()
{
CONTRACTL
{
CONTRACTL_END;
EventPipeEventInstance *pNextInstance = NULL;
- // If input is NULL, return the first event if there is one.
- if (pEvent == NULL)
- {
- // If this buffer contains an event, select it.
- BYTE *pFirstAlignedInstance = GetNextAlignedAddress(m_pBuffer);
- if (m_pCurrent > pFirstAlignedInstance)
- {
- pNextInstance = (EventPipeEventInstance *)pFirstAlignedInstance;
- }
- else
- {
- return NULL;
- }
- }
- else
+
+ // If m_pCurrentReadEvent is NULL we've reached the end of the events
+ if (m_pCurrentReadEvent != NULL)
{
// Confirm that pEvent is within the used range of the buffer.
- if (((BYTE *)pEvent < m_pBuffer) || ((BYTE *)pEvent >= m_pCurrent))
+ if (((BYTE*)m_pCurrentReadEvent < m_pBuffer) || ((BYTE*)m_pCurrentReadEvent >= m_pCurrent))
{
_ASSERT(!"Input pointer is out of range.");
- return NULL;
- }
-
- if (pEvent->GetData())
- {
- // We have a pointer within the bounds of the buffer.
- // Find the next event by skipping the current event with it's data payload immediately after the instance.
- pNextInstance = (EventPipeEventInstance *)GetNextAlignedAddress(const_cast<BYTE *>(pEvent->GetData() + pEvent->GetDataLength()));
+ m_pCurrentReadEvent = NULL;
}
else
{
- // In case we do not have a payload, the next instance is right after the current instance
- pNextInstance = (EventPipeEventInstance *)GetNextAlignedAddress((BYTE *)(pEvent + 1));
- }
-
- // Check to see if we've reached the end of the written portion of the buffer.
- if ((BYTE *)pNextInstance >= m_pCurrent)
- {
- return NULL;
+ if (m_pCurrentReadEvent->GetData())
+ {
+ // We have a pointer within the bounds of the buffer.
+ // Find the next event by skipping the current event with it's data payload immediately after the instance.
+ m_pCurrentReadEvent = (EventPipeEventInstance *)GetNextAlignedAddress(const_cast<BYTE *>(m_pCurrentReadEvent->GetData() + m_pCurrentReadEvent->GetDataLength()));
+ }
+ else
+ {
+ // In case we do not have a payload, the next instance is right after the current instance
+ m_pCurrentReadEvent = (EventPipeEventInstance*)GetNextAlignedAddress((BYTE*)(m_pCurrentReadEvent + 1));
+ }
+ // this may roll over and that is fine
+ m_eventSequenceNumber++;
+
+ // Check to see if we've reached the end of the written portion of the buffer.
+ if ((BYTE*)m_pCurrentReadEvent >= m_pCurrent)
+ {
+ m_pCurrentReadEvent = NULL;
+ }
}
}
// Ensure that the timestamp is valid. The buffer is zero'd before use, so a zero timestamp is invalid.
- LARGE_INTEGER nextTimeStamp = *pNextInstance->GetTimeStamp();
- if (nextTimeStamp.QuadPart == 0)
- {
- return NULL;
- }
-
- // Ensure that the timestamp is earlier than the beforeTimeStamp.
- if (nextTimeStamp.QuadPart >= beforeTimeStamp.QuadPart)
+#ifdef _DEBUG
+ if (m_pCurrentReadEvent != NULL)
{
- return NULL;
+ // Debug-only sanity check: the buffer is zero-initialized before use, so a
+ // zero timestamp here means the read cursor does not point at a real event.
+ LARGE_INTEGER nextTimeStamp = *m_pCurrentReadEvent->GetTimeStamp();
+ _ASSERTE(nextTimeStamp.QuadPart != 0);
}
-
- return pNextInstance;
+#endif
}
-EventPipeEventInstance *EventPipeBuffer::PeekNext(LARGE_INTEGER beforeTimeStamp)
+// Returns the event under the read cursor, or NULL if the cursor has moved past
+// the last event (or the buffer contained no events when made readable).
+EventPipeEventInstance* EventPipeBuffer::GetCurrentReadEvent()
{
CONTRACTL
{
}
CONTRACTL_END;
- // Get the next event using the last popped event as a marker.
- return GetNext(m_pLastPoppedEvent, beforeTimeStamp);
+ return m_pCurrentReadEvent;
}
-void EventPipeBuffer::PopNext(EventPipeEventInstance *pNext)
+// Returns the sequence number of the event under the read cursor
+// (see m_eventSequenceNumber, incremented by MoveNextReadEvent).
+unsigned int EventPipeBuffer::GetCurrentSequenceNumber()
{
CONTRACTL
{
}
CONTRACTL_END;
- if (pNext != NULL)
- m_pLastPoppedEvent = pNext;
+ return m_eventSequenceNumber;
+}
+
+// Returns the thread that is (or was) allowed to write into this buffer.
+EventPipeThread* EventPipeBuffer::GetWriterThread()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pWriterThread;
+}
EventPipeBufferState EventPipeBuffer::GetVolatileState()
{
LIMITED_METHOD_CONTRACT;
_ASSERTE(m_pWriterThread->GetLock()->OwnedByCurrentThread());
+ _ASSERTE(m_pCurrentReadEvent == NULL);
m_state.Store(EventPipeBufferState::READ_ONLY);
+
+ // If this buffer contains an event, select it.
+ BYTE *pFirstAlignedInstance = GetNextAlignedAddress(m_pBuffer);
+ if (m_pCurrent > pFirstAlignedInstance)
+ {
+ m_pCurrentReadEvent = (EventPipeEventInstance*)pFirstAlignedInstance;
+ }
+ else
+ {
+ m_pCurrentReadEvent = NULL;
+ }
}
#ifdef _DEBUG
Volatile<EventPipeBufferState> m_state;
// Thread that is/was allowed to write into this buffer when m_state == WRITABLE
-#ifdef DEBUG
EventPipeThread* m_pWriterThread;
-#endif
+
+ // The sequence number corresponding to m_pCurrentReadEvent
+ // Prior to read iteration it is the sequence number of the first event in the buffer
+ unsigned int m_eventSequenceNumber;
// A pointer to the actual buffer.
BYTE *m_pBuffer;
// timestamp >= this one. If not then all bets are off.
LARGE_INTEGER m_creationTimeStamp;
- // Used by PopNext as input to GetNext.
- // If NULL, no events have been popped.
- // The event will still remain in the buffer after it is popped, but PopNext will not return it again.
- EventPipeEventInstance *m_pLastPoppedEvent;
+ // Pointer to the current event being read
+ EventPipeEventInstance *m_pCurrentReadEvent;
// Each buffer will become part of a per-thread linked list of buffers.
// The linked list is invasive, thus we declare the pointers here.
public:
- EventPipeBuffer(unsigned int bufferSize DEBUG_ARG(EventPipeThread* pWriterThread));
+ EventPipeBuffer(unsigned int bufferSize, EventPipeThread* pWriterThread, unsigned int eventSequenceNumber);
~EventPipeBuffer();
// Write an event to the buffer.
// Get the timestamp the buffer was created.
LARGE_INTEGER GetCreationTimeStamp() const;
- // Get the next event from the buffer as long as it is before the specified timestamp.
- // Input of NULL gets the first event.
- EventPipeEventInstance* GetNext(EventPipeEventInstance *pEvent, LARGE_INTEGER beforeTimeStamp);
+ // Advances read cursor to the next event or NULL if there aren't any more. When the
+ // buffer is first made readable the cursor is automatically positioned on the first
+ // event or NULL if there are no events in the buffer.
+ void MoveNextReadEvent();
+
+ // Returns the event at the current read cursor. The returned event pointer is valid
+ // until the buffer is deleted.
+ EventPipeEventInstance* GetCurrentReadEvent();
- // Get the next event from the buffer
- EventPipeEventInstance* PeekNext(LARGE_INTEGER beforeTimeStamp);
+ // Gets the sequence number of the event corresponding to GetCurrentReadEvent();
+ unsigned int GetCurrentSequenceNumber();
- // Advance the buffer to the next event
- // pEvent is expected to be the last event returned from PeekNext()
- void PopNext(EventPipeEventInstance *pEvent);
+ // Get the thread that is (or was) assigned to write to this buffer
+ EventPipeThread* GetWriterThread();
// Check the state of the buffer
EventPipeBufferState GetVolatileState();
#include "eventpipebuffer.h"
#include "eventpipebuffermanager.h"
#include "eventpipefile.h"
+#include "eventpipethread.h"
#include "eventpipesession.h"
-#ifdef FEATURE_PERFTRACING
-
-void ReleaseEventPipeThreadRef(EventPipeThread *pThread)
-{
- LIMITED_METHOD_CONTRACT;
- pThread->Release();
-}
-
-void AcquireEventPipeThreadRef(EventPipeThread *pThread)
-{
- LIMITED_METHOD_CONTRACT;
- pThread->AddRef();
-}
-
-EVENTPIPE_THREAD_LOCAL EventPipeThreadHolder EventPipeThread::gCurrentEventPipeThreadHolder;
-
-EventPipeThread::EventPipeThread() : m_writingEventInProgress(UINT64_MAX)
-{
- CONTRACTL
- {
- THROWS;
- GC_NOTRIGGER;
- MODE_ANY;
- }
- CONTRACTL_END;
-
- m_lock.Init(LOCK_TYPE_DEFAULT);
- m_refCount = 0;
-
- m_pWriteBuffers = new EventPipeWriteBuffers();
- m_pBufferLists = new EventPipeBufferLists();
-}
-
-EventPipeThread::~EventPipeThread()
-{
- LIMITED_METHOD_CONTRACT;
- _ASSERTE(AllValuesAreNull(*m_pWriteBuffers));
- _ASSERTE(AllValuesAreNull(*m_pBufferLists));
-
- delete m_pWriteBuffers;
- delete m_pBufferLists;
-}
-
-/*static */ EventPipeThread *EventPipeThread::Get()
-{
- LIMITED_METHOD_CONTRACT;
- return gCurrentEventPipeThreadHolder;
-}
-
-/*static */ EventPipeThread *EventPipeThread::GetOrCreate()
-{
- CONTRACTL
- {
- NOTHROW;
- GC_NOTRIGGER;
- MODE_ANY;
- }
- CONTRACTL_END;
-
- if (gCurrentEventPipeThreadHolder == nullptr)
- {
- EX_TRY
- {
- gCurrentEventPipeThreadHolder = new EventPipeThread();
- }
- EX_CATCH
- {
- }
- EX_END_CATCH(SwallowAllExceptions);
- }
- return gCurrentEventPipeThreadHolder;
-}
-
-/*static */ void EventPipeThread::Set(EventPipeThread *pThread)
-{
- LIMITED_METHOD_CONTRACT;
- gCurrentEventPipeThreadHolder = pThread;
-}
-
-void EventPipeThread::AddRef()
-{
- LIMITED_METHOD_CONTRACT;
- FastInterlockIncrement(&m_refCount);
-}
-
-void EventPipeThread::Release()
-{
- LIMITED_METHOD_CONTRACT;
- if (FastInterlockDecrement(&m_refCount) == 0)
- {
- // https://isocpp.org/wiki/faq/freestore-mgmt#delete-this
- // As long as you're careful, it's okay (not evil) for an object to commit suicide (delete this).
- delete this;
- }
-}
-
-SpinLock *EventPipeThread::GetLock()
-{
- LIMITED_METHOD_CONTRACT;
- return &m_lock;
-}
-
-EventPipeBuffer *EventPipeThread::GetWriteBuffer(EventPipeBufferManager *pBufferManager)
-{
- LIMITED_METHOD_CONTRACT;
- _ASSERTE(m_lock.OwnedByCurrentThread());
- _ASSERTE(pBufferManager != nullptr);
-
- EventPipeBuffer *pWriteBuffer = nullptr;
- m_pWriteBuffers->Lookup(pBufferManager, &pWriteBuffer);
- _ASSERTE((pWriteBuffer == nullptr) || (pWriteBuffer->GetVolatileState() == EventPipeBufferState::WRITABLE));
- return pWriteBuffer;
-}
-
-void EventPipeThread::SetWriteBuffer(EventPipeBufferManager *pBufferManager, EventPipeBuffer *pNewBuffer)
-{
- LIMITED_METHOD_CONTRACT;
- _ASSERTE(m_lock.OwnedByCurrentThread());
- _ASSERTE(pBufferManager != nullptr);
- _ASSERTE((pNewBuffer == nullptr) || pNewBuffer->GetVolatileState() == EventPipeBufferState::WRITABLE);
- EventPipeBuffer *pWriteBuffer = nullptr;
- if (m_pWriteBuffers->Lookup(pBufferManager, &pWriteBuffer))
- {
- _ASSERTE((pWriteBuffer == nullptr) || (pWriteBuffer->GetVolatileState() == EventPipeBufferState::WRITABLE));
- if (pWriteBuffer != nullptr)
- pWriteBuffer->ConvertToReadOnly();
- m_pWriteBuffers->Remove(pBufferManager);
- }
-
- EX_TRY
- {
- m_pWriteBuffers->Add(pBufferManager, pNewBuffer);
- }
- EX_CATCH
- {
- }
- EX_END_CATCH(SwallowAllExceptions);
-}
-
-EventPipeBufferList *EventPipeThread::GetBufferList(EventPipeBufferManager *pBufferManager)
-{
- LIMITED_METHOD_CONTRACT;
- _ASSERTE(pBufferManager != nullptr);
- _ASSERTE(pBufferManager->IsLockOwnedByCurrentThread());
-
- EventPipeBufferList *pBufferList = nullptr;
- m_pBufferLists->Lookup(pBufferManager, &pBufferList);
- return pBufferList;
-}
+#ifdef FEATURE_PERFTRACING
-void EventPipeThread::SetBufferList(EventPipeBufferManager *pBufferManager, EventPipeBufferList *pNewBufferList)
+// Clamps value to the inclusive range [min, max].
+template <typename T>
+T Clamp(T min, T value, T max)
{
- LIMITED_METHOD_CONTRACT;
- _ASSERTE(pBufferManager != nullptr);
- _ASSERTE(pBufferManager->IsLockOwnedByCurrentThread());
-
- EventPipeBufferList *pBufferList = nullptr;
- if (m_pBufferLists->Lookup(pBufferManager, &pBufferList))
- m_pBufferLists->Remove(pBufferManager);
-
- EX_TRY
- {
- m_pBufferLists->Add(pBufferManager, pNewBufferList);
- }
- EX_CATCH
- {
- }
- EX_END_CATCH(SwallowAllExceptions);
+ STATIC_CONTRACT_LEAF;
+ return Min(Max(min, value), max);
}
-void EventPipeThread::Remove(EventPipeBufferManager *pBufferManager)
+EventPipeBufferManager::EventPipeBufferManager(EventPipeSession* pSession, size_t maxSizeOfAllBuffers, size_t sequencePointAllocationBudget)
{
CONTRACTL
{
THROWS;
GC_NOTRIGGER;
MODE_ANY;
- PRECONDITION(pBufferManager != nullptr);
}
CONTRACTL_END;
- if (pBufferManager == nullptr)
- return;
-
- EventPipeBufferList *pBufferList = nullptr;
- if (m_pBufferLists->Lookup(pBufferManager, &pBufferList))
- m_pBufferLists->Remove(pBufferManager);
-}
-
-EventPipeBufferManager::EventPipeBufferManager()
-{
- CONTRACTL
- {
- THROWS;
- GC_NOTRIGGER;
- MODE_ANY;
- }
- CONTRACTL_END;
-
- m_pPerThreadBufferList = new SList<SListElem<EventPipeBufferList *>>();
+ m_pSession = pSession;
+ m_pThreadSessionStateList = new SList<SListElem<EventPipeThreadSessionState *>>();
m_sizeOfAllBuffers = 0;
m_lock.Init(LOCK_TYPE_DEFAULT);
m_writeEventSuspending = FALSE;
m_numEventsDropped = 0;
m_numEventsWritten = 0;
#endif // _DEBUG
+
+ // Read-cursor state; populated by MoveNextEventAnyThread/MoveNextEventSameThread.
+ m_pCurrentEvent = nullptr;
+ m_pCurrentBuffer = nullptr;
+ m_pCurrentBufferList = nullptr;
+
+ // Clamp total buffer space to [100KB, ULONG_MAX] regardless of caller input.
+ m_maxSizeOfAllBuffers = Clamp((size_t)100 * 1024, maxSizeOfAllBuffers, (size_t)ULONG_MAX);
+
+ if (sequencePointAllocationBudget == 0)
+ {
+ // sequence points disabled
+ m_sequencePointAllocationBudget = 0;
+ m_remainingSequencePointAllocationBudget = 0;
+ }
+ else
+ {
+ // A non-zero budget is clamped to [1MB, 1GB] of buffer allocation per sequence point.
+ m_sequencePointAllocationBudget = Clamp((size_t)1024 * 1024, sequencePointAllocationBudget, (size_t)1024 * 1024 * 1024);
+ m_remainingSequencePointAllocationBudget = m_sequencePointAllocationBudget;
+ }
+ m_sequencePoints.Init();
}
EventPipeBufferManager::~EventPipeBufferManager()
}
#endif
-EventPipeBuffer *EventPipeBufferManager::AllocateBufferForThread(EventPipeSession &session, unsigned int requestSize, BOOL &writeSuspended)
+EventPipeBuffer* EventPipeBufferManager::AllocateBufferForThread(EventPipeThreadSessionState* pSessionState,
+ unsigned int requestSize,
+ BOOL & writeSuspended)
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
+ PRECONDITION(pSessionState != NULL);
PRECONDITION(requestSize > 0);
}
CONTRACTL_END;
return NULL;
}
- // Determine if the requesting thread has at least one buffer.
- // If not, we guarantee that each thread gets at least one (to prevent thrashing when the circular buffer size is too small).
bool allocateNewBuffer = false;
- EventPipeThread *const pEventPipeThread = EventPipeThread::GetOrCreate();
- if (pEventPipeThread == NULL)
- return NULL;
-
- EventPipeBufferList *pThreadBufferList = pEventPipeThread->GetBufferList(this);
+ EventPipeBufferList *pThreadBufferList = pSessionState->GetBufferList();
if (pThreadBufferList == NULL)
{
- pThreadBufferList = new (nothrow) EventPipeBufferList(this, pEventPipeThread);
-
+ pThreadBufferList = new (nothrow) EventPipeBufferList(this, pSessionState->GetThread());
if (pThreadBufferList == NULL)
{
return NULL;
}
- SListElem<EventPipeBufferList *> *pElem = new (nothrow) SListElem<EventPipeBufferList *>(pThreadBufferList);
+ SListElem<EventPipeThreadSessionState *> *pElem = new (nothrow) SListElem<EventPipeThreadSessionState *>(pSessionState);
if (pElem == NULL)
{
+ delete pThreadBufferList;
return NULL;
}
- m_pPerThreadBufferList->InsertTail(pElem);
- pEventPipeThread->SetBufferList(this, pThreadBufferList);
- allocateNewBuffer = true;
+ m_pThreadSessionStateList->InsertTail(pElem);
+ pSessionState->SetBufferList(pThreadBufferList);
}
// Determine if policy allows us to allocate another buffer
- if (!allocateNewBuffer)
+ size_t availableBufferSize = m_maxSizeOfAllBuffers - m_sizeOfAllBuffers;
+ if (requestSize <= availableBufferSize)
{
- if (m_sizeOfAllBuffers < session.GetCircularBufferSize())
- {
- // We don't worry about the fact that a new buffer could put us over the circular buffer size.
- // This is OK, and we won't do it again if we actually go over.
- allocateNewBuffer = true;
- }
+ allocateNewBuffer = true;
}
+
EventPipeBuffer *pNewBuffer = NULL;
if (allocateNewBuffer)
{
// Make sure that buffer size >= request size so that the buffer size does not
// determine the max event size.
- if (bufferSize < requestSize)
- {
- bufferSize = requestSize;
- }
-
+ _ASSERTE(requestSize <= availableBufferSize);
+ bufferSize = Max(requestSize, bufferSize);
+ bufferSize = Min((unsigned int)bufferSize, (unsigned int)availableBufferSize);
+
// Don't allow the buffer size to exceed 1MB.
const unsigned int maxBufferSize = 1024 * 1024;
- if (bufferSize > maxBufferSize)
- {
- bufferSize = maxBufferSize;
- }
+ bufferSize = Min(bufferSize, maxBufferSize);
// EX_TRY is used here as opposed to new (nothrow) because
// the constructor also allocates a private buffer, which
// could throw, and cannot be easily checked
EX_TRY
{
- pNewBuffer = new EventPipeBuffer(bufferSize DEBUG_ARG(pEventPipeThread));
+ // The sequence counter is exclusively mutated on this thread so this is a thread-local
+ // read.
+ unsigned int sequenceNumber = pSessionState->GetVolatileSequenceNumber();
+ pNewBuffer = new EventPipeBuffer(bufferSize, pSessionState->GetThread(), sequenceNumber);
}
EX_CATCH
{
}
m_sizeOfAllBuffers += bufferSize;
+ if (m_sequencePointAllocationBudget != 0)
+ {
+ // sequence point bookkeeping
+ if (bufferSize >= m_remainingSequencePointAllocationBudget)
+ {
+ EventPipeSequencePoint* pSequencePoint = new (nothrow) EventPipeSequencePoint();
+ if (pSequencePoint != NULL)
+ {
+ InitSequencePointThreadListHaveLock(pSequencePoint);
+ EnqueueSequencePoint(pSequencePoint);
+ }
+ m_remainingSequencePointAllocationBudget = m_sequencePointAllocationBudget;
+ }
+ else
+ {
+ m_remainingSequencePointAllocationBudget -= bufferSize;
+ }
+ }
#ifdef _DEBUG
m_numBuffersAllocated++;
#endif // _DEBUG
return NULL;
}
+// Appends a sequence point to the queue. The queue takes ownership of
+// pSequencePoint; entries are deleted when removed by DequeueSequencePoint.
+void EventPipeBufferManager::EnqueueSequencePoint(EventPipeSequencePoint* pSequencePoint)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(m_lock.OwnedByCurrentThread());
+ }
+ CONTRACTL_END;
+
+ m_sequencePoints.InsertTail(pSequencePoint);
+}
+
+// Lock-acquiring wrapper: takes m_lock and delegates to
+// InitSequencePointThreadListHaveLock to capture per-thread sequence numbers.
+void EventPipeBufferManager::InitSequencePointThreadList(EventPipeSequencePoint* pSequencePoint)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(!IsLockOwnedByCurrentThread());
+ }
+ CONTRACTL_END;
+
+ SpinLockHolder __slh(&m_lock);
+ InitSequencePointThreadListHaveLock(pSequencePoint);
+}
+
+// Variant of InitSequencePointThreadList for callers that already hold m_lock.
+// Records a lower-bound sequence number for every known thread session state and
+// then timestamps the sequence point. Each recorded thread is AddRef'ed here;
+// presumably released when the sequence point is destroyed — TODO confirm.
+void EventPipeBufferManager::InitSequencePointThreadListHaveLock(EventPipeSequencePoint* pSequencePoint)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(IsLockOwnedByCurrentThread());
+ }
+ CONTRACTL_END;
+
+ SListElem<EventPipeThreadSessionState*> *pElem = m_pThreadSessionStateList->GetHead();
+ while (pElem != NULL)
+ {
+ EventPipeThreadSessionState* pSessionState = pElem->GetValue();
+
+ // The sequence number captured here is not guaranteed to be the most recent sequence number, nor
+ // is it guaranteed to match the number of events we would observe in the thread's write buffer
+ // memory. This is only used as a lower bound on the number of events the thread has attempted to
+ // write at the timestamp we will capture below.
+ //
+ // The sequence number is the value that will be used by the next event, so the last written
+ // event is one less. Sequence numbers are allowed to overflow, so going backwards is allowed to
+ // underflow.
+ unsigned int sequenceNumber = pSessionState->GetVolatileSequenceNumber() - 1;
+ EX_TRY
+ {
+ pSequencePoint->ThreadSequenceNumbers.Add(pSessionState, sequenceNumber);
+ pSessionState->GetThread()->AddRef();
+ }
+ EX_CATCH
+ {
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+
+ pElem = m_pThreadSessionStateList->GetNext(pElem);
+ }
+
+ // This needs to come after querying the thread sequence numbers to ensure that any recorded
+ // sequence number is <= the actual sequence number at this timestamp
+ _ASSERTE(m_lock.OwnedByCurrentThread());
+ QueryPerformanceCounter(&pSequencePoint->TimeStamp);
+}
+
+// Removes the oldest sequence point from the queue and deletes it.
+// Safe to call on an empty queue only if RemoveHead tolerates it — TODO confirm.
+void EventPipeBufferManager::DequeueSequencePoint()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(m_lock.OwnedByCurrentThread());
+ }
+ CONTRACTL_END;
+
+ delete m_sequencePoints.RemoveHead();
+}
+
+// Returns the oldest queued sequence point without removing it.
+// Returns false (and sets *ppSequencePoint to NULL) if the queue is empty.
+bool EventPipeBufferManager::TryPeekSequencePoint(EventPipeSequencePoint** ppSequencePoint)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(m_lock.OwnedByCurrentThread());
+ }
+ CONTRACTL_END;
+
+ *ppSequencePoint = m_sequencePoints.GetHead();
+ return *ppSequencePoint != NULL;
+}
+
void EventPipeBufferManager::DeAllocateBuffer(EventPipeBuffer *pBuffer)
{
CONTRACTL
if (pEventPipeThread == NULL)
{
- allocNewBuffer = true;
+ return false;
}
- else
+
+ EventPipeThreadSessionState* pSessionState = NULL;
{
SpinLockHolder _slh(pEventPipeThread->GetLock());
- pBuffer = pEventPipeThread->GetWriteBuffer(this);
-
+ if (m_writeEventSuspending.LoadWithoutBarrier())
+ {
+ // This session is suspending, we need to avoid initializing any session state and exit
+ return false;
+ }
+ pSessionState = pEventPipeThread->GetOrCreateSessionState(m_pSession);
+ if (pSessionState == NULL)
+ {
+ return false;
+ }
+ pBuffer = pSessionState->GetWriteBuffer();
if (pBuffer == NULL)
{
allocNewBuffer = true;
else
{
// Attempt to write the event to the buffer. If this fails, we should allocate a new buffer.
- allocNewBuffer = !pBuffer->WriteEvent(pEventThread, session, event, payload, pActivityId, pRelatedActivityId, pStack);
+ if (pBuffer->WriteEvent(pEventThread, session, event, payload, pActivityId, pRelatedActivityId, pStack))
+ {
+ pSessionState->IncrementSequenceNumber();
+ }
+ else
+ {
+ allocNewBuffer = true;
+ }
}
}
unsigned int requestSize = sizeof(EventPipeEventInstance) + payload.GetSize();
BOOL writeSuspended = FALSE;
- pBuffer = AllocateBufferForThread(session, requestSize, writeSuspended);
+ pBuffer = AllocateBufferForThread(pSessionState, requestSize, writeSuspended);
if (pBuffer == NULL)
{
// We treat this as the WriteEvent() call occurring after this session stopped listening for events, effectively the
// same as if event.IsEnabled() test above returned false.
if (writeSuspended)
return false;
+
+ // This lock looks unnecessary for the sequence number, but didn't want to
+ // do a broader refactoring to take it out. If it shows up as a perf
+ // problem then we should.
+ SpinLockHolder _slh(pEventPipeThread->GetLock());
+ pSessionState->IncrementSequenceNumber();
}
else
{
_ASSERTE(pEventPipeThread != NULL);
{
SpinLockHolder _slh(pEventPipeThread->GetLock());
- if (m_writeEventSuspending.Load())
+ if (m_writeEventSuspending.LoadWithoutBarrier())
{
// After leaving the manager's lock in AllocateBufferForThread some other thread decided to suspend writes.
// We need to immediately return the buffer we just took without storing it or writing to it.
}
else
{
- pEventPipeThread->SetWriteBuffer(this, pBuffer);
+ pSessionState->SetWriteBuffer(pBuffer);
// Try to write the event after we allocated a buffer.
// This is the first time if the thread had no buffers before the call to this function.
// This is the second time if this thread did have one or more buffers, but they were full.
allocNewBuffer = !pBuffer->WriteEvent(pEventThread, session, event, payload, pActivityId, pRelatedActivityId, pStack);
+ _ASSERTE(!allocNewBuffer);
+ pSessionState->IncrementSequenceNumber();
}
}
}
GC_TRIGGERS;
MODE_PREEMPTIVE;
PRECONDITION(pFile != nullptr);
+ PRECONDITION(GetCurrentEvent() == nullptr);
}
CONTRACTL_END;
- // TODO: Better version of merge sort.
- // 1. Iterate through all of the threads, adding each buffer to a temporary list.
- // 2. While iterating, get the lowest most recent timestamp. This is the timestamp that we want to process up to.
- // 3. Process up to the lowest most recent timestamp for the set of buffers.
- // 4. When we get NULLs from each of the buffers on PopNext(), we're done.
- // 5. While iterating if PopNext() == NULL && Empty() == NULL, remove the buffer from the list. It's empty.
- // 6. While iterating, grab the next lowest most recent timestamp.
- // 7. Walk through the list again and look for any buffers that have a lower most recent timestamp than the next most recent timestamp.
- // 8. If we find one, add it to the list and select its most recent timestamp as the lowest.
- // 9. Process again (go to 3).
- // 10. Continue until there are no more buffers to process.
+ // The V4 format doesn't require full event sorting as V3 did
+ // See the comments in WriteAllBufferToFileV4 for more details
+ if (pFile->GetSerializationFormat() >= EventPipeSerializationFormat::NetTraceV4)
+ {
+ WriteAllBuffersToFileV4(pFile, stopTimeStamp);
+ }
+ else
+ {
+ WriteAllBuffersToFileV3(pFile, stopTimeStamp);
+ }
+}
+
+// Writes all buffered events up to stopTimeStamp in fully sorted (timestamp)
+// order, as the V3 file format requires. Thread id / sequence number metadata
+// does not exist in V3, so zeros are passed to WriteEvent.
+void EventPipeBufferManager::WriteAllBuffersToFileV3(EventPipeFile *pFile, LARGE_INTEGER stopTimeStamp)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_PREEMPTIVE;
+ PRECONDITION(pFile != nullptr);
+ PRECONDITION(GetCurrentEvent() == nullptr);
+ }
+ CONTRACTL_END;
// Naively walk the circular buffer, writing the event stream in timestamp order.
- m_numEventsWritten = 0;
- while (true)
+ MoveNextEventAnyThread(stopTimeStamp);
+ while (GetCurrentEvent() != nullptr)
+ {
+ pFile->WriteEvent(*GetCurrentEvent(), /*CaptureThreadId=*/0, /*sequenceNumber=*/0, /*IsSorted=*/TRUE);
+ MoveNextEventAnyThread(stopTimeStamp);
+ }
+ pFile->Flush();
+}
+
+void EventPipeBufferManager::WriteAllBuffersToFileV4(EventPipeFile *pFile, LARGE_INTEGER stopTimeStamp)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_PREEMPTIVE;
+ PRECONDITION(pFile != nullptr);
+ PRECONDITION(GetCurrentEvent() == nullptr);
+ }
+ CONTRACTL_END;
+
+ //
+ // In V3 of the format this code does a full timestamp order sort on the events which made the file easier to consume,
+ // but the perf implications for emitting the file are less desirable. Imagine an application with 500 threads emitting
+ // 10 events per sec per thread (granted this is a questionable number of threads to use in an app, but that isn't
+ // under our control). A nieve sort of 500 ordered lists is going to pull the oldest event from each of 500 lists,
+ // compare all the timestamps, then emit the oldest one. This could easily add a thousand CPU cycles per-event. A
+ // better implementation could maintain a min-heap so that we scale O(log(N)) instead of O(N)but fundamentally sorting
+ // has a cost and we didn't want a file format that forces the runtime to pay it on every event.
+ //
+ // We minimize sorting using two mechanisms:
+ // 1) Explicit sequence points - Every X MB of buffer space that is distributed to threads we record the current
+ // timestamp. We ensure when writing events in the file that all events before the sequence point time are written
+ // prior to the sequence point and all events with later timestamps are written afterwards. For example assume
+ // two threads emitted events like this(B_14 = event on thread B with timestamp 14):
+ //
+ // Time --->
+ // Thread A events: A_1 A_4 A_9 A_10 A_11 A_12 A_13 A_15
+ // Thread B events: B_2 B_6 B_14 B_20
+ // /|\
+ // |
+ // Assume sequence point was triggered here
+ // Then we promise that events A_1, A_4, A_9, A_10, B_2_ and B_6 will be written in one or more event blocks,
+ // (not necessarily in sorted order) then a sequence point block is written, then events A_11, A_12, A_13, B_14,
+ // A_15, and B_20 will be written. The reader can cache all the events between sequence points, sort them, and
+ // then emit them in a total order. Triggering sequence points based on buffer allocation ensures that we won't
+ // need an arbitrarily large cache in the reader to store all the events, however there is a fair amount of slop
+ // in the current scheme. In the worst case you could imagine N threads, each of which was already allocated a
+ // max size buffer (currently 1MB) but only an insignificant portion has been used. Even if the trigger
+ // threshhold is a modest amount such as 10MB, the threads could first write 1MB * N bytes to the stream
+ // beforehand. I'm betting on these extreme cases being very rare and even something like 1GB isn't a crazy
+ // amount of virtual memory to use on to parse an extreme trace. However if I am wrong we can control
+ // both the allocation policy and the triggering instrumentation. Nothing requires us to give out 1MB buffers to
+ // 1000 threads simulatneously, nor are we prevented from observing buffer usage at finer granularity than we
+ // allocated.
+ //
+ // 2) We mark which events are the oldest ones in the stream at the time we emit them and we do this at regular
+ // intervals of time. When we emit all the events every X ms, there will be at least one event in there with
+ // a marker showing that all events older than that one have already been emitted. As soon as the reader sees
+ // this it can sort the events which have older timestamps and emit them.
+ //
+ // Why have both mechanisms? The sequence points in #1 worked fine to guarantee that given the whole trace you
+ // could sort it with a bounded cache, but it doesn't help much for real-time usage. Imagine that we have two
+ // threads emitting 1KB/sec of events and sequence points occur every 10MB. The reader would need to wait for
+ // 10,000 seconds to accumulate all the events before it could sort and process them. On the other hand if we
+ // only had mechanism #2 the reader can generate the sort quickly in real-time, but it is messy to do the buffer
+ // management. The reader reads in a bunch of event block buffers and starts emitting events from sub-sections
+ // of each of them and needs to know when each buffer can be released. The explicit sequence point makes that
+ // very easy - every sequence point all buffers can be released and no further bookkeeping is required.
+
+ EventPipeSequencePoint* pSequencePoint;
+ LARGE_INTEGER curTimestampBoundary;
+ curTimestampBoundary.QuadPart = stopTimeStamp.QuadPart;
{
- EventPipeEventInstance *pOldestInstance = NULL;
- EventPipeBuffer *pOldestContainingBuffer = NULL;
- EventPipeBufferList *pOldestContainingList = NULL;
+ SpinLockHolder _slh(&m_lock);
+ if (TryPeekSequencePoint(&pSequencePoint))
+ {
+ curTimestampBoundary.QuadPart = Min(curTimestampBoundary.QuadPart, pSequencePoint->TimeStamp.QuadPart);
+ }
+ }
- CQuickArrayList<EventPipeBuffer *> bufferList;
- CQuickArrayList<EventPipeBufferList *> bufferListList;
+ while(true) // loop across sequence points
+ {
+ while (true) // loop across events within a sequence point boundary
{
- // Take the lock before walking the buffer list.
- SpinLockHolder _slh(&m_lock);
- SListElem<EventPipeBufferList *> *pElem = m_pPerThreadBufferList->GetHead();
- while (pElem != NULL)
+ // pick the thread that has the oldest event
+ MoveNextEventAnyThread(curTimestampBoundary);
+ if (GetCurrentEvent() == nullptr)
{
- EventPipeBufferList *pBufferList = pElem->GetValue();
- EventPipeBuffer *pBuffer = pBufferList->TryGetBuffer(stopTimeStamp);
- if (pBuffer != nullptr)
- {
- bufferListList.Push(pBufferList);
- bufferList.Push(pBuffer);
- }
- pElem = m_pPerThreadBufferList->GetNext(pElem);
+ break;
+ }
+ ULONGLONG captureThreadId = GetCurrentEventBuffer()->GetWriterThread()->GetOSThreadId();
+ EventPipeBufferList* pBufferList = GetCurrentEventBufferList();
+
+ // loop across events on this thread
+ bool eventsWritten = false;
+ unsigned int sequenceNumber = 0;
+ while (GetCurrentEvent() != nullptr)
+ {
+ // The first event emitted on each thread (detected by !eventsWritten) is guaranteed to
+ // be the oldest event cached in our buffers so we mark it. This implements mechanism #2
+ // in the big comment above.
+
+ sequenceNumber = GetCurrentSequenceNumber();
+ pFile->WriteEvent(*GetCurrentEvent(), captureThreadId, sequenceNumber, !eventsWritten);
+ eventsWritten = true;
+ MoveNextEventSameThread(curTimestampBoundary);
}
+ pBufferList->SetLastReadSequenceNumber(sequenceNumber);
}
- for (size_t i = 0; i < bufferList.Size(); i++)
+ // This finishes any current partially filled EventPipeBlock, and flushes it to the stream
+ pFile->Flush();
+
+ // there are no more events prior to curTimestampBoundary
+ if (curTimestampBoundary.QuadPart == stopTimeStamp.QuadPart)
+ {
+ // We are done
+ break;
+ }
+ else // (curTimestampBoundary.QuadPart < stopTimeStamp.QuadPart)
{
- EventPipeBufferList *pBufferList = bufferListList[i];
- EventPipeBuffer *pBuffer = bufferList[i];
- if (pBufferList->TryConvertBufferToReadOnly(pBuffer))
+ // stopped at sequence point case
+
+ // the sequence point captured a lower bound for sequence number on each thread, but iterating
+ // through the events we may have observed that a higher numbered event was recorded. If so we
+ // should adjust the sequence numbers upwards to ensure the data in the stream is consistent.
{
- // Peek the next event out of the buffer.
- EventPipeBuffer *pContainingBuffer = pBuffer;
- EventPipeEventInstance *pNext = pBuffer->PeekNext(stopTimeStamp);
- if (pNext != NULL)
+ SpinLockHolder _slh(&m_lock);
+
+ SListElem<EventPipeThreadSessionState*> *pElem = m_pThreadSessionStateList->GetHead();
+ while (pElem != NULL)
{
- // If it's the oldest event we've seen, then save it.
- if ((pOldestInstance == NULL) ||
- (pOldestInstance->GetTimeStamp()->QuadPart > pNext->GetTimeStamp()->QuadPart))
+ EventPipeThreadSessionState* pSessionState = pElem->GetValue();
+ unsigned int threadSequenceNumber = 0;
+ pSequencePoint->ThreadSequenceNumbers.Lookup(pSessionState, &threadSequenceNumber);
+ unsigned int lastReadSequenceNumber = pSessionState->GetBufferList()->GetLastReadSequenceNumber();
+ // Sequence numbers can overflow so we can't use a direct lastRead > sequenceNumber comparison
+ // If a thread is able to drop more than 0x80000000 events in between sequence points then we will
+ // miscategorize it, but that seems unlikely.
+ unsigned int lastReadDelta = lastReadSequenceNumber - threadSequenceNumber;
+ if (0 < lastReadDelta && lastReadDelta < 0x80000000)
{
- pOldestInstance = pNext;
- pOldestContainingBuffer = pContainingBuffer;
- pOldestContainingList = pBufferList;
+ pSequencePoint->ThreadSequenceNumbers.AddOrReplace(ThreadSequenceNumberMap::element_t(pSessionState, lastReadSequenceNumber));
}
+ pElem = m_pThreadSessionStateList->GetNext(pElem);
}
}
- }
- if (pOldestInstance == NULL)
- {
- // We're done. There are no more events.
- break;
- }
-
- // Write the oldest event.
- pFile->WriteEvent(*pOldestInstance);
+ // emit the sequence point into the file
+ pFile->WriteSequencePoint(pSequencePoint);
- m_numEventsWritten++;
+ // move to the next sequence point if any
+ {
+ SpinLockHolder _slh(&m_lock);
- {
- SpinLockHolder _slh(&m_lock);
- // Pop the event from the buffer.
- pOldestContainingList->PopNextEvent(pOldestContainingBuffer, pOldestInstance);
+ // advance to the next sequence point, if any
+ DequeueSequencePoint();
+ curTimestampBoundary.QuadPart = stopTimeStamp.QuadPart;
+ if (TryPeekSequencePoint(&pSequencePoint))
+ {
+ curTimestampBoundary.QuadPart = Min(curTimestampBoundary.QuadPart, pSequencePoint->TimeStamp.QuadPart);
+ }
+ }
}
}
-
- if (m_numEventsWritten > 0)
- pFile->Flush();
}
-EventPipeEventInstance *EventPipeBufferManager::GetNextEvent()
+// Advances the shared read cursor across all threads' buffers (bounded by the
+// current time) and returns the event now under it, or NULL if none is available.
+EventPipeEventInstance* EventPipeBufferManager::GetNextEvent()
{
CONTRACTL
{
- THROWS;
+ NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
PRECONDITION(!EventPipe::IsLockOwnedByCurrentThread());
}
CONTRACTL_END;
+ // PERF: This may be too aggressive? If this method is being called frequently enough to keep pace with the
+ // writing threads we could be in a state of high lock contention and lots of churning buffers. Each writer
+ // would take several locks, allocate a new buffer, write one event into it, then the reader would take the
+ // lock, convert the buffer to read-only and read the single event out of it. Allowing more events to accumulate
+ // in the buffers before converting between writable and read-only amortizes a lot of the overhead. One way
+ // to achieve that would be picking a stopTimeStamp that was Xms in the past. This would let Xms of events
+ // to accumulate in the write buffer before we converted it and forced the writer to allocate another. Other more
+ // sophisticated approaches would probably build a low overhead synchronization mechanism to read and write the
+ // buffer at the same time.
LARGE_INTEGER stopTimeStamp;
QueryPerformanceCounter(&stopTimeStamp);
+ MoveNextEventAnyThread(stopTimeStamp);
+ return GetCurrentEvent();
+}
+
+EventPipeEventInstance* EventPipeBufferManager::GetCurrentEvent()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pCurrentEvent;
+}
+
+unsigned int EventPipeBufferManager::GetCurrentSequenceNumber()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pCurrentBuffer->GetCurrentSequenceNumber();
+}
+
+EventPipeBuffer* EventPipeBufferManager::GetCurrentEventBuffer()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pCurrentBuffer;
+}
+
+EventPipeBufferList* EventPipeBufferManager::GetCurrentEventBufferList()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pCurrentBufferList;
+}
+
+void EventPipeBufferManager::MoveNextEventAnyThread(LARGE_INTEGER stopTimeStamp)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_PREEMPTIVE;
+ PRECONDITION(!m_lock.OwnedByCurrentThread());
+ }
+ CONTRACTL_END;
- EventPipeEventInstance *pOldestInstance = NULL;
- EventPipeBuffer *pOldestContainingBuffer = NULL;
- EventPipeBufferList *pOldestContainingList = NULL;
+ if (m_pCurrentEvent != nullptr)
+ {
+ m_pCurrentBuffer->MoveNextReadEvent();
+ }
+ m_pCurrentEvent = nullptr;
+ m_pCurrentBuffer = nullptr;
+ m_pCurrentBufferList = nullptr;
+ // We need to do this in two steps because we can't hold m_lock and EventPipeThread::m_lock
+ // at the same time.
+
+ // Step 1 - while holding m_lock get the oldest buffer from each thread
CQuickArrayList<EventPipeBuffer *> bufferList;
CQuickArrayList<EventPipeBufferList *> bufferListList;
{
- // Take the lock before walking the buffer list.
SpinLockHolder _slh(&m_lock);
- SListElem<EventPipeBufferList *> *pElem = m_pPerThreadBufferList->GetHead();
+ SListElem<EventPipeThreadSessionState *> *pElem = m_pThreadSessionStateList->GetHead();
while (pElem != NULL)
{
- EventPipeBufferList *pBufferList = pElem->GetValue();
- EventPipeBuffer *pBuffer = pBufferList->TryGetBuffer(stopTimeStamp);
- if (pBuffer != nullptr)
+ EventPipeBufferList *pBufferList = pElem->GetValue()->GetBufferList();
+ EventPipeBuffer *pBuffer = pBufferList->GetHead();
+ if (pBuffer != nullptr &&
+ pBuffer->GetCreationTimeStamp().QuadPart < stopTimeStamp.QuadPart)
{
bufferListList.Push(pBufferList);
bufferList.Push(pBuffer);
}
- pElem = m_pPerThreadBufferList->GetNext(pElem);
+ pElem = m_pThreadSessionStateList->GetNext(pElem);
}
}
+ // Step 2 - iterate the cached list to find the one with the oldest event. This may require
+ // converting some of the buffers from writable to readable, and that in turn requires
+ // taking the associated EventPipeThread::m_lock for thread that was writing to that buffer.
+ LARGE_INTEGER curOldestTime = stopTimeStamp;
for (size_t i = 0; i < bufferList.Size(); i++)
{
EventPipeBufferList *pBufferList = bufferListList[i];
- EventPipeBuffer *pBuffer = bufferList[i];
- if (pBufferList->TryConvertBufferToReadOnly(pBuffer))
+ EventPipeBuffer *pHeadBuffer = bufferList[i];
+ EventPipeBuffer *pBuffer = AdvanceToNonEmptyBuffer(pBufferList, pHeadBuffer, stopTimeStamp);
+ if (pBuffer == nullptr)
{
- // Peek the next event out of the buffer.
- EventPipeBuffer *pContainingBuffer = pBuffer;
-
- // PERF: This may be too aggressive? If this method is being called frequently enough to keep pace with the
- // writing threads we could be in a state of high lock contention and lots of churning buffers. Each writer
- // would take several locks, allocate a new buffer, write one event into it, then the reader would take the
- // lock, convert the buffer to read-only and read the single event out of it. Allowing more events to accumulate
- // in the buffers before converting between writable and read-only amortizes a lot of the overhead. One way
- // to achieve that would be picking a stopTimeStamp that was Xms in the past. This would let Xms of events
- // to accumulate in the write buffer before we converted it and forced the writer to allocate another. Other more
- // sophisticated approaches would probably build a low overhead synchronization mechanism to read and write the
- // buffer at the same time.
- EventPipeEventInstance *pNext = pBuffer->PeekNext(stopTimeStamp);
- if (pNext != NULL)
+ // there weren't any non-empty buffers in that list prior to stopTimeStamp
+ continue;
+ }
+ // Peek the next event out of the buffer.
+ EventPipeEventInstance *pNext = pBuffer->GetCurrentReadEvent();
+ if (pNext != NULL)
+ {
+ // If it's the oldest event we've seen, then save it.
+ if (pNext->GetTimeStamp()->QuadPart < curOldestTime.QuadPart)
{
- // If it's the oldest event we've seen, then save it.
- if ((pOldestInstance == NULL) ||
- (pOldestInstance->GetTimeStamp()->QuadPart > pNext->GetTimeStamp()->QuadPart))
- {
- pOldestInstance = pNext;
- pOldestContainingBuffer = pContainingBuffer;
- pOldestContainingList = pBufferList;
- }
+ m_pCurrentEvent = pNext;
+ m_pCurrentBuffer = pBuffer;
+ m_pCurrentBufferList = pBufferList;
+ curOldestTime = *(m_pCurrentEvent->GetTimeStamp());
}
}
}
+}
- if (pOldestInstance == NULL)
+void EventPipeBufferManager::MoveNextEventSameThread(LARGE_INTEGER beforeTimeStamp)
+{
+ CONTRACTL
{
- // We're done. There are no more events.
- return nullptr;
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_PREEMPTIVE;
+ PRECONDITION(m_pCurrentEvent != nullptr);
+ PRECONDITION(m_pCurrentBuffer != nullptr);
+ PRECONDITION(m_pCurrentBufferList != nullptr);
+ PRECONDITION(!m_lock.OwnedByCurrentThread());
}
+ CONTRACTL_END;
+
+    // Advance past the current event
+ m_pCurrentEvent = nullptr;
+ m_pCurrentBuffer->MoveNextReadEvent();
+ // Find the first buffer in the list, if any, which has an event in it
+ m_pCurrentBuffer = AdvanceToNonEmptyBuffer(m_pCurrentBufferList, m_pCurrentBuffer, beforeTimeStamp);
+ if (m_pCurrentBuffer == nullptr)
{
- SpinLockHolder _slh(&m_lock);
- // Pop the event from the buffer.
- pOldestContainingList->PopNextEvent(pOldestContainingBuffer, pOldestInstance);
+        // no more buffers prior to beforeTimeStamp
+ _ASSERTE(m_pCurrentEvent == nullptr);
+ _ASSERTE(m_pCurrentBuffer == nullptr);
+ m_pCurrentBufferList = nullptr;
+ return;
}
- // Return the oldest event that hasn't yet been processed.
- return pOldestInstance;
+ // get the event from that buffer
+ EventPipeEventInstance* pNextEvent = m_pCurrentBuffer->GetCurrentReadEvent();
+ LARGE_INTEGER nextTimeStamp = *pNextEvent->GetTimeStamp();
+ if (nextTimeStamp.QuadPart >= beforeTimeStamp.QuadPart)
+ {
+ // event exists, but isn't early enough
+ m_pCurrentEvent = nullptr;
+ m_pCurrentBuffer = nullptr;
+ m_pCurrentBufferList = nullptr;
+ }
+ else
+ {
+ // event is early enough, set the new cursor
+ m_pCurrentEvent = pNextEvent;
+ _ASSERTE(m_pCurrentBuffer != nullptr);
+ _ASSERTE(m_pCurrentBufferList != nullptr);
+ }
+}
+
+EventPipeBuffer* EventPipeBufferManager::AdvanceToNonEmptyBuffer(EventPipeBufferList* pBufferList,
+ EventPipeBuffer* pBuffer,
+ LARGE_INTEGER beforeTimeStamp)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_PREEMPTIVE;
+ PRECONDITION(!m_lock.OwnedByCurrentThread());
+ PRECONDITION(pBufferList != nullptr);
+ PRECONDITION(pBuffer != nullptr);
+ PRECONDITION(pBufferList->GetHead() == pBuffer);
+ }
+ CONTRACTL_END;
+
+ EventPipeBuffer* pCurrentBuffer = pBuffer;
+ while (true)
+ {
+ if (!TryConvertBufferToReadOnly(pCurrentBuffer))
+ {
+ // the writer thread hasn't yet stored this buffer into the m_pWriteBuffer
+ // field (there is a small time window after allocation in this state).
+ // This should be the only buffer remaining in the list and it has no
+ // events written into it so we are done iterating.
+ return nullptr;
+ }
+ if (pCurrentBuffer->GetCurrentReadEvent() != nullptr)
+ {
+ // found a non-empty buffer
+ return pCurrentBuffer;
+ }
+ {
+ SpinLockHolder _slh(&m_lock);
+
+ // delete the empty buffer
+ EventPipeBuffer *pRemoved = pBufferList->GetAndRemoveHead();
+ _ASSERTE(pCurrentBuffer == pRemoved);
+ DeAllocateBuffer(pRemoved);
+
+ // get the next buffer
+ pCurrentBuffer = pBufferList->GetHead();
+ if (pCurrentBuffer == nullptr ||
+ pCurrentBuffer->GetCreationTimeStamp().QuadPart >= beforeTimeStamp.QuadPart)
+ {
+ // no more buffers in the list before this timestamp, we're done
+ return nullptr;
+ }
+ }
+ }
+}
+
+bool EventPipeBufferManager::TryConvertBufferToReadOnly(EventPipeBuffer* pNewReadBuffer)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_PREEMPTIVE;
+ PRECONDITION(pNewReadBuffer != nullptr);
+ PRECONDITION(!m_lock.OwnedByCurrentThread());
+ }
+ CONTRACTL_END;
+
+ // if already readable, nothing to do
+ if (pNewReadBuffer->GetVolatileState() == EventPipeBufferState::READ_ONLY)
+ {
+ return true;
+ }
+
+ // if not yet readable, disable the thread from writing to it which causes
+ // it to become readable
+ {
+ EventPipeThread* pThread = pNewReadBuffer->GetWriterThread();
+ SpinLockHolder _slh(pThread->GetLock());
+ EventPipeThreadSessionState* pSessionState = pThread->GetSessionState(m_pSession);
+ if (pSessionState->GetWriteBuffer() == pNewReadBuffer)
+ {
+ pSessionState->SetWriteBuffer(nullptr);
+ _ASSERTE(pNewReadBuffer->GetVolatileState() == EventPipeBufferState::READ_ONLY);
+ return true;
+ }
+ }
+
+    // It is possible that the buffer we were given is writable yet it is not the buffer
+    // returned by EventPipeThreadSessionState::GetWriteBuffer(). This is because
+    // EventPipeBufferManager::AllocateBufferForThread() inserts the new writable buffer into
+    // the EventPipeBufferList first, and only then stores it via
+    // EventPipeThreadSessionState::SetWriteBuffer(). The two operations are not atomic so it is
+    // possible to observe this partial state.
+ return pNewReadBuffer->GetVolatileState() == EventPipeBufferState::READ_ONLY;
}
void EventPipeBufferManager::SuspendWriteEvent(EventPipeSessionID sessionId)
THROWS;
GC_NOTRIGGER;
MODE_ANY;
- PRECONDITION(EnsureConsistency());
// All calls to this method must be synchronized by our caller
PRECONDITION(EventPipe::IsLockOwnedByCurrentThread());
}
CQuickArrayList<EventPipeThread *> threadList;
{
SpinLockHolder _slh(&m_lock);
+ _ASSERTE(EnsureConsistency());
+
m_writeEventSuspending.Store(TRUE);
// From this point until m_writeEventSuspending is reset to FALSE it is impossible
- // for new EventPipeBufferLists to be added to the m_pPerThreadBufferList. The only
+ // for new EventPipeThreadSessionStates to be added to the m_pThreadSessionStateList or
+ // for new EventBuffers to be added to an existing EventPipeBufferList. The only
// way AllocateBufferForThread is allowed to add one is by:
// 1) take m_lock - AllocateBufferForThread can't own it now because this thread owns it,
// but after this thread gives it up lower in this function it could be acquired.
// guarantees AllocateBufferForThread will observe all the memory changes this
// thread made prior to releasing m_lock and we've already set it TRUE.
// This ensures that we iterate over the list of threads below we've got the complete list.
- SListElem<EventPipeBufferList *> *pElem = m_pPerThreadBufferList->GetHead();
+ SListElem<EventPipeThreadSessionState *> *pElem = m_pThreadSessionStateList->GetHead();
while (pElem != NULL)
{
threadList.Push(pElem->GetValue()->GetThread());
- pElem = m_pPerThreadBufferList->GetNext(pElem);
+ pElem = m_pThreadSessionStateList->GetNext(pElem);
}
}
EventPipeThread *pThread = threadList[i];
{
SpinLockHolder _slh(pThread->GetLock());
- pThread->SetWriteBuffer(this, nullptr);
-
+ EventPipeThreadSessionState* pSessionState = pThread->GetSessionState(m_pSession);
+ pSessionState->SetWriteBuffer(nullptr);
// From this point until m_writeEventSuspending is reset to FALSE it is impossible
- // for new EventPipeBufferLists to be added to the m_pPerThreadBufferList. The only
- // way AllocateBufferForThread is allowed to add one is by:
- // 1) take m_lock - AllocateBufferForThread can't own it now because this thread owns it,
- // but after this thread gives it up lower in this function it could be acquired.
- // 2) observe m_writeEventSuspending = False - that won't happen, acquiring m_lock
- // guarantees AllocateBufferForThread will observe all the memory changes this
- // thread made prior to releasing m_lock and we've already set it TRUE.
+ // for this thread to set the write buffer to a non-null value which in turn means
+ // it can't write events into any buffer. To do this it would need to both:
+ // 1) Acquire the thread lock - it can't right now but it will be able to do so after
+ // we release the lock below
+ // 2) Observe m_writeEventSuspending = false - that won't happen, acquiring the thread
+ // lock guarantees WriteEvent will observe all the memory
+ // changes this thread made prior to releasing the thread
+ // lock and we already set it TRUE.
}
}
// hadn't yet relinquished it.
{
SpinLockHolder _slh(&m_lock);
- SListElem<EventPipeBufferList *> *pElem = m_pPerThreadBufferList->GetHead();
+ SListElem<EventPipeThreadSessionState *> *pElem = m_pThreadSessionStateList->GetHead();
while (pElem != NULL)
{
// Get the list and remove it from the thread.
- EventPipeBufferList *const pBufferList = pElem->GetValue();
+ EventPipeBufferList *const pBufferList = pElem->GetValue()->GetBufferList();
if (pBufferList != nullptr)
{
EventPipeThread *const pEventPipeThread = pBufferList->GetThread();
// this_session_id into the flag again.
}
}
-
- pElem = m_pPerThreadBufferList->GetNext(pElem);
- }
- }
-
- // Iterate through all the threads, and remove this buffer manager.
- for (size_t i = 0; i < threadList.Size(); i++)
- {
- EventPipeThread *pThread = threadList[i];
- {
- SpinLockHolder _slh(pThread->GetLock());
- pThread->Remove(this);
+ pElem = m_pThreadSessionStateList->GetNext(pElem);
}
}
}
}
CONTRACTL_END;
- _ASSERTE(EnsureConsistency());
- _ASSERTE(m_writeEventSuspending);
+ CQuickArrayList<EventPipeThreadSessionState*> threadSessionStatesToRemove;
- // Take the buffer manager manipulation lock
- SpinLockHolder _slh(&m_lock);
-
- SListElem<EventPipeBufferList *> *pElem = m_pPerThreadBufferList->GetHead();
- while (pElem != NULL)
{
- // Get the list and determine if we can free it.
- EventPipeBufferList *pBufferList = pElem->GetValue();
- EventPipeThread *pThread = pBufferList->GetThread();
- pThread->SetBufferList(this, nullptr);
+ // Take the buffer manager manipulation lock
+ SpinLockHolder _slh(&m_lock);
+
+ _ASSERTE(EnsureConsistency());
+ _ASSERTE(m_writeEventSuspending);
- // Iterate over all nodes in the list and deallocate them.
- EventPipeBuffer *pBuffer = pBufferList->GetAndRemoveHead();
- while (pBuffer != NULL)
+ // This m_writeEventSuspending flag + locks ensures that no thread will touch any of the
+ // state we are dismantling here. This includes:
+ // a) EventPipeThread m_sessions[session_id]
+ // b) EventPipeThreadSessionState
+ // c) EventPipeBufferList
+ // d) EventPipeBuffer
+ // e) EventPipeBufferManager.m_pThreadSessionStateList
+
+ SListElem<EventPipeThreadSessionState*> *pElem = m_pThreadSessionStateList->GetHead();
+ while (pElem != NULL)
{
- DeAllocateBuffer(pBuffer);
- pBuffer = pBufferList->GetAndRemoveHead();
- }
+ // Get the list and determine if we can free it.
+ EventPipeThreadSessionState *pSessionState = pElem->GetValue();
+ EventPipeBufferList *pBufferList = pSessionState->GetBufferList();
+ EventPipeThread *pThread = pSessionState->GetThread();
+ pSessionState->SetBufferList(nullptr);
+
+ // Iterate over all nodes in the buffer list and deallocate them.
+ EventPipeBuffer *pBuffer = pBufferList->GetAndRemoveHead();
+ while (pBuffer != NULL)
+ {
+ DeAllocateBuffer(pBuffer);
+ pBuffer = pBufferList->GetAndRemoveHead();
+ }
- // Remove the buffer list from the per-thread buffer list.
- pElem = m_pPerThreadBufferList->FindAndRemove(pElem);
- _ASSERTE(pElem != NULL);
+ // Now that all the buffer list elements have been freed, free the list itself.
+ delete(pBufferList);
+ pBufferList = NULL;
- SListElem<EventPipeBufferList *> *pCurElem = pElem;
- pElem = m_pPerThreadBufferList->GetNext(pElem);
- delete (pCurElem);
+ // Remove the session state from the session state list.
+ pElem = m_pThreadSessionStateList->FindAndRemove(pElem);
+ _ASSERTE(pElem != NULL);
- // Now that all the list elements have been freed, free the list itself.
- delete (pBufferList);
- pBufferList = NULL;
+ SListElem<EventPipeThreadSessionState *> *pCurElem = pElem;
+ pElem = m_pThreadSessionStateList->GetNext(pElem);
+ delete (pCurElem);
+
+ // And finally queue the removal of the SessionState from the thread
+ EX_TRY
+ {
+ threadSessionStatesToRemove.Push(pSessionState);
+ }
+ EX_CATCH
+ {
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+ }
+ }
+
+ // remove and delete the session state
+ for (size_t i = 0; i < threadSessionStatesToRemove.Size(); i++)
+ {
+ EventPipeThreadSessionState* pThreadSessionState = threadSessionStatesToRemove[i];
+ // The strong reference from session state -> thread might be the very last reference
+ // We need to ensure the thread doesn't die until we can release the lock
+ EventPipeThreadHolder pThread = pThreadSessionState->GetThread();
+ {
+ SpinLockHolder _slh(pThreadSessionState->GetThread()->GetLock());
+ pThreadSessionState->GetThread()->DeleteSessionState(pThreadSessionState->GetSession());
+ }
}
}
{
LIMITED_METHOD_CONTRACT;
- SListElem<EventPipeBufferList *> *pElem = m_pPerThreadBufferList->GetHead();
+ SListElem<EventPipeThreadSessionState *> *pElem = m_pThreadSessionStateList->GetHead();
while (pElem != NULL)
{
- EventPipeBufferList *pBufferList = pElem->GetValue();
+ EventPipeBufferList *pBufferList = pElem->GetValue()->GetBufferList();
_ASSERTE(pBufferList->EnsureConsistency());
- pElem = m_pPerThreadBufferList->GetNext(pElem);
+ pElem = m_pThreadSessionStateList->GetNext(pElem);
}
return true;
m_pHeadBuffer = NULL;
m_pTailBuffer = NULL;
m_bufferCount = 0;
+ m_lastReadSequenceNumber = 0;
}
EventPipeBuffer *EventPipeBufferList::GetHead()
return m_bufferCount;
}
-EventPipeBuffer *EventPipeBufferList::TryGetBuffer(LARGE_INTEGER beforeTimeStamp)
+EventPipeThread *EventPipeBufferList::GetThread()
{
LIMITED_METHOD_CONTRACT;
- _ASSERTE(m_pManager->IsLockOwnedByCurrentThread());
- /**
- * There are 4 cases we need to handle in this function:
- * 1) There is no buffer in the list, in this case, return nullptr
- * 2) The head buffer is written to but not read yet, in this case, return that buffer
- * 2.1) It is possible that the head buffer is the only buffer that is created and is empty, or
- * 2.2) The head buffer is written to but not read
- * We cannot differentiate the two cases without reading it - but it is okay, in both cases, the buffer represents the head of the buffer list.
- * Note that writing to the buffer can happen after we return from this function, and it is also okay.
- * 3.) The head buffer is read but not completely reading, and
- * 4.) The head buffer is read completely.
- * This case requires special attention because it is possible that the next buffer in the list contain the oldest event. Fortunately, it is
- * already read so it is safe to read it to determine this case.
- *
- * In any case, if the desired buffer is created after beforeTimeStamp, then we can stop.
- */
-
- if (this->m_pHeadBuffer == nullptr)
- {
- // Case 1
- return nullptr;
- }
-
- EventPipeBuffer *candidate = nullptr;
- EventPipeBufferState bufferState = this->m_pHeadBuffer->GetVolatileState();
- if (bufferState != EventPipeBufferState::READ_ONLY)
- {
- // Case 2 (2.1 or 2.2)
- candidate = this->m_pHeadBuffer;
- }
- else
- {
- if (this->m_pHeadBuffer->PeekNext(beforeTimeStamp))
- {
- // Case 3
- candidate = this->m_pHeadBuffer;
- }
- else
- {
- // Case 4
- candidate = this->m_pHeadBuffer->GetNext();
- }
- }
-
- if (candidate == nullptr || candidate->GetCreationTimeStamp().QuadPart >= beforeTimeStamp.QuadPart)
- {
- // If the oldest buffer is still newer than the beforeTimeStamp, we can stop.
- return nullptr;
- }
-
- return candidate;
+ return m_pThread;
}
-bool EventPipeBufferList::TryConvertBufferToReadOnly(EventPipeBuffer* pNewReadBuffer)
+unsigned int EventPipeBufferList::GetLastReadSequenceNumber()
{
LIMITED_METHOD_CONTRACT;
- _ASSERTE(pNewReadBuffer != nullptr);
- _ASSERTE(!m_pManager->IsLockOwnedByCurrentThread());
- {
- SpinLockHolder _slh(m_pThread->GetLock());
- if (m_pThread->GetWriteBuffer(m_pManager) == pNewReadBuffer)
- {
- m_pThread->SetWriteBuffer(m_pManager, nullptr);
- _ASSERTE(pNewReadBuffer->GetVolatileState() == EventPipeBufferState::READ_ONLY);
- return true;
- }
- }
- // It is possible that EventPipeBufferList::TryGetBuffer(...) returns a writable buffer
- // yet it is not returned as EventPipeThread::GetWriteBuffer(...). This is because
- // EventPipeBufferManager::AllocateBufferForThread() insert the new writable buffer into
- // the EventPipeBufferList first, and then it is added to the writable buffer hash table
- // by EventPipeThread::SetWriteBuffer() next. The two operations are not atomic so it is possible
- // to observe this partial state.
- return pNewReadBuffer->GetVolatileState() == EventPipeBufferState::READ_ONLY;
+ return m_lastReadSequenceNumber;
}
-void EventPipeBufferList::PopNextEvent(EventPipeBuffer *pContainingBuffer, EventPipeEventInstance *pNext)
-{
- CONTRACTL
- {
- NOTHROW;
- GC_NOTRIGGER;
- MODE_ANY;
- }
- CONTRACTL_END;
-
- // Check to see if we need to clean-up the buffer that contained the previously popped event.
- if (pContainingBuffer->GetPrevious() != NULL)
- {
- // Remove the previous node. The previous node should always be the head node.
- EventPipeBuffer *pRemoved = GetAndRemoveHead();
- _ASSERTE(pRemoved != pContainingBuffer);
- _ASSERTE(pContainingBuffer == GetHead());
-
- // De-allocate the buffer.
- m_pManager->DeAllocateBuffer(pRemoved);
- }
-
- // If the event is non-NULL, pop it.
- if (pNext != NULL && pContainingBuffer != NULL)
- {
- pContainingBuffer->PopNext(pNext);
- }
-}
-
-EventPipeThread *EventPipeBufferList::GetThread()
+void EventPipeBufferList::SetLastReadSequenceNumber(unsigned int sequenceNumber)
{
LIMITED_METHOD_CONTRACT;
- return m_pThread;
+ m_lastReadSequenceNumber = sequenceNumber;
}
#ifdef _DEBUG
#ifdef FEATURE_PERFTRACING
#include "eventpipe.h"
+#include "eventpipeeventinstance.h"
+#include "eventpipethread.h"
#include "spinlock.h"
class EventPipeBuffer;
class EventPipeFile;
class EventPipeSession;
class EventPipeThread;
-
-void ReleaseEventPipeThreadRef(EventPipeThread* pThread);
-void AcquireEventPipeThreadRef(EventPipeThread* pThread);
-typedef Wrapper<EventPipeThread*, AcquireEventPipeThreadRef, ReleaseEventPipeThreadRef> EventPipeThreadHolder;
-
-typedef MapSHashWithRemove<EventPipeBufferManager *, EventPipeBuffer *> EventPipeWriteBuffers;
-typedef MapSHashWithRemove<EventPipeBufferManager *, EventPipeBufferList *> EventPipeBufferLists;
-
-#ifndef __GNUC__
- #define EVENTPIPE_THREAD_LOCAL __declspec(thread)
-#else // !__GNUC__
- #define EVENTPIPE_THREAD_LOCAL thread_local
-#endif // !__GNUC__
-
-class EventPipeThread
-{
- static EVENTPIPE_THREAD_LOCAL EventPipeThreadHolder gCurrentEventPipeThreadHolder;
-
- ~EventPipeThread();
-
- // The EventPipeThreadHolder maintains one count while the thread is alive
- // and each session's EventPipeBufferList maintains one count while it
- // exists
- LONG m_refCount;
-
- // this is a dictionary of { buffer-manager, buffer } this thread is
- // allowed to write to if exists or non-null, it must match the tail of the
- // m_bufferList
- // this pointer is protected by m_lock
- EventPipeWriteBuffers *m_pWriteBuffers = nullptr;
-
- // this is a dictionary of { buffer-manager, list of buffers } that were
- // written to by this thread
- // it is protected by EventPipeBufferManager::m_lock
- EventPipeBufferLists *m_pBufferLists = nullptr;
-
- // This lock is designed to have low contention. Normally it is only taken by this thread,
- // but occasionally it may also be taken by another thread which is trying to collect and drain
- // buffers from all threads.
- SpinLock m_lock;
-
- //
- EventPipeSession *m_pRundownSession = nullptr;
-
-#ifdef DEBUG
- template <typename T>
- static bool AllValuesAreNull(T &map)
- {
- LIMITED_METHOD_CONTRACT;
- for (typename T::Iterator iter = map.Begin(); iter != map.End(); ++iter)
- if (iter->Value() != nullptr)
- return false;
- return true;
- }
-#endif // DEBUG
-
-public:
- static EventPipeThread *Get();
- static EventPipeThread *GetOrCreate();
- static void Set(EventPipeThread *pThread);
-
- bool IsRundownThread() const
- {
- LIMITED_METHOD_CONTRACT;
- return (m_pRundownSession != nullptr);
- }
-
- void SetAsRundownThread(EventPipeSession *pSession)
- {
- LIMITED_METHOD_CONTRACT;
- m_pRundownSession = pSession;
- }
-
- EventPipeSession *GetRundownSession() const
- {
- LIMITED_METHOD_CONTRACT;
- return m_pRundownSession;
- }
-
- EventPipeThread();
- void AddRef();
- void Release();
- SpinLock *GetLock();
- Volatile<EventPipeSessionID> m_writingEventInProgress;
-
- EventPipeBuffer *GetWriteBuffer(EventPipeBufferManager *pBufferManager);
- void SetWriteBuffer(EventPipeBufferManager *pBufferManager, EventPipeBuffer *pNewBuffer);
- EventPipeBufferList *GetBufferList(EventPipeBufferManager *pBufferManager);
- void SetBufferList(EventPipeBufferManager *pBufferManager, EventPipeBufferList *pBufferList);
- void Remove(EventPipeBufferManager *pBufferManager);
-
- void SetSessionWriteInProgress(uint64_t index)
- {
- LIMITED_METHOD_CONTRACT;
- m_writingEventInProgress.Store((index < 64) ? (1ULL << index) : UINT64_MAX);
- }
-
- EventPipeSessionID GetSessionWriteInProgress() const
- {
- LIMITED_METHOD_CONTRACT;
- return m_writingEventInProgress.Load();
- }
-};
+struct EventPipeSequencePoint;
class EventPipeBufferManager
{
friend class EventPipeBufferList;
private:
-
- // A list of linked-lists of buffer objects.
- // Each entry in this list represents a set of buffers owned by a single thread.
- // The actual Thread object has a pointer to the object contained in this list. This ensures that
- // each thread can access its own list, while at the same time, ensuring that when
- // a thread is destroyed, we keep the buffers around without having to perform any
- // migration or book-keeping.
- SList<SListElem<EventPipeBufferList*>> *m_pPerThreadBufferList;
+ // The session this buffer manager belongs to
+ EventPipeSession* m_pSession;
+
+ // A list of per-thread session state
+ // Each entry in this list represents the session state owned by a single thread
+ // which includes the list of buffers the thread has written and its current
+ // event sequence number. The EventPipeThread object also has a pointer to the
+ // session state contained in this list. This ensures that each thread can access
+ // its own data, while at the same time, ensuring that when a thread is destroyed,
+ // we keep the buffers around without having to perform any migration or
+ // book-keeping.
+ SList<SListElem<EventPipeThreadSessionState*>> *m_pThreadSessionStateList;
// The total allocation size of buffers under management.
size_t m_sizeOfAllBuffers;
+ // The maximum allowable size of buffers under management.
+ // Attempted allocations above this threshold result in
+ // dropped events.
+ size_t m_maxSizeOfAllBuffers;
+
+ // The amount of allocations we can do at this moment before
+ // triggering a sequence point
+ size_t m_remainingSequencePointAllocationBudget;
+
+ // The total amount of allocations we can do after one sequence
+ // point before triggering the next one
+ size_t m_sequencePointAllocationBudget;
+
+ // A queue of sequence points.
+ SList<EventPipeSequencePoint> m_sequencePoints;
+
// Lock to protect access to the per-thread buffer list and total allocation size.
SpinLock m_lock;
Volatile<BOOL> m_writeEventSuspending;
+ // Iterator state for reader thread
+ // These are not protected by m_lock and expected to only be used on the reader thread
+ EventPipeEventInstance* m_pCurrentEvent;
+ EventPipeBuffer* m_pCurrentBuffer;
+ EventPipeBufferList* m_pCurrentBufferList;
+
#ifdef _DEBUG
// For debugging purposes.
unsigned int m_numBuffersAllocated;
unsigned int m_numBuffersLeaked;
Volatile<LONG> m_numEventsStored;
Volatile<LONG> m_numEventsDropped;
+ unsigned long m_numEventsWritten;
#endif // _DEBUG
- unsigned long m_numEventsWritten;
// Allocate a new buffer for the specified thread.
// This function will store the buffer in the thread's buffer list for future use and also return it here.
// A NULL return value means that a buffer could not be allocated.
- EventPipeBuffer* AllocateBufferForThread(EventPipeSession &session, unsigned int requestSize, BOOL & writeSuspended);
+ EventPipeBuffer* AllocateBufferForThread(EventPipeThreadSessionState* pSessionState, unsigned int requestSize, BOOL & writeSuspended);
// Add a buffer to the thread buffer list.
void AddBufferToThreadBufferList(EventPipeBufferList *pThreadBuffers, EventPipeBuffer *pBuffer);
- // Find the thread that owns the oldest buffer that is eligible to be stolen.
- EventPipeBufferList* FindThreadToStealFrom();
+ // Enqueue a sequence point into the queue.
+ void EnqueueSequencePoint(EventPipeSequencePoint* pEnqueuedSequencePoint);
+
+ // Dequeue a sequence point from the queue. This is a no-op if the queue is empty.
+ void DequeueSequencePoint();
+
+ // Peek the first sequence point in the queue. Returns FALSE if the queue is empty.
+ bool TryPeekSequencePoint(EventPipeSequencePoint** ppSequencePoint);
+
+ // Inits a sequence point that has the list of current threads and sequence
+ // numbers (Requires m_lock is already held)
+ void InitSequencePointThreadListHaveLock(EventPipeSequencePoint* pSequencePoint);
// De-allocates the input buffer.
void DeAllocateBuffer(EventPipeBuffer *pBuffer);
+ // Detaches this buffer from an active writer thread and marks it read-only so that the reader
+ // thread can use it. If the writer thread has not yet stored the buffer into its thread-local
+ // slot it will not be converted, but such buffers have no events in them so there is no reason
+ // to read them.
+ bool TryConvertBufferToReadOnly(EventPipeBuffer* pNewReadBuffer);
+
+ // Finds the first buffer in EventPipeBufferList that has a readable event prior to beforeTimeStamp,
+ // starting with pBuffer
+ EventPipeBuffer* AdvanceToNonEmptyBuffer(EventPipeBufferList* pBufferList,
+ EventPipeBuffer* pBuffer,
+ LARGE_INTEGER beforeTimeStamp);
+
+ // -------------- Reader Iteration API ----------------
+ // An iterator that can enumerate all the events which have been written into this buffer manager.
+ // Initially the iterator starts uninitialized and GetCurrentEvent() returns NULL. Calling MoveNextXXX()
+ // attempts to advance the cursor to the next event. If there is no event prior to stopTimeStamp then
+ // the GetCurrentEvent() again returns NULL, otherwise it returns that event. The event pointer returned
+ // by GetCurrentEvent() is valid until MoveNextXXX() is called again. Once all events in a buffer have
+ // been read the iterator will delete that buffer from the pool.
+
+ // Moves to the next oldest event searching across all threads. If there is no event older than
+ // stopTimeStamp then GetCurrentEvent() will return NULL.
+ void MoveNextEventAnyThread(LARGE_INTEGER stopTimeStamp);
+
+ // Moves to the next oldest event from the same thread as the current event. If there is no event
+ // older than stopTimeStamp then GetCurrentEvent() will return NULL. This should only be called
+ // when GetCurrentEvent() is non-null (because we need to know what thread's events to iterate)
+ void MoveNextEventSameThread(LARGE_INTEGER stopTimeStamp);
+
+    // Returns the current event the iteration cursor is on, or NULL if the iteration is uninitialized/
+ // the last call to MoveNextXXX() didn't find any suitable event.
+ EventPipeEventInstance* GetCurrentEvent();
+
+ // Gets the sequence number corresponding to event from GetCurrentEvent()
+ unsigned int GetCurrentSequenceNumber();
+
+ // Gets the buffer corresponding to event from GetCurrentEvent()
+ EventPipeBuffer* GetCurrentEventBuffer();
+
+ // Gets the buffer list corresponding to event from GetCurrentEvent()
+ EventPipeBufferList* GetCurrentEventBufferList();
+
public:
- EventPipeBufferManager();
+ EventPipeBufferManager(EventPipeSession* pEventSession, size_t maxSizeOfAllBuffers, size_t sequencePointAllocationBudget);
~EventPipeBufferManager();
// Write an event to the input thread's current event buffer.
// Otherwise, if a stack trace is needed, one will be automatically collected.
bool WriteEvent(Thread *pThread, EventPipeSession &session, EventPipeEvent &event, EventPipeEventPayload &payload, LPCGUID pActivityId, LPCGUID pRelatedActivityId, Thread *pEventThread = NULL, StackContents *pStack = NULL);
- // Suspends all WriteEvent activity. All existing buffers will be in the
+ // Inits a sequence point that has the list of current threads and sequence
+ // numbers
+ void InitSequencePointThreadList(EventPipeSequencePoint* pSequencePoint);
+
+ // Suspends all WriteEvent activity. All existing buffers will be in the
// READ_ONLY state and no new EventPipeBuffers or EventPipeBufferLists can be created. Calls to
// WriteEvent that start during the suspension period or were in progress but hadn't yet recorded
// their event into a buffer before the start of the suspension period will return false and the
// The stopTimeStamp is used to determine when tracing was stopped to ensure that we
// skip any events that might be partially written due to races when tracing is stopped.
void WriteAllBuffersToFile(EventPipeFile *pFile, LARGE_INTEGER stopTimeStamp);
+ void WriteAllBuffersToFileV3(EventPipeFile *pFastSerializableObject, LARGE_INTEGER stopTimeStamp);
+ void WriteAllBuffersToFileV4(EventPipeFile *pFastSerializableObject, LARGE_INTEGER stopTimeStamp);
// Attempt to de-allocate resources as best we can. It is possible for some buffers to leak because
// threads can be in the middle of a write operation and get blocked, and we may not get an opportunity
// The number of buffers in the list.
unsigned int m_bufferCount;
+ // The sequence number of the last event that was read, only
+ // updated/read by the reader thread.
+ unsigned int m_lastReadSequenceNumber;
+
public:
EventPipeBufferList(EventPipeBufferManager *pManager, EventPipeThread* pThread);
// Get the count of buffers in the list.
unsigned int GetCount() const;
- // Pop the event from the buffer, and potentially clean-up the previous buffer
- // pNext is expected to be the last event returned from TryGetBuffer()->PeekNext()
- void PopNextEvent(EventPipeBuffer *pContainingBuffer, EventPipeEventInstance *pNext);
-
// Get the thread associated with this list.
EventPipeThread* GetThread();
- // Get the first buffer that might contain the oldest event
- EventPipeBuffer* TryGetBuffer(LARGE_INTEGER beforeTimeStamp);
-
- // Convert the buffer into read only
- bool TryConvertBufferToReadOnly(EventPipeBuffer *pNewReadBuffer);
+ // Read/Write the last read sequence number
+ unsigned int GetLastReadSequenceNumber();
+ void SetLastReadSequenceNumber(unsigned int sequenceNumber);
#ifdef _DEBUG
// Validate the consistency of the list.
LPCWSTR strOutputPath,
IpcStream *const pStream,
EventPipeSessionType sessionType,
+ EventPipeSerializationFormat format,
unsigned int circularBufferSizeInMB,
const EventPipeProviderConfiguration *pProviders,
uint32_t numProviders,
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
+ PRECONDITION(format < EventPipeSerializationFormat::Count);
PRECONDITION(circularBufferSizeInMB > 0);
PRECONDITION(numProviders > 0 && pProviders != nullptr);
PRECONDITION(EventPipe::IsLockOwnedByCurrentThread());
}
CONTRACTL_END;
- const EventPipeSessionID SessionId = GenerateSessionId();
- return !IsValidId(SessionId) ? nullptr : new EventPipeSession(SessionId, strOutputPath, pStream, sessionType, circularBufferSizeInMB, pProviders, numProviders);
+ const unsigned int index = GenerateSessionIndex();
+ if (index >= EventPipe::MaxNumberOfSessions)
+ {
+ return nullptr;
+ }
+ return new EventPipeSession(index, strOutputPath, pStream, sessionType, format, circularBufferSizeInMB, pProviders, numProviders);
}
void EventPipeConfiguration::DeleteSession(EventPipeSession *pSession)
// Construct the event instance.
EventPipeEventInstance *pInstance = new EventPipeEventInstance(
*m_pMetadataEvent,
+#ifdef FEATURE_PAL
+ PAL_GetCurrentOSThreadId(),
+#else
GetCurrentThreadId(),
+#endif
pInstancePayload,
instancePayloadSize,
NULL /* pActivityId */,
LPCWSTR strOutputPath,
IpcStream *const pStream,
EventPipeSessionType sessionType,
+ EventPipeSerializationFormat format,
unsigned int circularBufferSizeInMB,
const EventPipeProviderConfiguration *pProviders,
uint32_t numProviders,
}
private:
- // Helper function used to generate a "EventPipeSession ID" (bitmask).
- EventPipeSessionID GenerateSessionId() const
+ // Helper function used to locate a free index in the range 0 - EventPipe::MaxNumberOfSessions
+ // Returns EventPipe::MaxNumberOfSessions if there are no free indexes
+ unsigned int GenerateSessionIndex() const
{
LIMITED_METHOD_CONTRACT;
-
+ _ASSERTE(EventPipe::MaxNumberOfSessions == 64);
uint64_t id = 1;
- for (uint64_t i = 0; i < 64; ++i, id <<= i)
+ // Walk the 64-bit m_activeSessions mask one bit per iteration. The mask must
+ // advance by a single bit (id <<= 1) so that on iteration i we test bit i;
+ // shifting by the loop counter (id <<= i) would advance by 0,1,2,3,... bits
+ // and test positions 0,1,3,6,10,... — returning an index that does not match
+ // the bit actually found free.
+ for (unsigned int i = 0; i < 64; ++i, id <<= 1)
if ((m_activeSessions & id) == 0)
- break;
- return id;
+ return i;
+ return EventPipe::MaxNumberOfSessions;
}
// Get the provider without taking the lock.
EventPipeEventInstance::EventPipeEventInstance(
EventPipeEvent &event,
- DWORD threadID,
+ ULONGLONG threadId,
BYTE *pData,
unsigned int length,
LPCGUID pActivityId,
m_debugEventEnd = 0xCAFEBABE;
#endif // _DEBUG
m_pEvent = &event;
- m_threadID = threadID;
+ m_threadId = threadId;
if (pActivityId != NULL)
{
m_activityId = *pActivityId;
}
}
-unsigned int EventPipeEventInstance::GetAlignedTotalSize() const
+unsigned int EventPipeEventInstance::GetAlignedTotalSize(EventPipeSerializationFormat format) const
{
CONTRACT(unsigned int)
{
CONTRACT_END;
// Calculate the size of the total payload so that it can be written to the file.
- unsigned int payloadLength =
- sizeof(m_metadataId) + // Metadata ID
- sizeof(m_threadID) + // Thread ID
- sizeof(m_timeStamp) + // TimeStamp
- sizeof(m_activityId) + // Activity ID
- sizeof(m_relatedActivityId) + // Related Activity ID
- sizeof(m_dataLength) + // Data payload length
- m_dataLength + // Event payload data
- sizeof(unsigned int) + // Prepended stack payload size in bytes
- m_stackContents.GetSize(); // Stack payload size
+ unsigned int payloadLength = 0;
+
+ if (format == EventPipeSerializationFormat::NetPerfV3)
+ {
+ payloadLength =
+ sizeof(m_metadataId) + // Metadata ID
+ sizeof(DWORD) + // Thread ID
+ sizeof(m_timeStamp) + // TimeStamp
+ sizeof(m_activityId) + // Activity ID
+ sizeof(m_relatedActivityId) + // Related Activity ID
+ sizeof(m_dataLength) + // Data payload length
+ m_dataLength + // Event payload data
+ sizeof(unsigned int) + // Prepended stack payload size in bytes
+ m_stackContents.GetSize(); // Stack payload size
+ }
+ else if (format == EventPipeSerializationFormat::NetTraceV4)
+ {
+ payloadLength =
+ sizeof(m_metadataId) + // Metadata ID
+ sizeof(unsigned int) + // Sequence number (implied by the buffer containing the event instance)
+ sizeof(m_threadId) + // Thread ID
+ sizeof(ULONGLONG) + // Capture Thread ID (implied by the buffer containing the event instance)
+ sizeof(unsigned int) + // Stack intern table id
+ sizeof(m_timeStamp) + // TimeStamp
+ sizeof(m_activityId) + // Activity ID
+ sizeof(m_relatedActivityId) + // Related Activity ID
+ sizeof(m_dataLength) + // Data payload length
+ m_dataLength; // Event payload data
+ }
+ else
+ {
+ _ASSERTE(!"Unrecognized format");
+ }
+
// round up to ALIGNMENT_SIZE bytes
if (payloadLength % ALIGNMENT_SIZE != 0)
SString message;
message.Printf("Provider=%s/EventID=%d/Version=%d", providerName.GetANSI(scratch), m_pEvent->GetEventID(), m_pEvent->GetEventVersion());
- pFile->WriteEvent(m_timeStamp, m_threadID, message, m_stackContents);
+ pFile->WriteEvent(m_timeStamp, (DWORD)m_threadId, message, m_stackContents);
}
EX_CATCH{} EX_END_CATCH(SwallowAllExceptions);
}
}
#endif // _DEBUG
+// Construct an empty sequence point. The timestamp starts at zero and is
+// expected to be filled in by the code that captures the sequence point.
+EventPipeSequencePoint::EventPipeSequencePoint()
+{
+ LIMITED_METHOD_CONTRACT;
+ TimeStamp.QuadPart = 0;
+}
+
+EventPipeSequencePoint::~EventPipeSequencePoint()
+{
+ // Each entry in the map owns a ref-count on the corresponding thread
+ // NOTE(review): assumes every key added to ThreadSequenceNumbers took a
+ // reference on its EventPipeThread when inserted — confirm at the insert site.
+ for (ThreadSequenceNumberMap::Iterator pCur = ThreadSequenceNumbers.Begin();
+ pCur != ThreadSequenceNumbers.End();
+ pCur++)
+ {
+ pCur->Key()->GetThread()->Release();
+ }
+}
+
#endif // FEATURE_PERFTRACING
#include "eventpipeevent.h"
#include "eventpipesession.h"
#include "eventpipeblock.h"
+#include "eventpipethread.h"
#include "fastserializableobject.h"
#include "fastserializer.h"
public:
- EventPipeEventInstance(EventPipeEvent &event, DWORD threadID, BYTE *pData, unsigned int length, LPCGUID pActivityId, LPCGUID pRelatedActivityId);
+ EventPipeEventInstance(EventPipeEvent &event, ULONGLONG threadID, BYTE *pData, unsigned int length, LPCGUID pActivityId, LPCGUID pRelatedActivityId);
void EnsureStack(const EventPipeSession &session);
m_metadataId = metadataId;
}
- DWORD GetThreadId() const
+ DWORD GetThreadId32() const
{
LIMITED_METHOD_CONTRACT;
- return m_threadID;
+ return (DWORD)m_threadId;
+ }
+
+ ULONGLONG GetThreadId64() const
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return m_threadId;
}
const GUID* GetActivityId() const
return m_stackContents.GetSize();
}
- unsigned int GetAlignedTotalSize() const;
+ unsigned int GetAlignedTotalSize(EventPipeSerializationFormat format) const;
#ifdef _DEBUG
// Serialize this event to the JSON file.
EventPipeEvent *m_pEvent;
unsigned int m_metadataId;
- DWORD m_threadID;
+ ULONGLONG m_threadId;
LARGE_INTEGER m_timeStamp;
GUID m_activityId;
GUID m_relatedActivityId;
void SetTimeStamp(LARGE_INTEGER timeStamp);
};
+typedef MapSHash<EventPipeThreadSessionState *, unsigned int> ThreadSequenceNumberMap;
+
+// A point in time marker that is used as a boundary when emitting events.
+// The events in a Nettrace file are not emitted in a fully sorted order
+// but we do guarantee that all events before a sequence point are emitted
+// prior to any events after the sequence point
+struct EventPipeSequencePoint
+{
+ // Entry in EventPipeBufferManager m_sequencePointList
+ SLink m_Link;
+
+ // The timestamp the sequence point was captured
+ LARGE_INTEGER TimeStamp;
+ ThreadSequenceNumberMap ThreadSequenceNumbers;
+
+ EventPipeSequencePoint();
+ ~EventPipeSequencePoint();
+};
+
#endif // FEATURE_PERFTRACING
#endif // __EVENTPIPE_EVENTINSTANCE_H__
#ifdef FEATURE_PERFTRACING
-EventPipeFile::EventPipeFile(StreamWriter *pStreamWriter) : FastSerializableObject(3, 0)
+
+// Allocate a variable-length StackHashEntry holding a copy of pStack's bytes.
+// The entry is a single raw BYTE[] allocation (header + trailing stack bytes),
+// so it must be freed via delete[] of the underlying BYTE buffer.
+// Returns NULL on allocation failure.
+StackHashEntry* StackHashEntry::CreateNew(StackContents* pStack, ULONG id, ULONG hash)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ StackHashEntry* pEntry = (StackHashEntry*) new (nothrow) BYTE[offsetof(StackHashEntry, StackBytes) + pStack->GetSize()];
+ if (pEntry == NULL)
+ {
+ return NULL;
+ }
+ pEntry->Id = id;
+ pEntry->Hash = hash;
+ pEntry->StackSizeInBytes = pStack->GetSize();
+ memcpy_s(pEntry->StackBytes, pStack->GetSize(), pStack->GetPointer(), pStack->GetSize());
+ return pEntry;
+}
+
+// Build a lookup key that views this entry's stack bytes (non-owning) and
+// reuses the hash computed when the entry was created.
+StackHashKey StackHashEntry::GetKey() const
+{
+ LIMITED_METHOD_CONTRACT;
+ StackHashKey key((BYTE*)StackBytes, StackSizeInBytes, Hash);
+ return key;
+}
+
+// Key over a live StackContents; hashes the stack bytes eagerly. The key does
+// not own pStackBytes — the StackContents must outlive the key.
+StackHashKey::StackHashKey(StackContents* pStack) :
+ pStackBytes(pStack->GetPointer()),
+ Hash(HashBytes(pStack->GetPointer(), pStack->GetSize())),
+ StackSizeInBytes(pStack->GetSize())
+{}
+
+// Key over an existing byte buffer with a precomputed hash (used by GetKey()).
+StackHashKey::StackHashKey(BYTE* pStackBytes, ULONG stackSizeInBytes, ULONG hash) :
+ pStackBytes(pStackBytes),
+ Hash(hash),
+ StackSizeInBytes(stackSizeInBytes)
+{}
+
+// Maps a serialization format to the version number written into the
+// FastSerialization file header (NetPerf emits 3, NetTrace emits 4).
+DWORD GetFileVersion(EventPipeSerializationFormat format)
+{
+ LIMITED_METHOD_CONTRACT;
+ switch(format)
+ {
+ case EventPipeSerializationFormat::NetPerfV3:
+ return 3;
+ case EventPipeSerializationFormat::NetTraceV4:
+ return 4;
+ default:
+ _ASSERTE(!"Unrecognized EventPipeSerializationFormat");
+ return 0;
+ }
+}
+
+// Maps a serialization format to the minimum reader version written into the
+// file header — the oldest reader version that can parse this file.
+DWORD GetFileMinVersion(EventPipeSerializationFormat format)
+{
+ LIMITED_METHOD_CONTRACT;
+ switch (format)
+ {
+ case EventPipeSerializationFormat::NetPerfV3:
+ return 0;
+ case EventPipeSerializationFormat::NetTraceV4:
+ return 4;
+ default:
+ _ASSERTE(!"Unrecognized EventPipeSerializationFormat");
+ return 0;
+ }
+}
+
+EventPipeFile::EventPipeFile(StreamWriter *pStreamWriter, EventPipeSerializationFormat format) :
+ FastSerializableObject(GetFileVersion(format), GetFileMinVersion(format), format >= EventPipeSerializationFormat::NetTraceV4)
{
CONTRACTL
{
}
CONTRACTL_END;
- m_pBlock = new EventPipeBlock(100 * 1024);
+ m_format = format;
+ m_pBlock = new EventPipeEventBlock(100 * 1024, format);
+ m_pMetadataBlock = new EventPipeMetadataBlock(100 * 1024);
+ m_pStackBlock = new EventPipeStackBlock(100 * 1024);
// File start time information.
GetSystemTime(&m_fileOpenSystemTime);
m_samplingRateInNs = SampleProfiler::GetSamplingRate();
- // Create the file stream and write the header.
- m_pSerializer = new FastSerializer(pStreamWriter);
+ bool fSuccess = true;
+ if (m_format >= EventPipeSerializationFormat::NetTraceV4)
+ {
+ const char* pHeader = "Nettrace";
+ uint32_t bytesWritten = 0;
+ fSuccess = pStreamWriter->Write(pHeader, 8, bytesWritten) && bytesWritten == 8;
+ }
+ if (fSuccess)
+ {
+ // Create the file stream and write the FastSerialization header.
+ m_pSerializer = new FastSerializer(pStreamWriter);
+ }
+ else
+ {
+ m_pSerializer = nullptr;
+ }
m_serializationLock.Init(LOCK_TYPE_DEFAULT);
m_pMetadataIds = new MapSHashWithRemove<EventPipeEvent*, unsigned int>();
- // Start and 0 - The value is always incremented prior to use, so the first ID will be 1.
+ // Start at 0 - The value is always incremented prior to use, so the first ID will be 1.
m_metadataIdCounter = 0;
+ // Start at 0 - The value is always incremented prior to use, so the first ID will be 1.
+ m_stackIdCounter = 0;
+
+#ifdef DEBUG
+ QueryPerformanceCounter(&m_lastSortedTimestamp);
+#endif
+
// Write the first object to the file.
m_pSerializer->WriteObject(this);
}
if (m_pBlock != NULL && m_pSerializer != NULL)
WriteEnd();
+ for (EventPipeStackHash::Iterator pCur = m_stackHash.Begin(); pCur != m_stackHash.End(); pCur++)
+ {
+ delete *pCur;
+ }
+
delete m_pBlock;
+ delete m_pMetadataBlock;
+ delete m_pStackBlock;
delete m_pSerializer;
delete m_pMetadataIds;
}
+EventPipeSerializationFormat EventPipeFile::GetSerializationFormat() const
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_format;
+}
+
bool EventPipeFile::HasErrors() const
{
LIMITED_METHOD_CONTRACT;
return (m_pSerializer == nullptr) || m_pSerializer->HasWriteErrors();
}
-void EventPipeFile::WriteEvent(EventPipeEventInstance &instance)
+void EventPipeFile::WriteEvent(EventPipeEventInstance &instance, ULONGLONG captureThreadId, unsigned int sequenceNumber, BOOL isSortedEvent)
{
CONTRACTL
{
}
CONTRACTL_END;
+#ifdef DEBUG
+ _ASSERTE(instance.GetTimeStamp()->QuadPart >= m_lastSortedTimestamp.QuadPart);
+ if (isSortedEvent)
+ {
+ m_lastSortedTimestamp = *(instance.GetTimeStamp());
+ }
+#endif
+
+ unsigned int stackId = 0;
+ if (m_format >= EventPipeSerializationFormat::NetTraceV4)
+ {
+ stackId = GetStackId(instance);
+ }
+
// Check to see if we've seen this event type before.
// If not, then write the event metadata to the event stream first.
unsigned int metadataId = GetMetadataId(*instance.GetEvent());
EventPipeEventInstance* pMetadataInstance = EventPipe::BuildEventMetadataEvent(instance, metadataId);
- WriteToBlock(*pMetadataInstance, 0); // 0 breaks recursion and represents the metadata event.
+ WriteEventToBlock(*pMetadataInstance, 0); // metadataId=0 breaks recursion and represents the metadata event.
SaveMetadataId(*instance.GetEvent(), metadataId);
delete pMetadataInstance;
}
- WriteToBlock(instance, metadataId);
+ WriteEventToBlock(instance, metadataId, captureThreadId, sequenceNumber, stackId, isSortedEvent);
}
-void EventPipeFile::Flush()
+void EventPipeFile::WriteSequencePoint(EventPipeSequencePoint* pSequencePoint)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(pSequencePoint != nullptr);
+ }
+ CONTRACTL_END;
+
+ if (m_format < EventPipeSerializationFormat::NetTraceV4)
+ {
+ // sequence points aren't used in NetPerf format
+ return;
+ }
+
+ Flush(FlushAllBlocks);
+ EventPipeSequencePointBlock sequencePointBlock(pSequencePoint);
+ m_pSerializer->WriteObject(&sequencePointBlock);
+
+ // stack cache resets on sequence points
+ m_stackIdCounter = 0;
+ for (EventPipeStackHash::Iterator pCur = m_stackHash.Begin(); pCur != m_stackHash.End(); pCur++)
+ {
+ delete *pCur;
+ }
+ m_stackHash.RemoveAll();
+}
+
+void EventPipeFile::Flush(FlushFlags flags)
{
// Write existing buffer to the stream/file regardless of whether it is full or not.
CONTRACTL
MODE_ANY;
}
CONTRACTL_END;
- m_pSerializer->WriteObject(m_pBlock); // we write current block to the disk, whether it's full or not
- m_pBlock->Clear();
+ // we write current blocks to the disk, whether they are full or not
+ if ((m_pMetadataBlock->GetBytesWritten() != 0) && ((flags & FlushMetadataBlock) != 0))
+ {
+ _ASSERTE(m_format >= EventPipeSerializationFormat::NetTraceV4);
+ m_pSerializer->WriteObject(m_pMetadataBlock);
+ m_pMetadataBlock->Clear();
+ }
+ if ((m_pStackBlock->GetBytesWritten() != 0) && ((flags & FlushStackBlock) != 0))
+ {
+ _ASSERTE(m_format >= EventPipeSerializationFormat::NetTraceV4);
+ m_pSerializer->WriteObject(m_pStackBlock);
+ m_pStackBlock->Clear();
+ }
+ if ((m_pBlock->GetBytesWritten() != 0) && ((flags & FlushEventBlock) != 0))
+ {
+ m_pSerializer->WriteObject(m_pBlock);
+ m_pBlock->Clear();
+ }
}
void EventPipeFile::WriteEnd()
}
CONTRACTL_END;
- m_pSerializer->WriteObject(m_pBlock); // we write current block to the disk, whether it's full or not
-
- m_pBlock->Clear();
+ Flush();
// "After the last EventBlock is emitted, the stream is ended by emitting a NullReference Tag which indicates that there are no more objects in the stream to read."
// see https://github.com/Microsoft/perfview/blob/master/src/TraceEvent/EventPipe/EventPipeFormat.md for more
m_pSerializer->WriteTag(FastSerializerTags::NullReference);
}
-void EventPipeFile::WriteToBlock(EventPipeEventInstance &instance, unsigned int metadataId)
+void EventPipeFile::WriteEventToBlock(EventPipeEventInstance &instance,
+ unsigned int metadataId,
+ ULONGLONG captureThreadId,
+ unsigned int sequenceNumber,
+ unsigned int stackId,
+ BOOL isSortedEvent)
{
CONTRACTL
{
instance.SetMetadataId(metadataId);
- if (m_pBlock->WriteEvent(instance))
+ // If we are flushing events we need to flush metadata and stacks as well
+ // to ensure referenced metadata/stacks were written to the file before the
+ // event which referenced them.
+ FlushFlags flags = FlushAllBlocks;
+ EventPipeEventBlockBase* pBlock = m_pBlock;
+ if(metadataId == 0 && m_format >= EventPipeSerializationFormat::NetTraceV4)
+ {
+ flags = FlushMetadataBlock;
+ pBlock = m_pMetadataBlock;
+ }
+
+ if (pBlock->WriteEvent(instance, captureThreadId, sequenceNumber, stackId, isSortedEvent))
return; // the block is not full, we added the event and continue
// we can't write this event to the current block (it's full)
// so we write what we have in the block to the serializer
- m_pSerializer->WriteObject(m_pBlock);
-
- m_pBlock->Clear();
+ Flush(flags);
- bool result = m_pBlock->WriteEvent(instance);
+ bool result = pBlock->WriteEvent(instance, captureThreadId, sequenceNumber, stackId, isSortedEvent);
_ASSERTE(result == true); // we should never fail to add event to a clear block (if we do the max size is too small)
}
m_pMetadataIds->Add(&event, metadataId);
}
+// Interns the stack for this event: returns the id of an identical previously
+// written stack if present, otherwise assigns the next id, records it in the
+// hash, and writes the stack into the stack block. The intern table and the
+// id counter reset at every sequence point (see WriteSequencePoint).
+unsigned int EventPipeFile::GetStackId(EventPipeEventInstance &instance)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(m_format >= EventPipeSerializationFormat::NetTraceV4);
+ }
+ CONTRACTL_END;
+
+ unsigned int stackId = 0;
+ StackHashEntry* pEntry = NULL;
+ StackHashKey key(instance.GetStack());
+ if (NULL == (pEntry = m_stackHash.Lookup(key)))
+ {
+ stackId = ++m_stackIdCounter;
+
+ pEntry = StackHashEntry::CreateNew(instance.GetStack(), stackId, key.Hash);
+ if (pEntry != NULL)
+ {
+ EX_TRY
+ {
+ m_stackHash.Add(pEntry);
+ }
+ EX_CATCH
+ {
+ // Add failed, so the table does not own the entry and the destructor
+ // will never free it. Release it here to avoid leaking; it was
+ // allocated as a raw BYTE[] in StackHashEntry::CreateNew.
+ delete[] reinterpret_cast<BYTE*>(pEntry);
+ pEntry = NULL;
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+ }
+
+ if (m_pStackBlock->WriteStack(stackId, instance.GetStack()))
+ return stackId;
+
+ // we can't write this stack to the current block (it's full)
+ // so we write what we have in the block to the serializer
+ Flush(FlushStackBlock);
+
+ bool result = m_pStackBlock->WriteStack(stackId, instance.GetStack());
+ _ASSERTE(result == true); // we should never fail to add event to a clear block (if we do the max size is too small)
+ }
+ else
+ {
+ stackId = pEntry->Id;
+ }
+
+ return stackId;
+}
+
#endif // FEATURE_PERFTRACING
class EventPipeConfiguration;
class EventPipeEventInstance;
class FastSerializer;
+struct EventPipeSequencePoint;
+
+// Non-owning lookup key for the stack intern table: a view of the raw stack
+// bytes plus a precomputed hash. The referenced bytes must outlive the key.
+struct StackHashKey
+{
+ BYTE* pStackBytes;
+ ULONG Hash;
+ ULONG StackSizeInBytes;
+
+ StackHashKey(StackContents* pStack);
+ StackHashKey(BYTE* pStack, ULONG stackSizeInBytes, ULONG hash);
+};
+
+// Variable-length intern-table entry. Allocated by CreateNew as a single raw
+// BYTE[] (header + StackSizeInBytes of stack data appended after StackBytes),
+// so it must be freed via delete[] of the underlying BYTE buffer.
+struct StackHashEntry
+{
+ ULONG Id;
+ ULONG Hash;
+ ULONG StackSizeInBytes;
+ // This is the first byte of StackSizeInBytes bytes of stack data
+ BYTE StackBytes[1];
+
+ static StackHashEntry* CreateNew(StackContents* pStack, ULONG id, ULONG hash);
+ StackHashKey GetKey() const;
+};
+
+// SHash traits for the stack intern table: keys entries by their stack bytes,
+// comparing by exact byte equality and hashing with the precomputed hash.
+class EventPipeStackHashTraits : public NoRemoveSHashTraits<DefaultSHashTraits<StackHashEntry*>>
+{
+public:
+ typedef typename DefaultSHashTraits<StackHashEntry*>::element_t element_t;
+ typedef typename DefaultSHashTraits<StackHashEntry*>::count_t count_t;
+
+ typedef const StackHashKey key_t;
+
+ static key_t GetKey(element_t e)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return e->GetKey();
+ }
+ static BOOL Equals(key_t k1, key_t k2)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return k1.StackSizeInBytes == k2.StackSizeInBytes &&
+ memcmp(k1.pStackBytes, k2.pStackBytes, k1.StackSizeInBytes) == 0;
+ }
+ static count_t Hash(key_t k)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (count_t)(size_t)k.Hash;
+ }
+
+ static element_t Null() { LIMITED_METHOD_CONTRACT; return nullptr; }
+ static bool IsNull(const element_t &e) { LIMITED_METHOD_CONTRACT; return e == nullptr; }
+};
+
+typedef SHash<EventPipeStackHashTraits> EventPipeStackHash;
class EventPipeFile final : public FastSerializableObject
{
public:
- EventPipeFile(StreamWriter *pStreamWriter);
+ EventPipeFile(StreamWriter *pStreamWriter, EventPipeSerializationFormat format);
~EventPipeFile();
- void WriteEvent(EventPipeEventInstance &instance);
- void Flush();
+ EventPipeSerializationFormat GetSerializationFormat() const;
+ void WriteEvent(EventPipeEventInstance &instance, ULONGLONG captureThreadId, unsigned int sequenceNumber, BOOL isSortedEvent);
+ void WriteSequencePoint(EventPipeSequencePoint* pSequencePoint);
+ enum FlushFlags
+ {
+ FlushEventBlock = 1,
+ FlushMetadataBlock = 2,
+ FlushStackBlock = 4,
+ FlushAllBlocks = FlushEventBlock | FlushMetadataBlock | FlushStackBlock
+ };
+ void Flush(FlushFlags flags = FlushAllBlocks);
bool HasErrors() const;
const char *GetTypeName() override
unsigned int GetMetadataId(EventPipeEvent &event);
+ unsigned int GetStackId(EventPipeEventInstance &instance);
+
void SaveMetadataId(EventPipeEvent &event, unsigned int metadataId);
- void WriteToBlock(EventPipeEventInstance &instance, unsigned int metadataId);
+ void WriteEventToBlock(EventPipeEventInstance &instance,
+ unsigned int metadataId,
+ ULONGLONG captureThreadId = 0,
+ unsigned int sequenceNumber = 0,
+ unsigned int stackId = 0,
+ BOOL isSortedEvent = TRUE);
+
+ // The format to serialize
+ EventPipeSerializationFormat m_format;
// The object responsible for serialization.
FastSerializer *m_pSerializer;
- EventPipeBlock *m_pBlock;
+ EventPipeEventBlock *m_pBlock;
+ EventPipeMetadataBlock *m_pMetadataBlock;
+ EventPipeStackBlock *m_pStackBlock;
// The system time when the file was opened.
SYSTEMTIME m_fileOpenSystemTime;
MapSHashWithRemove<EventPipeEvent *, unsigned int> *m_pMetadataIds;
Volatile<LONG> m_metadataIdCounter;
+
+ unsigned int m_stackIdCounter;
+ EventPipeStackHash m_stackHash;
+#ifdef DEBUG
+ LARGE_INTEGER m_lastSortedTimestamp;
+#endif
};
#endif // FEATURE_PERFTRACING
BEGIN_QCALL;
{
+ // This was a quick and dirty mechanism for testing but it may not be the final
+ // configuration scheme we want. This path handles both the AI profiler scenario
+ // doing private reflection and the EnableEventPipe env var. If we want to flip
+ // the default for one but not the other we'll have to hoist the configuration
+ // check into managed code.
+ EventPipeSerializationFormat format = EventPipeSerializationFormat::NetPerfV3;
+ if (CLRConfig::GetConfigValue(CLRConfig::INTERNAL_EventPipeNetTraceFormat) > 0)
+ {
+ format = EventPipeSerializationFormat::NetTraceV4;
+ }
+
sessionID = EventPipe::Enable(
outputFile,
circularBufferSizeInMB,
pProviders,
numProviders,
outputFile != NULL ? EventPipeSessionType::File : EventPipeSessionType::Listener,
+ format,
nullptr);
}
END_QCALL;
{
pInstance->ProviderID = pNextInstance->GetEvent()->GetProvider();
pInstance->EventID = pNextInstance->GetEvent()->GetEventID();
- pInstance->ThreadID = pNextInstance->GetThreadId();
+ pInstance->ThreadID = pNextInstance->GetThreadId32();
pInstance->TimeStamp.QuadPart = pNextInstance->GetTimeStamp()->QuadPart;
pInstance->ActivityId = *pNextInstance->GetActivityId();
pInstance->RelatedActivityId = *pNextInstance->GetRelatedActivityId();
return;
}
+ // IPC should produce nettrace by default or be selectable via protocol
+ // but this is a simple starting point for testing
+ EventPipeSerializationFormat format = EventPipeSerializationFormat::NetPerfV3;
+ if (CLRConfig::GetConfigValue(CLRConfig::INTERNAL_EventPipeNetTraceFormat) > 0)
+ {
+ format = EventPipeSerializationFormat::NetTraceV4;
+ }
+
auto sessionId = EventPipe::Enable(
nullptr, // strOutputPath (ignored in this scenario)
payload->circularBufferSizeInMB, // circularBufferSizeInMB
payload->providerConfigs.Ptr(), // pConfigs
static_cast<uint32_t>(payload->providerConfigs.Size()), // numConfigs
EventPipeSessionType::IpcStream, // EventPipeSessionType
+ format, // EventPipeSerializationFormat
pStream); // IpcStream
if (sessionId == 0)
#ifdef FEATURE_PERFTRACING
EventPipeSession::EventPipeSession(
- EventPipeSessionID id,
+ unsigned int index,
LPCWSTR strOutputPath,
IpcStream *const pStream,
EventPipeSessionType sessionType,
+ EventPipeSerializationFormat format,
unsigned int circularBufferSizeInMB,
const EventPipeProviderConfiguration *pProviders,
uint32_t numProviders,
- bool rundownEnabled) : m_Id(id),
+ bool rundownEnabled) : m_Id((EventPipeSessionID)1 << index),
+ m_index(index),
m_pProviderList(new EventPipeSessionProviderList(pProviders, numProviders)),
- m_CircularBufferSizeInBytes(static_cast<size_t>(circularBufferSizeInMB) << 20),
- m_pBufferManager(new EventPipeBufferManager()),
m_rundownEnabled(rundownEnabled),
- m_SessionType(sessionType)
+ m_SessionType(sessionType),
+ m_format(format)
{
CONTRACTL
{
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
+ PRECONDITION(index < EventPipe::MaxNumberOfSessions);
+ PRECONDITION(EventPipe::MaxNumberOfSessions == 64); // If MaxNumberOfSessions ever changed, fix the m_id calculation above
+ PRECONDITION(format < EventPipeSerializationFormat::Count);
PRECONDITION(circularBufferSizeInMB > 0);
PRECONDITION(numProviders > 0 && pProviders != nullptr);
PRECONDITION(EventPipe::IsLockOwnedByCurrentThread());
}
CONTRACTL_END;
+ size_t sequencePointAllocationBudget = 0;
+ // Hard coded 10MB for now, we'll probably want to make
+ // this configurable later.
+ if (GetSessionType() != EventPipeSessionType::Listener &&
+ GetSerializationFormat() >= EventPipeSerializationFormat::NetTraceV4)
+ {
+ sequencePointAllocationBudget = 10 * 1024 * 1024;
+ }
+
+ m_pBufferManager = new EventPipeBufferManager(this, static_cast<size_t>(circularBufferSizeInMB) << 20, sequencePointAllocationBudget);
+
// Create the event pipe file.
// A NULL output path means that we should not write the results to a file.
// This is used in the EventListener case.
{
case EventPipeSessionType::File:
if (strOutputPath != nullptr)
- m_pFile = new EventPipeFile(new FileStreamWriter(SString(strOutputPath)));
+ m_pFile = new EventPipeFile(new FileStreamWriter(SString(strOutputPath)), format);
break;
case EventPipeSessionType::IpcStream:
- m_pFile = new EventPipeFile(new IpcStreamWriter(m_Id, pStream));
+ m_pFile = new EventPipeFile(new IpcStreamWriter(m_Id, pStream), format);
break;
default:
return !m_pFile->HasErrors();
}
-bool EventPipeSession::WriteEvent(
+bool EventPipeSession::WriteEventBuffered(
Thread *pThread,
EventPipeEvent &event,
EventPipeEventPayload &payload,
// Filter events specific to "this" session based on precomputed flag on provider/events.
return event.IsEnabled(GetId()) ?
- m_pBufferManager->WriteEvent(pThread, *this, event, payload, pActivityId, pRelatedActivityId) :
+ m_pBufferManager->WriteEvent(pThread, *this, event, payload, pActivityId, pRelatedActivityId, pEventThread, pStack) :
false;
}
-void EventPipeSession::WriteEvent(EventPipeEventInstance &instance)
+// Writes a fully-formed event instance straight to the session's file,
+// bypassing the buffer manager. Captures the writing thread's OS id and its
+// per-session sequence number (under the thread's lock) so the event carries
+// the same ordering metadata as buffered events. No-op if the session has no
+// file or the thread has no state for this session.
+void EventPipeSession::WriteEventUnbuffered(EventPipeEventInstance &instance, EventPipeThread* pThread)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (m_pFile == nullptr)
+ return;
+ EventPipeThreadSessionState* pState = nullptr;
+ ULONGLONG captureThreadId;
+ unsigned int sequenceNumber;
+ {
+ // Read id + sequence number atomically with respect to other writers on
+ // this thread, then bump the sequence number for the next event.
+ SpinLockHolder _slh(pThread->GetLock());
+ pState = pThread->GetSessionState(this);
+ if (pState == nullptr)
+ {
+ return;
+ }
+ captureThreadId = pThread->GetOSThreadId();
+ sequenceNumber = pState->GetSequenceNumber();
+ pState->IncrementSequenceNumber();
+ }
+ // TRUE: events written through this path are emitted in sorted order.
+ m_pFile->WriteEvent(instance, captureThreadId, sequenceNumber, TRUE);
+}
+
+void EventPipeSession::WriteSequencePointUnbuffered()
{
CONTRACTL
{
if (m_pFile == nullptr)
return;
- m_pFile->WriteEvent(instance);
+ EventPipeSequencePoint sequencePoint;
+ m_pBufferManager->InitSequencePointThreadList(&sequencePoint);
+ m_pFile->WriteSequencePoint(&sequencePoint);
}
EventPipeEventInstance *EventPipeSession::GetNextEvent()
class EventPipeFile;
class EventPipeSessionProvider;
class EventPipeSessionProviderList;
+class EventPipeThread;
// TODO: Revisit the need of this enum and its usage.
enum class EventPipeSessionType
IpcStream
};
+enum class EventPipeSerializationFormat
+{
+ // Default format used in .Net Core 2.0-3.0 Preview 6
+ // TBD - it may remain the default format .Net Core 3.0 when
+ // used with private EventPipe managed API via reflection.
+ // This format had limited official exposure in documented
+ // end-user RTM scenarios, but it is supported by PerfView,
+ // TraceEvent, and was used by AI profiler
+ NetPerfV3,
+
+ // Default format we plan to use in .Net Core 3 Preview 7+
+ // for most if not all scenarios
+ NetTraceV4,
+
+ Count
+};
+
class EventPipeSession
{
private:
const EventPipeSessionID m_Id;
+ const unsigned int m_index;
// The set of configurations for each provider in the session.
EventPipeSessionProviderList *const m_pProviderList;
- // The configured size of the circular buffer.
- const size_t m_CircularBufferSizeInBytes;
-
// Session buffer manager.
- EventPipeBufferManager *const m_pBufferManager;
+ EventPipeBufferManager * m_pBufferManager;
// True if rundown is enabled.
Volatile<bool> m_rundownEnabled;
// This determines behavior within the system (e.g. policies around which events to drop, etc.)
const EventPipeSessionType m_SessionType;
+ // For file/IPC sessions this controls the format emitted. For in-proc EventListener it is
+ // irrelevant.
+ EventPipeSerializationFormat m_format;
+
// Start date and time in UTC.
FILETIME m_sessionStartTime;
public:
EventPipeSession(
- EventPipeSessionID id,
+ unsigned int index,
LPCWSTR strOutputPath,
IpcStream *const pStream,
EventPipeSessionType sessionType,
+ EventPipeSerializationFormat format,
unsigned int circularBufferSizeInMB,
const EventPipeProviderConfiguration *pProviders,
uint32_t numProviders,
return m_Id;
}
+ unsigned int GetIndex() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_index;
+ }
+
// Get the session type.
EventPipeSessionType GetSessionType() const
{
return m_SessionType;
}
- // Get the configured size of the circular buffer.
- size_t GetCircularBufferSize() const
+ // Get the format version used by the file/IPC serializer
+ EventPipeSerializationFormat GetSerializationFormat() const
{
LIMITED_METHOD_CONTRACT;
- return m_CircularBufferSizeInBytes;
+ return m_format;
}
// Determine if rundown is enabled.
return m_ipcStreamingEnabled;
}
+#ifdef DEBUG
+ // Debug-only accessor for the session's buffer manager; used by
+ // per-thread session state to assert buffer-manager lock ownership.
+ EventPipeBufferManager* GetBufferManager() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pBufferManager;
+ }
+#endif
+
// Add a new provider to the session.
void AddSessionProvider(EventPipeSessionProvider *pProvider);
bool WriteAllBuffersToFile();
- bool WriteEvent(
+ bool WriteEventBuffered(
Thread *pThread,
EventPipeEvent &event,
EventPipeEventPayload &payload,
Thread *pEventThread = nullptr,
StackContents *pStack = nullptr);
- void WriteEvent(EventPipeEventInstance &instance);
+ void WriteEventUnbuffered(EventPipeEventInstance &instance, EventPipeThread* pThread);
+
+ // Write a sequence point into the output stream synchronously
+ void WriteSequencePointUnbuffered();
EventPipeEventInstance *GetNextEvent();
--- /dev/null
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+#include "common.h"
+#include "eventpipe.h"
+#include "eventpipebuffer.h"
+#include "eventpipebuffermanager.h"
+
+#ifdef FEATURE_PERFTRACING
+
+// Binds one thread to one session: tracks the thread's current write buffer,
+// its buffer list, and its event sequence counter for that session.
+EventPipeThreadSessionState::EventPipeThreadSessionState(EventPipeThread* pThread, EventPipeSession* pSession DEBUG_ARG(EventPipeBufferManager* pBufferManager)) :
+ m_pThread(pThread),
+ m_pSession(pSession),
+ m_pWriteBuffer(nullptr),
+ m_pBufferList(nullptr),
+#ifdef DEBUG
+ m_pBufferManager(pBufferManager),
+#endif
+ m_sequenceNumber(1) // first event written/dropped gets sequence number 1
+{
+}
+
+// Returns the thread this state belongs to (the holder keeps a reference;
+// the returned pointer is non-owning).
+EventPipeThread* EventPipeThreadSessionState::GetThread()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pThread;
+}
+
+// Returns the session this state belongs to (immutable after construction).
+EventPipeSession* EventPipeThreadSessionState::GetSession()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pSession;
+}
+
+// Returns the buffer this thread may currently write events into, or null
+// if none is assigned. Caller must hold the owning thread's lock; any
+// non-null buffer returned is in the WRITABLE state.
+EventPipeBuffer *EventPipeThreadSessionState::GetWriteBuffer()
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(m_pThread->IsLockOwnedByCurrentThread());
+
+ _ASSERTE((m_pWriteBuffer == nullptr) || (m_pWriteBuffer->GetVolatileState() == EventPipeBufferState::WRITABLE));
+ return m_pWriteBuffer;
+}
+
+// Replaces the thread's current write buffer. The previous buffer (if any)
+// is converted to read-only so readers can drain it; the new buffer (if
+// non-null) must be WRITABLE. Caller must hold the owning thread's lock.
+void EventPipeThreadSessionState::SetWriteBuffer(EventPipeBuffer *pNewBuffer)
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(m_pThread->IsLockOwnedByCurrentThread());
+ _ASSERTE((pNewBuffer == nullptr) || pNewBuffer->GetVolatileState() == EventPipeBufferState::WRITABLE);
+
+ _ASSERTE((m_pWriteBuffer == nullptr) || (m_pWriteBuffer->GetVolatileState() == EventPipeBufferState::WRITABLE));
+ if (m_pWriteBuffer != nullptr)
+ m_pWriteBuffer->ConvertToReadOnly();
+ m_pWriteBuffer = pNewBuffer;
+}
+
+// Returns this thread's list of buffers for the session (may be null until
+// first allocation). Caller must hold the buffer manager lock.
+EventPipeBufferList *EventPipeThreadSessionState::GetBufferList()
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(m_pBufferManager->IsLockOwnedByCurrentThread());
+ return m_pBufferList;
+}
+
+// Installs (or clears, with null) this thread's buffer list for the session.
+// Caller must hold the buffer manager lock.
+void EventPipeThreadSessionState::SetBufferList(EventPipeBufferList *pNewBufferList)
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(m_pBufferManager->IsLockOwnedByCurrentThread());
+ m_pBufferList = pNewBufferList;
+}
+
+// Unsynchronized read of the sequence number for use by other threads
+// (e.g. while capturing a sequence point). No lock, no barrier — see the
+// field's declaration for the (weak) guarantees this read provides.
+unsigned int EventPipeThreadSessionState::GetVolatileSequenceNumber()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_sequenceNumber.LoadWithoutBarrier();
+}
+
+// Synchronized read of the sequence number; caller must hold the owning
+// thread's lock, which makes the value consistent with buffer contents.
+unsigned int EventPipeThreadSessionState::GetSequenceNumber()
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(m_pThread->IsLockOwnedByCurrentThread());
+ return m_sequenceNumber.LoadWithoutBarrier();
+}
+
+// Bumps the sequence number after an event write attempt (recorded or
+// dropped). Caller must hold the owning thread's lock.
+void EventPipeThreadSessionState::IncrementSequenceNumber()
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(m_pThread->IsLockOwnedByCurrentThread());
+ m_sequenceNumber++;
+}
+
+
+
+// Holder release hook for EventPipeThreadHolder: drops one reference
+// (the thread deletes itself when the count reaches zero).
+void ReleaseEventPipeThreadRef(EventPipeThread *pThread)
+{
+ LIMITED_METHOD_CONTRACT;
+ pThread->Release();
+}
+
+// Holder acquire hook for EventPipeThreadHolder: takes one reference.
+void AcquireEventPipeThreadRef(EventPipeThread *pThread)
+{
+ LIMITED_METHOD_CONTRACT;
+ pThread->AddRef();
+}
+
+// Definition of the per-thread holder declared in the header; one
+// EventPipeThread (lazily created) per OS thread.
+// NOTE(review): the header defines EVENTPIPE_THREAD_LOCAL for exactly this
+// compiler #ifdef — consider reusing it here instead of duplicating it.
+#ifndef __GNUC__
+__declspec(thread)
+#else // !__GNUC__
+thread_local
+#endif // !__GNUC__
+EventPipeThreadHolder EventPipeThread::gCurrentEventPipeThreadHolder;
+
+// Constructs the per-thread EventPipe state: initializes the spin lock,
+// captures the current OS thread id, and clears all per-session slots.
+// Starts with a zero refcount; the creating holder takes the first ref.
+EventPipeThread::EventPipeThread()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ m_lock.Init(LOCK_TYPE_DEFAULT);
+ m_refCount = 0;
+
+#ifdef FEATURE_PAL
+ // PAL thread ids can be 64-bit, hence the SIZE_T-typed field.
+ m_osThreadId = ::PAL_GetCurrentOSThreadId();
+#else
+ m_osThreadId = ::GetCurrentThreadId();
+#endif
+ memset(m_sessionState, 0, sizeof(EventPipeThreadSessionState*) * EventPipe::MaxNumberOfSessions);
+}
+
+// Destructor — runs when the last reference is released. Every session's
+// per-thread state must already have been detached via DeleteSessionState().
+EventPipeThread::~EventPipeThread()
+{
+ LIMITED_METHOD_CONTRACT;
+#ifdef DEBUG
+ // Use an unsigned index so the comparison against the unsigned
+ // EventPipe::MaxNumberOfSessions bound is not signed/unsigned mismatched.
+ for (unsigned int i = 0; i < EventPipe::MaxNumberOfSessions; i++)
+ {
+ _ASSERTE(m_sessionState[i] == NULL);
+ }
+#endif
+}
+
+// Returns the calling thread's EventPipeThread, or null if one has not
+// been created yet (see GetOrCreate()).
+/*static */ EventPipeThread *EventPipeThread::Get()
+{
+ LIMITED_METHOD_CONTRACT;
+ return gCurrentEventPipeThreadHolder;
+}
+
+/*static */ EventPipeThread* EventPipeThread::GetOrCreate()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (gCurrentEventPipeThreadHolder == nullptr)
+ {
+ EX_TRY
+ {
+ gCurrentEventPipeThreadHolder = new EventPipeThread();
+ }
+ EX_CATCH
+ {
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+ }
+ return gCurrentEventPipeThreadHolder;
+}
+
+// Atomically takes a reference (see the holder hooks and the refcount
+// ownership notes on m_refCount in the header).
+void EventPipeThread::AddRef()
+{
+ LIMITED_METHOD_CONTRACT;
+ FastInterlockIncrement(&m_refCount);
+}
+
+// Atomically drops a reference; deletes the object when the count hits zero.
+// The object must not be touched after this call returns.
+void EventPipeThread::Release()
+{
+ LIMITED_METHOD_CONTRACT;
+ if (FastInterlockDecrement(&m_refCount) == 0)
+ {
+ // https://isocpp.org/wiki/faq/freestore-mgmt#delete-this
+ // As long as you're careful, it's okay (not evil) for an object to commit suicide (delete this).
+ delete this;
+ }
+}
+
+// Returns this thread's state for the given session, allocating it on first
+// use. Uses new(nothrow), so the result may be nullptr on OOM — callers must
+// check. Caller must hold this thread's lock.
+EventPipeThreadSessionState* EventPipeThread::GetOrCreateSessionState(EventPipeSession* pSession)
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(pSession != nullptr);
+ _ASSERTE(IsLockOwnedByCurrentThread());
+
+ unsigned int index = pSession->GetIndex();
+ _ASSERTE(index < EventPipe::MaxNumberOfSessions);
+ EventPipeThreadSessionState* pState = m_sessionState[index];
+ if (pState == nullptr)
+ {
+ pState = new (nothrow) EventPipeThreadSessionState(this, pSession DEBUG_ARG(pSession->GetBufferManager()));
+ m_sessionState[index] = pState;
+ }
+ return pState;
+}
+
+// Returns this thread's state for the given session; the state must already
+// exist (asserted in debug). Caller must hold this thread's lock.
+EventPipeThreadSessionState* EventPipeThread::GetSessionState(EventPipeSession* pSession)
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(pSession != nullptr);
+ _ASSERTE(IsLockOwnedByCurrentThread());
+
+ unsigned int index = pSession->GetIndex();
+ _ASSERTE(index < EventPipe::MaxNumberOfSessions);
+ EventPipeThreadSessionState* pState = m_sessionState[index];
+
+ _ASSERTE(pState != nullptr);
+ return pState;
+}
+
+// Frees this thread's state for the given session and clears its slot
+// (the destructor asserts all slots are null). Caller must hold this
+// thread's lock.
+void EventPipeThread::DeleteSessionState(EventPipeSession* pSession)
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(pSession != nullptr);
+ _ASSERTE(IsLockOwnedByCurrentThread());
+
+ unsigned int index = pSession->GetIndex();
+ _ASSERTE(index < EventPipe::MaxNumberOfSessions);
+ EventPipeThreadSessionState* pState = m_sessionState[index];
+
+ _ASSERTE(pState != nullptr);
+ delete pState;
+ m_sessionState[index] = nullptr;
+}
+
+// Returns the low-contention lock guarding this thread's per-session state
+// (see the m_lock comment in the header for the contention model).
+SpinLock* EventPipeThread::GetLock()
+{
+ LIMITED_METHOD_CONTRACT;
+ return &m_lock;
+}
+
+#ifdef DEBUG
+// Debug-only helper backing the lock-ownership asserts in this file.
+bool EventPipeThread::IsLockOwnedByCurrentThread()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_lock.OwnedByCurrentThread();
+}
+#endif
+
+// Returns the OS thread id captured at construction (SIZE_T because PAL
+// thread ids may be 64-bit; see the constructor).
+SIZE_T EventPipeThread::GetOSThreadId()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_osThreadId;
+}
+
+#endif // FEATURE_PERFTRACING
--- /dev/null
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+#ifndef __EVENTPIPE_THREAD_H__
+#define __EVENTPIPE_THREAD_H__
+
+#ifdef FEATURE_PERFTRACING
+
+#include "eventpipe.h"
+#include "eventpipebuffer.h"
+#include "eventpipesession.h"
+#include "spinlock.h"
+
+class EventPipeBuffer;
+class EventPipeBufferList;
+class EventPipeBufferManager;
+class EventPipeThread;
+
+void ReleaseEventPipeThreadRef(EventPipeThread* pThread);
+void AcquireEventPipeThreadRef(EventPipeThread* pThread);
+typedef Wrapper<EventPipeThread*, AcquireEventPipeThreadRef, ReleaseEventPipeThreadRef> EventPipeThreadHolder;
+
+// Per-(thread, session) state: the thread's current write buffer, its list
+// of filled buffers, and a sequence number counting attempted event writes.
+// See the per-field comments for which lock protects what.
+class EventPipeThreadSessionState
+{
+ // immutable
+ EventPipeThreadHolder m_pThread;
+
+ // immutable
+ EventPipeSession* m_pSession;
+
+ // The buffer this thread is allowed to write to if non-null, it must
+ // match the tail of m_bufferList
+ // protected by m_pThread::GetLock()
+ EventPipeBuffer* m_pWriteBuffer;
+
+ // The list of buffers that were written to by this thread. This
+ // is populated lazily the first time a thread tries to allocate
+ // a buffer for this session. It is set back to null when
+ // event writing is suspended during session disable.
+ // protected by the buffer manager lock
+ EventPipeBufferList* m_pBufferList;
+
+#ifdef DEBUG
+ // protected by the buffer manager lock
+ EventPipeBufferManager* m_pBufferManager;
+#endif
+
+ // The number of events that were attempted to be written by this
+ // thread. Each event was either successfully recorded in a buffer
+ // or it was dropped.
+ //
+ // Only updated by the current thread under m_pThread::GetLock(). Other
+ // event writer threads are allowed to do unsynchronized reads when
+ // capturing a sequence point but this does not provide any consistency
+ // guarantee. In particular there is no promise that the other thread
+ // is observing the most recent sequence number, nor is there a promise
+ // that the observable number of events in the write buffer matches the
+ // sequence number. A writer thread will always update the sequence
+ // number in tandem with an event write or drop, but without a write
+ // barrier between those memory writes they might be observed out-of-order
+ // by the thread capturing the sequence point. The only utility this
+ // unsynchronized read has is that if some other thread observes a sequence
+ // number X, it knows this thread must have attempted to write at least
+ // X events prior to the moment in time when the read occurred. If the event
+ // buffers are later read and there are fewer than X events timestamped
+ // prior to the sequence point we can be certain the others were dropped.
+ Volatile<unsigned int> m_sequenceNumber;
+
+public:
+ EventPipeThreadSessionState(EventPipeThread* pThread, EventPipeSession* pSession DEBUG_ARG(EventPipeBufferManager* pBufferManager));
+
+ EventPipeThread* GetThread();
+ EventPipeSession* GetSession();
+ EventPipeBuffer *GetWriteBuffer();
+ void SetWriteBuffer(EventPipeBuffer *pNewBuffer);
+ EventPipeBufferList *GetBufferList();
+ void SetBufferList(EventPipeBufferList *pBufferList);
+ unsigned int GetVolatileSequenceNumber();
+ unsigned int GetSequenceNumber();
+ void IncrementSequenceNumber();
+};
+
+#ifndef __GNUC__
+#define EVENTPIPE_THREAD_LOCAL __declspec(thread)
+#else // !__GNUC__
+#define EVENTPIPE_THREAD_LOCAL thread_local
+#endif // !__GNUC__
+
+// Refcounted per-OS-thread EventPipe state, reached via a thread-local
+// holder. Holds one EventPipeThreadSessionState slot per possible session,
+// guarded by a low-contention spin lock.
+class EventPipeThread
+{
+ static EVENTPIPE_THREAD_LOCAL EventPipeThreadHolder gCurrentEventPipeThreadHolder;
+
+ // Private: destruction happens only via Release() when the count hits zero.
+ ~EventPipeThread();
+
+ // The EventPipeThreadHolder maintains one count while the thread is alive
+ // and each session's EventPipeBufferList maintains one count while it
+ // exists
+ LONG m_refCount;
+
+ // Per-session state.
+ // The pointers in this array are only read/written under m_lock
+ // Some of the data within the ThreadSessionState object can be accessed
+ // without m_lock however, see the fields of that type for details.
+ EventPipeThreadSessionState* m_sessionState[EventPipe::MaxNumberOfSessions];
+
+ // This lock is designed to have low contention. Normally it is only taken by this thread,
+ // but occasionally it may also be taken by another thread which is trying to collect and drain
+ // buffers from all threads.
+ SpinLock m_lock;
+
+ // This is initialized when the Thread object is first constructed and remains
+ // immutable afterwards
+ SIZE_T m_osThreadId;
+
+ // If this is set to a valid id before the corresponding entry of s_pSessions is set to null,
+ // that pointer will be protected from deletion. See EventPipe::DisableInternal() and
+ // EventPipe::WriteInternal for more detail.
+ // NOTE(review): SetSessionWriteInProgress() below stores a bitmask
+ // (1ULL << index), not the raw index — confirm the intended encoding.
+ Volatile<EventPipeSessionID> m_writingEventInProgress;
+
+ // Non-null while this thread is emitting rundown events for the session.
+ EventPipeSession *m_pRundownSession = nullptr;
+
+public:
+ static EventPipeThread *Get();
+ static EventPipeThread* GetOrCreate();
+
+ EventPipeThread();
+ void AddRef();
+ void Release();
+ SpinLock *GetLock();
+#ifdef DEBUG
+ bool IsLockOwnedByCurrentThread();
+#endif
+
+ EventPipeThreadSessionState* GetOrCreateSessionState(EventPipeSession* pSession);
+ EventPipeThreadSessionState* GetSessionState(EventPipeSession* pSession);
+ void DeleteSessionState(EventPipeSession* pSession);
+ SIZE_T GetOSThreadId();
+
+ bool IsRundownThread() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (m_pRundownSession != nullptr);
+ }
+
+ void SetAsRundownThread(EventPipeSession *pSession)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_pRundownSession = pSession;
+ }
+
+ EventPipeSession *GetRundownSession() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pRundownSession;
+ }
+
+ // Records the session this thread is writing to as a bit in the mask;
+ // indices >= 64 saturate to all-ones.
+ void SetSessionWriteInProgress(uint64_t index)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_writingEventInProgress.Store((index < 64) ? (1ULL << index) : UINT64_MAX);
+ }
+
+ EventPipeSessionID GetSessionWriteInProgress() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_writingEventInProgress.Load();
+ }
+};
+
+#endif // FEATURE_PERFTRACING
+
+#endif // __EVENTPIPE_THREAD_H__
class FastSerializableObject
{
public:
- FastSerializableObject(int objectVersion, int minReaderVersion) :
- m_objectVersion(objectVersion), m_minReaderVersion(minReaderVersion)
+ FastSerializableObject(int objectVersion, int minReaderVersion, bool isPrivate) :
+ m_objectVersion(objectVersion), m_minReaderVersion(minReaderVersion), m_isPrivate(isPrivate)
{
LIMITED_METHOD_CONTRACT;
}
return m_minReaderVersion;
}
+ // True if this object serializes with the BeginPrivateObject tag instead
+ // of BeginObject (set once at construction; see the serializer's WriteTag
+ // call sites).
+ bool IsPrivate() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_isPrivate;
+ }
+
+
private:
const int m_objectVersion;
const int m_minReaderVersion;
+ const bool m_isPrivate;
};
#endif // FEATURE_PERFTRACING
}
CONTRACTL_END;
- WriteTag(FastSerializerTags::BeginObject);
+ WriteTag(pObject->IsPrivate() ? FastSerializerTags::BeginPrivateObject : FastSerializerTags::BeginObject);
WriteSerializationType(pObject);
CONTRACTL_END;
// Write the BeginObject tag.
- WriteTag(FastSerializerTags::BeginObject);
+ WriteTag(pObject->IsPrivate() ? FastSerializerTags::BeginPrivateObject : FastSerializerTags::BeginObject);
// Write a NullReferenceTag, which implies that the following fields belong to SerializationType.
WriteTag(FastSerializerTags::NullReference);
}
CONTRACTL_END;
+#ifdef FEATURE_PAL
+ SIZE_T ourId = 0;
+#else
DWORD ourId = 0;
+#endif
HANDLE h = NULL;
DWORD dwCreationFlags = CREATE_SUSPENDED;
lpThreadArgs->lpThreadFunction = start;
lpThreadArgs->lpArg = args;
- h = ::CreateThread(NULL /*=SECURITY_ATTRIBUTES*/,
- sizeToCommitOrReserve,
- intermediateThreadProc,
- lpThreadArgs,
- dwCreationFlags,
- &ourId);
+#ifdef FEATURE_PAL
+ h = ::PAL_CreateThread64(NULL /*=SECURITY_ATTRIBUTES*/,
+#else
+ h = ::CreateThread( NULL /*=SECURITY_ATTRIBUTES*/,
+#endif
+ sizeToCommitOrReserve,
+ intermediateThreadProc,
+ lpThreadArgs,
+ dwCreationFlags,
+ &ourId);
if (h == NULL)
return FALSE;
}
CONTRACTL_END;
+#ifdef FEATURE_PAL
+ m_OSThreadId = ::PAL_GetCurrentOSThreadId();
+#else
m_OSThreadId = ::GetCurrentThreadId();
+#endif
#ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT
// Be very careful in here because we haven't set up e.g. TLS yet.
{
// We should never be attempting to CoUninitialize another thread than
// the currently running thread.
+#ifdef FEATURE_PAL
+ _ASSERTE(m_OSThreadId == ::PAL_GetCurrentOSThreadId());
+#else
_ASSERTE(m_OSThreadId == ::GetCurrentThreadId());
+#endif
// CoUninitialize the thread and reset the STA/MTA/CoInitialized state bits.
::CoUninitialize();
// Don't use the TS_Unstarted state bit to check for this, it's cleared far
// too late in the day for us. Instead check whether we're in the correct
// thread context.
+#ifdef FEATURE_PAL
+ if (m_OSThreadId != ::PAL_GetCurrentOSThreadId())
+#else
if (m_OSThreadId != ::GetCurrentThreadId())
+#endif
{
FastInterlockOr((ULONG *) &m_State, (state == AS_InSTA) ? TS_InSTA : TS_InMTA);
return state;
return m_ThreadId;
}
+ // The actual OS thread ID may be 64 bit on some platforms but
+ // the runtime has historically used 32 bit IDs. We continue to
+ // downcast by default to limit the impact but GetOSThreadId64()
+ // is available for code-paths which correctly handle it.
DWORD GetOSThreadId()
{
LIMITED_METHOD_CONTRACT;
#ifndef DACCESS_COMPILE
_ASSERTE (m_OSThreadId != 0xbaadf00d);
#endif // !DACCESS_COMPILE
+ return (DWORD)m_OSThreadId;
+ }
+
+ // Allows access to the full 64 bit id on platforms which use it
+ SIZE_T GetOSThreadId64()
+ {
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+#ifndef DACCESS_COMPILE
+ _ASSERTE(m_OSThreadId != 0xbaadf00d);
+#endif // !DACCESS_COMPILE
return m_OSThreadId;
}
// This API is to be used for Debugger only.
// We need to be able to return the true value of m_OSThreadId.
+ // On platforms with 64 bit thread IDs we downcast to 32 bit.
//
DWORD GetOSThreadIdForDebugger()
{
SUPPORTS_DAC;
LIMITED_METHOD_CONTRACT;
- return m_OSThreadId;
+ return (DWORD) m_OSThreadId;
}
BOOL IsThreadPoolThread()
|| handle == SWITCHOUT_HANDLE_VALUE
|| m_OSThreadId == 0
|| m_OSThreadId == 0xbaadf00d
- || ::MatchThreadHandleToOsId(handle, m_OSThreadId) );
+ || ::MatchThreadHandleToOsId(handle, (DWORD)m_OSThreadId) );
}
#endif
|| h == SWITCHOUT_HANDLE_VALUE
|| m_OSThreadId == 0
|| m_OSThreadId == 0xbaadf00d
- || ::MatchThreadHandleToOsId(h, m_OSThreadId) );
+ || ::MatchThreadHandleToOsId(h, (DWORD)m_OSThreadId) );
#endif
FastInterlockExchangePointer(&m_ThreadHandle, h);
}
HANDLE m_ThreadHandleForClose;
HANDLE m_ThreadHandleForResume;
BOOL m_WeOwnThreadHandle;
- DWORD m_OSThreadId;
+ SIZE_T m_OSThreadId;
BOOL CreateNewOSThread(SIZE_T stackSize, LPTHREAD_START_ROUTINE start, void *args);
if (pContext->ContextFlags & (CONTEXT_SERVICE_ACTIVE|CONTEXT_EXCEPTION_ACTIVE))
{
// cannot process exception
- LOG((LF_ALWAYS, LL_WARNING, "thread [os id=0x08%x id=0x08%x] redirect failed due to ContextFlags of 0x%08x\n", m_OSThreadId, m_ThreadId, pContext->ContextFlags));
+ LOG((LF_ALWAYS, LL_WARNING, "thread [os id=0x08%x id=0x08%x] redirect failed due to ContextFlags of 0x%08x\n", (DWORD)m_OSThreadId, m_ThreadId, pContext->ContextFlags));
isSafeToRedirect = FALSE;
}
}
if (thread->m_fPreemptiveGCDisabled)
{
- DWORD id = thread->m_OSThreadId;
+ DWORD id = (DWORD) thread->m_OSThreadId;
if (id == 0xbaadf00d)
{
sprintf_s (message, COUNTOF(message), "Thread CLR ID=%x cannot be suspended",