project(CoreProfiler)
-set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 -Wall -Wno-null-conversion")
+# Use uppercase CMAKE_BUILD_TYPE for the string comparisons below
+string(TOUPPER "${CMAKE_BUILD_TYPE}" UPPERCASE_CMAKE_BUILD_TYPE)
+
+# For a single-configuration toolset,
+# set the per-configuration preprocessor defines.
+if (UPPERCASE_CMAKE_BUILD_TYPE STREQUAL DEBUG)
+ # First DEBUG
+ set_property(DIRECTORY PROPERTY COMPILE_DEFINITIONS ${CLR_DEFINES_DEBUG_INIT})
+elseif (UPPERCASE_CMAKE_BUILD_TYPE STREQUAL CHECKED)
+ # Then CHECKED
+ set_property(DIRECTORY PROPERTY COMPILE_DEFINITIONS ${CLR_DEFINES_CHECKED_INIT})
+elseif (UPPERCASE_CMAKE_BUILD_TYPE STREQUAL RELEASE)
+ # Then RELEASE
+ set_property(DIRECTORY PROPERTY COMPILE_DEFINITIONS ${CLR_DEFINES_RELEASE_INIT})
+elseif (UPPERCASE_CMAKE_BUILD_TYPE STREQUAL RELWITHDEBINFO)
+ # And then RELWITHDEBINFO
+ set_property(DIRECTORY APPEND PROPERTY COMPILE_DEFINITIONS ${CLR_DEFINES_RELWITHDEBINFO_INIT})
+else ()
+ message(FATAL_ERROR "Unknown build type! Set CMAKE_BUILD_TYPE to DEBUG, CHECKED, RELEASE, or RELWITHDEBINFO!")
+endif ()
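+
+# Illustrative configure command (the spec file below passes these options,
+# among others):
+#   cmake ../coreprofiler \
+#     -DCMAKE_BUILD_TYPE=Release \
+#     -DCMAKE_USER_MAKE_RULES_OVERRIDE=../coreprofiler/clang-compiler-override.txt
+# The rules-override file seeds the CLR_DEFINES_<CONFIG>_INIT lists consumed
+# above.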
set(CLR_CMAKE_PLATFORM_UNIX 1)
--- /dev/null
+SET (CMAKE_C_FLAGS_INIT "-Wall -std=c11")
+SET (CMAKE_C_FLAGS_DEBUG_INIT "-g -O0")
+SET (CLR_C_FLAGS_CHECKED_INIT "-g -O1")
+# See the instructions at the link below to support __thread with -O2/-O3 on Linux/ARM:
+# https://github.com/dotnet/coreclr/blob/master/Documentation/building/linux-instructions.md
+SET (CMAKE_C_FLAGS_RELEASE_INIT "-g -O1")
+SET (CMAKE_C_FLAGS_RELWITHDEBINFO_INIT "-g -O1")
+
+SET (CMAKE_CXX_FLAGS_INIT "-Wall -Wno-null-conversion -std=c++11")
+SET (CMAKE_CXX_FLAGS_DEBUG_INIT "-g -O0")
+SET (CLR_CXX_FLAGS_CHECKED_INIT "-g -O1")
+SET (CMAKE_CXX_FLAGS_RELEASE_INIT "-g -O1")
+SET (CMAKE_CXX_FLAGS_RELWITHDEBINFO_INIT "-g -O1")
+
+SET (CLR_DEFINES_DEBUG_INIT DEBUG _DEBUG _DBG URTBLDENV_FRIENDLY=Checked BUILDENV_CHECKED=1)
+SET (CLR_DEFINES_CHECKED_INIT DEBUG _DEBUG _DBG URTBLDENV_FRIENDLY=Checked BUILDENV_CHECKED=1)
+SET (CLR_DEFINES_RELEASE_INIT NDEBUG URTBLDENV_FRIENDLY=Retail)
+SET (CLR_DEFINES_RELWITHDEBINFO_INIT NDEBUG URTBLDENV_FRIENDLY=Retail)
+
+SET (CMAKE_INSTALL_PREFIX $ENV{__CMakeBinDir})
--- /dev/null
+SET (CMAKE_C_FLAGS_INIT "-Wall -std=c11")
+SET (CMAKE_C_FLAGS_DEBUG_INIT "-g -O0")
+SET (CLR_C_FLAGS_CHECKED_INIT "-g -O2")
+# See the instructions at the link below to support __thread with -O2/-O3 on Linux/ARM:
+# https://github.com/dotnet/coreclr/blob/master/Documentation/building/linux-instructions.md
+SET (CMAKE_C_FLAGS_RELEASE_INIT "-g -O3")
+SET (CMAKE_C_FLAGS_RELWITHDEBINFO_INIT "-g -O2")
+
+SET (CMAKE_CXX_FLAGS_INIT "-Wall -Wno-null-conversion -std=c++11")
+SET (CMAKE_CXX_FLAGS_DEBUG_INIT "-g -O0")
+SET (CLR_CXX_FLAGS_CHECKED_INIT "-g -O2")
+SET (CMAKE_CXX_FLAGS_RELEASE_INIT "-g -O3")
+SET (CMAKE_CXX_FLAGS_RELWITHDEBINFO_INIT "-g -O2")
+
+SET (CLR_DEFINES_DEBUG_INIT DEBUG _DEBUG _DBG URTBLDENV_FRIENDLY=Checked BUILDENV_CHECKED=1)
+SET (CLR_DEFINES_CHECKED_INIT DEBUG _DEBUG _DBG URTBLDENV_FRIENDLY=Checked BUILDENV_CHECKED=1)
+SET (CLR_DEFINES_RELEASE_INIT NDEBUG URTBLDENV_FRIENDLY=Retail)
+SET (CLR_DEFINES_RELWITHDEBINFO_INIT NDEBUG URTBLDENV_FRIENDLY=Retail)
+
+SET (CMAKE_INSTALL_PREFIX $ENV{__CMakeBinDir})
+%{!?buildtype: %define buildtype Release}
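+# Illustrative: "buildtype" defaults to Release unless overridden, e.g.:
+#   rpmbuild -ba coreprofiler.spec --define "buildtype Debug"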
+
Name: coreprofiler
Summary: Profiler for the .NET Core CLR
Version: 1.0.0
-Release: 1
+Release: 1.rt
Group: Development/Toolchain
License: MIT
Source0: coreprofiler.tar.gz
export CPLUS_INCLUDE_PATH="${LLVM_INCLUDEDIR}/llvm/:${LLVM_INCLUDEDIR}/llvm-c/:${CLANG_HEADERS}:${GPP_INCLUDE_PATHS}:${CLANG_INCLUDE_PATHS}"
export C_INCLUDE_PATH="${LLVM_INCLUDEDIR}/llvm-c/:%{_includedir}"
+%ifarch armv7l
+%define _overridefile clang-compiler-override-arm.txt
+%else
+%define _overridefile clang-compiler-override.txt
+%endif
+
mkdir build
cd build
cmake ../coreprofiler \
-DCMAKE_CXX_COMPILER=clang++ \
-DCLR_BIN_DIR=%{_datarootdir}/%{netcoreappdir} \
-DCLR_SRC_DIR=%{_datarootdir}/%{netcoreappdir} \
- -DCMAKE_BUILD_TYPE=Release \
+ -DCMAKE_BUILD_TYPE=%{buildtype} \
+ -DCMAKE_EXPORT_COMPILE_COMMANDS=ON \
+ -DCMAKE_USER_MAKE_RULES_OVERRIDE=../coreprofiler/%{_overridefile} \
-DCLR_CMAKE_TARGET_ARCH_%{ARCH}=1
make %{?jobs:-j%jobs}
${CLR_BIN_DIR}/inc
${CLR_SRC_DIR}/src/pal/inc/rt
${CLR_SRC_DIR}/src/pal/prebuilt/inc
- ${CLR_SRC_DIR}/src/pal/inc
+ ${CLR_SRC_DIR}/src/pal/inc
${CLR_SRC_DIR}/src/inc)
include_directories(AFTER ${CLR_INCLUDE_DIR})
dllmain.cpp
info/classinfo.cpp
info/functioninfo.cpp
+ info/objectinfo.cpp
misc/localtime.cpp
misc/sigaction.cpp
profiler.cpp
profilerinfo.cpp
profilermanager.cpp
sync/shared_mutex.cpp
- trace/basetrace.cpp
trace/commontrace.cpp
trace/cputrace.cpp
trace/executiontrace.cpp
- trace/eventchannel.cpp
trace/memorytrace.cpp
tracelog.cpp
)
winContext->R15 = mc->gregs[REG_R15];
winContext->EFlags = mc->gregs[REG_EFL];
winContext->Rip = mc->gregs[REG_RIP];
- winContext->SegCs = (*((WORD *)mc->gregs[REG_CSGSFS] + 0));
- winContext->SegGs = (*((WORD *)mc->gregs[REG_CSGSFS] + 1));
- winContext->SegFs = (*((WORD *)mc->gregs[REG_CSGSFS] + 2));
}
return S_OK;
FetchValue("PROF_EXECUTION_TRACE", new_config.ExecutionTraceEnabled);
FetchValue("PROF_MEMORY_TRACE", new_config.MemoryTraceEnabled);
- if (!new_config.MemoryTraceEnabled)
+ if (new_config.MemoryTraceEnabled &&
+ new_config.ExecutionTraceEnabled &&
+ new_config.CollectionMethod == CollectionMethod::Sampling)
{
- new_config.StackTrackingEnabled = false;
+ new_config.StackTrackingEnabled = true;
}
FetchValue("PROF_STACK_TRACK", new_config.StackTrackingEnabled);
+ bool GcTraceEnabled;
+ if (FetchValue("PROF_GC_TRACE", GcTraceEnabled))
+ {
+ new_config.GcAllocTableTraceEnabled = GcTraceEnabled;
+ }
+ FetchValue("PROF_GC_TRACE_ALT", new_config.GcAllocTableTraceEnabled);
+
// Apply changes to the current configuration.
config = new_config;
}
, CpuTraceTimeoutMs(10)
, ExecutionTraceEnabled(false)
, MemoryTraceEnabled(false)
- , StackTrackingEnabled(true)
+ , StackTrackingEnabled(false)
+ , GcAllocTableTraceEnabled(false)
{}
void ProfilerConfig::Validate()
{
// Instrumentation specific options verification.
}
- else
- {
- if (StackTrackingEnabled)
- {
- warnings.push_back(
- "stack tracking option is redundant for instrumentation");
- }
- }
if (CollectionMethod != CollectionMethod::Sampling)
{
// line tracing above.
warnings.push_back("hight granularity option requires sampling");
}
+
+ if (StackTrackingEnabled)
+ {
+ StackTrackingEnabled = false;
+ warnings.push_back(
+ "stack tracking option requires sampling and turned off");
+ }
}
if (CollectionMethod == CollectionMethod::None)
{
warnings.push_back("stack tracking is memory tracing option");
}
+
+ if (GcAllocTableTraceEnabled)
+ {
+ warnings.push_back(
+ "GC allocations table tracing is memory tracing option");
+ }
}
return warnings;
bool MemoryTraceEnabled;
bool StackTrackingEnabled;
+ bool GcAllocTableTraceEnabled;
//
// Validation and verification.
}
catch (const std::exception &e)
{
+ this->ownerClass = nullptr;
hr = profiler.HandleException(e);
}
}
catch (const std::exception &e)
{
+ this->ownerClass = nullptr;
hr = profiler.HandleException(e);
}
bool isNamePrinted;
private:
-
static void ParseElementType(
const Profiler &profiler,
IMetaDataImport *pMDImport,
--- /dev/null
+#include <exception>
+#include <stdexcept>
+
+#include "profiler.h"
+#include "classstorage.h"
+#include "classinfo.h"
+#include "objectinfo.h"
+
+__forceinline HRESULT ObjectInfo::InitializeType(
+ const Profiler &profiler,
+ ClassStorage &storage,
+ const ProfilerInfo &info) noexcept
+{
+ HRESULT hr = S_OK;
+
+ try
+ {
+ ClassID classId;
+ hr = info.v1()->GetClassFromObject(this->id, &classId);
+ if (FAILED(hr))
+ {
+ throw HresultException(
+ "ObjectInfo::InitializeType(): GetClassFromObject()", hr
+ );
+ }
+
+ this->type = &storage.Place(classId).first;
+ hr = this->type->Initialize(profiler, storage);
+ }
+ catch (const std::exception &e)
+ {
+ this->type = nullptr;
+ hr = profiler.HandleException(e);
+ }
+
+ return hr;
+}
+
+__forceinline HRESULT ObjectInfo::InitializeTypeFromClassId(
+ const Profiler &profiler,
+ ClassStorage &storage,
+ ClassID classId) noexcept
+{
+ HRESULT hr = S_OK;
+
+ try
+ {
+ this->type = &storage.Place(classId).first;
+ hr = this->type->Initialize(profiler, storage);
+ }
+ catch (const std::exception &e)
+ {
+ this->type = nullptr;
+ hr = profiler.HandleException(e);
+ }
+
+ return hr;
+}
+
+__forceinline HRESULT ObjectInfo::InitializeSize(
+ const Profiler &profiler,
+ const ProfilerInfo &info) noexcept
+{
+ HRESULT hr = S_OK;
+
+ try
+ {
+ if (info.version() >= 4)
+ {
+ hr = info.v4()->GetObjectSize2(this->id, &this->size);
+ }
+ else
+ {
+ ULONG size = 0;
+ hr = info.v1()->GetObjectSize(this->id, &size);
+ this->size = size;
+ }
+ if (FAILED(hr))
+ {
+ throw HresultException(
+ "ObjectInfo::Initialize(): GetObjectSize()", hr);
+ }
+ }
+ catch (const std::exception &e)
+ {
+ this->size = 0;
+ hr = profiler.HandleException(e);
+ }
+
+ return hr;
+}
+
+HRESULT ObjectInfo::Initialize(
+ const Profiler &profiler,
+ ClassStorage &storage) noexcept
+{
+ HRESULT hrReturn = S_OK;
+ HRESULT hr;
+
+ if (this->isInitialized)
+ {
+ return hrReturn;
+ }
+
+ _ASSERTE(this->id != 0);
+ const ProfilerInfo &info = profiler.GetProfilerInfo();
+
+ hr = this->InitializeType(profiler, storage, info);
+ if (FAILED(hr) && SUCCEEDED(hrReturn))
+ {
+ hrReturn = hr;
+ }
+
+ hr = this->InitializeSize(profiler, info);
+ if (FAILED(hr) && SUCCEEDED(hrReturn))
+ {
+ hrReturn = hr;
+ }
+
+ this->isInitialized = true;
+ return hrReturn;
+}
+
+HRESULT ObjectInfo::Initialize(
+ const Profiler &profiler,
+ ClassStorage &storage,
+ ClassID classId) noexcept
+{
+ HRESULT hrReturn = S_OK;
+ HRESULT hr;
+
+ if (this->isInitialized)
+ {
+ return hrReturn;
+ }
+
+ _ASSERTE(this->id != 0);
+ const ProfilerInfo &info = profiler.GetProfilerInfo();
+
+ hr = this->InitializeTypeFromClassId(profiler, storage, classId);
+ if (FAILED(hr) && SUCCEEDED(hrReturn))
+ {
+ hrReturn = hr;
+ }
+
+ hr = this->InitializeSize(profiler, info);
+ if (FAILED(hr) && SUCCEEDED(hrReturn))
+ {
+ hrReturn = hr;
+ }
+
+ this->isInitialized = true;
+ return hrReturn;
+}
--- /dev/null
+#ifndef _OBJECT_INFO_H_
+#define _OBJECT_INFO_H_
+
+#include <cor.h>
+#include <corhdr.h>
+#include <corprof.h>
+
+class Profiler;
+
+class ProfilerInfo;
+
+class ClassStorage;
+
+class ClassInfo;
+
+struct ObjectInfo
+{
+ ObjectID id;
+ SIZE_T size;
+ ClassInfo* type;
+ bool isInitialized;
+
+private:
+ HRESULT InitializeType(
+ const Profiler &profiler,
+ ClassStorage &storage,
+ const ProfilerInfo &info) noexcept;
+
+ HRESULT InitializeTypeFromClassId(
+ const Profiler &profiler,
+ ClassStorage &storage,
+ ClassID classId) noexcept;
+
+ HRESULT InitializeSize(
+ const Profiler &profiler,
+ const ProfilerInfo &info) noexcept;
+
+public:
+ HRESULT Initialize(
+ const Profiler &profiler,
+ ClassStorage &storage) noexcept;
+
+ HRESULT Initialize(
+ const Profiler &profiler,
+ ClassStorage &storage,
+ ClassID classId) noexcept;
+};
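+
+// Usage sketch (illustrative, mirrors MemoryTrace::ObjectAllocated): callers
+// zero-initialize the structure, set `id`, and call Initialize() while
+// holding the class storage lock:
+//
+//     ObjectInfo objInfo = {};
+//     objInfo.id = objectId;
+//     auto storage_lock = profiler.GetCommonTrace().GetClassStorage();
+//     HRESULT hr = objInfo.Initialize(profiler, *storage_lock, classId);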
+
+#endif // _OBJECT_INFO_H_
}
}
-#ifdef _DEBUG
-
#include <utility>
class Log
{
private:
+ template<LogLevel L>
class LogLine
{
public:
- LogLine(LogLevel level, std::ostream *stream = nullptr)
+ LogLine(std::ostream *stream = nullptr)
: m_stream(stream)
{
if (m_stream)
{
- *m_stream << "[" << LogLevelName(level) << "]\t";
+ *m_stream << "[" << LogLevelName(L) << "]\t";
}
}
std::ostream *m_stream;
};
+ template<>
+ class LogLine<LogLevel::None>
+ {
+ public:
+ LogLine() = default;
+
+ LogLine(const LogLine&) = delete;
+
+ LogLine &operator=(const LogLine&) = delete;
+
+ LogLine(LogLine &&other) = default;
+
+ LogLine &operator=(LogLine&&) = delete;
+
+ ~LogLine() = default;
+
+ template<typename T>
+ LogLine &operator<<(T value)
+ {
+ return *this;
+ }
+ };
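+    // NOTE: a no-op LogLine<LogLevel::None> still evaluates its streamed
+    // arguments; it just never formats or writes them.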
+
public:
Log(LogLevel level, const std::string &filename)
: m_level(level)
m_stream->exceptions(except);
}
- LogLine Fatal()
+ LogLine<LogLevel::Fatal> Fatal()
{
return DoLog<LogLevel::Fatal>();
}
- LogLine Error()
+ LogLine<LogLevel::Error> Error()
{
return DoLog<LogLevel::Error>();
}
- LogLine Warn()
+ LogLine<LogLevel::Warn> Warn()
{
return DoLog<LogLevel::Warn>();
}
- LogLine Info()
+ LogLine<LogLevel::Info> Info()
{
return DoLog<LogLevel::Info>();
}
- LogLine Debug()
+#ifdef NDEBUG
+ LogLine<LogLevel::None> Debug()
+ {
+ return LogLine<LogLevel::None>();
+ }
+
+ LogLine<LogLevel::None> Trace()
+ {
+ return LogLine<LogLevel::None>();
+ }
+#else
+ LogLine<LogLevel::Debug> Debug()
{
return DoLog<LogLevel::Debug>();
}
- LogLine Trace()
+ LogLine<LogLevel::Trace> Trace()
{
return DoLog<LogLevel::Trace>();
}
+#endif // NDEBUG
private:
LogLevel m_level;
bool m_stream_owner;
template<LogLevel L>
- LogLine DoLog()
+ LogLine<L> DoLog()
{
        // With RVO the LogLine destructor is called only once. Otherwise,
        // with the overloaded move constructor, only the last destructor
        // call prints std::endl.
if (m_level >= L)
{
- return LogLine(L, m_stream);
+ return LogLine<L>(m_stream);
}
else
{
- return LogLine(L);
- }
- }
-};
-
-#else // !_DEBUG
-
-class Log
-{
-private:
- class LogLine
- {
- public:
- LogLine() {}
-
- LogLine(const LogLine&) = delete;
-
- LogLine &operator=(const LogLine&) = delete;
-
- LogLine(LogLine&&) = default;
-
- LogLine &operator=(LogLine&&) = delete;
-
- template<typename T>
- LogLine &operator<<(T value)
- {
- return *this;
+ return LogLine<L>();
}
- };
-
-public:
- Log(LogLevel, const std::string&) {}
-
- explicit Log(const std::string&) {}
-
- Log(LogLevel, std::ostream&) {}
-
- explicit Log(LogLevel) {}
-
- explicit Log(std::ostream&) {}
-
- Log() {}
-
- Log(const Log&) = delete;
-
- Log &operator=(const Log&) = delete;
-
- Log(Log&&) = default;
-
- Log &operator=(Log&&) = default;
-
- void swap(Log &other) {}
-
- std::ostream::iostate exceptions() const
- {
- return std::ostream::goodbit;
- }
-
- void exceptions(std::ostream::iostate except) {}
-
- LogLine Fatal()
- {
- return LogLine();
- }
-
- LogLine Error()
- {
- return LogLine();
- }
-
- LogLine Warn()
- {
- return LogLine();
- }
-
- LogLine Info()
- {
- return LogLine();
- }
-
- LogLine Debug()
- {
- return LogLine();
- }
-
- LogLine Trace()
- {
- return LogLine();
}
};
-#endif // !_DEBUG
-
#endif // _LOG_H_
#include "localtime.h"
#include "profilermanager.h"
+
#include "profiler.h"
+#include "commontrace.h"
+#include "cputrace.h"
+#include "executiontrace.h"
+#include "memorytrace.h"
// static
HRESULT Profiler::CreateObject(
{
}
-Log &Profiler::LOG() const noexcept
-{
- return const_cast<Log&>(m_logger);
-}
-
-ITraceLog &Profiler::TRACE() const noexcept
-{
- // NOTE: default-constructed TraceLog object should not be used for output!
- _ASSERTE(m_traceLog != nullptr);
- return const_cast<ITraceLog&>(*m_traceLog);
-}
-
-DWORD Profiler::GetTickCountFromInit() const noexcept
-{
- return GetTickCount() - m_firstTickCount;
-}
-
HRESULT Profiler::HandleException(const std::exception &e) const noexcept
{
// Find type of exception.
this->HandleException(HresultException(what_arg, hr));
}
-ProfilerConfig &Profiler::GetConfig() noexcept
-{
- return m_profConfig;
-}
-
-const ProfilerInfo &Profiler::GetProfilerInfo() const noexcept
-{
- return m_info;
-}
-
-CommonTrace &Profiler::GetCommonTrace() noexcept
-{
- return m_commonTrace;
-}
-
-CpuTrace &Profiler::GetCpuTrace() noexcept
-{
- return m_cpuTrace;
-}
-
-ExecutionTrace &Profiler::GetExecutionTrace() noexcept
-{
- return m_executionTrace;
-}
-
-MemoryTrace &Profiler::GetMemoryTrace() noexcept
-{
- return m_memoryTrace;
-}
-
void Profiler::SetupLogging(LoggerConfig &config)
{
if (config.OutputStream == LoggerOutputStream::Stdout)
ObjectID objectRefIds[])
{
LOG().Trace() << "ObjectReferences()";
- return S_OK;
+
+ HRESULT hr;
+ hr = m_memoryTrace.ObjectReferences(
+ objectId, classId, cObjectRefs, objectRefIds);
+    // NOTE: with more than one callback handler, check hr and return S_OK
+    // if at least one handler returned S_OK, so that callback calls continue.
+
+ return hr;
}
HRESULT STDMETHODCALLTYPE Profiler::RootReferences(
return S_OK;
}
-
HRESULT STDMETHODCALLTYPE Profiler::GarbageCollectionStarted(
int cGenerations,
BOOL generationCollected[],
COR_PRF_GC_REASON reason)
{
LOG().Trace() << "GarbageCollectionStarted()";
- return S_OK;
+
+ HRESULT hr;
+ hr = m_memoryTrace.GarbageCollectionStarted(
+ cGenerations, generationCollected, reason);
+
+ return hr;
}
HRESULT STDMETHODCALLTYPE Profiler::SurvivingReferences(
HRESULT STDMETHODCALLTYPE Profiler::GarbageCollectionFinished()
{
LOG().Trace() << "GarbageCollectionFinished()";
+
+ HRESULT hr;
+ hr = m_memoryTrace.GarbageCollectionFinished();
+
-    return S_OK;
+    return hr;
}
public:
// Returns mutable reference to the Logger even for a constant
// reference to the Profiler.
- Log &LOG() const noexcept;
+ Log &LOG() const noexcept
+ {
+ return m_logger;
+ }
// Returns mutable reference to the TraceLog even for a constant
// reference to the Profiler.
- ITraceLog &TRACE() const noexcept;
+ ITraceLog &TRACE() const noexcept
+ {
+ // NOTE: default-constructed TraceLog object should not be used for output!
+ _ASSERTE(m_traceLog != nullptr);
+ return *m_traceLog;
+ }
// Retrieves the number of milliseconds that have elapsed since the Profiler
// was initialized.
- DWORD GetTickCountFromInit() const noexcept;
+ DWORD GetTickCountFromInit() const noexcept
+ {
+ return GetTickCount() - m_firstTickCount;
+ }
// Check type of the exception, send corresponding information to the log
// and return HRESULT related to this exception.
// Simple Getters.
//
- ProfilerConfig &GetConfig() noexcept;
+ const ProfilerConfig &GetConfig() const noexcept
+ {
+ return m_profConfig;
+ }
- const ProfilerInfo &GetProfilerInfo() const noexcept;
+ const ProfilerInfo &GetProfilerInfo() const noexcept
+ {
+ return m_info;
+ }
- CommonTrace &GetCommonTrace() noexcept;
+ CommonTrace &GetCommonTrace() const noexcept
+ {
+ return m_commonTrace;
+ }
- CpuTrace &GetCpuTrace() noexcept;
+ CpuTrace &GetCpuTrace() const noexcept
+ {
+ return m_cpuTrace;
+ }
- ExecutionTrace &GetExecutionTrace() noexcept;
+ ExecutionTrace &GetExecutionTrace() const noexcept
+ {
+ return m_executionTrace;
+ }
- MemoryTrace &GetMemoryTrace() noexcept;
+ MemoryTrace &GetMemoryTrace() const noexcept
+ {
+ return m_memoryTrace;
+ }
private:
//
private:
LONG m_cRef;
- Log m_logger;
+ mutable Log m_logger;
BOOL m_initialized;
BOOL m_shutdowned;
ProfilerConfig m_profConfig;
ProfilerInfo m_info;
- std::unique_ptr<ITraceLog> m_traceLog;
+ mutable std::unique_ptr<ITraceLog> m_traceLog;
- CommonTrace m_commonTrace;
- CpuTrace m_cpuTrace;
- ExecutionTrace m_executionTrace;
- MemoryTrace m_memoryTrace;
+ mutable CommonTrace m_commonTrace;
+ mutable CpuTrace m_cpuTrace;
+ mutable ExecutionTrace m_executionTrace;
+ mutable MemoryTrace m_memoryTrace;
DWORD m_firstTickCount;
};
m_version = 0;
}
}
-
-unsigned int ProfilerInfo::version() const noexcept
-{
- return m_version;
-}
-
-ICorProfilerInfo *ProfilerInfo::v1() const noexcept
-{
- _ASSERTE(m_version >= 1);
- return m_pProfilerInfo;
-}
-
-ICorProfilerInfo2 *ProfilerInfo::v2() const noexcept
-{
- _ASSERTE(m_version >= 2);
- return m_pProfilerInfo2;
-}
-
-ICorProfilerInfo3 *ProfilerInfo::v3() const noexcept
-{
- _ASSERTE(m_version >= 3);
- return m_pProfilerInfo3;
-}
-
-ICorProfilerInfo4 *ProfilerInfo::v4() const noexcept
-{
- _ASSERTE(m_version >= 4);
- return m_pProfilerInfo4;
-}
-
-ICorProfilerInfo5 *ProfilerInfo::v5() const noexcept
-{
- _ASSERTE(m_version >= 5);
- return m_pProfilerInfo5;
-}
-
-ICorProfilerInfo6 *ProfilerInfo::v6() const noexcept
-{
- _ASSERTE(m_version >= 6);
- return m_pProfilerInfo6;
-}
-
-ICorProfilerInfo7 *ProfilerInfo::v7() const noexcept
-{
- _ASSERTE(m_version >= 7);
- return m_pProfilerInfo7;
-}
// Get version of the Profiler Info API. Zero value means that no API
// versions is supported.
- unsigned int version() const noexcept;
+ unsigned int version() const noexcept
+ {
+ return m_version;
+ }
//
// These methods provide access to a specific version of the Profiler Info
// interface. You should be sure that the requested version is supported.
// Requesting of unsupported interface version invokes undefined behavior.
//
- ICorProfilerInfo *v1() const noexcept;
- ICorProfilerInfo2 *v2() const noexcept;
- ICorProfilerInfo3 *v3() const noexcept;
- ICorProfilerInfo4 *v4() const noexcept;
- ICorProfilerInfo5 *v5() const noexcept;
- ICorProfilerInfo6 *v6() const noexcept;
- ICorProfilerInfo7 *v7() const noexcept;
+ ICorProfilerInfo *v1() const noexcept
+ {
+ _ASSERTE(m_version >= 1);
+ return m_pProfilerInfo;
+ }
+
+ ICorProfilerInfo2 *v2() const noexcept
+ {
+ _ASSERTE(m_version >= 2);
+ return m_pProfilerInfo2;
+ }
+
+ ICorProfilerInfo3 *v3() const noexcept
+ {
+ _ASSERTE(m_version >= 3);
+ return m_pProfilerInfo3;
+ }
+
+ ICorProfilerInfo4 *v4() const noexcept
+ {
+ _ASSERTE(m_version >= 4);
+ return m_pProfilerInfo4;
+ }
+
+ ICorProfilerInfo5 *v5() const noexcept
+ {
+ _ASSERTE(m_version >= 5);
+ return m_pProfilerInfo5;
+ }
+
+ ICorProfilerInfo6 *v6() const noexcept
+ {
+ _ASSERTE(m_version >= 6);
+ return m_pProfilerInfo6;
+ }
+
+ ICorProfilerInfo7 *v7() const noexcept
+ {
+ _ASSERTE(m_version >= 7);
+ return m_pProfilerInfo7;
+ }
private:
// Pointers to the implementation of the ProfilerInfo interface(s).
+++ /dev/null
-#include "profiler.h"
-#include "profilerinfo.h"
-#include "basetrace.h"
-
-BaseTrace::BaseTrace(Profiler &profiler)
- : m_disabled(true)
- , m_profiler(profiler)
- , m_info(profiler.GetProfilerInfo())
-{
-}
-
-BaseTrace::~BaseTrace()
-{
-}
-
-Log &BaseTrace::LOG() const noexcept
-{
- return m_profiler.LOG();
-}
-
-ITraceLog &BaseTrace::TRACE() const noexcept
-{
- return m_profiler.TRACE();
-}
-
-bool BaseTrace::IsEnabled() const noexcept
-{
- return !m_disabled;
-}
class ProfilerInfo;
+struct ProfilerConfig;
+
class Log;
class ITraceLog;
class BaseTrace
{
protected:
- BaseTrace(Profiler &profiler);
+ inline BaseTrace(Profiler &profiler);
- ~BaseTrace();
+ ~BaseTrace()
+ {}
- Log &LOG() const noexcept;
+ inline Log &LOG() const noexcept;
- ITraceLog &TRACE() const noexcept;
+ inline ITraceLog &TRACE() const noexcept;
public:
- bool IsEnabled() const noexcept;
+ bool IsEnabled() const noexcept
+ {
+ return !m_disabled;
+ }
protected:
bool m_disabled;
#include <winerror.h>
-#include "profiler.h"
#include "intervalsplitter.h"
#include "commontrace.h"
+#include "traceinlines.h"
+
#define CONTROL_SIGNAL_MIN (SIGRTMIN + 4)
#define LOG_SIGNAL (CONTROL_SIGNAL_MIN + 0)
#define LOG_SIGNAL_STOP (CONTROL_SIGNAL_MIN + 1)
// Line Tracing.
//
-//#if !defined(_TARGET_ARM_) && !defined(_TARGET_X86_)
+#if 0
if (config.LineTraceEnabled)
{
config.LineTraceEnabled = false;
LOG().Warn() <<
"Line tracing currently is not supported at this platform";
}
-//#endif // _TARGET_ARM_ or _TARGET_X86_
+#endif
//
// Initializing thread local storage.
}
}
-bool CommonTrace::IsSamplingSuspended() const noexcept
-{
- return m_samplingSuspended;
-}
-
HRESULT CommonTrace::AppDomainCreationFinished(
AppDomainID appDomainId,
HRESULT hrStatus) noexcept
return hr;
}
-
-auto CommonTrace::GetThreadStorage() ->
- decltype(m_threadStorage.lock())
-{
- return m_threadStorage.lock();
-}
-
-auto CommonTrace::GetThreadStorage() const ->
- decltype(m_threadStorage.lock_shared())
-{
- return m_threadStorage.lock_shared();
-}
-
-auto CommonTrace::GetClassStorage() ->
- decltype(m_classStorage.lock())
-{
- return m_classStorage.lock();
-}
-
-auto CommonTrace::GetClassStorage() const ->
- decltype(m_classStorage.lock_shared())
-{
- return m_classStorage.lock_shared();
-}
#include <thread>
-#ifdef _TARGET_AMD64_
-#include <future>
-#endif // _TARGET_AMD64_
-
#include <cor.h>
#include <corhdr.h>
#include <corprof.h>
ThreadInfo &thrInfo, void *context) noexcept;
public:
- ThreadInfo *GetThreadInfo() noexcept;
+ ThreadInfo *GetThreadInfo() noexcept; // TODO
// Simple and safety version of GetThreadInfo() that can be used in signal
// handlers.
- ThreadInfo *GetThreadInfoR() const noexcept;
+ ThreadInfo *GetThreadInfoR() const noexcept; // TODO
void InterruptSampling(
SamplingSharedState &state,
SamplingAction beforeAction = {},
SamplingAction action = {},
- SamplingAction afterAction = {}) noexcept;
+ SamplingAction afterAction = {}) noexcept; // TODO
- void HandleSample(void *context) noexcept;
+ void HandleSample(void *context) noexcept; // TODO
- void HandleSamplingPauseResume(bool shouldPause) noexcept;
+ void HandleSamplingPauseResume(bool shouldPause) noexcept; // TODO
- bool IsSamplingSuspended() const noexcept;
+ bool IsSamplingSuspended() const noexcept
+ {
+ return m_samplingSuspended;
+ }
HRESULT AppDomainCreationFinished(
AppDomainID appDomainId,
bool m_samplingSuspended;
public:
- auto GetThreadStorage() -> decltype(m_threadStorage.lock());
+ auto GetThreadStorage() -> decltype(m_threadStorage.lock())
+ {
+ return m_threadStorage.lock();
+ }
- auto GetThreadStorage() const -> decltype(m_threadStorage.lock_shared());
+ auto GetThreadStorage() const -> decltype(m_threadStorage.lock_shared())
+ {
+ return m_threadStorage.lock_shared();
+ }
- auto GetClassStorage() -> decltype(m_classStorage.lock());
+ auto GetClassStorage() -> decltype(m_classStorage.lock())
+ {
+ return m_classStorage.lock();
+ }
- auto GetClassStorage() const -> decltype(m_classStorage.lock_shared());
+ auto GetClassStorage() const -> decltype(m_classStorage.lock_shared())
+ {
+ return m_classStorage.lock_shared();
+ }
};
#endif // _COMMON_TRACE_H_
#include <errno.h>
-#include "profiler.h"
#include "cputrace.h"
+#include "traceinlines.h"
+
CpuTrace::CpuTrace(Profiler &profiler)
: BaseTrace(profiler)
, m_logThread()
+++ /dev/null
-#include <assert.h>
-
-#include "eventchannel.h"
-
-#define EVENT_CHANNEL_START_CAP 128 // Should be power of 2.
-
-using Stack = EventSummary::Stack;
-
-EventSummary::EventSummary(Stack::size_type stackSize)
- : ticks(0)
- , count(0)
- , matchPrefixSize(stackSize)
- , stackSize(stackSize)
- , ipIsChanged(false)
- , ip(0)
- , newFrames()
-{}
-
-bool EventSummary::HasStackSample() const noexcept
-{
- return count > 0
- || matchPrefixSize != stackSize
- || ipIsChanged
- || newFrames.size() > 0;
-}
-
-bool EventSummary::HasAllocSample() const noexcept
-{
- return allocTable.size() > 0;
-}
-
-EventChannel::EventChannel()
- : m_stack()
- , m_currentState()
- , m_buffer(EVENT_CHANNEL_START_CAP)
- , m_mutex()
- , m_bufferCapacityIncreaseIsPlanned(false)
-{}
-
-void EventChannel::IncreaseBufferCapacity()
-{
- m_buffer.reserve(m_buffer.capacity() * 2);
- m_bufferCapacityIncreaseIsPlanned = false;
-}
-
-bool EventChannel::EnsureBufferCapacity(ChanCanRealloc canRealloc)
-{
- bool isBufferNoSpace = m_buffer.size() == m_buffer.capacity();
- Stack::difference_type needStackSize =
- m_stack.size() - m_currentState.matchPrefixSize;
- bool isStackNoSpace = m_currentState.newFrames.capacity() < needStackSize;
- switch (canRealloc)
- {
- case ChanCanRealloc::NO:
- if (isBufferNoSpace)
- {
- this->PlanToIncreaseBufferCapacity();
- }
- assert(!isStackNoSpace);
- return !isBufferNoSpace;
-
- case ChanCanRealloc::YES:
- if (isBufferNoSpace || m_bufferCapacityIncreaseIsPlanned)
- {
- std::lock_guard<decltype(m_mutex)> lock(m_mutex);
- this->IncreaseBufferCapacity();
- }
- if (isStackNoSpace)
- {
- m_currentState.newFrames.reserve(needStackSize);
- }
- assert(m_buffer.capacity() != m_buffer.size());
- assert(m_currentState.newFrames.capacity() >= needStackSize);
- return true;
- }
-}
-
-void EventChannel::Push(const FunctionInfo &funcInfo) noexcept
-{
- assert(m_currentState.matchPrefixSize <= m_stack.size());
- m_stack.push_back(Frame{&funcInfo, 0});
-
- // XXX: exception in this call will terminate process!
- this->EnsureBufferCapacity(); // Perform planned reallocation.
-}
-
-void EventChannel::Pop() noexcept
-{
- assert(!m_stack.empty());
- assert(m_currentState.matchPrefixSize <= m_stack.size());
- m_stack.pop_back();
- if (m_stack.size() < m_currentState.matchPrefixSize)
- {
- m_currentState.matchPrefixSize = m_stack.size();
- m_currentState.ipIsChanged = false;
- }
-
- // XXX: exception in this call will terminate process!
- this->EnsureBufferCapacity(); // Perform planned reallocation.
-}
-
-void EventChannel::ChIP(UINT_PTR ip, size_t idxFromTop) noexcept
-{
- assert(idxFromTop < m_stack.size());
- assert(m_currentState.matchPrefixSize <= m_stack.size());
- assert(
- m_stack.size() - idxFromTop >= m_currentState.matchPrefixSize
- );
-
- Frame &frame = const_cast<Frame&>(this->GetFrameFromTop(idxFromTop));
- size_t frameIdx = m_stack.size() - idxFromTop - 1;
- assert(&m_stack[frameIdx] == &frame);
- if (frame.ip != ip)
- {
- if (frameIdx + 1 == m_currentState.matchPrefixSize)
- {
- m_currentState.ipIsChanged = true;
- }
- frame.ip = ip;
- }
-
- // XXX: exception in this call will terminate process!
- this->EnsureBufferCapacity(); // Perform planned reallocation.
-}
-
-void EventChannel::Allocation(
- ClassInfo &classInfo, SIZE_T size, UINT_PTR ip) noexcept
-{
- AllocInfo &allocInfo =
- m_currentState.allocTable[classInfo.internalId.id][ip];
- allocInfo.allocCount++;
- allocInfo.memSize += size;
-}
-
-bool EventChannel::Sample(
- DWORD ticks, ULONG count, ChanCanRealloc canRealloc) noexcept
-{
- assert(m_currentState.matchPrefixSize <= m_stack.size());
- assert(m_currentState.matchPrefixSize <= m_currentState.stackSize);
- assert(!m_currentState.ipIsChanged || m_currentState.matchPrefixSize > 0);
-
- // XXX: exception in this call will terminate process!
- if (!this->EnsureBufferCapacity(canRealloc))
- {
- // No space for new sample.
- return false;
- }
-
- m_currentState.ticks = ticks;
- m_currentState.count = count;
-
- if (m_currentState.ipIsChanged)
- {
- m_currentState.ip = m_stack[m_currentState.matchPrefixSize - 1].ip;
- }
-
- assert(m_currentState.newFrames.size() == 0);
- m_currentState.newFrames.assign(
- m_stack.cbegin() + m_currentState.matchPrefixSize, m_stack.cend());
-
- m_buffer.push_back(std::move(m_currentState));
- m_currentState = EventSummary(m_stack.size());
-
- return true;
-}
-
-void EventChannel::PlanToIncreaseBufferCapacity() noexcept
-{
- m_bufferCapacityIncreaseIsPlanned = true;
-}
-
-Stack::size_type EventChannel::GetStackSize() const noexcept
-{
- return m_stack.size();
-}
-
-bool EventChannel::HasStackSample() const noexcept
-{
- return m_currentState.HasStackSample() ||
- m_stack.size() > m_currentState.matchPrefixSize;
-}
-
-bool EventChannel::HasAllocSample() const noexcept
-{
- return m_currentState.HasAllocSample();
-}
-
-const Frame &EventChannel::GetFrameFromTop(
- Stack::size_type idxFromTop) const noexcept
-{
- assert(idxFromTop < m_stack.size());
- return m_stack.rbegin()[idxFromTop];
-}
-
-size_t EventChannel::GetEventSummaryCount() const noexcept
-{
- return m_buffer.size();
-}
-
-const EventSummary &EventChannel::GetCurrentEventSummary() noexcept
-{
- m_mutex.lock();
- return m_buffer.front();
-}
-
-void EventChannel::NextEventSummary() noexcept
-{
- m_buffer.pop_front();
- m_mutex.unlock();
-}
#include <mutex>
#include <atomic>
+#include <assert.h>
+
#include "functioninfo.h"
-#include "classinfo.h"
+#include "objectinfo.h"
#include "ringbuffer.h"
+#define EVENT_CHANNEL_START_CAP 128 // Should be power of 2.
+
struct Frame
{
const FunctionInfo *pFuncInfo;
SIZE_T memSize = 0;
};
-typedef std::map<ULONG, std::map<UINT_PTR, AllocInfo>> AllocTable;
+typedef std::map<ULONG, AllocInfo> AllocTable;
+
+typedef std::map<ULONG, std::map<UINT_PTR, AllocInfo>> AllocIpTable;
struct EventSummary
{
typedef std::vector<Frame> Stack;
- explicit EventSummary(Stack::size_type stackSize = 0);
+ explicit EventSummary(Stack::size_type stackSize = 0)
+ // Sample
+ : ticks(0)
+ , count(0)
+ , matchPrefixSize(stackSize)
+ , stackSize(stackSize)
+ , ipIsChanged(false)
+ , ip(0)
+ , newFrames()
+ // Allocations
+ , allocIpTable()
+ {}
//
// Sample
UINT_PTR ip;
Stack newFrames;
- bool HasStackSample() const noexcept;
+ bool HasStackSample() const noexcept
+ {
+ return count > 0
+ || matchPrefixSize != stackSize
+ || ipIsChanged
+ || newFrames.size() > 0;
+ }
//
// Allocations
//
- AllocTable allocTable;
+ AllocIpTable allocIpTable;
- bool HasAllocSample() const noexcept;
+ bool HasAllocSample() const noexcept
+ {
+ return allocIpTable.size() > 0;
+ }
};
enum class ChanCanRealloc
public:
typedef EventSummary::Stack Stack;
- EventChannel();
+ EventChannel()
+ : m_stack()
+ , m_currentState()
+ , m_buffer(EVENT_CHANNEL_START_CAP)
+ , m_mutex()
+ , m_bufferCapacityIncreaseIsPlanned(false)
+ {}
private:
- void IncreaseBufferCapacity();
-
- bool EnsureBufferCapacity(ChanCanRealloc canRealloc = ChanCanRealloc::YES);
+ void IncreaseBufferCapacity()
+ {
+ m_buffer.reserve(m_buffer.capacity() * 2);
+ m_bufferCapacityIncreaseIsPlanned = false;
+ }
+
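+    // NOTE (assumption): reallocation may be disallowed at the point where
+    // a sample is taken (ChanCanRealloc::NO, presumably an async context
+    // such as a signal handler), so growth is only planned here and later
+    // performed by Push()/Pop()/ChIP() through EnsureBufferCapacity().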
+ void PlanToIncreaseBufferCapacity() noexcept
+ {
+ m_bufferCapacityIncreaseIsPlanned = true;
+ }
+
+ bool EnsureBufferCapacity(ChanCanRealloc canRealloc = ChanCanRealloc::YES)
+ {
+ bool isBufferNoSpace = m_buffer.size() == m_buffer.capacity();
+ Stack::difference_type needStackSize =
+ m_stack.size() - m_currentState.matchPrefixSize;
+ bool isStackNoSpace = m_currentState.newFrames.capacity() < needStackSize;
+ switch (canRealloc)
+ {
+ case ChanCanRealloc::NO:
+ if (isBufferNoSpace)
+ {
+ this->PlanToIncreaseBufferCapacity();
+ }
+ assert(!isStackNoSpace);
+ return !isBufferNoSpace;
+
+ case ChanCanRealloc::YES:
+ if (isBufferNoSpace || m_bufferCapacityIncreaseIsPlanned)
+ {
+ std::lock_guard<decltype(m_mutex)> lock(m_mutex);
+ this->IncreaseBufferCapacity();
+ }
+ if (isStackNoSpace)
+ {
+ m_currentState.newFrames.reserve(needStackSize);
+ }
+ assert(m_buffer.capacity() != m_buffer.size());
+ assert(m_currentState.newFrames.capacity() >= needStackSize);
+ return true;
+ }
+ }
public:
//
// Writer methods.
//
- void Push(const FunctionInfo &funcInfo) noexcept;
-
- void Pop() noexcept;
-
- void ChIP(UINT_PTR ip, size_t idxFromTop = 0) noexcept;
-
- void Allocation(
- ClassInfo &classInfo, SIZE_T size, UINT_PTR ip = 0) noexcept;
+ void Push(const FunctionInfo &funcInfo) noexcept
+ {
+ assert(m_currentState.matchPrefixSize <= m_stack.size());
+ m_stack.push_back(Frame{&funcInfo, 0});
+
+ // XXX: exception in this call will terminate process!
+ this->EnsureBufferCapacity(); // Perform planned reallocation.
+ }
+
+ void Pop() noexcept
+ {
+ assert(!m_stack.empty());
+ assert(m_currentState.matchPrefixSize <= m_stack.size());
+ m_stack.pop_back();
+ if (m_stack.size() < m_currentState.matchPrefixSize)
+ {
+ m_currentState.matchPrefixSize = m_stack.size();
+ m_currentState.ipIsChanged = false;
+ }
+
+ // XXX: exception in this call will terminate process!
+ this->EnsureBufferCapacity(); // Perform planned reallocation.
+ }
+
+ void ChIP(UINT_PTR ip, size_t idxFromTop = 0) noexcept
+ {
+ assert(idxFromTop < m_stack.size());
+ assert(m_currentState.matchPrefixSize <= m_stack.size());
+ assert(
+ m_stack.size() - idxFromTop >= m_currentState.matchPrefixSize
+ );
+
+ Frame &frame = const_cast<Frame&>(this->GetFrameFromTop(idxFromTop));
+ size_t frameIdx = m_stack.size() - idxFromTop - 1;
+ assert(&m_stack[frameIdx] == &frame);
+ if (frame.ip != ip)
+ {
+ if (frameIdx + 1 == m_currentState.matchPrefixSize)
+ {
+ m_currentState.ipIsChanged = true;
+ }
+ frame.ip = ip;
+ }
+
+ // XXX: exception in this call will terminate process!
+ this->EnsureBufferCapacity(); // Perform planned reallocation.
+ }
+
+ void Allocation(ObjectInfo &objInfo, UINT_PTR ip = 0) noexcept
+ {
+ _ASSERTE(objInfo.type != nullptr);
+ AllocInfo &allocInfo =
+ m_currentState.allocIpTable[objInfo.type->internalId.id][ip];
+ allocInfo.allocCount++;
+ allocInfo.memSize += objInfo.size;
+ }
bool Sample(
DWORD ticks, ULONG count,
- ChanCanRealloc canRealloc = ChanCanRealloc::YES) noexcept;
-
- void PlanToIncreaseBufferCapacity() noexcept;
-
- Stack::size_type GetStackSize() const noexcept;
+ ChanCanRealloc canRealloc = ChanCanRealloc::YES) noexcept
+ {
+ assert(m_currentState.matchPrefixSize <= m_stack.size());
+ assert(m_currentState.matchPrefixSize <= m_currentState.stackSize);
+ assert(!m_currentState.ipIsChanged || m_currentState.matchPrefixSize > 0);
+
+ // XXX: exception in this call will terminate process!
+ if (!this->EnsureBufferCapacity(canRealloc))
+ {
+ // No space for new sample.
+ return false;
+ }
+
+ m_currentState.ticks = ticks;
+ m_currentState.count = count;
+
+ if (m_currentState.ipIsChanged)
+ {
+ m_currentState.ip = m_stack[m_currentState.matchPrefixSize - 1].ip;
+ }
+
+ assert(m_currentState.newFrames.size() == 0);
+ m_currentState.newFrames.assign(
+ m_stack.cbegin() + m_currentState.matchPrefixSize, m_stack.cend());
+
+ m_buffer.push_back(std::move(m_currentState));
+ m_currentState = EventSummary(m_stack.size());
+
+ return true;
+ }
+
+ Stack::size_type GetStackSize() const noexcept
+ {
+ return m_stack.size();
+ }
const Frame &GetFrameFromTop(
- Stack::size_type idxFromTop = 0) const noexcept;
-
- bool HasStackSample() const noexcept;
-
- bool HasAllocSample() const noexcept;
+ Stack::size_type idxFromTop = 0) const noexcept
+ {
+ assert(idxFromTop < m_stack.size());
+ return m_stack.rbegin()[idxFromTop];
+ }
+
+ bool HasStackSample() const noexcept
+ {
+ return m_currentState.HasStackSample() ||
+ m_stack.size() > m_currentState.matchPrefixSize;
+ }
+
+ bool HasAllocSample() const noexcept
+ {
+ return m_currentState.HasAllocSample();
+ }
//
// Reader methods.
//
- size_t GetEventSummaryCount() const noexcept;
+ size_t GetEventSummaryCount() const noexcept
+ {
+ return m_buffer.size();
+ }
// Reference only valid until next call to NextEventSummary().
- const EventSummary &GetCurrentEventSummary() noexcept;
-
- void NextEventSummary() noexcept;
+ const EventSummary &GetCurrentEventSummary() noexcept
+ {
+ m_mutex.lock();
+ return m_buffer.front();
+ }
+
+ void NextEventSummary() noexcept
+ {
+ m_buffer.pop_front();
+ m_mutex.unlock();
+ }
private:
Stack m_stack;
#include <exception>
#include <tuple>
-#include "profiler.h"
#include "executiontrace.h"
+#include "traceinlines.h"
+
EXTERN_C UINT_PTR __stdcall FunctionIDMapStub(
FunctionID funcId,
void *clientData,
// Check activation condition.
//
- if (config.ExecutionTraceEnabled || config.MemoryTraceEnabled)
+ if (config.ExecutionTraceEnabled ||
+ (config.MemoryTraceEnabled && config.StackTrackingEnabled))
{
m_disabled = false;
}
m_disabled = true;
}
-bool ExecutionTrace::IsPseudoFunction(
- const FunctionInfo &funcInfo) const noexcept
-{
- _ASSERTE(m_pUnmanagedFunctionInfo != nullptr);
- _ASSERTE(m_pUnmanagedFunctionInfo->internalId.id == 0);
- _ASSERTE(m_pJitFunctionInfo != nullptr);
- _ASSERTE(m_pJitFunctionInfo->internalId.id == 1);
-
- return funcInfo.internalId.id >= 0 && funcInfo.internalId.id <= 1;
-}
-
UINT_PTR ExecutionTrace::GetCurrentManagedIP(
ThreadInfo &thrInfo, CONTEXT *winContext) noexcept
{
}
}
-bool ExecutionTrace::NeedSample(
- ThreadInfo &thrInfo, SamplingSharedState &state) const noexcept
-{
- if (m_disabled || !m_profiler.GetConfig().ExecutionTraceEnabled)
- return false;
-
- return (thrInfo.fixTicks != state.genTicks) ||
- (
- thrInfo.eventChannel.HasStackSample() &&
- (m_profiler.GetConfig().CollectionMethod ==
- CollectionMethod::Instrumentation &&
- !m_profiler.GetCommonTrace().IsSamplingSuspended())
- );
-}
-
-HRESULT ContextToStackSnapshotContext(
- const void *context, CONTEXT *winContext) noexcept;
-
-void ExecutionTrace::PrepareSample(
- ThreadInfo &thrInfo, SamplingSharedState &state) noexcept
-{
- if (m_profiler.GetConfig().LineTraceEnabled &&
- thrInfo.eventChannel.GetStackSize() > 0)
- {
- if (state.context)
- {
- CONTEXT winContext;
- if (SUCCEEDED(ContextToStackSnapshotContext(
- state.context, &winContext)))
- {
- this->RestoreManagedIP(thrInfo, &winContext);
- }
- else
- {
- thrInfo.eventChannel.ChIP(0);
- }
- }
- else
- {
- this->RestoreManagedIP(thrInfo);
- }
- state.isIpRestored = true;
- }
-}
-
-void ExecutionTrace::AfterSample(
- ThreadInfo &thrInfo, SamplingSharedState &state) noexcept
-{
- if (state.isSampleSucceeds)
- {
- thrInfo.maxRestoreIpIdx = 0;
- }
-}
-
-HRESULT ContextToStackSnapshotContext(
- const void *context, CONTEXT *winContext) noexcept;
-
void ExecutionTrace::UpdateCallStackPush(const FunctionInfo &funcInfo) noexcept
{
SamplingSharedState state = {};
HRESULT hr = S_OK;
try
{
- this->UpdateCallStackPush(*m_pJitFunctionInfo);
+ if (m_profiler.GetConfig().LineTraceEnabled)
+ {
+ this->UpdateCallStackPush(*m_pJitFunctionInfo, 0);
+ }
+ else
+ {
+ this->UpdateCallStackPush(*m_pJitFunctionInfo);
+ }
m_functionStorage.lock()->Place(functionId);
}
catch (const std::exception &e)
if (reason == COR_PRF_TRANSITION_CALL)
{
- this->UpdateCallStackPush(*m_pUnmanagedFunctionInfo);
+ if (m_profiler.GetConfig().LineTraceEnabled)
+ {
+ this->UpdateCallStackPush(*m_pUnmanagedFunctionInfo, 0);
+ }
+ else
+ {
+ this->UpdateCallStackPush(*m_pUnmanagedFunctionInfo);
+ }
}
return S_OK;
#include <corprof.h>
#include "basetrace.h"
+#include "commontrace.h"
#include "sharedresource.h"
#include "threadinfo.h"
// #include "shared_iterator_range.h"
+HRESULT ContextToStackSnapshotContext(
+ const void *context, CONTEXT *winContext) noexcept;
+
class ExecutionTrace final : public BaseTrace
{
private:
void ProcessConfig(ProfilerConfig &config);
- bool IsPseudoFunction(const FunctionInfo &funcInfo) const noexcept;
+ __forceinline bool IsPseudoFunction(
+ const FunctionInfo &funcInfo) const noexcept;
void Shutdown() noexcept;
void RestoreManagedIP(
ThreadInfo &thrInfo, CONTEXT *winContext = nullptr) noexcept;
- bool NeedSample(
+ __forceinline bool NeedSample(
ThreadInfo &thrInfo, SamplingSharedState &state) const noexcept;
- void PrepareSample(
+ __forceinline void PrepareSample(
ThreadInfo &thrInfo, SamplingSharedState &state) noexcept;
- void AfterSample(
+ __forceinline void AfterSample(
ThreadInfo &thrInfo, SamplingSharedState &state) noexcept;
private:
-#include "profiler.h"
+#include <algorithm>
+
#include "memorytrace.h"
+#include "traceinlines.h"
+
MemoryTrace::MemoryTrace(Profiler &profiler)
: BaseTrace(profiler)
+ , m_objectTrackingSuspended(false)
+ , m_objectTrackingFailure(false)
+ , m_survivedObjects()
{
}
);
}
- // This events are common for memory tracing.
- events = events
- | COR_PRF_ENABLE_OBJECT_ALLOCATED
- | COR_PRF_MONITOR_OBJECT_ALLOCATED;
+ if (config.CollectionMethod == CollectionMethod::Instrumentation ||
+ config.CollectionMethod == CollectionMethod::Sampling)
+ {
+        // These events are required for tracking objects.
+ events = events
+ | COR_PRF_ENABLE_OBJECT_ALLOCATED
+ | COR_PRF_MONITOR_OBJECT_ALLOCATED;
+ }
+
+ if (config.GcAllocTableTraceEnabled)
+ {
+        // These events are required for GC tracing.
+ events = events | COR_PRF_MONITOR_GC;
+ }
//
// Set Event Mask.
m_disabled = true;
}
-bool MemoryTrace::NeedSample(
- ThreadInfo &thrInfo, SamplingSharedState &state) const noexcept
+HRESULT MemoryTrace::InitAllocInfoByTypes(AllocTable &allocInfoByTypes) noexcept
{
- if (m_disabled)
- return false;
-
- return thrInfo.eventChannel.HasAllocSample() &&
- (
- (thrInfo.fixTicks != state.genTicks) ||
- (m_profiler.GetConfig().CollectionMethod ==
- CollectionMethod::Instrumentation) ||
- (m_profiler.GetConfig().StackTrackingEnabled &&
- state.stackWillBeChanged)
- );
+ HRESULT hr = S_OK;
+ try
+ {
+ auto storage_lock = m_profiler.GetCommonTrace().GetClassStorage();
+ for (const auto &objectWithClass : m_survivedObjects)
+ {
+ ObjectID objectId = objectWithClass.first;
+ ClassID classId = objectWithClass.second;
+
+ ObjectInfo objInfo = {};
+ objInfo.id = objectId;
+ // NOTE: it is OK to use classId == 0 here.
+ hr = objInfo.Initialize(m_profiler, *storage_lock, classId);
+ if (FAILED(hr))
+ {
+ throw std::runtime_error(
+ "MemoryTrace::GetAllocInfoByTypes(): "
+ "Failed to initialize object info"
+ );
+ }
+
+ _ASSERTE(objInfo.type != nullptr);
+ if (!objInfo.type->isNamePrinted)
+ {
+ TRACE().DumpClassName(*objInfo.type);
+ objInfo.type->isNamePrinted = true;
+ }
+
+ AllocInfo &allocInfo =
+ allocInfoByTypes[objInfo.type->internalId.id];
+ allocInfo.allocCount++;
+ allocInfo.memSize += objInfo.size;
+ }
+ }
+ catch (const std::exception &e)
+ {
+ hr = m_profiler.HandleException(e);
+ }
+
+ return hr;
}
HRESULT MemoryTrace::ObjectAllocated(
HRESULT hr = S_OK;
try
{
- auto storage_lock = m_profiler.GetCommonTrace().GetClassStorage();
- ClassInfo &classInfo = storage_lock->Place(classId).first;
- classInfo.Initialize(m_profiler, *storage_lock);
- if (!classInfo.isNamePrinted)
- {
- TRACE().DumpClassName(classInfo);
- classInfo.isNamePrinted = true;
- }
-
- SIZE_T objectSize = 0;
- if (m_info.version() >= 4)
+ ObjectInfo objInfo = {};
+ objInfo.id = objectId;
{
- hr = m_info.v4()->GetObjectSize2(objectId, &objectSize);
+ auto storage_lock = m_profiler.GetCommonTrace().GetClassStorage();
+ hr = objInfo.Initialize(m_profiler, *storage_lock, classId);
}
- else
+ if (FAILED(hr))
{
- ULONG size = 0;
- hr = m_info.v1()->GetObjectSize(objectId, &size);
- objectSize = size;
+ throw std::runtime_error(
+ "MemoryTrace::ObjectAllocated(): "
+ "Failed to initialize object info"
+ );
}
- if (FAILED(hr))
+
+ _ASSERTE(objInfo.type != nullptr);
+ if (!objInfo.type->isNamePrinted)
{
- throw HresultException(
- "MemoryTrace::ObjectAllocated(): GetObjectSize()", hr);
+ TRACE().DumpClassName(*objInfo.type);
+ objInfo.type->isNamePrinted = true;
}
UINT_PTR ip = 0;
SamplingSharedState state = {};
m_profiler.GetCommonTrace().InterruptSampling(
state,
- [this, &classInfo, &objectSize, &ip]
+ [this, &objInfo, &ip]
(ThreadInfo &thrInfo, SamplingSharedState &state)
{
EventChannel &channel = thrInfo.eventChannel;
GetCurrentManagedIP(thrInfo);
}
}
- channel.Allocation(classInfo, objectSize, ip);
+ channel.Allocation(objInfo, ip);
}
);
}
return hr;
}
+
+HRESULT MemoryTrace::GarbageCollectionStarted(
+ int cGenerations,
+ BOOL generationCollected[],
+ COR_PRF_GC_REASON reason) noexcept
+{
+ if (m_disabled)
+ return S_OK;
+
+ _ASSERTE(m_survivedObjects.empty());
+
+ // Reset object tracking transaction members.
+    // Cache the value of IsSamplingSuspended() for this transaction.
+ m_objectTrackingSuspended =
+ m_profiler.GetCommonTrace().IsSamplingSuspended();
+ m_objectTrackingFailure = false;
+
+ return S_OK;
+}
+
+HRESULT MemoryTrace::ObjectReferences(
+ ObjectID objectId,
+ ClassID classId,
+ ULONG cObjectRefs,
+ ObjectID objectRefIds[]) noexcept
+{
+ // Try to cancel callbacks if we don't need them.
+
+ if (m_disabled)
+ return E_FAIL;
+
+ if (m_objectTrackingSuspended)
+ return E_FAIL;
+
+ if (m_objectTrackingFailure)
+ return E_FAIL;
+
+ HRESULT hr = E_FAIL;
+
+ //
+ // GC Allocations Table Tracing
+ //
+
+ if (m_profiler.GetConfig().GcAllocTableTraceEnabled)
+ {
+ hr = S_OK;
+ try
+ {
+ // NOTE: it is unsafe to use objectId references here so we save
+ // them for handling in GarbageCollectionFinished() callback.
+
+ auto objectWithClass = std::make_pair(objectId, classId);
+            // Ensure there is no copy of objectWithClass already in the vector.
+ // NOTE: only in Debug build.
+ _ASSERTE(
+ std::find_if(
+ m_survivedObjects.begin(),
+ m_survivedObjects.end(),
+ [&objectWithClass]
+ (decltype(m_survivedObjects)::const_reference e)
+ { return e.first == objectWithClass.first; }
+ ) == m_survivedObjects.end()
+ );
+ m_survivedObjects.push_back(objectWithClass);
+ }
+ catch (const std::exception &e)
+ {
+ // Can't update m_survivedObjects, so cancel transaction.
+ m_objectTrackingFailure = true;
+ m_survivedObjects.clear();
+ hr = m_profiler.HandleException(e);
+ }
+ }
+
+ return hr;
+}
+
+HRESULT MemoryTrace::GarbageCollectionFinished() noexcept
+{
+ if (m_disabled)
+ return S_OK;
+
+ // Check transaction is not suspended.
+ if (m_objectTrackingSuspended)
+ return S_OK;
+
+    // Check the transaction is valid.
+ if (m_objectTrackingFailure)
+ return S_OK;
+
+ HRESULT hr = S_OK;
+
+ //
+ // GC Allocations Table Tracing
+ //
+
+ if (m_profiler.GetConfig().GcAllocTableTraceEnabled)
+ {
+ try
+ {
+ DWORD ticks = m_profiler.GetTickCountFromInit();
+ AllocTable allocInfoByTypes;
+ hr = this->InitAllocInfoByTypes(allocInfoByTypes);
+ if (SUCCEEDED(hr))
+ {
+ // TODO: force other threads to do their memory samples
+ // in High Granularity mode.
+ TRACE().DumpGcHeapAllocTable(ticks, allocInfoByTypes);
+ }
+ }
+ catch (const std::exception &e)
+ {
+ hr = m_profiler.HandleException(e);
+ }
+
+ m_survivedObjects.clear();
+ }
+
+ return hr;
+}
#ifndef _MEMORY_TRACE_H_
#define _MEMORY_TRACE_H_
+#include <vector>
+#include <utility>
+
#include <cor.h>
#include <corhdr.h>
#include <corprof.h>
#include "basetrace.h"
+#include "commontrace.h"
class MemoryTrace : public BaseTrace
{
void Shutdown() noexcept;
- bool NeedSample(
+ __forceinline bool NeedSample(
ThreadInfo &thrInfo, SamplingSharedState &state) const noexcept;
// void PrepareSample(
// ThreadInfo &thrInfo, SamplingSharedState &state) noexcept;
private:
+ HRESULT InitAllocInfoByTypes(AllocTable &allocInfoByTypes) noexcept;
public:
//
HRESULT ObjectAllocated(
ObjectID objectId,
ClassID classId) noexcept;
+
+ HRESULT GarbageCollectionStarted(
+ int cGenerations,
+ BOOL generationCollected[],
+ COR_PRF_GC_REASON reason) noexcept;
+
+ HRESULT ObjectReferences(
+ ObjectID objectId,
+ ClassID classId,
+ ULONG cObjectRefs,
+ ObjectID objectRefIds[]) noexcept;
+
+ HRESULT GarbageCollectionFinished() noexcept;
+
+private:
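+    // Object-tracking "transaction" state for a single GC cycle: set up in
+    // GarbageCollectionStarted(), extended by ObjectReferences(), consumed
+    // and cleared in GarbageCollectionFinished().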
+ bool m_objectTrackingSuspended;
+ bool m_objectTrackingFailure;
+ std::vector<std::pair<ObjectID, ClassID>> m_survivedObjects;
};
#endif // _MEMORY_TRACE_H_
--- /dev/null
+#include "profiler.h"
+
+//
+// BaseTrace
+//
+
+BaseTrace::BaseTrace(Profiler &profiler)
+ : m_disabled(true)
+ , m_profiler(profiler)
+ , m_info(profiler.GetProfilerInfo())
+{}
+
+Log &BaseTrace::LOG() const noexcept
+{
+ return m_profiler.LOG();
+}
+
+ITraceLog &BaseTrace::TRACE() const noexcept
+{
+ return m_profiler.TRACE();
+}
+
+//
+// ExecutionTrace
+//
+
+bool ExecutionTrace::IsPseudoFunction(
+ const FunctionInfo &funcInfo) const noexcept
+{
+ _ASSERTE(m_pUnmanagedFunctionInfo != nullptr);
+ _ASSERTE(m_pUnmanagedFunctionInfo->internalId.id == 0);
+ _ASSERTE(m_pJitFunctionInfo != nullptr);
+ _ASSERTE(m_pJitFunctionInfo->internalId.id == 1);
+
+ return funcInfo.internalId.id >= 0 && funcInfo.internalId.id <= 1;
+}
+
+bool ExecutionTrace::NeedSample(
+ ThreadInfo &thrInfo, SamplingSharedState &state) const noexcept
+{
+ if (m_disabled || !m_profiler.GetConfig().ExecutionTraceEnabled)
+ return false;
+
+ return (thrInfo.fixTicks != state.genTicks) ||
+ (
+ thrInfo.eventChannel.HasStackSample() &&
+ (m_profiler.GetConfig().CollectionMethod ==
+ CollectionMethod::Instrumentation &&
+ !m_profiler.GetCommonTrace().IsSamplingSuspended())
+ );
+}
+
+void ExecutionTrace::PrepareSample(
+ ThreadInfo &thrInfo, SamplingSharedState &state) noexcept
+{
+ if (m_profiler.GetConfig().LineTraceEnabled &&
+ thrInfo.eventChannel.GetStackSize() > 0)
+ {
+ if (state.context)
+ {
+ CONTEXT winContext;
+ if (SUCCEEDED(ContextToStackSnapshotContext(
+ state.context, &winContext)))
+ {
+ this->RestoreManagedIP(thrInfo, &winContext);
+ }
+ else
+ {
+ thrInfo.eventChannel.ChIP(0);
+ }
+ }
+ else
+ {
+ this->RestoreManagedIP(thrInfo);
+ }
+ state.isIpRestored = true;
+ }
+}
+
+void ExecutionTrace::AfterSample(
+ ThreadInfo &thrInfo, SamplingSharedState &state) noexcept
+{
+ if (state.isSampleSucceeds)
+ {
+ thrInfo.maxRestoreIpIdx = 0;
+ }
+}
+
+//
+// MemoryTrace
+//
+
+bool MemoryTrace::NeedSample(
+ ThreadInfo &thrInfo, SamplingSharedState &state) const noexcept
+{
+ if (m_disabled)
+ return false;
+
+ return thrInfo.eventChannel.HasAllocSample() &&
+ (
+ (thrInfo.fixTicks != state.genTicks) ||
+ (m_profiler.GetConfig().CollectionMethod ==
+ CollectionMethod::Instrumentation) ||
+ (m_profiler.GetConfig().StackTrackingEnabled &&
+ state.stackWillBeChanged)
+ );
+}
PAL_fprintf(
m_pStream, "sam mem 0x%08x %d", threadIid.id, summary.ticks
);
- for (const auto &classIdIpAllocInfo : summary.allocTable)
+ for (const auto &classIidIpAllocInfo : summary.allocIpTable)
{
- for (const auto &IpAllocInfo : classIdIpAllocInfo.second)
+ for (const auto &IpAllocInfo : classIidIpAllocInfo.second)
{
PAL_fprintf(
m_pStream,
IpAllocInfo.first != 0 ?
" 0x%x:%Iu:%Iu:%p" : " 0x%x:%Iu:%Iu",
- classIdIpAllocInfo.first,
+ classIidIpAllocInfo.first,
IpAllocInfo.second.allocCount,
IpAllocInfo.second.memSize,
IpAllocInfo.first
}
}
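+
+    // Emits one "gch alt <ticks>" record followed by
+    // "0x<classIid>:<allocCount>:<memSize>" entries, similar to the
+    // "sam mem" records above.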
+ virtual void DumpGcHeapAllocTable(
+ DWORD ticksFromStart,
+ const AllocTable &allocInfoByTypes) override
+ {
+ std::lock_guard<std::mutex> streamLock(m_mStream);
+
+ PAL_fprintf(m_pStream, "gch alt %d", ticksFromStart);
+ for (const auto &classIidAllocInfo : allocInfoByTypes)
+ {
+ PAL_fprintf(
+ m_pStream,
+ " 0x%x:%Iu:%Iu",
+ classIidAllocInfo.first,
+ classIidAllocInfo.second.allocCount,
+ classIidAllocInfo.second.memSize
+ );
+ }
+ PAL_fprintf(m_pStream, "\n");
+ }
+
private:
PAL_FILE *m_pStream;
std::mutex m_mStream;
virtual void DumpSample(
InternalID threadIid,
const EventSummary &summary) = 0;
+
+ virtual void DumpGcHeapAllocTable(
+ DWORD ticksFromStart,
+ const AllocTable &allocInfoByTypes) = 0;
};
#endif // _TRACE_LOG_H_