Must be greater or equal to `PROF_MEMORY_MIN_SIZE_LIMIT` or will be disabled.
Default value is `0` (limit is disabled).
+ - `PROF_MEMORY_SAMPLING_ENABLE` - enables sampling memory profiling mode, in which allocations are sampled based on object allocation size.
+ Default value is `false` (disabled).
+
+ - `PROF_MEMORY_SAMPLING_INTERVAL` - the sampling interval, in bytes of allocated object size, used by sampling memory profiling mode.
+ Default value is `512`.
+
+ - `PROF_MEMORY_SAMPLING_RANDOMIZATION`
+ A Poisson point process is used to randomize sampling points on a line segment (https://en.wikipedia.org/wiki/Poisson_point_process).
+ With it, sampling points are placed randomly on a line representing all allocations, so that more points fall into
+ larger allocations and fewer points fall into smaller ones. This randomization is needed when there are many small
+ allocations, which might otherwise be stepped over entirely by the constant sampling interval `PROF_MEMORY_SAMPLING_INTERVAL`.
+ Default value is `true`.
+
- `PROF_STACK_DEPTH_LIMIT` - track changes of execution stack for first
`PROF_STACK_DEPTH_LIMIT` frames only. Default value is `0` (limit is disabled).
_Current limitation: `PROF_EXECUTION_TRACE` or `PROF_MEMORY_TRACE`
FetchValue("PROF_GC_TRACE_GEN", new_config.GcGenerationBoundsTraceEnabled);
FetchValue("PROF_GC_TRACE_ALT", new_config.GcAllocTableTraceEnabled);
+ FetchValue("PROF_MEMORY_SAMPLING_ENABLE", new_config.MemorySamplingEnabled);
+ FetchValue("PROF_MEMORY_SAMPLING_RANDOMIZATION", new_config.MemorySamplingIntervalRandomization);
+ FetchValue("PROF_MEMORY_SAMPLING_INTERVAL", new_config.MemorySamplingInterval);
+
// Apply changes to the current configuration.
config = new_config;
}
, MemoryMaxSizeLimit(0)
, GcGenerationBoundsTraceEnabled(false)
, GcAllocTableTraceEnabled(false)
+ , MemorySamplingEnabled(false)
+ , MemorySamplingIntervalRandomization(true)
+ , MemorySamplingInterval(512)
{}
void ProfilerConfig::Validate()
warnings.push_back(
"GC allocations table tracing is memory tracing option");
}
+
+ if (MemorySamplingEnabled)
+ {
+ warnings.push_back("memory sampling is memory tracing option");
+ }
}
if (MemoryMaxSizeLimit && MemoryMinSizeLimit > MemoryMaxSizeLimit)
unsigned long MemoryMaxSizeLimit;
bool GcGenerationBoundsTraceEnabled;
bool GcAllocTableTraceEnabled;
+ bool MemorySamplingEnabled;
+ bool MemorySamplingIntervalRandomization;
+ unsigned long MemorySamplingInterval;
//
// Validation and verification.
#include <exception>
#include <stdexcept>
+#include <random>
+#include <cmath>
#include "profiler.h"
#include "classstorage.h"
#include "classinfo.h"
#include "objectinfo.h"
+// Uniform random number generator producing values of type T in [start, end).
+// Each instance owns its own engine, seeded once from std::random_device, so a
+// thread_local instance needs no external synchronization.
+template<class T>
+class UniformRandomGenerator
+{
+    std::random_device rd;
+    std::default_random_engine generator;
+    std::uniform_real_distribution<T> distribution;
+
+public:
+
+    UniformRandomGenerator(T start, T end)
+        : generator(rd()), distribution(start, end)
+    {}
+
+    // Returns the next value. Fix: return T rather than a hardcoded double,
+    // so instantiations with T != double do not silently convert the result.
+    T get()
+    {
+        return distribution(generator);
+    }
+};
+
+// distance to next sample in bytes (value <= 0 means that it's time to take a sample)
+static thread_local intptr_t g_distanceToNextSample = 0;
+// random number generator with values uniformly distributed on the [0.0,1.0) interval
+static thread_local UniformRandomGenerator<double> randgen(0.0, 1.0);
+
__forceinline HRESULT ObjectInfo::InitializeType(
const Profiler &profiler,
ClassStorage &storage,
HRESULT ObjectInfo::Initialize(
const Profiler &profiler,
- ClassStorage &storage) noexcept
+ ClassStorage &storage,
+ ClassID classId,
+ bool &skipByMemoryLimit,
+ bool *skipByMemorySampling) noexcept
{
HRESULT hrReturn = S_OK;
HRESULT hr;
- if (this->isInitialized)
- {
- return hrReturn;
- }
-
_ASSERTE(this->id != 0);
const ProfilerInfo &info = profiler.GetProfilerInfo();
- hr = this->InitializeType(profiler, storage, info);
+ hr = this->InitializeSize(profiler, info);
if (FAILED(hr) && SUCCEEDED(hrReturn))
{
hrReturn = hr;
}
- hr = this->InitializeSize(profiler, info);
- if (FAILED(hr) && SUCCEEDED(hrReturn))
+ // Skip objects outside the configured [MemoryMinSizeLimit, MemoryMaxSizeLimit] window.
+ if (this->size < profiler.GetConfig().MemoryMinSizeLimit ||
+ (profiler.GetConfig().MemoryMaxSizeLimit && this->size > profiler.GetConfig().MemoryMaxSizeLimit))
{
- hrReturn = hr;
+ skipByMemoryLimit = true;
+ return hrReturn;
}
- this->isInitialized = true;
- return hrReturn;
-}
+ // Sampling path: callers that pass skipByMemorySampling == nullptr
+ // bypass sampling and always initialize the object.
+ if (skipByMemorySampling && profiler.GetConfig().MemorySamplingEnabled)
+ {
+ // See dotnet/heaptrack PR #121 for more comments and code logic explanation.
-HRESULT ObjectInfo::Initialize(
- const Profiler &profiler,
- ClassStorage &storage,
- ClassID classId) noexcept
-{
- HRESULT hrReturn = S_OK;
- HRESULT hr;
+ // Consume this allocation's bytes from the per-thread distance counter.
+ g_distanceToNextSample -= (intptr_t) this->size;
- if (this->isInitialized)
- {
- return hrReturn;
- }
+ // Sampling point not reached yet: this allocation is not sampled.
+ if (g_distanceToNextSample > 0)
+ {
+ *skipByMemorySampling = true;
+ return hrReturn;
+ }
- _ASSERTE(this->id != 0);
- const ProfilerInfo &info = profiler.GetProfilerInfo();
+ // Whole sampling intervals covered so far by this allocation.
+ // NOTE(review): divides and takes modulo by MemorySamplingInterval —
+ // assumes it is > 0 (default 512); confirm Validate() rejects a
+ // configured value of 0, otherwise this divides by zero.
+ intptr_t samples = - g_distanceToNextSample / profiler.GetConfig().MemorySamplingInterval;
+ g_distanceToNextSample %= profiler.GetConfig().MemorySamplingInterval;
- hr = this->InitializeSize(profiler, info);
- if (FAILED(hr) && SUCCEEDED(hrReturn))
- {
- hrReturn = hr;
- }
+ // Advance the next-sample distance past this allocation, counting one
+ // sample per interval. With randomization the interval is drawn from an
+ // exponential distribution (-ln(U) * mean), i.e. a Poisson point process.
+ do
+ {
+ intptr_t nextSamplingInterval = profiler.GetConfig().MemorySamplingInterval;
+ if (profiler.GetConfig().MemorySamplingIntervalRandomization)
+ {
+ double probability = randgen.get();
+ // NOTE(review): uniform_real_distribution can return exactly 0.0,
+ // making -log(0) infinite and the uintptr_t conversion undefined
+ // behavior — consider guarding against probability == 0.
+ nextSamplingInterval = uintptr_t(- std::log(probability) * profiler.GetConfig().MemorySamplingInterval);
+ }
+
+ g_distanceToNextSample += nextSamplingInterval;
+ ++samples;
+ }
+ while (g_distanceToNextSample <= 0);
- if (this->size < profiler.GetConfig().MemoryMinSizeLimit ||
- (profiler.GetConfig().MemoryMaxSizeLimit && this->size > profiler.GetConfig().MemoryMaxSizeLimit))
- {
- return hrReturn;
+ // Report the sampled (scaled) size instead of the real object size, so
+ // aggregate statistics remain an unbiased estimate of total allocations.
+ this->size = samples * profiler.GetConfig().MemorySamplingInterval;
}
hr = this->InitializeTypeFromClassId(profiler, storage, classId);
hrReturn = hr;
}
- this->isInitialized = true;
return hrReturn;
}
ObjectID id;
SIZE_T size;
ClassInfo* type;
- bool isInitialized;
private:
HRESULT InitializeType(
const Profiler &profiler,
const ProfilerInfo &info) noexcept;
- HRESULT Initialize(
- const Profiler &profiler,
- ClassStorage &storage) noexcept;
-
public:
HRESULT Initialize(
const Profiler &profiler,
ClassStorage &storage,
- ClassID classId) noexcept;
+ ClassID classId,
+ bool &skipByMemoryLimit,
+ bool *skipByMemorySampling) noexcept;
};
#endif // _OBJECT_INFO_H_
ULONG cObjectIDRangeLength[])
{
LOG().Trace() << "MovedReferences()";
- return S_OK;
+
+ HRESULT hr;
+ hr = m_memoryTrace.MovedReferences(cMovedObjectIDRanges, oldObjectIDRangeStart,
+ newObjectIDRangeStart, cObjectIDRangeLength);
+
+ return hr;
}
HRESULT STDMETHODCALLTYPE Profiler::ObjectAllocated(
HRESULT hr = S_OK;
try
{
+ std::lock_guard<std::mutex> lock(m_sampledObjectsMutex);
+ std::unordered_map<ObjectID, SIZE_T> survivedSampledObjects;
+
auto storage_lock = m_profiler.GetCommonTrace().GetClassStorage();
for (const auto &objectWithClass : m_survivedObjects)
{
ObjectID objectId = objectWithClass.first;
+
+ SIZE_T sampledSize = 0;
+ if (m_profiler.GetConfig().MemorySamplingEnabled)
+ {
+ auto find = m_sampledObjects.find(objectId);
+ if (find == m_sampledObjects.end())
+ {
+ continue;
+ }
+ else
+ {
+ sampledSize = find->second;
+ survivedSampledObjects.emplace(std::move(*find));
+ }
+ }
+
ClassID classId = objectWithClass.second;
ObjectInfo objInfo = {};
objInfo.id = objectId;
+ bool skipByMemoryLimit = false;
// NOTE: it is OK to use classId == 0 here.
- hr = objInfo.Initialize(m_profiler, *storage_lock, classId);
+ hr = objInfo.Initialize(m_profiler, *storage_lock, classId, skipByMemoryLimit, nullptr);
if (FAILED(hr))
{
throw std::runtime_error(
);
}
- if (objInfo.size < m_profiler.GetConfig().MemoryMinSizeLimit ||
- (m_profiler.GetConfig().MemoryMaxSizeLimit && objInfo.size > m_profiler.GetConfig().MemoryMaxSizeLimit))
+ if (skipByMemoryLimit)
{
+ assert(!m_profiler.GetConfig().MemorySamplingEnabled && !sampledSize);
continue;
}
+ if (sampledSize)
+ {
+ objInfo.size = sampledSize;
+ }
+
_ASSERTE(objInfo.type != nullptr);
if (objInfo.type->needPrintLoadFinished)
{
allocInfo.allocCount++;
allocInfo.memSize += objInfo.size;
}
+
+ if (m_profiler.GetConfig().MemorySamplingEnabled)
+ {
+ m_sampledObjects = std::move(survivedSampledObjects);
+ }
}
catch (const std::exception &e)
{
{
ObjectInfo objInfo = {};
objInfo.id = objectId;
+ bool skipByMemoryLimit = false;
+ bool skipByMemorySampling = false;
{
auto storage_lock = m_profiler.GetCommonTrace().GetClassStorage();
- hr = objInfo.Initialize(m_profiler, *storage_lock, classId);
+ hr = objInfo.Initialize(m_profiler, *storage_lock, classId, skipByMemoryLimit, &skipByMemorySampling);
}
if (FAILED(hr))
{
);
}
- if (objInfo.size < m_profiler.GetConfig().MemoryMinSizeLimit ||
- (m_profiler.GetConfig().MemoryMaxSizeLimit && objInfo.size > m_profiler.GetConfig().MemoryMaxSizeLimit))
+ if (skipByMemoryLimit || skipByMemorySampling)
{
return hr;
}
+ if (m_profiler.GetConfig().MemorySamplingEnabled)
+ {
+ std::lock_guard<std::mutex> lock(m_sampledObjectsMutex);
+ m_sampledObjects.emplace(objectId, objInfo.size);
+ }
+
_ASSERTE(objInfo.type != nullptr);
if (objInfo.type->needPrintLoadFinished)
{
return hr;
}
+// Remaps the IDs of sampled objects after a compacting GC moves them, so the
+// survived-objects pass can still find them under their new ObjectIDs.
+HRESULT MemoryTrace::MovedReferences(
+ ULONG cMovedObjectIDRanges,
+ ObjectID oldObjectIDRangeStart[],
+ ObjectID newObjectIDRangeStart[],
+ ULONG cObjectIDRangeLength[]) noexcept
+{
+ if (!m_profiler.GetConfig().MemorySamplingEnabled)
+ return S_OK;
+
+ std::lock_guard<std::mutex> lock(m_sampledObjectsMutex);
+
+ // Copy into an ordered map so each moved range can be located with a single
+ // lower_bound instead of scanning the whole unordered container.
+ std::map<ObjectID, SIZE_T> oldObjects(m_sampledObjects.begin(), m_sampledObjects.end());
+ m_sampledObjects.clear();
+
+ for (ULONG i = 0; i < cMovedObjectIDRanges; i++)
+ {
+ auto start = oldObjects.lower_bound(oldObjectIDRangeStart[i]);
+
+ for (auto it = start; it != oldObjects.end(); )
+ {
+ // https://github.com/dotnet/runtime/blob/aef327f3b418bf5dd4f25083aff160c9cdf4b159/src/coreclr/inc/corprof.idl#L1666-L1669
+ // IDs inside [old, old + length) are rebased onto the new range start.
+ if (oldObjectIDRangeStart[i] <= it->first && it->first < oldObjectIDRangeStart[i] + cObjectIDRangeLength[i])
+ {
+ m_sampledObjects.emplace(it->first - oldObjectIDRangeStart[i] + newObjectIDRangeStart[i], it->second);
+ it = oldObjects.erase(it);
+ continue;
+ }
+
+ // The map is ordered, so the first ID past the range ends this range's scan.
+ break;
+ }
+ }
+
+ // Objects not covered by any moved range keep their original IDs.
+ for (auto &object : oldObjects)
+ {
+ m_sampledObjects.emplace(std::move(object));
+ }
+ oldObjects.clear();
+
+ return S_OK;
+}
+
HRESULT MemoryTrace::GarbageCollectionStarted(
int cGenerations,
BOOL generationCollected[],
#include <vector>
#include <utility>
+#include <unordered_map>
#include <cor.h>
#include <corhdr.h>
ObjectID objectId,
ClassID classId) noexcept;
+ HRESULT MovedReferences(
+ ULONG cMovedObjectIDRanges,
+ ObjectID oldObjectIDRangeStart[],
+ ObjectID newObjectIDRangeStart[],
+ ULONG cObjectIDRangeLength[]) noexcept;
+
HRESULT GarbageCollectionStarted(
int cGenerations,
BOOL generationCollected[],
bool m_objectTrackingSuspended;
bool m_objectTrackingFailure;
std::vector<std::pair<ObjectID, ClassID>> m_survivedObjects;
+
+ std::mutex m_sampledObjectsMutex;
+ std::unordered_map<ObjectID, SIZE_T> m_sampledObjects;
};
#endif // _MEMORY_TRACE_H_
m_tracefmt.log("prf cfg", g_tls_ss).str("MemoryMinSizeLimit").config(config.MemoryMinSizeLimit).end();
m_tracefmt.log("prf cfg", g_tls_ss).str("MemoryMaxSizeLimit").config(config.MemoryMaxSizeLimit).end();
m_tracefmt.log("prf cfg", g_tls_ss).str("GcAllocTableTraceEnabled").config(config.GcAllocTableTraceEnabled).end();
+ m_tracefmt.log("prf cfg", g_tls_ss).str("MemorySamplingEnabled").config(config.MemorySamplingEnabled).end();
+ m_tracefmt.log("prf cfg", g_tls_ss).str("MemorySamplingIntervalRandomization").config(config.MemorySamplingIntervalRandomization).end();
+ m_tracefmt.log("prf cfg", g_tls_ss).str("MemorySamplingInterval").config(config.MemorySamplingInterval).end();
AddTLSDataToQueue();
}