0xBB, 0xAE, 0x0F, 0x02, 0x22, 0x28, 0x07, 0x1A);
MIDL_DEFINE_GUID(IID, IID_ICorProfilerCallback3, 0x4FD2ED52, 0x7731, 0x4b8d,
0x94, 0x69, 0x03, 0xD2, 0xCC, 0x30, 0x86, 0xC5);
+MIDL_DEFINE_GUID(IID, IID_ICorProfilerInfo, 0x28B5557D, 0x3F3F, 0x48b4,
+ 0x90, 0xB2, 0x5F, 0x9E, 0xEA, 0x2F, 0x6C, 0x48);
+MIDL_DEFINE_GUID(IID, IID_ICorProfilerInfo2, 0xCC0935CD, 0xA518, 0x487d,
+ 0xB0, 0xBB, 0xA9, 0x32, 0x14, 0xE6, 0x54, 0x78);
MIDL_DEFINE_GUID(IID, IID_ICorProfilerInfo3, 0xB555ED4F, 0x452A, 0x4E54, 0x8B,
0x39, 0xB5, 0x36, 0x0B, 0xAD, 0x32, 0xA0);
info->SetEventMask(
COR_PRF_MONITOR_ENTERLEAVE | COR_PRF_ENABLE_FUNCTION_ARGS |
COR_PRF_ENABLE_FUNCTION_RETVAL | COR_PRF_ENABLE_FRAME_INFO |
- COR_PRF_ENABLE_STACK_SNAPSHOT);
+ COR_PRF_ENABLE_STACK_SNAPSHOT | COR_PRF_ENABLE_OBJECT_ALLOCATED |
+ COR_PRF_MONITOR_OBJECT_ALLOCATED | COR_PRF_MONITOR_GC);
info->SetEnterLeaveFunctionHooks3WithInfo(OnFunctionEnter, OnFunctionLeave,
NULL);
info->Release();
}
HRESULT STDMETHODCALLTYPE
- Profiler::MovedReferences(ULONG cMovedObjectIDRanges,
- ObjectID oldObjectIDRangeStart[],
- ObjectID newObjectIDRangeStart[],
- ULONG cObjectIDRangeLength[]) {
- return S_OK;
-}
-
-HRESULT STDMETHODCALLTYPE
Profiler::ObjectAllocated(ObjectID objectId, ClassID classId) {
+
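+ // Grab ICorProfilerInfo from the stored IUnknown; here it is only needed for GetObjectSize().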
+ ICorProfilerInfo *info;
+ HRESULT hr = g_pICorProfilerInfoUnknown->QueryInterface(IID_ICorProfilerInfo,
+ (void **)&info);
+ if (hr != S_OK) {
+ assert(false && "Failed to retrieve ICorProfilerInfo");
+ }
+
+ ULONG objectSize;
+
+ HRESULT hr2 = info->GetObjectSize(objectId, &objectSize);
+
+ if (hr2 != S_OK) {
+ assert(false && "Failed to get object size");
+ }
+
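+ // Forward the managed allocation to heaptrack's writer.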
+ heaptrack_objectallocate((void *) objectId, objectSize);
+
+ info->Release();
+
return S_OK;
}
Profiler::GarbageCollectionStarted(int cGenerations,
BOOL generationCollected[],
COR_PRF_GC_REASON reason) {
+ ICorProfilerInfo2 *info;
+ HRESULT hr = g_pICorProfilerInfoUnknown->QueryInterface(IID_ICorProfilerInfo2,
+ (void **)&info);
+ if (hr != S_OK) {
+ assert(false && "failed to retreive icorprofilerinfo2");
+ }
+
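+ // Open a GC chunk in the trace; every survived range reported until
+ // heaptrack_finishgc() belongs to this collection.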
+ heaptrack_startgc();
+
+ ULONG numRanges;
+
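+ // First call queries only the number of ranges; the second call below fills the array.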
+ HRESULT hr2 = info->GetGenerationBounds(0, &numRanges, NULL);
+ if (hr2 != S_OK) {
+ assert(false && "Failed to retreive number of ranges for a generation");
+ }
+
+ COR_PRF_GC_GENERATION_RANGE *ranges = new COR_PRF_GC_GENERATION_RANGE[numRanges];
+
+ ULONG numRanges2;
+ hr2 = info->GetGenerationBounds(numRanges, &numRanges2, ranges);
+
+ if (hr2 != S_OK) {
+ assert(false && "Failed to retreive ranges for a generation");
+ }
+
+ assert(numRanges == numRanges2);
+
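+ // Generations that were not collected survive as a whole, so report their
+ // full ranges here; collected generations are covered by the
+ // SurvivingReferences/MovedReferences callbacks instead.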
+ for (ULONG i = 0; i < numRanges; i++) {
+ if (generationCollected[ranges[i].generation])
+ continue;
+
+ heaptrack_gcmarksurvived((void *) ranges[i].rangeStart, (unsigned long) ranges[i].rangeLength, NULL);
+ }
+
+ delete[] ranges;
+ info->Release();
+
return S_OK;
}
Profiler::SurvivingReferences(ULONG cSurvivingObjectIDRanges,
ObjectID objectIDRangeStart[],
ULONG cObjectIDRangeLength[]) {
+
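+ // These ranges survived in place; rangeMovedTo is NULL because nothing moved.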
+ for (ULONG i = 0; i < cSurvivingObjectIDRanges; i++) {
+ heaptrack_gcmarksurvived((void *) objectIDRangeStart[i], (unsigned long) cObjectIDRangeLength[i], NULL);
+ }
+
+ return S_OK;
+}
+
+HRESULT STDMETHODCALLTYPE
+ Profiler::MovedReferences(ULONG cMovedObjectIDRanges,
+ ObjectID oldObjectIDRangeStart[],
+ ObjectID newObjectIDRangeStart[],
+ ULONG cObjectIDRangeLength[]) {
+
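+ // Report the old range together with its new location (NULL if the range stayed put).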
+ for (ULONG i = 0; i < cMovedObjectIDRanges; i++) {
+ void *rangeMovedTo = (newObjectIDRangeStart[i] != oldObjectIDRangeStart[i]) ? (void *) newObjectIDRangeStart[i] : NULL;
+
+ heaptrack_gcmarksurvived((void *) oldObjectIDRangeStart[i], (unsigned long) cObjectIDRangeLength[i], rangeMovedTo);
+ }
+
return S_OK;
}
HRESULT STDMETHODCALLTYPE Profiler::GarbageCollectionFinished(void) {
+ heaptrack_finishgc();
+
return S_OK;
}
StackEntry *m_next;
};
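+// GC hooks implemented by libheaptrack and called from the CoreCLR profiler callbacks.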
+extern "C" void heaptrack_objectallocate(void *objectId, unsigned long objectSize);
+extern "C" void heaptrack_startgc();
+extern "C" void heaptrack_gcmarksurvived(void *rangeStart, unsigned long rangeLength, void *rangeMovedTo);
+extern "C" void heaptrack_finishgc();
+
#endif // STACKENTRY_H
continue;
}
info = allocationInfos[allocationIndex.index];
+ assert(!info.isManaged);
+
lastAllocationPtr = allocationIndex.index;
} else { // backwards compatibility
uint64_t ptr = 0;
cerr << "failed to parse line: " << reader.line() << endl;
continue;
}
- if (allocationInfoSet.add(info.size, info.traceIndex, &allocationIndex)) {
+ if (allocationInfoSet.add(info.size, info.traceIndex, &allocationIndex, 0)) {
allocationInfos.push_back(info);
}
pointers.addPointer(ptr, allocationIndex);
lastAllocationPtr = 0;
const auto& info = allocationInfos[allocationInfoIndex.index];
+ assert(!info.isManaged);
+
totalCost.malloc.leaked -= info.size;
if (temporary) {
++totalCost.malloc.temporary;
continue;
}
AllocationInfo info;
- if (!(reader >> info.size) || !(reader >> info.traceIndex)) {
+ if (!(reader >> info.size) || !(reader >> info.traceIndex) || !(reader >> info.isManaged)) {
cerr << "failed to parse line: " << reader.line() << endl;
continue;
}
{
uint64_t size = 0;
TraceIndex traceIndex;
+ int isManaged = 0;
bool operator==(const AllocationInfo& rhs) const
{
- return rhs.traceIndex == traceIndex && rhs.size == size;
+ return rhs.traceIndex == traceIndex && rhs.size == size && rhs.isManaged == isManaged;
}
};
PointerMap ptrToIndex;
uint64_t lastPtr = 0;
AllocationInfoSet allocationInfos;
+ std::set<uint64_t> managedPointersSet;
+ std::set<uint64_t> gcManagedPointersSet;
+
+ bool isGCInProcess = false;
uint64_t allocations = 0;
uint64_t leakedAllocations = 0;
+ uint64_t managedAllocations = 0;
+ uint64_t leakedManagedAllocations = 0;
uint64_t temporaryAllocations = 0;
while (reader.getLine(cin)) {
const auto ipId = data.addIp(instructionPointer, is_managed);
// trace point, map current output index to parent index
fprintf(stdout, "t %zx %zx\n", ipId, parentIndex);
+ } else if (reader.mode() == '^') {
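+ // '^' = managed object allocation: trace index, size, object pointer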
+ if (isGCInProcess) {
+ cerr << "wrong trace format (allocation during GC - according to Book of the Runtime, concurrent GC is turned off in case profiling is enabled)" << endl;
+ continue;
+ }
+
+ uint64_t size = 0;
+ TraceIndex traceId;
+ uint64_t ptr = 0;
+ if (!(reader >> traceId.index) || !(reader >> size) || !(reader >> ptr)) {
+ cerr << "failed to parse line: " << reader.line() << endl;
+ continue;
+ }
+
+ ++managedAllocations;
+ ++leakedManagedAllocations;
+
+ AllocationIndex index;
+ if (allocationInfos.add(size, traceId, &index, 1)) {
+ fprintf(stdout, "a %" PRIx64 " %x 1\n", size, traceId.index);
+ }
+ ptrToIndex.addPointer(ptr, index);
+ managedPointersSet.insert(ptr);
+ lastPtr = ptr;
+ fprintf(stdout, "^ %x\n", index.index);
+ } else if (reader.mode() == 'G') {
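+ // 'G' = GC chunk boundary: "G 1" opens a chunk, "G 0" closes it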
+ int isStart;
+
+ if (!(reader >> isStart) || !(isStart == 1 || isStart == 0)) {
+ cerr << "failed to parse line: " << reader.line() << endl;
+ continue;
+ }
+
+ if (isStart)
+ {
+ // GC chunk start
+ if (isGCInProcess) {
+ cerr << "wrong trace format (nested GC chunks)" << endl;
+ continue;
+ }
+
+ isGCInProcess = true;
+
+ assert(gcManagedPointersSet.empty());
+ }
+ else
+ {
+ assert(!isStart);
+
+ // GC chunk end
+ if (!isGCInProcess) {
+ cerr << "wrong trace format (nested GC chunks?)" << endl;
+ continue;
+ }
+
+ isGCInProcess = false;
+
+ for (auto managedPtr : managedPointersSet) {
+ auto allocation = ptrToIndex.takePointer(managedPtr);
+
+ if (!allocation.second) {
+ cerr << "wrong trace format (unknown managed pointer) 0x" << std::hex << managedPtr << std::dec << endl;
+ continue;
+ }
+
+ fprintf(stdout, "~ %x\n", allocation.first.index);
+
+ --leakedManagedAllocations;
+ }
+
+ managedPointersSet = std::move(gcManagedPointersSet);
+
+ // a moved-from set is only guaranteed to be valid, not empty; clear it
+ // explicitly so the next GC chunk starts from an empty set
+ gcManagedPointersSet.clear();
+ }
+ } else if (reader.mode() == 'L') {
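+ // 'L' = survived range: length, start address, new address (0 if the range did not move)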
+ if (!isGCInProcess) {
+ cerr << "wrong trace format (range survival event when no GC is running)" << endl;
+ continue;
+ }
+
+ uint64_t rangeLength, rangeStart, rangeMovedTo;
+
+ if (!(reader >> rangeLength) || !(reader >> rangeStart) || !(reader >> rangeMovedTo)) {
+ cerr << "failed to parse line: " << reader.line() << endl;
+ continue;
+ }
+
+ uint64_t targetRangeStart = (rangeMovedTo != 0 ? rangeMovedTo : rangeStart);
+ uint64_t targetRangeEnd = targetRangeStart + rangeLength;
+
+ {
+ auto it = gcManagedPointersSet.lower_bound(targetRangeStart);
+
+ if (it != gcManagedPointersSet.end() && *it < targetRangeEnd) {
+ cerr << "wrong trace format (survival ranges are intersecting during a GC session)" << endl;
+ continue;
+ }
+ }
+
+ auto itFromBegin = managedPointersSet.lower_bound(rangeStart);
+ auto itFromEnd = managedPointersSet.lower_bound(rangeStart + rangeLength);
+
+ if (targetRangeStart == rangeStart) {
+ gcManagedPointersSet.insert(itFromBegin, itFromEnd);
+ managedPointersSet.erase(itFromBegin, itFromEnd);
+ } else {
+ // copy the sources and remove them from managedPointersSet up front:
+ // erasing `target` inside the loop below could otherwise invalidate itFromEnd
+ std::vector<uint64_t> sources(itFromBegin, itFromEnd);
+ managedPointersSet.erase(itFromBegin, itFromEnd);
+
+ for (uint64_t source : sources) {
+ uint64_t target = targetRangeStart + (source - rangeStart);
+
+ assert(gcManagedPointersSet.find(target) == gcManagedPointersSet.end());
+
+ // an object that already lived at the target address did not survive
+ auto allocationAtTarget = ptrToIndex.takePointer(target);
+
+ if (allocationAtTarget.second) {
+ assert(managedPointersSet.find(target) != managedPointersSet.end());
+ assert(target < rangeStart || target >= rangeStart + rangeLength);
+
+ managedPointersSet.erase(target);
+
+ fprintf(stdout, "~ %x\n", allocationAtTarget.first.index);
+
+ --leakedManagedAllocations;
+ }
+
+ gcManagedPointersSet.insert(target);
+
+ // rebase the pointer map entry from the old address to the new one
+ auto allocation = ptrToIndex.takePointer(source);
+
+ assert(allocation.second);
+
+ ptrToIndex.addPointer(target, allocation.first);
+ }
+ }
+
+ // sanity check: the two pointer sets must stay disjoint
+ for (auto ptr : managedPointersSet) {
+ if (gcManagedPointersSet.find(ptr) != gcManagedPointersSet.end()) {
+ assert(false && "pointer is tracked in both managed pointer sets");
+ }
+ }
} else if (reader.mode() == '+') {
++allocations;
++leakedAllocations;
}
AllocationIndex index;
- if (allocationInfos.add(size, traceId, &index)) {
- fprintf(stdout, "a %" PRIx64 " %x\n", size, traceId.index);
+ if (allocationInfos.add(size, traceId, &index, 0)) {
+ fprintf(stdout, "a %" PRIx64 " %x 0\n", size, traceId.index);
}
ptrToIndex.addPointer(ptr, index);
lastPtr = ptr;
}
if (managed_name_ids.find(methodName) == managed_name_ids.end()) {
- managed_name_ids.insert(std::make_pair(methodName, 1));
+ managed_name_ids.insert(std::make_pair(methodName, 1));
} else {
int id = ++managed_name_ids[methodName];
fprintf(stderr, "heaptrack stats:\n"
"\tallocations: \t%" PRIu64 "\n"
"\tleaked allocations: \t%" PRIu64 "\n"
+ "\tmanaged allocations: \t%" PRIu64 "\n"
+ "\tmanaged leaked allocations: \t%" PRIu64 "\n"
"\ttemporary allocations:\t%" PRIu64 "\n",
- allocations, leakedAllocations, temporaryAllocations);
+ allocations, leakedAllocations, managedAllocations, leakedManagedAllocations, temporaryAllocations);
return 0;
}
if (fprintf(s_data->out, "/ %zx %" PRIxPTR "\n", alignedLength, reinterpret_cast<uintptr_t>(ptr)) < 0) {
writeError();
return;
+ }
+ }
+
+ void handleObjectAllocation(void *objectId, unsigned long objectSize, const Trace &trace)
+ {
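+ // Emits a '^' record: trace index, object size, object pointer.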
+ if (!s_data || !s_data->out) {
+ return;
+ }
+
+ updateModuleCache();
+ const auto index = s_data->traceTree.index(trace, s_data->out);
+
+ if (fprintf(s_data->out,
+ "^ %x %lx %" PRIxPTR "\n",
+ index, objectSize, reinterpret_cast<uintptr_t>(objectId)) < 0) {
+ writeError();
+ return;
+ }
+ }
+
+ void handleStartGC()
+ {
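+ // Emits "G 1" to open a GC chunk in the trace.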
+ if (!s_data || !s_data->out) {
+ return;
+ }
+
+ if (fprintf(s_data->out, "G 1\n") < 0) {
+ writeError();
+ return;
+ }
+ }
+
+ void handleGCSurvivedRange(void *rangeStart, unsigned long rangeLength, void *rangeMovedTo)
+ {
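+ // Emits an 'L' record: range length, start address, new address (0 if not moved).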
+ if (!s_data || !s_data->out) {
+ return;
}
+
+ if (fprintf(s_data->out,
+ "L %lx %" PRIxPTR " %" PRIxPTR "\n",
+ rangeLength, reinterpret_cast<uintptr_t>(rangeStart), reinterpret_cast<uintptr_t>(rangeMovedTo)) < 0) {
+ writeError();
+ return;
+ }
+ }
+
+ void handleFinishGC()
+ {
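+ // Emits "G 0" to close the current GC chunk.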
+ if (!s_data || !s_data->out) {
+ return;
+ }
+
+ if (fprintf(s_data->out, "G 0\n") < 0) {
+ writeError();
+ return;
+ }
}
private:
}
}
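+
+// C hooks called from the CoreCLR profiler (declared in stackentry.h); they
+// forward managed-heap events into heaptrack's trace.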
+void heaptrack_objectallocate(void *objectId, unsigned long objectSize) {
+ assert(!RecursionGuard::isActive);
+ RecursionGuard guard;
+
+ debugLog<VeryVerboseOutput>("handleObjectAllocation: %p %lu", objectId, objectSize);
+
+ Trace trace;
+ trace.fill(2);
+
+ HeapTrack heaptrack(guard);
+ heaptrack.handleObjectAllocation(objectId, objectSize, trace);
+}
+
+void heaptrack_startgc() {
+ assert(!RecursionGuard::isActive);
+ RecursionGuard guard;
+
+ debugLog<VerboseOutput>("handleStartGC");
+
+ HeapTrack heaptrack(guard);
+ heaptrack.handleStartGC();
+}
+
+void heaptrack_gcmarksurvived(void *rangeStart, unsigned long rangeLength, void *rangeMovedTo) {
+ assert(!RecursionGuard::isActive);
+ RecursionGuard guard;
+
+ debugLog<VerboseOutput>("handleGCSurvivedRange: %p %lu -> %p", rangeStart, rangeLength, rangeMovedTo);
+
+ HeapTrack heaptrack(guard);
+ heaptrack.handleGCSurvivedRange(rangeStart, rangeLength, rangeMovedTo);
+}
+
+void heaptrack_finishgc() {
+ assert(!RecursionGuard::isActive);
+ RecursionGuard guard;
+
+ debugLog<VerboseOutput>("handleFinishGC");
+
+ HeapTrack heaptrack(guard);
+ heaptrack.handleFinishGC();
+}
+
void heaptrack_invalidate_module_cache()
{
RecursionGuard guard;
uint64_t size;
TraceIndex traceIndex;
AllocationIndex allocationIndex;
+ int isManaged;
bool operator==(const IndexedAllocationInfo& rhs) const
{
- return rhs.traceIndex == traceIndex && rhs.size == size;
+ return rhs.traceIndex == traceIndex && rhs.size == size && rhs.isManaged == isManaged;
// allocationInfoIndex not compared to allow to look it up
}
};
size_t seed = 0;
boost::hash_combine(seed, info.size);
boost::hash_combine(seed, info.traceIndex.index);
+ boost::hash_combine(seed, info.isManaged);
// allocationInfoIndex not hashed to allow to look it up
return seed;
}
set.reserve(625000);
}
- bool add(uint64_t size, TraceIndex traceIndex, AllocationIndex* allocationIndex)
+ bool add(uint64_t size, TraceIndex traceIndex, AllocationIndex* allocationIndex, int isManaged)
{
allocationIndex->index = set.size();
- IndexedAllocationInfo info = {size, traceIndex, *allocationIndex};
+ IndexedAllocationInfo info = {size, traceIndex, *allocationIndex, isManaged};
auto it = set.find(info);
if (it != set.end()) {
*allocationIndex = it->allocationIndex;