<MicrosoftBclAsyncInterfacesVersion>1.1.0</MicrosoftBclAsyncInterfacesVersion>
<MicrosoftDiagnosticsRuntimeVersion>2.3.405304</MicrosoftDiagnosticsRuntimeVersion>
<MicrosoftDiaSymReaderNativePackageVersion>16.9.0-beta1.21055.5</MicrosoftDiaSymReaderNativePackageVersion>
- <MicrosoftDiagnosticsTracingTraceEventVersion>2.0.64</MicrosoftDiagnosticsTracingTraceEventVersion>
+ <MicrosoftDiagnosticsTracingTraceEventVersion>2.0.76</MicrosoftDiagnosticsTracingTraceEventVersion>
<!-- Use pinned version to avoid picking up latest (which doesn't support netcoreapp3.1) during source-build -->
<MicrosoftExtensionsLoggingPinnedVersion>2.1.1</MicrosoftExtensionsLoggingPinnedVersion>
<!-- dotnet-dsrouter needs a net6.0 version of logging -->
}
};
+ source.Clr.GCGenAwareStart += delegate (GenAwareBeginTraceData data)
+ {
+ m_seenStart = true;
+ m_ignoreEvents = false;
+ };
+
source.Clr.GCStart += delegate (GCStartTraceData data)
{
// If this GC is not part of a heap dump, ignore it.
}
};
-
-
source.Clr.GCStop += delegate (GCEndTraceData data)
{
if (m_ignoreEvents || data.ProcessID != m_processId)
}
};
+ source.Clr.GCGenAwareEnd += delegate (GenAwareEndTraceData data)
+ {
+ m_ignoreEvents = true;
+ if (m_nodeBlocks.Count == 0 && m_typeBlocks.Count == 0 && m_edgeBlocks.Count == 0)
+ {
+ m_log.WriteLine("Found no node events, looking for another GC");
+ m_seenStart = false;
+ return;
+ }
+ };
+
source.Clr.TypeBulkType += delegate (GCBulkTypeTraceData data)
{
// Don't check m_ignoreEvents here, as BulkType events can be emitted by other events...such as the GC allocation event.
case 3:
segment.Gen3End = end;
break;
+ case 4:
+ segment.Gen4End = end;
+ break;
default:
throw new Exception("Invalid generation in GCGenerationRangeTraceData");
}
/// </summary>
internal unsafe void ConvertHeapDataToGraph()
{
- int maxNodeCount = 10_000_000;
-
if (m_converted)
{
return;
Debug.Assert(!m_graph.IsDefined(nodeIdx));
m_graph.SetNode(nodeIdx, typeIdx, objSize, m_children);
-
- if (m_graph.NodeCount >= maxNodeCount)
- {
- doCompletionCheck = false;
- var userMessage = string.Format("Exceeded max node count {0}", maxNodeCount);
- m_log.WriteLine("[WARNING: ]", userMessage);
- break;
- }
}
if (doCompletionCheck && m_curEdgeBlock != null && m_curEdgeBlock.Count != m_curEdgeIdx)
}
}
+ if (obj < m_lastSegment.Gen4End)
+ {
+ return 4;
+ }
+
if (obj < m_lastSegment.Gen3End)
{
return 3;
#endregion
}
-public class GCHeapDumpSegment : IFastSerializable
+public class GCHeapDumpSegment : IFastSerializable, IFastSerializableVersion
{
public Address Start { get; internal set; }
public Address End { get; internal set; }
public Address Gen1End { get; internal set; }
public Address Gen2End { get; internal set; }
public Address Gen3End { get; internal set; }
+ public Address Gen4End { get; internal set; }
+
+ public int Version => 1;
+
+ public int MinimumVersionCanRead => 0;
+
+ public int MinimumReaderVersion => 1;
#region private
void IFastSerializable.ToStream(Serializer serializer)
serializer.Write((long)Gen1End);
serializer.Write((long)Gen2End);
serializer.Write((long)Gen3End);
+ serializer.Write((long)Gen4End);
}
void IFastSerializable.FromStream(Deserializer deserializer)
Gen1End = (Address)deserializer.ReadInt64();
Gen2End = (Address)deserializer.ReadInt64();
Gen3End = (Address)deserializer.ReadInt64();
+ if (deserializer.VersionBeingRead >= 1)
+ {
+ Gen4End = (Address)deserializer.ReadInt64();
+ }
}
#endregion
-}
\ No newline at end of file
+}
public class GCHeapDump : IFastSerializable, IFastSerializableVersion
{
public GCHeapDump(string inputFileName) :
- this(new Deserializer(inputFileName))
+ this(new Deserializer(inputFileName, new SerializationConfiguration() { StreamLabelWidth = StreamLabelWidth.FourBytes }))
{ }
public GCHeapDump(Stream inputStream, string streamName) :
- this(new Deserializer(inputStream, streamName))
+ this(new Deserializer(inputStream, streamName, new SerializationConfiguration() { StreamLabelWidth = StreamLabelWidth.FourBytes }))
{ }
/// <summary>
private void Write(string outputFileName)
{
Debug.Assert(MemoryGraph != null);
- var serializer = new Serializer(outputFileName, this);
+ var serializer = new Serializer(new IOStreamStreamWriter(outputFileName, config: new SerializationConfiguration() { StreamLabelWidth = StreamLabelWidth.FourBytes }), this);
serializer.Close();
}
///
/// TODO I can eliminate the need for AllowReading.
/// </summary>
- public Graph(int expectedNodeCount)
+ /// <remarks>If the isVeryLargeGraph argument is true, then StreamLabels will be serialized as longs
+ /// to accommodate the extra size of the graph's stream representation.</remarks>
+ public Graph(int expectedNodeCount, bool isVeryLargeGraph = false)
{
+ m_isVeryLargeGraph = isVeryLargeGraph;
m_expectedNodeCount = expectedNodeCount;
m_types = new GrowableArray<TypeInfo>(Math.Max(expectedNodeCount / 100, 2000));
m_nodes = new SegmentedList<StreamLabel>(SegmentSize, m_expectedNodeCount);
RootIndex = NodeIndex.Invalid;
if (m_writer == null)
{
- m_writer = new SegmentedMemoryStreamWriter(m_expectedNodeCount * 8);
+ m_writer = new SegmentedMemoryStreamWriter(m_expectedNodeCount * 8,
+ m_isVeryLargeGraph ? new SerializationConfiguration() { StreamLabelWidth = StreamLabelWidth.EightBytes } : null);
}
m_totalSize = 0;
}
// Write out the Nodes
- serializer.Write(m_nodes.Count);
+ if (m_isVeryLargeGraph)
+ {
+ serializer.Write(m_nodes.Count);
+ }
+ else
+ {
+ serializer.Write((int)m_nodes.Count);
+ }
+
for (int i = 0; i < m_nodes.Count; i++)
{
serializer.Write((int)m_nodes[i]);
// You can place tagged values in here always adding right before the WriteTaggedEnd
// for any new fields added after version 1
- serializer.WriteTaggedEnd(); // This insures tagged things don't read junk after the region.
+ serializer.WriteTaggedEnd(); // This ensures tagged things don't read junk after the region.
});
}
}
}
// Read in the Nodes
- int nodeCount = deserializer.ReadInt();
+ long nodeCount = m_isVeryLargeGraph ? deserializer.ReadInt64() : deserializer.ReadInt();
m_nodes = new SegmentedList<StreamLabel>(SegmentSize, nodeCount);
- for (int i = 0; i < nodeCount; i++)
+ for (long i = 0; i < nodeCount; i++)
{
m_nodes.Add((StreamLabel)(uint)deserializer.ReadInt());
}
// Read in the Blob stream.
// TODO be lazy about reading in the blobs.
int blobCount = deserializer.ReadInt();
- SegmentedMemoryStreamWriter writer = new SegmentedMemoryStreamWriter(blobCount);
+ SegmentedMemoryStreamWriter writer = new SegmentedMemoryStreamWriter(blobCount,
+ m_isVeryLargeGraph ? new SerializationConfiguration() { StreamLabelWidth = StreamLabelWidth.EightBytes } : null);
+
while (8 <= blobCount)
{
writer.Write(deserializer.ReadInt64());
}
}
- private int m_expectedNodeCount; // Initial guess at graph Size.
+ private long m_expectedNodeCount; // Initial guess at graph Size.
private long m_totalSize; // Total Size of all the nodes in the graph.
internal int m_totalRefs; // Total Number of references in the graph
internal GrowableArray<TypeInfo> m_types; // We expect only thousands of these
// There should not be any of these left as long as every node referenced
// by another node has a definition.
internal SegmentedMemoryStreamWriter m_writer; // Used only during construction to serialize the nodes.
+ protected bool m_isVeryLargeGraph;
#endregion
}
/// </summary>
public DateTime BuildTime; // From in the PE header
/// <summary>
- /// The name of hte PDB file assoicated with this module. Ma bye null if unknown
+ /// The name of the PDB file associated with this module. May be null if unknown
/// </summary>
public string PdbName;
/// <summary>
/// Given an arbitrary code:NodeIndex that identifies the node, Get a code:Node object.
///
/// This routine does not allocated but uses the space passed in by 'storage.
- /// 'storage' should be allocated with coode:AllocNodeStorage, and should be agressively reused.
+ /// 'storage' should be allocated with code:AllocNodeStorage, and should be aggressively reused.
/// </summary>
public RefNode GetNode(NodeIndex nodeIndex, RefNode storage)
{
/// <summary>
/// A helper for AddOrphansToQueue, so we only add orphans that are not reachable from other orphans.
///
- /// Mark all decendents (but not nodeIndex itself) as being visited. Any arcs that form
- /// cycles are ignored, so nodeIndex is guarenteed to NOT be marked.
+ /// Mark all descendants (but not nodeIndex itself) as being visited. Any arcs that form
+ /// cycles are ignored, so nodeIndex is guaranteed to NOT be marked.
/// </summary>
private void MarkDecendentsIgnoringCycles(NodeIndex nodeIndex, int recursionCount)
{
stats.TotalMetric += node.Size;
}
- // Also insure that if there are a large number of types, that we sample them at least some.
+ // Also ensure that if there are a large number of types, that we sample them at least some.
if (stats.SampleCount == 0 && !mustAdd && (m_numDistictTypesWithSamples + .5F) * m_filteringRatio <= m_numDistictTypes)
{
mustAdd = true;
/// <summary>
/// This value goes in the m_newIndex[]. If we accept the node into the sampled graph, we put the node
- /// index in the NET graph in m_newIndex. If we reject the node we use the special RegjectedNode value
+ /// index in the NET graph in m_newIndex. If we reject the node we use the special RejectedNode value
/// below
/// </summary>
private const NodeIndex RejectedNode = (NodeIndex)(-2);
{
public class MemoryGraph : Graph, IFastSerializable
{
- public MemoryGraph(int expectedSize)
- : base(expectedSize)
+ public MemoryGraph(int expectedSize, bool isVeryLargeGraph = false)
+ : base(expectedSize, isVeryLargeGraph)
+ {
+ // If we have too many addresses we will reach the Dictionary's internal array's size limit and throw.
+ // Therefore use a new implementation of it that is similar in performance but that can handle the extra load.
+ if (isVeryLargeGraph)
+ {
+ m_addressToNodeIndex = new SegmentedDictionary<Address, NodeIndex>(expectedSize);
+ }
+ else
{
m_addressToNodeIndex = new Dictionary<Address, NodeIndex>(expectedSize);
+ }
+
m_nodeAddresses = new SegmentedList<Address>(SegmentSize, expectedSize);
}
/// THis table maps the ID that CLRProfiler uses (an address), to the NodeIndex we have assigned to it.
/// It is only needed while the file is being read in.
/// </summary>
- protected Dictionary<Address, NodeIndex> m_addressToNodeIndex; // This field is only used during construction
+ protected IDictionary<Address, NodeIndex> m_addressToNodeIndex; // This field is only used during construction
#endregion
#region private
{
base.ToStream(serializer);
// Write out the Memory addresses of each object
+ if (m_isVeryLargeGraph)
+ {
serializer.Write(m_nodeAddresses.Count);
+ }
+ else
+ {
+ serializer.Write((int)m_nodeAddresses.Count);
+ }
+
for (int i = 0; i < m_nodeAddresses.Count; i++)
{
serializer.Write((long)m_nodeAddresses[i]);
{
base.FromStream(deserializer);
// Read in the Memory addresses of each object
- int addressCount = deserializer.ReadInt();
+ long addressCount = m_isVeryLargeGraph ? deserializer.ReadInt64() : deserializer.ReadInt();
m_nodeAddresses = new SegmentedList<Address>(SegmentSize, addressCount);
- for (int i = 0; i < addressCount; i++)
+ for (long i = 0; i < addressCount; i++)
{
m_nodeAddresses.Add((Address)deserializer.ReadInt64());
}