Refactor code to use signed data types.
author    Milian Wolff <mail@milianw.de>
          Mon, 30 May 2016 19:27:23 +0000 (21:27 +0200)
committer Milian Wolff <mail@milianw.de>
          Sun, 12 Jun 2016 20:34:01 +0000 (22:34 +0200)
This is required to handle diffing between files or between
individual timestamps. Since we use 64-bit data types, we
can still handle a lot of allocations this way (a bit more than
9 exabytes).
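
For context, the point of the switch is that the difference of two cost records can now go negative. A minimal sketch of that idea (the Cost struct and diff() helper below are illustrative only, not heaptrack code):

    #include <cstdint>

    struct Cost
    {
        int64_t allocations = 0;
        int64_t leaked = 0;
    };

    // With signed fields, "after minus before" is well defined even when the
    // second run allocates or leaks less than the first one.
    Cost diff(const Cost& after, const Cost& before)
    {
        Cost delta;
        delta.allocations = after.allocations - before.allocations;
        delta.leaked = after.leaked - before.leaked;
        return delta;
    }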

accumulatedtracedata.cpp
allocationdata.h
gui/callercalleemodel.cpp
gui/chartmodel.h
gui/costdelegate.cpp
gui/flamegraph.cpp
gui/histogrammodel.h
gui/mainwindow.cpp
gui/parser.cpp
gui/treemodel.cpp
heaptrack_print.cpp

diff --git a/accumulatedtracedata.cpp b/accumulatedtracedata.cpp
index 7e4e957..2d589be 100644
@@ -289,7 +289,7 @@ bool AccumulatedTraceData::read(istream& in)
 
             const auto& info = allocationInfos[allocationInfoIndex.index];
             auto& allocation = findAllocation(info.traceIndex);
-            if (!allocation.allocations || allocation.leaked < info.size) {
+            if (!allocation.allocations || static_cast<uint64_t>(allocation.leaked) < info.size) {
                 if (!fromAttached) {
                     cerr << "inconsistent allocation info, underflowed allocations of " << info.traceIndex << endl;
                 }
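
A note on the cast above: info.size stays unsigned (presumably uint64_t), so comparing it against the now-signed leaked value would be a mixed signed/unsigned comparison. The explicit cast keeps the previous unsigned semantics and silences -Wsign-compare. A standalone sketch with illustrative values shows what the two readings do with a negative leak count:

    #include <cstdint>
    #include <iostream>

    int main()
    {
        int64_t leaked = -1;   // signed cost, can drop below zero now
        uint64_t size = 100;   // allocation size, still unsigned

        // explicit cast, as in the hunk above: -1 wraps to a huge value,
        // so the comparison is false
        std::cout << (static_cast<uint64_t>(leaked) < size) << '\n'; // prints 0

        // signed comparison for contrast: -1 < 100 is true
        std::cout << (leaked < static_cast<int64_t>(size)) << '\n';  // prints 1
        return 0;
    }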
diff --git a/allocationdata.h b/allocationdata.h
index 34b3012..122bab9 100644
 struct AllocationData
 {
     // number of allocations
-    uint64_t allocations = 0;
+    int64_t allocations = 0;
     // number of temporary allocations
-    uint64_t temporary = 0;
+    int64_t temporary = 0;
     // bytes allocated in total
-    uint64_t allocated = 0;
+    int64_t allocated = 0;
     // amount of bytes leaked
-    uint64_t leaked = 0;
+    int64_t leaked = 0;
     // largest amount of bytes allocated
-    uint64_t peak = 0;
+    int64_t peak = 0;
 };
 
 inline AllocationData& operator+=(AllocationData& lhs, const AllocationData& rhs)
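
Given the signed fields, the operator+= above could later be complemented by a subtraction operator for diffing two data files. A sketch of what that could look like (not part of this commit):

    inline AllocationData& operator-=(AllocationData& lhs, const AllocationData& rhs)
    {
        lhs.allocations -= rhs.allocations;
        lhs.temporary -= rhs.temporary;
        lhs.allocated -= rhs.allocated;
        lhs.leaked -= rhs.leaked;
        // note: a diffed peak is only an approximation of the real peak delta
        lhs.peak -= rhs.peak;
        return lhs;
    }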
diff --git a/gui/callercalleemodel.cpp b/gui/callercalleemodel.cpp
index f40008e..fb82115 100644
@@ -151,45 +151,45 @@ QVariant CallerCalleeModel::data(const QModelIndex& index, int role) const
         switch (static_cast<Columns>(index.column())) {
         case SelfAllocatedColumn:
             if (role == SortRole || role == MaxCostRole) {
-                return static_cast<quint64>(row.selfCost.allocated);
+                return static_cast<qint64>(row.selfCost.allocated);
             } else {
                 return m_format.formatByteSize(row.selfCost.allocated);
             }
         case SelfAllocationsColumn:
-            return static_cast<quint64>(row.selfCost.allocations);
+            return static_cast<qint64>(row.selfCost.allocations);
         case SelfTemporaryColumn:
-            return static_cast<quint64>(row.selfCost.temporary);
+            return static_cast<qint64>(row.selfCost.temporary);
         case SelfPeakColumn:
             if (role == SortRole || role == MaxCostRole) {
-                return static_cast<quint64>(row.selfCost.peak);
+                return static_cast<qint64>(row.selfCost.peak);
             } else {
                 return m_format.formatByteSize(row.selfCost.peak);
             }
         case SelfLeakedColumn:
             if (role == SortRole || role == MaxCostRole) {
-                return static_cast<quint64>(row.selfCost.leaked);
+                return static_cast<qint64>(row.selfCost.leaked);
             } else {
                 return m_format.formatByteSize(row.selfCost.leaked);
             }
         case InclusiveAllocatedColumn:
             if (role == SortRole || role == MaxCostRole) {
-                return static_cast<quint64>(row.inclusiveCost.allocated);
+                return static_cast<qint64>(row.inclusiveCost.allocated);
             } else {
                 return m_format.formatByteSize(row.inclusiveCost.allocated);
             }
         case InclusiveAllocationsColumn:
-            return static_cast<quint64>(row.inclusiveCost.allocations);
+            return static_cast<qint64>(row.inclusiveCost.allocations);
         case InclusiveTemporaryColumn:
-            return static_cast<quint64>(row.inclusiveCost.temporary);
+            return static_cast<qint64>(row.inclusiveCost.temporary);
         case InclusivePeakColumn:
             if (role == SortRole || role == MaxCostRole) {
-                return static_cast<quint64>(row.inclusiveCost.peak);
+                return static_cast<qint64>(row.inclusiveCost.peak);
             } else {
                 return m_format.formatByteSize(row.inclusiveCost.peak);
             }
         case InclusiveLeakedColumn:
             if (role == SortRole || role == MaxCostRole) {
-                return static_cast<quint64>(row.inclusiveCost.leaked);
+                return static_cast<qint64>(row.inclusiveCost.leaked);
             } else {
                 return m_format.formatByteSize(row.inclusiveCost.leaked);
             }
@@ -231,12 +231,12 @@ QVariant CallerCalleeModel::data(const QModelIndex& index, int role) const
         stream << '\n';
         stream << i18n("inclusive: allocated %1 over %2 calls (%3 temporary, i.e. %4%), peak at %5, leaked %6",
                        m_format.formatByteSize(row.inclusiveCost.allocated), row.inclusiveCost.allocations, row.inclusiveCost.temporary,
-                       round(float(row.inclusiveCost.temporary) * 100.f * 100.f / std::max(uint64_t(1), row.inclusiveCost.allocations)) / 100.f,
+                       round(float(row.inclusiveCost.temporary) * 100.f * 100.f / std::max(int64_t(1), row.inclusiveCost.allocations)) / 100.f,
                        m_format.formatByteSize(row.inclusiveCost.peak), m_format.formatByteSize(row.inclusiveCost.leaked));
         stream << '\n';
         stream << i18n("self: allocated %1 over %2 calls (%3 temporary, i.e. %4%), peak at %5, leaked %6",
                        m_format.formatByteSize(row.selfCost.allocated), row.selfCost.allocations, row.selfCost.temporary,
-                       round(float(row.selfCost.temporary) * 100.f * 100.f / std::max(uint64_t(1), row.selfCost.allocations)) / 100.f,
+                       round(float(row.selfCost.temporary) * 100.f * 100.f / std::max(int64_t(1), row.selfCost.allocations)) / 100.f,
                        m_format.formatByteSize(row.selfCost.peak), m_format.formatByteSize(row.selfCost.leaked));
         stream << '\n';
         stream << "</pre></qt>";
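
The percentage formula used in the tooltips above rounds to two decimals and guards against division by zero; the std::max argument merely changed from uint64_t(1) to int64_t(1) to match the new field type. A standalone sketch of that computation (temporaryPercent is just an illustrative wrapper):

    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    // e.g. temporary = 123, allocations = 4567  ->  2.69 (percent)
    float temporaryPercent(int64_t temporary, int64_t allocations)
    {
        return std::round(float(temporary) * 100.f * 100.f
                          / std::max(int64_t(1), allocations)) / 100.f;
    }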
diff --git a/gui/chartmodel.h b/gui/chartmodel.h
index 03ce5a8..b8ac1c3 100644
@@ -34,7 +34,7 @@ struct ChartRows
         MAX_NUM_COST = 20
     };
     quint64 timeStamp = 0;
-    std::array<quint64, MAX_NUM_COST> cost;
+    std::array<qint64, MAX_NUM_COST> cost;
 };
 Q_DECLARE_TYPEINFO(ChartRows, Q_MOVABLE_TYPE);
 
diff --git a/gui/costdelegate.cpp b/gui/costdelegate.cpp
index eb68f7e..ebe4668 100644
@@ -24,6 +24,8 @@
 #include <QDebug>
 #include <QPainter>
 
+#include <cmath>
+
 CostDelegate::CostDelegate(QObject* parent)
     : QStyledItemDelegate(parent)
 {
@@ -33,15 +35,16 @@ CostDelegate::~CostDelegate() = default;
 
 void CostDelegate::paint(QPainter* painter, const QStyleOptionViewItem& option, const QModelIndex& index) const
 {
-    const uint64_t cost = index.data(TreeModel::SortRole).toULongLong();
+    // TODO: handle negative values
+    const int64_t cost = index.data(TreeModel::SortRole).toULongLong();
     if (cost == 0) {
         QStyledItemDelegate::paint(painter, option, index);
         return;
     }
 
-    const uint64_t maxCost = index.data(TreeModel::MaxCostRole).toULongLong();
+    const int64_t maxCost = index.data(TreeModel::MaxCostRole).toULongLong();
     // top-down can miscalculate the peak cost
-    const auto fraction = std::min(1.f, float(cost) / maxCost);
+    const auto fraction = std::min(1.f, std::abs(float(cost) / maxCost));
     auto rect = option.rect;
     rect.setWidth(rect.width() * fraction);
 
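The std::abs in the fraction keeps the cost bar drawable for negative (diffed) costs until the TODO above is addressed. A minimal standalone version of that computation (barFraction is an illustrative name):

    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    float barFraction(int64_t cost, int64_t maxCost)
    {
        // clamp to 1.0 since top-down aggregation can overshoot the peak
        return std::min(1.f, std::abs(float(cost) / maxCost));
    }
    // barFraction(-50, 200) == 0.25f, barFraction(300, 200) == 1.f
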
diff --git a/gui/flamegraph.cpp b/gui/flamegraph.cpp
index 340f5e2..c066189 100644
@@ -54,11 +54,11 @@ Q_DECLARE_METATYPE(CostType)
 class FrameGraphicsItem : public QGraphicsRectItem
 {
 public:
-    FrameGraphicsItem(const quint64 cost, CostType costType, const QString& function, FrameGraphicsItem* parent = nullptr);
-    FrameGraphicsItem(const quint64 cost, const QString& function, FrameGraphicsItem* parent);
+    FrameGraphicsItem(const qint64 cost, CostType costType, const QString& function, FrameGraphicsItem* parent = nullptr);
+    FrameGraphicsItem(const qint64 cost, const QString& function, FrameGraphicsItem* parent);
 
-    quint64 cost() const;
-    void setCost(quint64 cost);
+    qint64 cost() const;
+    void setCost(qint64 cost);
     QString function() const;
 
     void paint(QPainter* painter, const QStyleOptionGraphicsItem* option, QWidget* widget = nullptr) override;
@@ -70,7 +70,7 @@ protected:
     void hoverLeaveEvent(QGraphicsSceneHoverEvent *event) override;
 
 private:
-    quint64 m_cost;
+    qint64 m_cost;
     QString m_function;
     CostType m_costType;
     bool m_isHovered;
@@ -78,7 +78,7 @@ private:
 
 Q_DECLARE_METATYPE(FrameGraphicsItem*);
 
-FrameGraphicsItem::FrameGraphicsItem(const quint64 cost, CostType costType, const QString& function, FrameGraphicsItem* parent)
+FrameGraphicsItem::FrameGraphicsItem(const qint64 cost, CostType costType, const QString& function, FrameGraphicsItem* parent)
     : QGraphicsRectItem(parent)
     , m_cost(cost)
     , m_function(function)
@@ -89,17 +89,17 @@ FrameGraphicsItem::FrameGraphicsItem(const quint64 cost, CostType costType, cons
     setAcceptHoverEvents(true);
 }
 
-FrameGraphicsItem::FrameGraphicsItem(const quint64 cost, const QString& function, FrameGraphicsItem* parent)
+FrameGraphicsItem::FrameGraphicsItem(const qint64 cost, const QString& function, FrameGraphicsItem* parent)
     : FrameGraphicsItem(cost, parent->m_costType, function, parent)
 {
 }
 
-quint64 FrameGraphicsItem::cost() const
+qint64 FrameGraphicsItem::cost() const
 {
     return m_cost;
 }
 
-void FrameGraphicsItem::setCost(quint64 cost)
+void FrameGraphicsItem::setCost(qint64 cost)
 {
     m_cost = cost;
 }
@@ -153,7 +153,7 @@ QString FrameGraphicsItem::description() const
     // we build the tooltip text on demand, which is much faster than doing that for potentially thousands of items when we load the data
     QString tooltip;
     KFormat format;
-    quint64 totalCost = 0;
+    qint64 totalCost = 0;
     {
         auto item = this;
         while (item->parentItem()) {
@@ -225,10 +225,11 @@ void layoutItems(FrameGraphicsItem *parent)
     const qreal y_margin = 2.;
     const qreal y = pos.y() - h - y_margin;
     qreal x = pos.x();
+    // TODO: check this algorithm for differential flamegraphs
 
     foreach (auto child, parent->childItems()) {
         auto frameChild = static_cast<FrameGraphicsItem*>(child);
-        const qreal w = maxWidth * double(frameChild->cost()) / parent->cost();
+        const qreal w = std::abs(maxWidth * double(frameChild->cost()) / parent->cost());
         frameChild->setVisible(w > 1);
         if (frameChild->isVisible()) {
             frameChild->setRect(QRectF(x, y, w, h));
@@ -252,7 +253,7 @@ FrameGraphicsItem* findItemByFunction(const QList<QGraphicsItem*>& items, const
 /**
  * Convert the top-down graph into a tree of FrameGraphicsItem.
  */
-void toGraphicsItems(const QVector<RowData>& data, FrameGraphicsItem *parent, uint64_t AllocationData::* member,
+void toGraphicsItems(const QVector<RowData>& data, FrameGraphicsItem *parent, int64_t AllocationData::* member,
                      const double costThreshold)
 {
     foreach (const auto& row, data) {
@@ -264,13 +265,13 @@ void toGraphicsItems(const QVector<RowData>& data, FrameGraphicsItem *parent, ui
         } else {
             item->setCost(item->cost() + row.cost.*member);
         }
-        if (item->cost() > costThreshold) {
+        if (std::abs(item->cost()) > costThreshold) {
             toGraphicsItems(row.children, item, member, costThreshold);
         }
     }
 }
 
-uint64_t AllocationData::* memberForType(CostType type)
+int64_t AllocationData::* memberForType(CostType type)
 {
     switch (type) {
     case Allocations:
@@ -289,7 +290,7 @@ uint64_t AllocationData::* memberForType(CostType type)
 
 FrameGraphicsItem* parseData(const QVector<RowData>& topDownData, CostType type, double costThreshold)
 {
-    uint64_t AllocationData::* member = memberForType(type);
+    auto member = memberForType(type);
 
     double totalCost = 0;
     foreach(const auto& frame, topDownData) {
@@ -321,7 +322,7 @@ FrameGraphicsItem* parseData(const QVector<RowData>& topDownData, CostType type,
     auto rootItem = new FrameGraphicsItem(totalCost, type, label);
     rootItem->setBrush(scheme.background());
     rootItem->setPen(pen);
-    toGraphicsItems(topDownData, rootItem, member, totalCost * costThreshold / 100.);
+    toGraphicsItems(topDownData, rootItem, member, std::abs(totalCost) * costThreshold / 100.);
     return rootItem;
 }
 
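The signature change to toGraphicsItems()/memberForType() keeps the pointer-to-data-member pattern, just with the new signed type. For readers unfamiliar with it, a minimal sketch (Cost and total() are illustrative names, not heaptrack code):

    #include <cstdint>
    #include <vector>

    struct Cost { int64_t allocations = 0; int64_t peak = 0; };

    // One traversal works for any cost field selected via a member pointer.
    int64_t total(const std::vector<Cost>& rows, int64_t Cost::* member)
    {
        int64_t sum = 0;
        for (const auto& row : rows) {
            sum += row.*member;   // dereference the member pointer per row
        }
        return sum;
    }
    // usage: total(rows, &Cost::allocations) or total(rows, &Cost::peak)
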
diff --git a/gui/histogrammodel.h b/gui/histogrammodel.h
index 2ebc64b..dc324fd 100644
@@ -26,7 +26,7 @@
 
 struct HistogramColumn
 {
-    quint64 allocations;
+    qint64 allocations;
     std::shared_ptr<LocationData> location;
 };
 Q_DECLARE_TYPEINFO(HistogramColumn, Q_MOVABLE_TYPE);
diff --git a/gui/mainwindow.cpp b/gui/mainwindow.cpp
index 71c5fe1..9ce6d59 100644
@@ -190,10 +190,10 @@ MainWindow::MainWindow(QWidget* parent)
                     QTextStream stream(&textCenter);
                     stream << "<qt><dl>"
                            << i18n("<dt><b>calls to allocation functions</b>:</dt><dd>%1 (%2/s)</dd>",
-                                   data.cost.allocations, quint64(data.cost.allocations / totalTimeS))
+                                   data.cost.allocations, qint64(data.cost.allocations / totalTimeS))
                            << i18n("<dt><b>temporary allocations</b>:</dt><dd>%1 (%2%, %3/s)</dd>",
                                    data.cost.temporary, std::round(float(data.cost.temporary) * 100.f * 100.f / data.cost.allocations) / 100.f,
-                                   quint64(data.cost.temporary / totalTimeS))
+                                   qint64(data.cost.temporary / totalTimeS))
                            << i18n("<dt><b>bytes allocated in total</b> (ignoring deallocations):</dt><dd>%1 (%2/s)</dd>",
                                    format.formatByteSize(data.cost.allocated, 2), format.formatByteSize(data.cost.allocated / totalTimeS))
                            << "</dl></qt>";
diff --git a/gui/parser.cpp b/gui/parser.cpp
index cc1d4b8..7194eaf 100644
@@ -105,10 +105,10 @@ struct StringCache
 struct ChartMergeData
 {
     IpIndex ip;
-    quint64 consumed;
-    quint64 allocations;
-    quint64 allocated;
-    quint64 temporary;
+    qint64 consumed;
+    qint64 allocations;
+    qint64 allocated;
+    qint64 temporary;
     bool operator<(const IpIndex rhs) const
     {
         return ip < rhs;
@@ -164,7 +164,7 @@ struct ParserData final : public AccumulatedTraceData
             it->temporary += alloc.temporary;
         }
         // find the top hot spots for the individual data members and remember their IP and store the label
-        auto findTopChartEntries = [&] (quint64 ChartMergeData::* member, int LabelIds::* label, ChartData* data) {
+        auto findTopChartEntries = [&] (qint64 ChartMergeData::* member, int LabelIds::* label, ChartData* data) {
             sort(merged.begin(), merged.end(), [=] (const ChartMergeData& left, const ChartMergeData& right) {
                 return left.*member > right.*member;
             });
@@ -200,7 +200,7 @@ struct ParserData final : public AccumulatedTraceData
         lastTimeStamp = newStamp;
 
         // create the rows
-        auto createRow = [] (uint64_t timeStamp, uint64_t totalCost) {
+        auto createRow = [] (uint64_t timeStamp, int64_t totalCost) {
             ChartRows row;
             row.timeStamp = timeStamp;
             row.cost[0] = totalCost;
@@ -213,7 +213,7 @@ struct ParserData final : public AccumulatedTraceData
 
         // if the cost is non-zero and the ip corresponds to a hotspot function selected in the labels,
         // we add the cost to the rows column
-        auto addDataToRow = [] (uint64_t cost, int labelId, ChartRows* rows) {
+        auto addDataToRow = [] (int64_t cost, int labelId, ChartRows* rows) {
             if (!cost || labelId == -1) {
                 return;
             }
@@ -259,7 +259,7 @@ struct ParserData final : public AccumulatedTraceData
     struct CountedAllocationInfo
     {
         AllocationInfo info;
-        uint64_t allocations;
+        int64_t allocations;
         bool operator<(const CountedAllocationInfo& rhs) const
         {
             return tie(info.size, allocations)
@@ -284,7 +284,7 @@ struct ParserData final : public AccumulatedTraceData
         int temporary = -1;
     };
     QHash<IpIndex, LabelIds> labelIds;
-    uint64_t maxConsumedSinceLastTimeStamp = 0;
+    int64_t maxConsumedSinceLastTimeStamp = 0;
     uint64_t lastTimeStamp = 0;
 
     StringCache stringCache;
@@ -395,7 +395,7 @@ QVector<RowData> toTopDownData(const QVector<RowData>& bottomUpData)
 struct MergedHistogramColumnData
 {
     std::shared_ptr<LocationData> location;
-    uint64_t allocations;
+    int64_t allocations;
     bool operator<(const std::shared_ptr<LocationData>& rhs) const
     {
         return location < rhs;
diff --git a/gui/treemodel.cpp b/gui/treemodel.cpp
index 7888040..f7e3a43 100644
@@ -150,23 +150,28 @@ QVariant TreeModel::data(const QModelIndex& index, int role) const
         switch (static_cast<Columns>(index.column())) {
         case AllocatedColumn:
             if (role == SortRole || role == MaxCostRole) {
-                return static_cast<quint64>(row->cost.allocated);
-            } else {
-                return m_format.formatByteSize(row->cost.allocated);
+                return static_cast<quint64>(abs(row->cost.allocated));
             }
+            return m_format.formatByteSize(row->cost.allocated);
         case AllocationsColumn:
-            return static_cast<quint64>(row->cost.allocations);
+            if (role == SortRole || role == MaxCostRole) {
+                return static_cast<quint64>(abs(row->cost.allocations));
+            }
+            return static_cast<qint64>(row->cost.allocations);
         case TemporaryColumn:
-            return static_cast<quint64>(row->cost.temporary);
+            if (role == SortRole || role == MaxCostRole) {
+                return static_cast<quint64>(abs(row->cost.temporary));
+            }
+            return static_cast<qint64>(row->cost.temporary);
         case PeakColumn:
             if (role == SortRole || role == MaxCostRole) {
-                return static_cast<quint64>(row->cost.peak);
+                return static_cast<quint64>(abs(row->cost.peak));
             } else {
                 return m_format.formatByteSize(row->cost.peak);
             }
         case LeakedColumn:
             if (role == SortRole || role == MaxCostRole) {
-                return static_cast<quint64>(row->cost.leaked);
+                return static_cast<quint64>(abs(row->cost.leaked));
             } else {
                 return m_format.formatByteSize(row->cost.leaked);
             }
diff --git a/heaptrack_print.cpp b/heaptrack_print.cpp
index 7bc412f..a025021 100644
@@ -51,7 +51,7 @@ struct MergedAllocation : public AllocationData
 class formatBytes
 {
 public:
-    formatBytes(uint64_t bytes)
+    formatBytes(int64_t bytes)
         : m_bytes(bytes)
     {
     }
@@ -59,11 +59,15 @@ public:
     friend ostream& operator<<(ostream& out, const formatBytes data);
 
 private:
-    uint64_t m_bytes;
+    int64_t m_bytes;
 };
 
 ostream& operator<<(ostream& out, const formatBytes data)
 {
+    if (data.m_bytes < 0) {
+        // handle negative values
+        return out << '-' << formatBytes(-data.m_bytes);
+    }
     if (data.m_bytes < 1000) {
         // no fancy formatting for plain byte values, esp. no .00 fractions
         return out << data.m_bytes << 'B';
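
The new branch peels the sign off once and reuses the existing magnitude formatting. The same pattern in isolation (printBytes and its simplified unit handling are a stand-in, not the real formatBytes):

    #include <cstdint>
    #include <iostream>

    void printBytes(std::ostream& out, int64_t bytes)
    {
        if (bytes < 0) {
            // print the sign, then format the magnitude as usual
            out << '-';
            printBytes(out, -bytes);
            return;
        }
        if (bytes < 1000) {
            out << bytes << 'B';
        } else {
            out << (bytes / 1000.) << "KB";
        }
    }
    // printBytes(std::cout, -1500) prints "-1.5KB"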
@@ -272,7 +276,7 @@ struct Printer final : public AccumulatedTraceData
     void printMerged(T AllocationData::* member, LabelPrinter label, SubLabelPrinter sublabel)
     {
         auto sortOrder = [member] (const AllocationData& l, const AllocationData& r) {
-            return l.*member > r.*member;
+            return std::abs(l.*member) > std::abs(r.*member);
         };
         sort(mergedAllocations.begin(), mergedAllocations.end(), sortOrder);
         for (size_t i = 0; i < min(peakLimit, mergedAllocations.size()); ++i) {
@@ -284,7 +288,7 @@ struct Printer final : public AccumulatedTraceData
             printIp(allocation.ipIndex, cout);
 
             sort(allocation.traces.begin(), allocation.traces.end(), sortOrder);
-            size_t handled = 0;
+            int64_t handled = 0;
             for (size_t j = 0; j < min(subPeakLimit, allocation.traces.size()); ++j) {
                 const auto& trace = allocation.traces[j];
                 sublabel(trace);
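
Sorting by std::abs above means that with diffed (possibly negative) costs, the entries with the largest change come first regardless of direction. A standalone sketch of that ordering (sortByMagnitude is an illustrative name):

    #include <algorithm>
    #include <cstdint>
    #include <cstdlib>
    #include <vector>

    void sortByMagnitude(std::vector<int64_t>& costs)
    {
        std::sort(costs.begin(), costs.end(), [](int64_t l, int64_t r) {
            return std::abs(l) > std::abs(r);
        });
    }
    // {5, -100, 30}  ->  {-100, 30, 5}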
@@ -309,7 +313,7 @@ struct Printer final : public AccumulatedTraceData
     {
         sort(allocations.begin(), allocations.end(),
             [member] (const Allocation& l, const Allocation &r) {
-                return l.*member > r.*member;
+                return std::abs(l.*member) > std::abs(r.*member);
             });
         for (size_t i = 0; i < min(peakLimit, allocations.size()); ++i) {
             const auto& allocation = allocations[i];
@@ -361,7 +365,7 @@ struct Printer final : public AccumulatedTraceData
     void writeMassifBacktrace(const vector<Allocation>& allocations, size_t heapSize, size_t threshold,
                               const IpIndex& location, size_t depth = 0)
     {
-        size_t skippedLeaked = 0;
+        int64_t skippedLeaked = 0;
         size_t numAllocs = 0;
         size_t skipped = 0;
         auto mergedAllocations = mergeAllocations(allocations);
@@ -375,13 +379,13 @@ struct Printer final : public AccumulatedTraceData
         const bool shouldStop = isStopIndex(ip.functionIndex);
         if (!shouldStop) {
             for (auto& merged : mergedAllocations) {
-                if (!merged.leaked) {
+                if (merged.leaked < 0) {
                     // list is sorted, so we can bail out now - these entries are uninteresting for massif
                     break;
                 }
 
                 // skip items below threshold
-                if (merged.leaked >= threshold) {
+                if (static_cast<size_t>(merged.leaked) >= threshold) {
                     ++numAllocs;
                     // skip the first level of the backtrace, otherwise we'd endlessly recurse
                     for (auto& alloc : merged.traces) {
@@ -429,7 +433,7 @@ struct Printer final : public AccumulatedTraceData
 
         if (!shouldStop) {
             for (const auto& merged : mergedAllocations) {
-                if (merged.leaked && merged.leaked >= threshold) {
+                if (merged.leaked > 0 && static_cast<size_t>(merged.leaked) >= threshold) {
                     if (skippedLeaked > merged.leaked) {
                         // manually inject this entry to keep the output sorted
                         writeSkipped();
@@ -447,7 +451,7 @@ struct Printer final : public AccumulatedTraceData
             ++sizeHistogram[info.size];
         }
 
-        if (totalCost.leaked > lastMassifPeak && massifOut.is_open()) {
+        if (totalCost.leaked > 0 && static_cast<size_t>(totalCost.leaked) > lastMassifPeak && massifOut.is_open()) {
             massifAllocations = allocations;
             lastMassifPeak = totalCost.leaked;
         }
@@ -675,9 +679,9 @@ int main(int argc, char** argv)
          << "bytes allocated in total (ignoring deallocations): " << formatBytes(data.totalCost.allocated)
             << " (" << formatBytes(data.totalCost.allocated / totalTimeS) << "/s)" << '\n'
          << "calls to allocation functions: " << data.totalCost.allocations
-            << " (" << size_t(data.totalCost.allocations / totalTimeS) << "/s)\n"
+            << " (" << int64_t(data.totalCost.allocations / totalTimeS) << "/s)\n"
          << "temporary memory allocations: " << data.totalCost.temporary
-            << " (" << size_t(data.totalCost.temporary / totalTimeS) << "/s)\n"
+            << " (" << int64_t(data.totalCost.temporary / totalTimeS) << "/s)\n"
          << "peak heap memory consumption: " << formatBytes(data.totalCost.peak) << '\n'
          << "peak RSS (including heaptrack overhead): " << formatBytes(data.peakRSS * data.systemInfo.pageSize) << '\n'
          << "total memory leaked: " << formatBytes(data.totalCost.leaked) << '\n';