Thanks to ymeng@fb.com for coming up with this change.
`thread trace dump info` can dump some metrics that can be useful for
analyzing the performance and quality of a trace. This diff adds a --json
option for dumping this information in json format that can be easily
understood by machines.
Differential Revision: https://reviews.llvm.org/D129332
/// \param[in] verbose
/// If \b true, print detailed info
/// If \b false, print compact info
- virtual void DumpTraceInfo(Thread &thread, Stream &s, bool verbose) = 0;
+ virtual void DumpTraceInfo(Thread &thread, Stream &s, bool verbose,
+ bool json) = 0;
/// Check if a thread is currently traced by this object.
///
m_verbose = true;
break;
}
+ case 'j': {
+ m_json = true;
+ break;
+ }
default:
llvm_unreachable("Unimplemented option");
}
void OptionParsingStarting(ExecutionContext *execution_context) override {
m_verbose = false;
+ m_json = false;
}
llvm::ArrayRef<OptionDefinition> GetDefinitions() override {
// Instance variables to hold the values for command options.
bool m_verbose;
+ bool m_json;
};
- bool DoExecute(Args &command, CommandReturnObject &result) override {
- Target &target = m_exe_ctx.GetTargetRef();
- result.GetOutputStream().Format("Trace technology: {0}\n",
- target.GetTrace()->GetPluginName());
- return CommandObjectIterateOverThreads::DoExecute(command, result);
- }
-
CommandObjectTraceDumpInfo(CommandInterpreter &interpreter)
: CommandObjectIterateOverThreads(
interpreter, "thread trace dump info",
ThreadSP thread_sp =
m_exe_ctx.GetProcessPtr()->GetThreadList().FindThreadByID(tid);
trace_sp->DumpTraceInfo(*thread_sp, result.GetOutputStream(),
- m_options.m_verbose);
+ m_options.m_verbose, m_options.m_json);
return true;
}
let Command = "thread trace dump info" in {
def thread_trace_dump_info_verbose : Option<"verbose", "v">, Group<1>,
Desc<"show verbose thread trace dump info">;
+ def thread_trace_dump_info_json: Option<"json", "j">, Group<1>,
+ Desc<"Dump in JSON format.">;
}
let Command = "type summary add" in {
return decoded_thread.takeError();
}
-void TraceIntelPT::DumpTraceInfo(Thread &thread, Stream &s, bool verbose) {
+void TraceIntelPT::DumpTraceInfo(Thread &thread, Stream &s, bool verbose,
+ bool json) {
Storage &storage = GetUpdatedStorage();
lldb::tid_t tid = thread.GetID();
+ if (json) {
+ DumpTraceInfoAsJson(thread, s, verbose);
+ return;
+ }
+
s.Format("\nthread #{0}: tid = {1}", thread.GetIndexID(), thread.GetID());
if (!IsTraced(tid)) {
s << ", not traced\n";
}
Optional<uint64_t> raw_size = *raw_size_or_error;
+ s.Format("\n Trace technology: {0}\n", GetPluginName());
+
/// Instruction stats
{
uint64_t items_count = decoded_thread_sp->GetItemsCount();
uint64_t mem_used = decoded_thread_sp->CalculateApproximateMemoryUsage();
- s.Format(" Total number of trace items: {0}\n", items_count);
+ s.Format("\n Total number of trace items: {0}\n", items_count);
s << "\n Memory usage:\n";
if (raw_size)
storage.multicpu_decoder->GetNumContinuousExecutionsForThread(tid));
s.Format(" Total number of PSB blocks found: {0}\n",
storage.multicpu_decoder->GetTotalPSBBlocksCount());
- s.Format(" Number of PSB blocks for this thread {0}\n",
+ s.Format(" Number of PSB blocks for this thread: {0}\n",
storage.multicpu_decoder->GePSBBlocksCountForThread(tid));
s.Format(" Total number of unattributed PSB blocks found: {0}\n",
storage.multicpu_decoder->GetUnattributedPSBBlocksCount());
}
}
+/// Dump the trace information for \p thread to \p s as machine-readable JSON.
+///
+/// On error (thread not traced, raw-size lookup failure, decode failure) a
+/// plain-text "error: ..." line is written instead of JSON.
+/// NOTE(review): \p verbose is currently unused on the JSON path — confirm
+/// whether verbose-only fields should also be emitted here.
+void TraceIntelPT::DumpTraceInfoAsJson(Thread &thread, Stream &s,
+                                       bool verbose) {
+  Storage &storage = GetUpdatedStorage();
+
+  lldb::tid_t tid = thread.GetID();
+  json::OStream json_str(s.AsRawOstream(), 2);
+  if (!IsTraced(tid)) {
+    s << "error: thread not traced\n";
+    return;
+  }
+
+  Expected<Optional<uint64_t>> raw_size_or_error = GetRawTraceSize(thread);
+  if (!raw_size_or_error) {
+    s << "error: " << toString(raw_size_or_error.takeError()) << "\n";
+    return;
+  }
+
+  Expected<DecodedThreadSP> decoded_thread_sp_or_err = Decode(thread);
+  if (!decoded_thread_sp_or_err) {
+    s << "error: " << toString(decoded_thread_sp_or_err.takeError()) << "\n";
+    return;
+  }
+  DecodedThreadSP &decoded_thread_sp = *decoded_thread_sp_or_err;
+
+  json_str.object([&] {
+    json_str.attribute("traceTechnology", "intel-pt");
+    json_str.attributeObject("threadStats", [&] {
+      json_str.attribute("tid", tid);
+
+      uint64_t insn_len = decoded_thread_sp->GetItemsCount();
+      json_str.attribute("traceItemsCount", insn_len);
+
+      // Instruction stats
+      uint64_t mem_used = decoded_thread_sp->CalculateApproximateMemoryUsage();
+      json_str.attributeObject("memoryUsage", [&] {
+        // Serialized as a string to avoid precision loss for large counts.
+        json_str.attribute("totalInBytes", std::to_string(mem_used));
+        Optional<double> avg;
+        if (insn_len != 0)
+          avg = double(mem_used) / insn_len;
+        json_str.attribute("avgPerItemInBytes", avg);
+      });
+
+      // Timing
+      json_str.attributeObject("timingInSeconds", [&] {
+        GetTimer().ForThread(tid).ForEachTimedTask(
+            [&](const std::string &name, std::chrono::milliseconds duration) {
+              json_str.attribute(name, duration.count() / 1000.0);
+            });
+      });
+
+      // Instruction events stats
+      const DecodedThread::EventsStats &events_stats =
+          decoded_thread_sp->GetEventsStats();
+      json_str.attributeObject("events", [&] {
+        json_str.attribute("totalCount", events_stats.total_count);
+        json_str.attributeObject("individualCounts", [&] {
+          for (const auto &event_to_count : events_stats.events_counts) {
+            json_str.attribute(
+                TraceCursor::EventKindToString(event_to_count.first),
+                event_to_count.second);
+          }
+        });
+      });
+
+      if (storage.multicpu_decoder) {
+        json_str.attribute(
+            "continuousExecutions",
+            storage.multicpu_decoder->GetNumContinuousExecutionsForThread(tid));
+        json_str.attribute(
+            "PSBBlocks",
+            storage.multicpu_decoder->GePSBBlocksCountForThread(tid));
+      }
+
+      // Errors
+      const DecodedThread::LibiptErrorsStats &tsc_errors_stats =
+          decoded_thread_sp->GetTscErrorsStats();
+      json_str.attributeObject("errorItems", [&] {
+        json_str.attribute("total", tsc_errors_stats.total_count);
+        json_str.attributeObject("individualErrors", [&] {
+          for (const auto &error_message_to_count :
+               tsc_errors_stats.libipt_errors_counts) {
+            json_str.attribute(error_message_to_count.first,
+                               error_message_to_count.second);
+          }
+        });
+      });
+    });
+    json_str.attributeObject("globalStats", [&] {
+      json_str.attributeObject("timingInSeconds", [&] {
+        GetTimer().ForGlobal().ForEachTimedTask(
+            [&](const std::string &name, std::chrono::milliseconds duration) {
+              json_str.attribute(name, duration.count() / 1000.0);
+            });
+      });
+      if (storage.multicpu_decoder) {
+        json_str.attribute(
+            "totalUnattributedPSBBlocks",
+            storage.multicpu_decoder->GetUnattributedPSBBlocksCount());
+        json_str.attribute("totalPSBBlocks",
+                           storage.multicpu_decoder->GetTotalPSBBlocksCount());
+        // Note: a misspelled duplicate key ("totalCountinuosExecutions")
+        // carrying this same value was removed; emit the count once, under
+        // the correctly spelled key.
+        json_str.attribute(
+            "totalContinuousExecutions",
+            storage.multicpu_decoder->GetTotalContinuousExecutionsCount());
+      }
+    });
+  });
+}
+
+
llvm::Expected<Optional<uint64_t>>
TraceIntelPT::GetRawTraceSize(Thread &thread) {
if (GetUpdatedStorage().multicpu_decoder)
llvm::Expected<lldb::TraceCursorUP> CreateNewCursor(Thread &thread) override;
- void DumpTraceInfo(Thread &thread, Stream &s, bool verbose) override;
+ void DumpTraceInfo(Thread &thread, Stream &s, bool verbose,
+ bool json) override;
llvm::Expected<llvm::Optional<uint64_t>> GetRawTraceSize(Thread &thread);
/// returned if the decoder couldn't be properly set up.
llvm::Expected<DecodedThreadSP> Decode(Thread &thread);
+ // Dump out trace info in JSON format
+ void DumpTraceInfoAsJson(Thread &thread, Stream &s, bool verbose);
+
/// We package all the data that can change upon process stops to make sure
/// this contract is very visible.
/// This variable should only be accessed directly by constructores or live
-//===-- TraceIntelPTMultiCpuDecoder.cpp ----0------------------------------===//
+//===-- TraceIntelPTMultiCpuDecoder.cpp -----------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
return 0;
size_t count = 0;
auto it = m_continuous_executions_per_thread->find(tid);
+ if (it == m_continuous_executions_per_thread->end())
+ return 0;
for (const IntelPTThreadContinousExecution &execution : it->second)
count += execution.intelpt_subtraces.size();
return count;
void DumpEvent(const TraceDumper::TraceItem &item) {
m_j.attribute("event", TraceCursor::EventKindToString(*item.event));
-<<<<<<< HEAD
-=======
if (item.event == eTraceEventCPUChanged)
m_j.attribute("cpuId", item.cpu_id);
->>>>>>> ccb96b741a18 ([trace][intel pt] Create a CPU change event and expose it in the dumper)
}
void DumpInstruction(const TraceDumper::TraceItem &item) {
m_j.attribute(
"symbol",
ToOptionalString(item.symbol_info->sc.GetFunctionName().AsCString()));
-<<<<<<< HEAD
if (item.symbol_info->instruction) {
m_j.attribute("mnemonic",
ToOptionalString(item.symbol_info->instruction->GetMnemonic(
&item.symbol_info->exe_ctx)));
}
-=======
- m_j.attribute("mnemonic",
- ToOptionalString(item.symbol_info->instruction->GetMnemonic(
- &item.symbol_info->exe_ctx)));
->>>>>>> ccb96b741a18 ([trace][intel pt] Create a CPU change event and expose it in the dumper)
if (IsLineEntryValid(item.symbol_info->sc.line_entry)) {
m_j.attribute(
if (item.event) {
DumpEvent(item);
-<<<<<<< HEAD
- return;
- }
-
- if (item.error) {
- m_j.attribute("error", *item.error);
- return;
- }
-
- // we know we are seeing an actual instruction
- DumpInstruction(item);
-=======
} else if (item.error) {
m_j.attribute("error", *item.error);
} else {
DumpInstruction(item);
}
->>>>>>> ccb96b741a18 ([trace][intel pt] Create a CPU change event and expose it in the dumper)
});
}
substrs=["intel-pt"])
self.expect("thread trace dump info",
- substrs=['''Trace technology: intel-pt
+ substrs=['''thread #1: tid = 3842849
+
+ Trace technology: intel-pt
-thread #1: tid = 3842849
Total number of trace items: 23
Memory usage:
Errors:
Number of TSC decoding errors: 0'''],
patterns=["Decoding instructions: \d.\d\ds"])
+
+ def testDumpRawTraceSizeJSON(self):
+ # Load a single-thread intel-pt trace bundle and verify the exact
+ # machine-readable output of `thread trace dump info --json`.
+ self.expect("trace load -v " +
+ os.path.join(self.getSourceDir(), "intelpt-trace", "trace.json"),
+ substrs=["intel-pt"])
+
+ # The expected JSON is split into two substrs around the decoding time,
+ # since that timing value is nondeterministic.
+ self.expect("thread trace dump info --json ",
+ substrs=['''{
+ "traceTechnology": "intel-pt",
+ "threadStats": {
+ "tid": 3842849,
+ "traceItemsCount": 23,
+ "memoryUsage": {
+ "totalInBytes": "207",
+ "avgPerItemInBytes": 9
+ },
+ "timingInSeconds": {
+ "Decoding instructions": 0''', '''
+ },
+ "events": {
+ "totalCount": 2,
+ "individualCounts": {
+ "software disabled tracing": 2
+ }
+ },
+ "errorItems": {
+ "total": 0,
+ "individualErrors": {}
+ }
+ },
+ "globalStats": {
+ "timingInSeconds": {}
+ }
+}'''])
"totalInBytes": "0",
"avgPerItemInBytes": null
},
- "timingInSeconds": {},
+ "timingInSeconds": {
+ "Decoding instructions": ''', '''
+ },
"events": {
"totalCount": 0,
"individualCounts": {}
"traceItemsCount": 19524,
"memoryUsage": {
"totalInBytes": "175760",
- "avgPerItemInBytes": 9.00''', '''},
- "timingInSeconds": {},
+ "avgPerItemInBytes": 9.''', '''},
+ "timingInSeconds": {
+ "Decoding instructions": ''', '''
+ },
"events": {
"totalCount": 2,
"individualCounts": {
# we'll load the compact trace and make sure it works
self.traceLoad(os.path.join(compact_trace_bundle_dir, "trace.json"), substrs=["intel-pt"])
self.expect("thread trace dump instructions 2 -t",
- substrs=["19522: [tsc=40450075478109270] (error) expected tracing enabled event",
+ substrs=["19523: [tsc=40450075478109270] (error) expected tracing enabled event",
"m.out`foo() + 65 at multi_thread.cpp:12:21",
- "19520: [tsc=40450075477657246] 0x0000000000400ba7 jg 0x400bb3"])
+ "19521: [tsc=40450075477657246] 0x0000000000400ba7 jg 0x400bb3"])
self.expect("thread trace dump instructions 3 -t",
- substrs=["67911: [tsc=40450075477799536] 0x0000000000400bd7 addl $0x1, -0x4(%rbp)",
+ substrs=["67912: [tsc=40450075477799536] 0x0000000000400bd7 addl $0x1, -0x4(%rbp)",
"m.out`bar() + 26 at multi_thread.cpp:20:6"])
# This reduced the number of continuous executions to look at
# check that the Process and Thread objects were created correctly
self.expect("thread info", substrs=["tid = 3842849"])
self.expect("thread list", substrs=["Process 1234 stopped", "tid = 3842849"])
- self.expect("thread trace dump info", substrs=['''Trace technology: intel-pt
+ self.expect("thread trace dump info", substrs=['''thread #1: tid = 3842849
+
+ Trace technology: intel-pt
-thread #1: tid = 3842849
Total number of trace items: 23
Memory usage: