#include "builtin.h"
#include "perf.h"

#include "util/evsel.h"
#include "util/util.h"
#include "util/cache.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/header.h"
#include "util/session.h"

#include "util/parse-options.h"
#include "util/trace-event.h"
#include "util/debug.h"
#include "util/debugfs.h"
#include "util/tool.h"
#include "util/stat.h"

#include <sys/prctl.h>

#include <semaphore.h>
#include <pthread.h>
#include <math.h>
#if defined(__i386__) || defined(__x86_64__)
#include <asm/svm.h>
#include <asm/vmx.h>
#include <asm/kvm.h>
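/*
 * Each distinct guest event (a VM-exit reason, an MMIO gpa, an I/O port)
 * is identified by an event_key; timing statistics are kept both for the
 * event as a whole and for every vcpu that triggered it.
 */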
struct event_key {
	#define INVALID_KEY	(~0ULL)
	u64 key;
	int info;
};

struct kvm_event_stats {
	u64 time;
	struct stats stats;
};

struct kvm_event {
	struct list_head hash_entry;
	struct rb_node rb;

	struct event_key key;
	struct kvm_event_stats total;

	#define DEFAULT_VCPU_NUM 8
	int max_vcpu;
	struct kvm_event_stats *vcpu;
};

typedef int (*key_cmp_fun)(struct kvm_event*, struct kvm_event*, int);

struct kvm_event_key {
	const char *name;
	key_cmp_fun key;
};

struct perf_kvm_stat;
struct kvm_events_ops {
	bool (*is_begin_event)(struct perf_evsel *evsel,
			       struct perf_sample *sample,
			       struct event_key *key);
	bool (*is_end_event)(struct perf_evsel *evsel,
			     struct perf_sample *sample, struct event_key *key);
	void (*decode_key)(struct perf_kvm_stat *kvm, struct event_key *key,
			   char decode[20]);
	const char *name;
};

struct exit_reasons_table {
	unsigned long exit_code;
	const char *reason;
};

#define EVENTS_BITS		12
#define EVENTS_CACHE_SIZE	(1UL << EVENTS_BITS)
struct perf_kvm_stat {
	struct perf_tool    tool;
	struct perf_session *session;

	const char *file_name;
	const char *report_event;
	const char *sort_key;
	int trace_vcpu;

	struct exit_reasons_table *exit_reasons;
	int exit_reasons_size;
	const char *exit_reasons_isa;

	struct kvm_events_ops *events_ops;
	key_cmp_fun compare;
	struct list_head kvm_events_cache[EVENTS_CACHE_SIZE];
	u64 total_time;
	u64 total_count;

	struct rb_root result;
};
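/*
 * Every analyzed event is a begin/end pair: the begin sample stamps the
 * start time, the matching end sample on the same vcpu closes the interval
 * and feeds the duration into the statistics above.
 */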
static void exit_event_get_key(struct perf_evsel *evsel,
			       struct perf_sample *sample,
			       struct event_key *key)
{
	key->info = 0;
	key->key = perf_evsel__intval(evsel, sample, "exit_reason");
}
static bool kvm_exit_event(struct perf_evsel *evsel)
{
	return !strcmp(evsel->name, "kvm:kvm_exit");
}

static bool exit_event_begin(struct perf_evsel *evsel,
			     struct perf_sample *sample, struct event_key *key)
{
	if (kvm_exit_event(evsel)) {
		exit_event_get_key(evsel, sample, key);
		return true;
	}

	return false;
}

static bool kvm_entry_event(struct perf_evsel *evsel)
{
	return !strcmp(evsel->name, "kvm:kvm_entry");
}

static bool exit_event_end(struct perf_evsel *evsel,
			   struct perf_sample *sample __maybe_unused,
			   struct event_key *key __maybe_unused)
{
	return kvm_entry_event(evsel);
}
static struct exit_reasons_table vmx_exit_reasons[] = {
	VMX_EXIT_REASONS
};

static struct exit_reasons_table svm_exit_reasons[] = {
	SVM_EXIT_REASONS
};
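/*
 * Translate a raw exit code to its symbolic name by scanning the table that
 * matches the recording host's ISA (the tables expand from the exit reason
 * lists in the arch headers above).
 */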
static const char *get_exit_reason(struct perf_kvm_stat *kvm, u64 exit_code)
{
	int i = kvm->exit_reasons_size;
	struct exit_reasons_table *tbl = kvm->exit_reasons;

	while (i--) {
		if (tbl->exit_code == exit_code)
			return tbl->reason;
		tbl++;
	}

	pr_err("unknown kvm exit code:%lld on %s\n",
		(unsigned long long)exit_code, kvm->exit_reasons_isa);
	return "UNKNOWN";
}

static void exit_event_decode_key(struct perf_kvm_stat *kvm,
				  struct event_key *key,
				  char decode[20])
{
	const char *exit_reason = get_exit_reason(kvm, key->key);

	scnprintf(decode, 20, "%s", exit_reason);
}

static struct kvm_events_ops exit_events = {
	.is_begin_event = exit_event_begin,
	.is_end_event = exit_event_end,
	.decode_key = exit_event_decode_key,
	.name = "VM-EXIT"
};
/*
 * For the mmio events, we treat:
 * the time of MMIO write: kvm_mmio(KVM_TRACE_MMIO_WRITE...) -> kvm_entry
 * the time of MMIO read: kvm_exit -> kvm_mmio(KVM_TRACE_MMIO_READ...).
 */
static void mmio_event_get_key(struct perf_evsel *evsel, struct perf_sample *sample,
			       struct event_key *key)
{
	key->key  = perf_evsel__intval(evsel, sample, "gpa");
	key->info = perf_evsel__intval(evsel, sample, "type");
}
#define KVM_TRACE_MMIO_READ_UNSATISFIED 0
#define KVM_TRACE_MMIO_READ 1
#define KVM_TRACE_MMIO_WRITE 2
static bool mmio_event_begin(struct perf_evsel *evsel,
			     struct perf_sample *sample, struct event_key *key)
{
	/* MMIO read begin event in kernel. */
	if (kvm_exit_event(evsel))
		return true;

	/* MMIO write begin event in kernel. */
	if (!strcmp(evsel->name, "kvm:kvm_mmio") &&
	    perf_evsel__intval(evsel, sample, "type") == KVM_TRACE_MMIO_WRITE) {
		mmio_event_get_key(evsel, sample, key);
		return true;
	}

	return false;
}

static bool mmio_event_end(struct perf_evsel *evsel, struct perf_sample *sample,
			   struct event_key *key)
{
	/* MMIO write end event in kernel. */
	if (kvm_entry_event(evsel))
		return true;

	/* MMIO read end event in kernel. */
	if (!strcmp(evsel->name, "kvm:kvm_mmio") &&
	    perf_evsel__intval(evsel, sample, "type") == KVM_TRACE_MMIO_READ) {
		mmio_event_get_key(evsel, sample, key);
		return true;
	}

	return false;
}

static void mmio_event_decode_key(struct perf_kvm_stat *kvm __maybe_unused,
				  struct event_key *key,
				  char decode[20])
{
	scnprintf(decode, 20, "%#lx:%s", (unsigned long)key->key,
		  key->info == KVM_TRACE_MMIO_WRITE ? "W" : "R");
}

static struct kvm_events_ops mmio_events = {
	.is_begin_event = mmio_event_begin,
	.is_end_event = mmio_event_end,
	.decode_key = mmio_event_decode_key,
	.name = "MMIO Access"
};
/* The time of emulation pio access is from kvm_pio to kvm_entry. */
static void ioport_event_get_key(struct perf_evsel *evsel,
				 struct perf_sample *sample,
				 struct event_key *key)
{
	key->key  = perf_evsel__intval(evsel, sample, "port");
	key->info = perf_evsel__intval(evsel, sample, "rw");
}

static bool ioport_event_begin(struct perf_evsel *evsel,
			       struct perf_sample *sample,
			       struct event_key *key)
{
	if (!strcmp(evsel->name, "kvm:kvm_pio")) {
		ioport_event_get_key(evsel, sample, key);
		return true;
	}

	return false;
}

static bool ioport_event_end(struct perf_evsel *evsel,
			     struct perf_sample *sample __maybe_unused,
			     struct event_key *key __maybe_unused)
{
	return kvm_entry_event(evsel);
}

static void ioport_event_decode_key(struct perf_kvm_stat *kvm __maybe_unused,
				    struct event_key *key,
				    char decode[20])
{
	scnprintf(decode, 20, "%#llx:%s", (unsigned long long)key->key,
		  key->info ? "POUT" : "PIN");
}

static struct kvm_events_ops ioport_events = {
	.is_begin_event = ioport_event_begin,
	.is_end_event = ioport_event_end,
	.decode_key = ioport_event_decode_key,
	.name = "IO Port Access"
};
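/* Map the --event argument (vmexit, mmio, ioport) to its ops table. */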
static bool register_kvm_events_ops(struct perf_kvm_stat *kvm)
{
	bool ret = true;

	if (!strcmp(kvm->report_event, "vmexit"))
		kvm->events_ops = &exit_events;
	else if (!strcmp(kvm->report_event, "mmio"))
		kvm->events_ops = &mmio_events;
	else if (!strcmp(kvm->report_event, "ioport"))
		kvm->events_ops = &ioport_events;
	else {
		pr_err("Unknown report event:%s\n", kvm->report_event);
		ret = false;
	}

	return ret;
}

struct vcpu_event_record {
	int vcpu_id;
	u64 start_time;
	struct kvm_event *last_event;
};
static void init_kvm_event_record(struct perf_kvm_stat *kvm)
{
	unsigned int i;

	for (i = 0; i < EVENTS_CACHE_SIZE; i++)
		INIT_LIST_HEAD(&kvm->kvm_events_cache[i]);
}
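/* Events live in an open hash table; the bucket is the low EVENTS_BITS of the key. */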
static int kvm_events_hash_fn(u64 key)
{
	return key & (EVENTS_CACHE_SIZE - 1);
}
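/*
 * Grow the per-vcpu stats array in DEFAULT_VCPU_NUM steps so that vcpu_id
 * always indexes a valid, zero-initialized slot.
 */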
static bool kvm_event_expand(struct kvm_event *event, int vcpu_id)
{
	int old_max_vcpu = event->max_vcpu;

	if (vcpu_id < event->max_vcpu)
		return true;

	while (event->max_vcpu <= vcpu_id)
		event->max_vcpu += DEFAULT_VCPU_NUM;

	event->vcpu = realloc(event->vcpu,
			      event->max_vcpu * sizeof(*event->vcpu));
	if (!event->vcpu) {
		pr_err("Not enough memory\n");
		return false;
	}

	memset(event->vcpu + old_max_vcpu, 0,
	       (event->max_vcpu - old_max_vcpu) * sizeof(*event->vcpu));
	return true;
}

static struct kvm_event *kvm_alloc_init_event(struct event_key *key)
{
	struct kvm_event *event;

	event = zalloc(sizeof(*event));
	if (!event) {
		pr_err("Not enough memory\n");
		return NULL;
	}

	event->key = *key;
	return event;
}
static struct kvm_event *find_create_kvm_event(struct perf_kvm_stat *kvm,
					       struct event_key *key)
{
	struct kvm_event *event;
	struct list_head *head;

	BUG_ON(key->key == INVALID_KEY);

	head = &kvm->kvm_events_cache[kvm_events_hash_fn(key->key)];
	list_for_each_entry(event, head, hash_entry) {
		if (event->key.key == key->key && event->key.info == key->info)
			return event;
	}

	event = kvm_alloc_init_event(key);
	if (!event)
		return NULL;

	list_add(&event->hash_entry, head);
	return event;
}
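/*
 * A begin sample only stamps the start time; when the key is not known yet
 * (e.g. an MMIO read), the kvm_event is looked up or created once the
 * matching end sample arrives.
 */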
static bool handle_begin_event(struct perf_kvm_stat *kvm,
			       struct vcpu_event_record *vcpu_record,
			       struct event_key *key, u64 timestamp)
{
	struct kvm_event *event = NULL;

	if (key->key != INVALID_KEY)
		event = find_create_kvm_event(kvm, key);

	vcpu_record->last_event = event;
	vcpu_record->start_time = timestamp;
	return true;
}

static void
kvm_update_event_stats(struct kvm_event_stats *kvm_stats, u64 time_diff)
{
	kvm_stats->time += time_diff;
	update_stats(&kvm_stats->stats, time_diff);
}

static double kvm_event_rel_stddev(int vcpu_id, struct kvm_event *event)
{
	struct kvm_event_stats *kvm_stats = &event->total;

	if (vcpu_id != -1)
		kvm_stats = &event->vcpu[vcpu_id];

	return rel_stddev_stats(stddev_stats(&kvm_stats->stats),
				avg_stats(&kvm_stats->stats));
}
static bool update_kvm_event(struct kvm_event *event, int vcpu_id,
			     u64 time_diff)
{
	if (vcpu_id == -1) {
		kvm_update_event_stats(&event->total, time_diff);
		return true;
	}

	if (!kvm_event_expand(event, vcpu_id))
		return false;

	kvm_update_event_stats(&event->vcpu[vcpu_id], time_diff);
	return true;
}
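/*
 * Close the interval opened by handle_begin_event(): find (or create) the
 * kvm_event, reset the per-vcpu record and account the elapsed time.
 */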
static bool handle_end_event(struct perf_kvm_stat *kvm,
			     struct vcpu_event_record *vcpu_record,
			     struct event_key *key,
			     u64 timestamp)
{
	struct kvm_event *event;
	u64 time_begin, time_diff;
	int vcpu;

	if (kvm->trace_vcpu == -1)
		vcpu = -1;
	else
		vcpu = vcpu_record->vcpu_id;

	event = vcpu_record->last_event;
	time_begin = vcpu_record->start_time;

	/* The begin event is not caught. */
	if (!time_begin)
		return true;

	/*
	 * In some case, the 'begin event' only records the start timestamp,
	 * the actual event is recognized in the 'end event' (e.g. mmio-event).
	 */

	/* Both begin and end events did not get the key. */
	if (!event && key->key == INVALID_KEY)
		return true;

	if (!event)
		event = find_create_kvm_event(kvm, key);

	if (!event)
		return false;

	vcpu_record->last_event = NULL;
	vcpu_record->start_time = 0;

	BUG_ON(timestamp < time_begin);

	time_diff = timestamp - time_begin;
	return update_kvm_event(event, vcpu, time_diff);
}
static
struct vcpu_event_record *per_vcpu_record(struct thread *thread,
					  struct perf_evsel *evsel,
					  struct perf_sample *sample)
{
	/* Only kvm_entry records vcpu id. */
	if (!thread->priv && kvm_entry_event(evsel)) {
		struct vcpu_event_record *vcpu_record;

		vcpu_record = zalloc(sizeof(*vcpu_record));
		if (!vcpu_record) {
			pr_err("%s: Not enough memory\n", __func__);
			return NULL;
		}

		vcpu_record->vcpu_id = perf_evsel__intval(evsel, sample, "vcpu_id");
		thread->priv = vcpu_record;
	}

	return thread->priv;
}

static bool handle_kvm_event(struct perf_kvm_stat *kvm,
			     struct thread *thread,
			     struct perf_evsel *evsel,
			     struct perf_sample *sample)
{
	struct vcpu_event_record *vcpu_record;
	struct event_key key = {.key = INVALID_KEY};

	vcpu_record = per_vcpu_record(thread, evsel, sample);
	if (!vcpu_record)
		return true;

	/* only process events for vcpus user cares about */
	if ((kvm->trace_vcpu != -1) &&
	    (kvm->trace_vcpu != vcpu_record->vcpu_id))
		return true;

	if (kvm->events_ops->is_begin_event(evsel, sample, &key))
		return handle_begin_event(kvm, vcpu_record, &key, sample->time);

	if (kvm->events_ops->is_end_event(evsel, sample, &key))
		return handle_end_event(kvm, vcpu_record, &key, sample->time);

	return true;
}
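/*
 * GET_EVENT_KEY generates get_event_<func>() accessors returning either the
 * aggregate value (vcpu == -1) or the per-vcpu value of a stats field;
 * COMPARE_EVENT_KEY additionally generates the matching sort comparator.
 */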
#define GET_EVENT_KEY(func, field)					\
static u64 get_event_ ##func(struct kvm_event *event, int vcpu)		\
{									\
	if (vcpu == -1)							\
		return event->total.field;				\
									\
	if (vcpu >= event->max_vcpu)					\
		return 0;						\
									\
	return event->vcpu[vcpu].field;					\
}

#define COMPARE_EVENT_KEY(func, field)					\
GET_EVENT_KEY(func, field)						\
static int compare_kvm_event_ ## func(struct kvm_event *one,		\
					struct kvm_event *two, int vcpu)\
{									\
	return get_event_ ##func(one, vcpu) >				\
				get_event_ ##func(two, vcpu);		\
}

GET_EVENT_KEY(time, time);
COMPARE_EVENT_KEY(count, stats.n);
COMPARE_EVENT_KEY(mean, stats.mean);

#define DEF_SORT_NAME_KEY(name, compare_key)				\
	{ #name, compare_kvm_event_ ## compare_key }

static struct kvm_event_key keys[] = {
	DEF_SORT_NAME_KEY(sample, count),
	DEF_SORT_NAME_KEY(time, mean),
	{ NULL, NULL }
};
static bool select_key(struct perf_kvm_stat *kvm)
{
	int i;

	for (i = 0; keys[i].name; i++) {
		if (!strcmp(keys[i].name, kvm->sort_key)) {
			kvm->compare = keys[i].key;
			return true;
		}
	}

	pr_err("Unknown compare key:%s\n", kvm->sort_key);
	return false;
}
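/* Keep the result tree ordered so that the "biggest" event is the leftmost node. */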
static void insert_to_result(struct rb_root *result, struct kvm_event *event,
			     key_cmp_fun bigger, int vcpu)
{
	struct rb_node **rb = &result->rb_node;
	struct rb_node *parent = NULL;
	struct kvm_event *p;

	while (*rb) {
		p = container_of(*rb, struct kvm_event, rb);
		parent = *rb;

		if (bigger(event, p, vcpu))
			rb = &(*rb)->rb_left;
		else
			rb = &(*rb)->rb_right;
	}

	rb_link_node(&event->rb, parent, rb);
	rb_insert_color(&event->rb, result);
}

static void
update_total_count(struct perf_kvm_stat *kvm, struct kvm_event *event)
{
	int vcpu = kvm->trace_vcpu;

	kvm->total_count += get_event_count(event, vcpu);
	kvm->total_time += get_event_time(event, vcpu);
}

static bool event_is_valid(struct kvm_event *event, int vcpu)
{
	return !!get_event_count(event, vcpu);
}
static void sort_result(struct perf_kvm_stat *kvm)
{
	unsigned int i;
	int vcpu = kvm->trace_vcpu;
	struct kvm_event *event;

	for (i = 0; i < EVENTS_CACHE_SIZE; i++) {
		list_for_each_entry(event, &kvm->kvm_events_cache[i], hash_entry) {
			if (event_is_valid(event, vcpu)) {
				update_total_count(kvm, event);
				insert_to_result(&kvm->result, event,
						 kvm->compare, vcpu);
			}
		}
	}
}

/* returns left most element of result, and erase it */
static struct kvm_event *pop_from_result(struct rb_root *result)
{
	struct rb_node *node = rb_first(result);

	if (!node)
		return NULL;

	rb_erase(node, result);
	return container_of(node, struct kvm_event, rb);
}
static void print_vcpu_info(int vcpu)
{
	pr_info("Analyze events for ");

	if (vcpu == -1)
		pr_info("all VCPUs:\n\n");
	else
		pr_info("VCPU %d:\n\n", vcpu);
}
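/*
 * Emit one row per event, ordered by the selected sort key: samples,
 * sample%, time%, average time and its relative stddev.
 */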
static void print_result(struct perf_kvm_stat *kvm)
{
	char decode[20];
	struct kvm_event *event;
	int vcpu = kvm->trace_vcpu;

	pr_info("\n\n");
	print_vcpu_info(vcpu);
	pr_info("%20s ", kvm->events_ops->name);
	pr_info("%10s ", "Samples");
	pr_info("%9s ", "Samples%");

	pr_info("%9s ", "Time%");
	pr_info("%16s ", "Avg time");
	pr_info("\n\n");

	while ((event = pop_from_result(&kvm->result))) {
		u64 ecount, etime;

		ecount = get_event_count(event, vcpu);
		etime = get_event_time(event, vcpu);

		kvm->events_ops->decode_key(kvm, &event->key, decode);
		pr_info("%20s ", decode);
		pr_info("%10llu ", (unsigned long long)ecount);
		pr_info("%8.2f%% ", (double)ecount / kvm->total_count * 100);
		pr_info("%8.2f%% ", (double)etime / kvm->total_time * 100);
		pr_info("%9.2fus ( +-%7.2f%% )", (double)etime / ecount/1e3,
			kvm_event_rel_stddev(vcpu, event));
		pr_info("\n");
	}

	pr_info("\nTotal Samples:%" PRIu64 ", Total events handled time:%.2fus.\n\n",
		kvm->total_count, kvm->total_time / 1e3);
}
static int process_sample_event(struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct perf_evsel *evsel,
				struct machine *machine)
{
	struct thread *thread = machine__findnew_thread(machine, sample->tid);
	struct perf_kvm_stat *kvm = container_of(tool, struct perf_kvm_stat,
						 tool);

	if (thread == NULL) {
		pr_debug("problem processing %d event, skipping it.\n",
			 event->header.type);
		return -1;
	}

	if (!handle_kvm_event(kvm, thread, evsel, sample))
		return -1;

	return 0;
}
static int get_cpu_isa(struct perf_session *session)
{
	char *cpuid = session->header.env.cpuid;
	int isa;

	if (strstr(cpuid, "Intel"))
		isa = 1;
	else if (strstr(cpuid, "AMD"))
		isa = 0;
	else {
		pr_err("CPU %s is not supported.\n", cpuid);
		isa = -ENOTSUP;
	}

	return isa;
}
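/*
 * Open the data file, verify it contains tracepoints and switch the exit
 * reason table to VMX when the recording host was an Intel CPU (the default
 * set up in kvm_cmd_stat() is SVM).
 */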
static int read_events(struct perf_kvm_stat *kvm)
{
	int ret;

	struct perf_tool eops = {
		.sample			= process_sample_event,
		.comm			= perf_event__process_comm,
		.ordered_samples	= true,
	};

	kvm->tool = eops;
	kvm->session = perf_session__new(kvm->file_name, O_RDONLY, 0, false,
					 &kvm->tool);
	if (!kvm->session) {
		pr_err("Initializing perf session failed\n");
		return -EINVAL;
	}

	if (!perf_session__has_traces(kvm->session, "kvm record"))
		return -EINVAL;

	/*
	 * Do not use 'isa' recorded in kvm_exit tracepoint since it is not
	 * traced in the old kernel.
	 */
	ret = get_cpu_isa(kvm->session);
	if (ret < 0)
		return ret;

	if (ret == 1) {
		kvm->exit_reasons = vmx_exit_reasons;
		kvm->exit_reasons_size = ARRAY_SIZE(vmx_exit_reasons);
		kvm->exit_reasons_isa = "VMX";
	}

	return perf_session__process_events(kvm->session, &kvm->tool);
}
static bool verify_vcpu(int vcpu)
{
	if (vcpu != -1 && vcpu < 0) {
		pr_err("Invalid vcpu:%d.\n", vcpu);
		return false;
	}

	return true;
}

static int kvm_events_report_vcpu(struct perf_kvm_stat *kvm)
{
	int ret = -EINVAL;
	int vcpu = kvm->trace_vcpu;

	if (!verify_vcpu(vcpu))
		goto exit;

	if (!select_key(kvm))
		goto exit;

	if (!register_kvm_events_ops(kvm))
		goto exit;

	init_kvm_event_record(kvm);

	ret = read_events(kvm);
	if (ret)
		goto exit;

	sort_result(kvm);
	print_result(kvm);

exit:
	return ret;
}
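/*
 * 'perf kvm stat record' wraps 'perf record' and always enables the KVM
 * tracepoints the analysis relies on (kvm_entry, kvm_exit, kvm_mmio and,
 * for ioport events, kvm_pio).
 */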
static const char * const record_args[] = {
	"record",
	"-R",
	"-f",
	"-m", "1024",
	"-c", "1",
	"-e", "kvm:kvm_entry",
	"-e", "kvm:kvm_exit",
	"-e", "kvm:kvm_mmio",
	"-e", "kvm:kvm_pio",
};

#define STRDUP_FAIL_EXIT(s)		\
	({	char *_p;		\
		_p = strdup(s);		\
		if (!_p)		\
			return -ENOMEM;	\
		_p;			\
	})
static int
kvm_events_record(struct perf_kvm_stat *kvm, int argc, const char **argv)
{
	unsigned int rec_argc, i, j;
	const char **rec_argv;

	rec_argc = ARRAY_SIZE(record_args) + argc + 2;
	rec_argv = calloc(rec_argc + 1, sizeof(char *));

	if (rec_argv == NULL)
		return -ENOMEM;

	for (i = 0; i < ARRAY_SIZE(record_args); i++)
		rec_argv[i] = STRDUP_FAIL_EXIT(record_args[i]);

	rec_argv[i++] = STRDUP_FAIL_EXIT("-o");
	rec_argv[i++] = STRDUP_FAIL_EXIT(kvm->file_name);

	for (j = 1; j < (unsigned int)argc; j++, i++)
		rec_argv[i] = argv[j];

	return cmd_record(i, rec_argv, NULL);
}
static int
kvm_events_report(struct perf_kvm_stat *kvm, int argc, const char **argv)
{
	const struct option kvm_events_report_options[] = {
		OPT_STRING(0, "event", &kvm->report_event, "report event",
			   "event for reporting: vmexit, mmio, ioport"),
		OPT_INTEGER(0, "vcpu", &kvm->trace_vcpu,
			    "vcpu id to report"),
		OPT_STRING('k', "key", &kvm->sort_key, "sort-key",
			   "key for sorting: sample(sort by samples number)"
			   " time (sort by avg time)"),
		OPT_END()
	};

	const char * const kvm_events_report_usage[] = {
		"perf kvm stat report [<options>]",
		NULL
	};

	if (argc) {
		argc = parse_options(argc, argv,
				     kvm_events_report_options,
				     kvm_events_report_usage, 0);
		if (argc)
			usage_with_options(kvm_events_report_usage,
					   kvm_events_report_options);
	}

	return kvm_events_report_vcpu(kvm);
}
static void print_kvm_stat_usage(void)
{
	printf("Usage: perf kvm stat <command>\n\n");

	printf("# Available commands:\n");
	printf("\trecord: record kvm events\n");
	printf("\treport: report statistical data of kvm events\n");

	printf("\nOtherwise, it is the alias of 'perf stat':\n");
}
static int kvm_cmd_stat(const char *file_name, int argc, const char **argv)
{
	struct perf_kvm_stat kvm = {
		.file_name = file_name,

		.trace_vcpu	= -1,
		.report_event	= "vmexit",
		.sort_key	= "sample",

		.exit_reasons = svm_exit_reasons,
		.exit_reasons_size = ARRAY_SIZE(svm_exit_reasons),
		.exit_reasons_isa = "SVM",
	};

	if (argc == 1) {
		print_kvm_stat_usage();
		goto perf_stat;
	}

	if (!strncmp(argv[1], "rec", 3))
		return kvm_events_record(&kvm, argc - 1, argv + 1);

	if (!strncmp(argv[1], "rep", 3))
		return kvm_events_report(&kvm, argc - 1, argv + 1);

perf_stat:
	return cmd_stat(argc, argv, NULL);
}
#endif
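/*
 * The remaining subcommands forward to the generic perf commands, injecting
 * the guest/host data file via -o (record) or -i (report, buildid-list).
 */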
static int __cmd_record(const char *file_name, int argc, const char **argv)
{
	int rec_argc, i = 0, j;
	const char **rec_argv;

	rec_argc = argc + 2;
	rec_argv = calloc(rec_argc + 1, sizeof(char *));
	rec_argv[i++] = strdup("record");
	rec_argv[i++] = strdup("-o");
	rec_argv[i++] = strdup(file_name);
	for (j = 1; j < argc; j++, i++)
		rec_argv[i] = argv[j];

	BUG_ON(i != rec_argc);

	return cmd_record(i, rec_argv, NULL);
}
static int __cmd_report(const char *file_name, int argc, const char **argv)
{
	int rec_argc, i = 0, j;
	const char **rec_argv;

	rec_argc = argc + 2;
	rec_argv = calloc(rec_argc + 1, sizeof(char *));
	rec_argv[i++] = strdup("report");
	rec_argv[i++] = strdup("-i");
	rec_argv[i++] = strdup(file_name);
	for (j = 1; j < argc; j++, i++)
		rec_argv[i] = argv[j];

	BUG_ON(i != rec_argc);

	return cmd_report(i, rec_argv, NULL);
}
static int
__cmd_buildid_list(const char *file_name, int argc, const char **argv)
{
	int rec_argc, i = 0, j;
	const char **rec_argv;

	rec_argc = argc + 2;
	rec_argv = calloc(rec_argc + 1, sizeof(char *));
	rec_argv[i++] = strdup("buildid-list");
	rec_argv[i++] = strdup("-i");
	rec_argv[i++] = strdup(file_name);
	for (j = 1; j < argc; j++, i++)
		rec_argv[i] = argv[j];

	BUG_ON(i != rec_argc);

	return cmd_buildid_list(i, rec_argv, NULL);
}
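/*
 * Top-level entry: parse the common --guest/--host/--input options, pick a
 * default data file name, then dispatch on the subcommand in argv[0].
 * Illustrative invocations (based on the usage strings and options below):
 *
 *   perf kvm --guest record -a
 *   perf kvm stat record -p <qemu-pid>
 *   perf kvm stat report --event=vmexit --key=time
 */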
int cmd_kvm(int argc, const char **argv, const char *prefix __maybe_unused)
{
	const char *file_name = NULL;
	const struct option kvm_options[] = {
		OPT_STRING('i', "input", &file_name, "file",
			   "Input file name"),
		OPT_STRING('o', "output", &file_name, "file",
			   "Output file name"),
		OPT_BOOLEAN(0, "guest", &perf_guest,
			    "Collect guest os data"),
		OPT_BOOLEAN(0, "host", &perf_host,
			    "Collect host os data"),
		OPT_STRING(0, "guestmount", &symbol_conf.guestmount, "directory",
			   "guest mount directory under which every guest os"
			   " instance has a subdir"),
		OPT_STRING(0, "guestvmlinux", &symbol_conf.default_guest_vmlinux_name,
			   "file", "file saving guest os vmlinux"),
		OPT_STRING(0, "guestkallsyms", &symbol_conf.default_guest_kallsyms,
			   "file", "file saving guest os /proc/kallsyms"),
		OPT_STRING(0, "guestmodules", &symbol_conf.default_guest_modules,
			   "file", "file saving guest os /proc/modules"),
		OPT_END()
	};

	const char * const kvm_usage[] = {
		"perf kvm [<options>] {top|record|report|diff|buildid-list|stat}",
		NULL
	};

	perf_host  = 0;
	perf_guest = 1;

	argc = parse_options(argc, argv, kvm_options, kvm_usage,
			     PARSE_OPT_STOP_AT_NON_OPTION);
	if (!argc)
		usage_with_options(kvm_usage, kvm_options);

	if (!perf_host)
		perf_guest = 1;

	if (!file_name) {
		if (perf_host && !perf_guest)
			file_name = strdup("perf.data.host");
		else if (!perf_host && perf_guest)
			file_name = strdup("perf.data.guest");
		else
			file_name = strdup("perf.data.kvm");

		if (!file_name) {
			pr_err("Failed to allocate memory for filename\n");
			return -ENOMEM;
		}
	}

	if (!strncmp(argv[0], "rec", 3))
		return __cmd_record(file_name, argc, argv);
	else if (!strncmp(argv[0], "rep", 3))
		return __cmd_report(file_name, argc, argv);
	else if (!strncmp(argv[0], "diff", 4))
		return cmd_diff(argc, argv, NULL);
	else if (!strncmp(argv[0], "top", 3))
		return cmd_top(argc, argv, NULL);
	else if (!strncmp(argv[0], "buildid-list", 12))
		return __cmd_buildid_list(file_name, argc, argv);
#if defined(__i386__) || defined(__x86_64__)
	else if (!strncmp(argv[0], "stat", 4))
		return kvm_cmd_stat(file_name, argc, argv);
#endif
	else
		usage_with_options(kvm_usage, kvm_options);

	return 0;
}