1 // SPDX-License-Identifier: GPL-2.0
5 * Copyright (c) 2022 Huawei Inc, Yang Jihong <yangjihong1@huawei.com>
10 #include "util/data.h"
11 #include "util/evlist.h"
12 #include "util/evsel.h"
13 #include "util/header.h"
14 #include "util/kwork.h"
15 #include "util/debug.h"
16 #include "util/session.h"
17 #include "util/symbol.h"
18 #include "util/thread.h"
19 #include "util/string2.h"
20 #include "util/callchain.h"
21 #include "util/evsel_fprintf.h"
22 #include "util/util.h"
24 #include <subcmd/pager.h>
25 #include <subcmd/parse-options.h>
26 #include <traceevent/event-parse.h>
31 #include <linux/err.h>
32 #include <linux/time64.h>
33 #include <linux/zalloc.h>
36 * report header element widths
38 #define PRINT_CPU_WIDTH 4
39 #define PRINT_COUNT_WIDTH 9
40 #define PRINT_RUNTIME_WIDTH 10
41 #define PRINT_LATENCY_WIDTH 10
42 #define PRINT_TIMESTAMP_WIDTH 17
43 #define PRINT_KWORK_NAME_WIDTH 30
44 #define RPINT_DECIMAL_WIDTH 3
45 #define PRINT_BRACKETPAIR_WIDTH 2
46 #define PRINT_TIME_UNIT_SEC_WIDTH 2
47 #define PRINT_TIME_UNIT_MESC_WIDTH 3
48 #define PRINT_RUNTIME_HEADER_WIDTH (PRINT_RUNTIME_WIDTH + PRINT_TIME_UNIT_MESC_WIDTH)
49 #define PRINT_LATENCY_HEADER_WIDTH (PRINT_LATENCY_WIDTH + PRINT_TIME_UNIT_MESC_WIDTH)
50 #define PRINT_TIMEHIST_CPU_WIDTH (PRINT_CPU_WIDTH + PRINT_BRACKETPAIR_WIDTH)
51 #define PRINT_TIMESTAMP_HEADER_WIDTH (PRINT_TIMESTAMP_WIDTH + PRINT_TIME_UNIT_SEC_WIDTH)
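/*
 * A sort dimension pairs a key name with its comparator; the keys
 * selected via --sort are chained into kwork->sort_list and applied
 * in order by work_cmp().
 */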
53 struct sort_dimension {
55 int (*cmp)(struct kwork_work *l, struct kwork_work *r);
56 struct list_head list;
59 static int id_cmp(struct kwork_work *l, struct kwork_work *r)
74 static int count_cmp(struct kwork_work *l, struct kwork_work *r)
76 if (l->nr_atoms > r->nr_atoms)
78 if (l->nr_atoms < r->nr_atoms)
84 static int runtime_cmp(struct kwork_work *l, struct kwork_work *r)
86 if (l->total_runtime > r->total_runtime)
88 if (l->total_runtime < r->total_runtime)
94 static int max_runtime_cmp(struct kwork_work *l, struct kwork_work *r)
96 if (l->max_runtime > r->max_runtime)
98 if (l->max_runtime < r->max_runtime)
104 static int avg_latency_cmp(struct kwork_work *l, struct kwork_work *r)
113 avgl = l->total_latency / l->nr_atoms;
114 avgr = r->total_latency / r->nr_atoms;
124 static int max_latency_cmp(struct kwork_work *l, struct kwork_work *r)
126 if (l->max_latency > r->max_latency)
128 if (l->max_latency < r->max_latency)
134 static int sort_dimension__add(struct perf_kwork *kwork __maybe_unused,
135 const char *tok, struct list_head *list)
138 static struct sort_dimension max_sort_dimension = {
140 .cmp = max_runtime_cmp,
142 static struct sort_dimension id_sort_dimension = {
146 static struct sort_dimension runtime_sort_dimension = {
150 static struct sort_dimension count_sort_dimension = {
154 static struct sort_dimension avg_sort_dimension = {
156 .cmp = avg_latency_cmp,
158 struct sort_dimension *available_sorts[] = {
161 &count_sort_dimension,
162 &runtime_sort_dimension,
166 if (kwork->report == KWORK_REPORT_LATENCY)
167 max_sort_dimension.cmp = max_latency_cmp;
169 for (i = 0; i < ARRAY_SIZE(available_sorts); i++) {
170 if (!strcmp(available_sorts[i]->name, tok)) {
171 list_add_tail(&available_sorts[i]->list, list);
179 static void setup_sorting(struct perf_kwork *kwork,
180 const struct option *options,
181 const char * const usage_msg[])
183 char *tmp, *tok, *str = strdup(kwork->sort_order);
185 for (tok = strtok_r(str, ", ", &tmp);
186 tok; tok = strtok_r(NULL, ", ", &tmp)) {
187 if (sort_dimension__add(kwork, tok, &kwork->sort_list) < 0)
188 usage_with_options_msg(usage_msg, options,
189 "Unknown --sort key: `%s'", tok);
192 pr_debug("Sort order: %s\n", kwork->sort_order);
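/*
 * Atoms are allocated from pages of NR_ATOM_PER_PAGE entries tracked
 * by a per-page bitmap; a new page is zalloc'd and appended to
 * kwork->atom_page_list only when every existing page is full.
 */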
196 static struct kwork_atom *atom_new(struct perf_kwork *kwork,
197 struct perf_sample *sample)
200 struct kwork_atom_page *page;
201 struct kwork_atom *atom = NULL;
203 list_for_each_entry(page, &kwork->atom_page_list, list) {
204 if (!bitmap_full(page->bitmap, NR_ATOM_PER_PAGE)) {
205 i = find_first_zero_bit(page->bitmap, NR_ATOM_PER_PAGE);
206 BUG_ON(i >= NR_ATOM_PER_PAGE);
207 atom = &page->atoms[i];
215 page = zalloc(sizeof(*page));
217 pr_err("Failed to zalloc kwork atom page\n");
222 atom = &page->atoms[0];
223 list_add_tail(&page->list, &kwork->atom_page_list);
226 __set_bit(i, page->bitmap);
227 atom->time = sample->time;
229 atom->page_addr = page;
230 atom->bit_inpage = i;
234 static void atom_free(struct kwork_atom *atom)
236 if (atom->prev != NULL)
237 atom_free(atom->prev);
239 __clear_bit(atom->bit_inpage,
240 ((struct kwork_atom_page *)atom->page_addr)->bitmap);
243 static void atom_del(struct kwork_atom *atom)
245 list_del(&atom->list);
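/*
 * Each kwork class keeps its works in an rbtree ordered by the
 * comparison keys in cmp_id (the "id" dimension by default);
 * work_findnew() below looks a key up and inserts a new node on a miss.
 */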
249 static int work_cmp(struct list_head *list,
250 struct kwork_work *l, struct kwork_work *r)
253 struct sort_dimension *sort;
255 BUG_ON(list_empty(list));
257 list_for_each_entry(sort, list, list) {
258 ret = sort->cmp(l, r);
266 static struct kwork_work *work_search(struct rb_root_cached *root,
267 struct kwork_work *key,
268 struct list_head *sort_list)
271 struct kwork_work *work;
272 struct rb_node *node = root->rb_root.rb_node;
275 work = container_of(node, struct kwork_work, node);
276 cmp = work_cmp(sort_list, key, work);
278 node = node->rb_left;
280 node = node->rb_right;
282 if (work->name == NULL)
283 work->name = key->name;
290 static void work_insert(struct rb_root_cached *root,
291 struct kwork_work *key, struct list_head *sort_list)
294 bool leftmost = true;
295 struct kwork_work *cur;
296 struct rb_node **new = &(root->rb_root.rb_node), *parent = NULL;
299 cur = container_of(*new, struct kwork_work, node);
301 cmp = work_cmp(sort_list, key, cur);
304 new = &((*new)->rb_left);
306 new = &((*new)->rb_right);
311 rb_link_node(&key->node, parent, new);
312 rb_insert_color_cached(&key->node, root, leftmost);
315 static struct kwork_work *work_new(struct kwork_work *key)
318 struct kwork_work *work = zalloc(sizeof(*work));
321 pr_err("Failed to zalloc kwork work\n");
325 for (i = 0; i < KWORK_TRACE_MAX; i++)
326 INIT_LIST_HEAD(&work->atom_list[i]);
329 work->cpu = key->cpu;
330 work->name = key->name;
331 work->class = key->class;
335 static struct kwork_work *work_findnew(struct rb_root_cached *root,
336 struct kwork_work *key,
337 struct list_head *sort_list)
339 struct kwork_work *work = work_search(root, key, sort_list);
344 work = work_new(key);
346 work_insert(root, work, sort_list);
351 static void profile_update_timespan(struct perf_kwork *kwork,
352 struct perf_sample *sample)
357 if ((kwork->timestart == 0) || (kwork->timestart > sample->time))
358 kwork->timestart = sample->time;
360 if (kwork->timeend < sample->time)
361 kwork->timeend = sample->time;
364 static bool profile_event_match(struct perf_kwork *kwork,
365 struct kwork_work *work,
366 struct perf_sample *sample)
369 u64 time = sample->time;
370 struct perf_time_interval *ptime = &kwork->ptime;
372 if ((kwork->cpu_list != NULL) && !test_bit(cpu, kwork->cpu_bitmap))
375 if (((ptime->start != 0) && (ptime->start > time)) ||
376 ((ptime->end != 0) && (ptime->end < time)))
379 if ((kwork->profile_name != NULL) &&
380 (work->name != NULL) &&
381 (strcmp(work->name, kwork->profile_name) != 0))
384 profile_update_timespan(kwork, sample);
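/*
 * work_push_atom(): record a src_type event as a new atom on the
 * work's atom list; if a pending dst_type atom exists (e.g. a raise
 * seen before this entry), unlink it and chain it via atom->prev so
 * the delay can be reported later.
 */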
388 static int work_push_atom(struct perf_kwork *kwork,
389 struct kwork_class *class,
390 enum kwork_trace_type src_type,
391 enum kwork_trace_type dst_type,
393 struct perf_sample *sample,
394 struct machine *machine,
395 struct kwork_work **ret_work)
397 struct kwork_atom *atom, *dst_atom;
398 struct kwork_work *work, key;
400 BUG_ON(class->work_init == NULL);
401 class->work_init(class, &key, evsel, sample, machine);
403 atom = atom_new(kwork, sample);
407 work = work_findnew(&class->work_root, &key, &kwork->cmp_id);
413 if (!profile_event_match(kwork, work, sample))
416 if (dst_type < KWORK_TRACE_MAX) {
417 dst_atom = list_last_entry_or_null(&work->atom_list[dst_type],
418 struct kwork_atom, list);
419 if (dst_atom != NULL) {
420 atom->prev = dst_atom;
421 list_del(&dst_atom->list);
425 if (ret_work != NULL)
428 list_add_tail(&atom->list, &work->atom_list[src_type]);
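/*
 * work_pop_atom(): fetch the most recent pending dst_type atom to
 * pair with this src_type event; when none is queued, a fresh atom is
 * parked on the src_type list instead and NULL is returned.
 */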
433 static struct kwork_atom *work_pop_atom(struct perf_kwork *kwork,
434 struct kwork_class *class,
435 enum kwork_trace_type src_type,
436 enum kwork_trace_type dst_type,
438 struct perf_sample *sample,
439 struct machine *machine,
440 struct kwork_work **ret_work)
442 struct kwork_atom *atom, *src_atom;
443 struct kwork_work *work, key;
445 BUG_ON(class->work_init == NULL);
446 class->work_init(class, &key, evsel, sample, machine);
448 work = work_findnew(&class->work_root, &key, &kwork->cmp_id);
449 if (ret_work != NULL)
455 if (!profile_event_match(kwork, work, sample))
458 atom = list_last_entry_or_null(&work->atom_list[dst_type],
459 struct kwork_atom, list);
463 src_atom = atom_new(kwork, sample);
464 if (src_atom != NULL)
465 list_add_tail(&src_atom->list, &work->atom_list[src_type]);
467 if (ret_work != NULL)
474 static void report_update_exit_event(struct kwork_work *work,
475 struct kwork_atom *atom,
476 struct perf_sample *sample)
479 u64 exit_time = sample->time;
480 u64 entry_time = atom->time;
482 if ((entry_time != 0) && (exit_time >= entry_time)) {
483 delta = exit_time - entry_time;
484 if ((delta > work->max_runtime) ||
485 (work->max_runtime == 0)) {
486 work->max_runtime = delta;
487 work->max_runtime_start = entry_time;
488 work->max_runtime_end = exit_time;
490 work->total_runtime += delta;
495 static int report_entry_event(struct perf_kwork *kwork,
496 struct kwork_class *class,
498 struct perf_sample *sample,
499 struct machine *machine)
501 return work_push_atom(kwork, class, KWORK_TRACE_ENTRY,
502 KWORK_TRACE_MAX, evsel, sample,
506 static int report_exit_event(struct perf_kwork *kwork,
507 struct kwork_class *class,
509 struct perf_sample *sample,
510 struct machine *machine)
512 struct kwork_atom *atom = NULL;
513 struct kwork_work *work = NULL;
515 atom = work_pop_atom(kwork, class, KWORK_TRACE_EXIT,
516 KWORK_TRACE_ENTRY, evsel, sample,
522 report_update_exit_event(work, atom, sample);
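/*
 * Latency is measured from the raise atom's timestamp to the entry
 * sample; per-work maximum and total latency are accumulated here.
 */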
529 static void latency_update_entry_event(struct kwork_work *work,
530 struct kwork_atom *atom,
531 struct perf_sample *sample)
534 u64 entry_time = sample->time;
535 u64 raise_time = atom->time;
537 if ((raise_time != 0) && (entry_time >= raise_time)) {
538 delta = entry_time - raise_time;
539 if ((delta > work->max_latency) ||
540 (work->max_latency == 0)) {
541 work->max_latency = delta;
542 work->max_latency_start = raise_time;
543 work->max_latency_end = entry_time;
545 work->total_latency += delta;
550 static int latency_raise_event(struct perf_kwork *kwork,
551 struct kwork_class *class,
553 struct perf_sample *sample,
554 struct machine *machine)
556 return work_push_atom(kwork, class, KWORK_TRACE_RAISE,
557 KWORK_TRACE_MAX, evsel, sample,
561 static int latency_entry_event(struct perf_kwork *kwork,
562 struct kwork_class *class,
564 struct perf_sample *sample,
565 struct machine *machine)
567 struct kwork_atom *atom = NULL;
568 struct kwork_work *work = NULL;
570 atom = work_pop_atom(kwork, class, KWORK_TRACE_ENTRY,
571 KWORK_TRACE_RAISE, evsel, sample,
577 latency_update_entry_event(work, atom, sample);
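/*
 * Resolve and cache the sample's callchain on the TLS cursor so
 * timehist_print_event() can print it; softirq wrapper frames such as
 * __do_softirq are skipped while walking the cursor.
 */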
584 static void timehist_save_callchain(struct perf_kwork *kwork,
585 struct perf_sample *sample,
587 struct machine *machine)
590 struct thread *thread;
591 struct callchain_cursor_node *node;
592 struct callchain_cursor *cursor;
594 if (!kwork->show_callchain || sample->callchain == NULL)
597 /* want main thread for process - has maps */
598 thread = machine__findnew_thread(machine, sample->pid, sample->pid);
599 if (thread == NULL) {
600 pr_debug("Failed to get thread for pid %d\n", sample->pid);
604 cursor = get_tls_callchain_cursor();
606 if (thread__resolve_callchain(thread, cursor, evsel, sample,
607 NULL, NULL, kwork->max_stack + 2) != 0) {
608 pr_debug("Failed to resolve callchain, skipping\n");
612 callchain_cursor_commit(cursor);
615 node = callchain_cursor_current(cursor);
621 if (!strcmp(sym->name, "__softirqentry_text_start") ||
622 !strcmp(sym->name, "__do_softirq"))
626 callchain_cursor_advance(cursor);
633 static void timehist_print_event(struct perf_kwork *kwork,
634 struct kwork_work *work,
635 struct kwork_atom *atom,
636 struct perf_sample *sample,
637 struct addr_location *al)
639 char entrytime[32], exittime[32];
640 char kwork_name[PRINT_KWORK_NAME_WIDTH];
645 timestamp__scnprintf_usec(atom->time,
646 entrytime, sizeof(entrytime));
647 printf(" %*s ", PRINT_TIMESTAMP_WIDTH, entrytime);
652 timestamp__scnprintf_usec(sample->time,
653 exittime, sizeof(exittime));
654 printf(" %*s ", PRINT_TIMESTAMP_WIDTH, exittime);
659 printf(" [%0*d] ", PRINT_CPU_WIDTH, work->cpu);
664 if (work->class && work->class->work_name) {
665 work->class->work_name(work, kwork_name,
666 PRINT_KWORK_NAME_WIDTH);
667 printf(" %-*s ", PRINT_KWORK_NAME_WIDTH, kwork_name);
669 printf(" %-*s ", PRINT_KWORK_NAME_WIDTH, "");
675 PRINT_RUNTIME_WIDTH, RPINT_DECIMAL_WIDTH,
676 (double)(sample->time - atom->time) / NSEC_PER_MSEC);
681 if (atom->prev != NULL)
682 printf(" %*.*f ", PRINT_LATENCY_WIDTH, RPINT_DECIMAL_WIDTH,
683 (double)(atom->time - atom->prev->time) / NSEC_PER_MSEC);
685 printf(" %*s ", PRINT_LATENCY_WIDTH, " ");
690 if (kwork->show_callchain) {
691 struct callchain_cursor *cursor = get_tls_callchain_cursor();
698 sample__fprintf_sym(sample, al, 0,
699 EVSEL__PRINT_SYM | EVSEL__PRINT_ONELINE |
700 EVSEL__PRINT_CALLCHAIN_ARROW |
701 EVSEL__PRINT_SKIP_IGNORED,
702 cursor, symbol_conf.bt_stop_list,
709 static int timehist_raise_event(struct perf_kwork *kwork,
710 struct kwork_class *class,
712 struct perf_sample *sample,
713 struct machine *machine)
715 return work_push_atom(kwork, class, KWORK_TRACE_RAISE,
716 KWORK_TRACE_MAX, evsel, sample,
720 static int timehist_entry_event(struct perf_kwork *kwork,
721 struct kwork_class *class,
723 struct perf_sample *sample,
724 struct machine *machine)
727 struct kwork_work *work = NULL;
729 ret = work_push_atom(kwork, class, KWORK_TRACE_ENTRY,
730 KWORK_TRACE_RAISE, evsel, sample,
736 timehist_save_callchain(kwork, sample, evsel, machine);
741 static int timehist_exit_event(struct perf_kwork *kwork,
742 struct kwork_class *class,
744 struct perf_sample *sample,
745 struct machine *machine)
747 struct kwork_atom *atom = NULL;
748 struct kwork_work *work = NULL;
749 struct addr_location al;
752 addr_location__init(&al);
753 if (machine__resolve(machine, &al, sample) < 0) {
754 pr_debug("Problem processing event, skipping it\n");
759 atom = work_pop_atom(kwork, class, KWORK_TRACE_EXIT,
760 KWORK_TRACE_ENTRY, evsel, sample,
769 timehist_print_event(kwork, work, atom, sample, &al);
774 addr_location__exit(&al);
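/*
 * Per-class tracepoint glue: each kwork class wires its tracepoints
 * to the generic raise/entry/exit handlers selected in
 * perf_kwork__check_config() via kwork->tp_handler.
 */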
778 static struct kwork_class kwork_irq;
779 static int process_irq_handler_entry_event(struct perf_tool *tool,
781 struct perf_sample *sample,
782 struct machine *machine)
784 struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);
786 if (kwork->tp_handler->entry_event)
787 return kwork->tp_handler->entry_event(kwork, &kwork_irq,
788 evsel, sample, machine);
792 static int process_irq_handler_exit_event(struct perf_tool *tool,
794 struct perf_sample *sample,
795 struct machine *machine)
797 struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);
799 if (kwork->tp_handler->exit_event)
800 return kwork->tp_handler->exit_event(kwork, &kwork_irq,
801 evsel, sample, machine);
805 const struct evsel_str_handler irq_tp_handlers[] = {
806 { "irq:irq_handler_entry", process_irq_handler_entry_event, },
807 { "irq:irq_handler_exit", process_irq_handler_exit_event, },
810 static int irq_class_init(struct kwork_class *class,
811 struct perf_session *session)
813 if (perf_session__set_tracepoints_handlers(session, irq_tp_handlers)) {
814 pr_err("Failed to set irq tracepoint handlers\n");
818 class->work_root = RB_ROOT_CACHED;
822 static void irq_work_init(struct kwork_class *class,
823 struct kwork_work *work,
825 struct perf_sample *sample,
826 struct machine *machine __maybe_unused)
829 work->cpu = sample->cpu;
830 work->id = evsel__intval(evsel, sample, "irq");
831 work->name = evsel__strval(evsel, sample, "name");
834 static void irq_work_name(struct kwork_work *work, char *buf, int len)
836 snprintf(buf, len, "%s:%" PRIu64 "", work->name, work->id);
839 static struct kwork_class kwork_irq = {
841 .type = KWORK_CLASS_IRQ,
843 .tp_handlers = irq_tp_handlers,
844 .class_init = irq_class_init,
845 .work_init = irq_work_init,
846 .work_name = irq_work_name,
849 static struct kwork_class kwork_softirq;
850 static int process_softirq_raise_event(struct perf_tool *tool,
852 struct perf_sample *sample,
853 struct machine *machine)
855 struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);
857 if (kwork->tp_handler->raise_event)
858 return kwork->tp_handler->raise_event(kwork, &kwork_softirq,
859 evsel, sample, machine);
864 static int process_softirq_entry_event(struct perf_tool *tool,
866 struct perf_sample *sample,
867 struct machine *machine)
869 struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);
871 if (kwork->tp_handler->entry_event)
872 return kwork->tp_handler->entry_event(kwork, &kwork_softirq,
873 evsel, sample, machine);
878 static int process_softirq_exit_event(struct perf_tool *tool,
880 struct perf_sample *sample,
881 struct machine *machine)
883 struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);
885 if (kwork->tp_handler->exit_event)
886 return kwork->tp_handler->exit_event(kwork, &kwork_softirq,
887 evsel, sample, machine);
892 const struct evsel_str_handler softirq_tp_handlers[] = {
893 { "irq:softirq_raise", process_softirq_raise_event, },
894 { "irq:softirq_entry", process_softirq_entry_event, },
895 { "irq:softirq_exit", process_softirq_exit_event, },
898 static int softirq_class_init(struct kwork_class *class,
899 struct perf_session *session)
901 if (perf_session__set_tracepoints_handlers(session,
902 softirq_tp_handlers)) {
903 pr_err("Failed to set softirq tracepoint handlers\n");
907 class->work_root = RB_ROOT_CACHED;
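/*
 * Translate the softirq "vec" number into a name (e.g. TIMER, NET_RX)
 * by walking the symbolic print format of the tracepoint.
 */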
911 static char *evsel__softirq_name(struct evsel *evsel, u64 num)
915 struct tep_print_flag_sym *sym = NULL;
916 struct tep_print_arg *args = evsel->tp_format->print_fmt.args;
918 if ((args == NULL) || (args->next == NULL))
921 /* skip softirq field: "REC->vec" */
922 for (sym = args->next->symbol.symbols; sym != NULL; sym = sym->next) {
923 if ((eval_flag(sym->value) == (unsigned long long)num) &&
924 (strlen(sym->str) != 0)) {
933 name = strdup(sym->str);
935 pr_err("Failed to copy symbol name\n");
941 static void softirq_work_init(struct kwork_class *class,
942 struct kwork_work *work,
944 struct perf_sample *sample,
945 struct machine *machine __maybe_unused)
947 u64 num = evsel__intval(evsel, sample, "vec");
951 work->cpu = sample->cpu;
952 work->name = evsel__softirq_name(evsel, num);
955 static void softirq_work_name(struct kwork_work *work, char *buf, int len)
957 snprintf(buf, len, "(s)%s:%" PRIu64 "", work->name, work->id);
960 static struct kwork_class kwork_softirq = {
962 .type = KWORK_CLASS_SOFTIRQ,
964 .tp_handlers = softirq_tp_handlers,
965 .class_init = softirq_class_init,
966 .work_init = softirq_work_init,
967 .work_name = softirq_work_name,
970 static struct kwork_class kwork_workqueue;
971 static int process_workqueue_activate_work_event(struct perf_tool *tool,
973 struct perf_sample *sample,
974 struct machine *machine)
976 struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);
978 if (kwork->tp_handler->raise_event)
979 return kwork->tp_handler->raise_event(kwork, &kwork_workqueue,
980 evsel, sample, machine);
985 static int process_workqueue_execute_start_event(struct perf_tool *tool,
987 struct perf_sample *sample,
988 struct machine *machine)
990 struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);
992 if (kwork->tp_handler->entry_event)
993 return kwork->tp_handler->entry_event(kwork, &kwork_workqueue,
994 evsel, sample, machine);
999 static int process_workqueue_execute_end_event(struct perf_tool *tool,
1000 struct evsel *evsel,
1001 struct perf_sample *sample,
1002 struct machine *machine)
1004 struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);
1006 if (kwork->tp_handler->exit_event)
1007 return kwork->tp_handler->exit_event(kwork, &kwork_workqueue,
1008 evsel, sample, machine);
1013 const struct evsel_str_handler workqueue_tp_handlers[] = {
1014 { "workqueue:workqueue_activate_work", process_workqueue_activate_work_event, },
1015 { "workqueue:workqueue_execute_start", process_workqueue_execute_start_event, },
1016 { "workqueue:workqueue_execute_end", process_workqueue_execute_end_event, },
1019 static int workqueue_class_init(struct kwork_class *class,
1020 struct perf_session *session)
1022 if (perf_session__set_tracepoints_handlers(session,
1023 workqueue_tp_handlers)) {
1024 pr_err("Failed to set workqueue tracepoint handlers\n");
1028 class->work_root = RB_ROOT_CACHED;
1032 static void workqueue_work_init(struct kwork_class *class,
1033 struct kwork_work *work,
1034 struct evsel *evsel,
1035 struct perf_sample *sample,
1036 struct machine *machine)
1039 unsigned long long function_addr = evsel__intval(evsel,
1040 sample, "function");
1042 work->class = class;
1043 work->cpu = sample->cpu;
1044 work->id = evsel__intval(evsel, sample, "work");
1045 work->name = function_addr == 0 ? NULL :
1046 machine__resolve_kernel_addr(machine, &function_addr, &modp);
1049 static void workqueue_work_name(struct kwork_work *work, char *buf, int len)
1051 if (work->name != NULL)
1052 snprintf(buf, len, "(w)%s", work->name);
1054 snprintf(buf, len, "(w)0x%" PRIx64, work->id);
1057 static struct kwork_class kwork_workqueue = {
1058 .name = "workqueue",
1059 .type = KWORK_CLASS_WORKQUEUE,
1060 .nr_tracepoints = 3,
1061 .tp_handlers = workqueue_tp_handlers,
1062 .class_init = workqueue_class_init,
1063 .work_init = workqueue_work_init,
1064 .work_name = workqueue_work_name,
1067 static struct kwork_class *kwork_class_supported_list[KWORK_CLASS_MAX] = {
1068 [KWORK_CLASS_IRQ] = &kwork_irq,
1069 [KWORK_CLASS_SOFTIRQ] = &kwork_softirq,
1070 [KWORK_CLASS_WORKQUEUE] = &kwork_workqueue,
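/*
 * Report output: one row per work item, with runtime or latency
 * columns chosen by the report type set on the command line.
 */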
1073 static void print_separator(int len)
1075 printf(" %.*s\n", len, graph_dotted_line);
1078 static int report_print_work(struct perf_kwork *kwork, struct kwork_work *work)
1081 char kwork_name[PRINT_KWORK_NAME_WIDTH];
1082 char max_runtime_start[32], max_runtime_end[32];
1083 char max_latency_start[32], max_latency_end[32];
1090 if (work->class && work->class->work_name) {
1091 work->class->work_name(work, kwork_name,
1092 PRINT_KWORK_NAME_WIDTH);
1093 ret += printf(" %-*s |", PRINT_KWORK_NAME_WIDTH, kwork_name);
1095 ret += printf(" %-*s |", PRINT_KWORK_NAME_WIDTH, "");
1101 ret += printf(" %0*d |", PRINT_CPU_WIDTH, work->cpu);
1106 if (kwork->report == KWORK_REPORT_RUNTIME) {
1107 ret += printf(" %*.*f ms |",
1108 PRINT_RUNTIME_WIDTH, RPINT_DECIMAL_WIDTH,
1109 (double)work->total_runtime / NSEC_PER_MSEC);
1110 } else if (kwork->report == KWORK_REPORT_LATENCY) { // avg delay
1111 ret += printf(" %*.*f ms |",
1112 PRINT_LATENCY_WIDTH, RPINT_DECIMAL_WIDTH,
1113 (double)work->total_latency /
1114 work->nr_atoms / NSEC_PER_MSEC);
1120 ret += printf(" %*" PRIu64 " |", PRINT_COUNT_WIDTH, work->nr_atoms);
1123 * max runtime, max runtime start, max runtime end
1125 if (kwork->report == KWORK_REPORT_RUNTIME) {
1126 timestamp__scnprintf_usec(work->max_runtime_start,
1128 sizeof(max_runtime_start));
1129 timestamp__scnprintf_usec(work->max_runtime_end,
1131 sizeof(max_runtime_end));
1132 ret += printf(" %*.*f ms | %*s s | %*s s |",
1133 PRINT_RUNTIME_WIDTH, RPINT_DECIMAL_WIDTH,
1134 (double)work->max_runtime / NSEC_PER_MSEC,
1135 PRINT_TIMESTAMP_WIDTH, max_runtime_start,
1136 PRINT_TIMESTAMP_WIDTH, max_runtime_end);
1139 * max delay, max delay start, max delay end
1141 else if (kwork->report == KWORK_REPORT_LATENCY) {
1142 timestamp__scnprintf_usec(work->max_latency_start,
1144 sizeof(max_latency_start));
1145 timestamp__scnprintf_usec(work->max_latency_end,
1147 sizeof(max_latency_end));
1148 ret += printf(" %*.*f ms | %*s s | %*s s |",
1149 PRINT_LATENCY_WIDTH, RPINT_DECIMAL_WIDTH,
1150 (double)work->max_latency / NSEC_PER_MSEC,
1151 PRINT_TIMESTAMP_WIDTH, max_latency_start,
1152 PRINT_TIMESTAMP_WIDTH, max_latency_end);
1159 static int report_print_header(struct perf_kwork *kwork)
1164 ret = printf(" %-*s | %-*s |",
1165 PRINT_KWORK_NAME_WIDTH, "Kwork Name",
1166 PRINT_CPU_WIDTH, "Cpu");
1168 if (kwork->report == KWORK_REPORT_RUNTIME) {
1169 ret += printf(" %-*s |",
1170 PRINT_RUNTIME_HEADER_WIDTH, "Total Runtime");
1171 } else if (kwork->report == KWORK_REPORT_LATENCY) {
1172 ret += printf(" %-*s |",
1173 PRINT_LATENCY_HEADER_WIDTH, "Avg delay");
1176 ret += printf(" %-*s |", PRINT_COUNT_WIDTH, "Count");
1178 if (kwork->report == KWORK_REPORT_RUNTIME) {
1179 ret += printf(" %-*s | %-*s | %-*s |",
1180 PRINT_RUNTIME_HEADER_WIDTH, "Max runtime",
1181 PRINT_TIMESTAMP_HEADER_WIDTH, "Max runtime start",
1182 PRINT_TIMESTAMP_HEADER_WIDTH, "Max runtime end");
1183 } else if (kwork->report == KWORK_REPORT_LATENCY) {
1184 ret += printf(" %-*s | %-*s | %-*s |",
1185 PRINT_LATENCY_HEADER_WIDTH, "Max delay",
1186 PRINT_TIMESTAMP_HEADER_WIDTH, "Max delay start",
1187 PRINT_TIMESTAMP_HEADER_WIDTH, "Max delay end");
1191 print_separator(ret);
1195 static void timehist_print_header(void)
1200 printf(" %-*s %-*s %-*s %-*s %-*s %-*s\n",
1201 PRINT_TIMESTAMP_WIDTH, "Runtime start",
1202 PRINT_TIMESTAMP_WIDTH, "Runtime end",
1203 PRINT_TIMEHIST_CPU_WIDTH, "Cpu",
1204 PRINT_KWORK_NAME_WIDTH, "Kwork name",
1205 PRINT_RUNTIME_WIDTH, "Runtime",
1206 PRINT_RUNTIME_WIDTH, "Delaytime");
1211 printf(" %-*s %-*s %-*s %-*s %-*s %-*s\n",
1212 PRINT_TIMESTAMP_WIDTH, "",
1213 PRINT_TIMESTAMP_WIDTH, "",
1214 PRINT_TIMEHIST_CPU_WIDTH, "",
1215 PRINT_KWORK_NAME_WIDTH, "(TYPE)NAME:NUM",
1216 PRINT_RUNTIME_WIDTH, "(msec)",
1217 PRINT_RUNTIME_WIDTH, "(msec)");
1222 printf(" %.*s %.*s %.*s %.*s %.*s %.*s\n",
1223 PRINT_TIMESTAMP_WIDTH, graph_dotted_line,
1224 PRINT_TIMESTAMP_WIDTH, graph_dotted_line,
1225 PRINT_TIMEHIST_CPU_WIDTH, graph_dotted_line,
1226 PRINT_KWORK_NAME_WIDTH, graph_dotted_line,
1227 PRINT_RUNTIME_WIDTH, graph_dotted_line,
1228 PRINT_RUNTIME_WIDTH, graph_dotted_line);
1231 static void print_summary(struct perf_kwork *kwork)
1233 u64 time = kwork->timeend - kwork->timestart;
1235 printf(" Total count : %9" PRIu64 "\n", kwork->all_count);
1236 printf(" Total runtime (msec) : %9.3f (%.3f%% load average)\n",
1237 (double)kwork->all_runtime / NSEC_PER_MSEC,
1238 time == 0 ? 0 : (double)kwork->all_runtime / time);
1239 printf(" Total time span (msec) : %9.3f\n",
1240 (double)time / NSEC_PER_MSEC);
1243 static unsigned long long nr_list_entry(struct list_head *head)
1245 struct list_head *pos;
1246 unsigned long long n = 0;
1248 list_for_each(pos, head)
1254 static void print_skipped_events(struct perf_kwork *kwork)
1257 const char *const kwork_event_str[] = {
1258 [KWORK_TRACE_RAISE] = "raise",
1259 [KWORK_TRACE_ENTRY] = "entry",
1260 [KWORK_TRACE_EXIT] = "exit",
1263 if ((kwork->nr_skipped_events[KWORK_TRACE_MAX] != 0) &&
1264 (kwork->nr_events != 0)) {
1265 printf(" INFO: %.3f%% skipped events (%" PRIu64 " including ",
1266 (double)kwork->nr_skipped_events[KWORK_TRACE_MAX] /
1267 (double)kwork->nr_events * 100.0,
1268 kwork->nr_skipped_events[KWORK_TRACE_MAX]);
1270 for (i = 0; i < KWORK_TRACE_MAX; i++) {
1271 printf("%" PRIu64 " %s%s",
1272 kwork->nr_skipped_events[i],
1274 (i == KWORK_TRACE_MAX - 1) ? ")\n" : ", ");
1279 printf(" INFO: used %llu atom pages\n",
1280 nr_list_entry(&kwork->atom_page_list));
1283 static void print_bad_events(struct perf_kwork *kwork)
1285 if ((kwork->nr_lost_events != 0) && (kwork->nr_events != 0)) {
1286 printf(" INFO: %.3f%% lost events (%ld out of %ld, in %ld chunks)\n",
1287 (double)kwork->nr_lost_events /
1288 (double)kwork->nr_events * 100.0,
1289 kwork->nr_lost_events, kwork->nr_events,
1290 kwork->nr_lost_chunks);
1294 static void work_sort(struct perf_kwork *kwork, struct kwork_class *class)
1296 struct rb_node *node;
1297 struct kwork_work *data;
1298 struct rb_root_cached *root = &class->work_root;
1300 pr_debug("Sorting %s ...\n", class->name);
1302 node = rb_first_cached(root);
1306 rb_erase_cached(node, root);
1307 data = rb_entry(node, struct kwork_work, node);
1308 work_insert(&kwork->sorted_work_root,
1309 data, &kwork->sort_list);
1313 static void perf_kwork__sort(struct perf_kwork *kwork)
1315 struct kwork_class *class;
1317 list_for_each_entry(class, &kwork->class_list, list)
1318 work_sort(kwork, class);
1321 static int perf_kwork__check_config(struct perf_kwork *kwork,
1322 struct perf_session *session)
1325 struct evsel *evsel;
1326 struct kwork_class *class;
1328 static struct trace_kwork_handler report_ops = {
1329 .entry_event = report_entry_event,
1330 .exit_event = report_exit_event,
1332 static struct trace_kwork_handler latency_ops = {
1333 .raise_event = latency_raise_event,
1334 .entry_event = latency_entry_event,
1336 static struct trace_kwork_handler timehist_ops = {
1337 .raise_event = timehist_raise_event,
1338 .entry_event = timehist_entry_event,
1339 .exit_event = timehist_exit_event,
1342 switch (kwork->report) {
1343 case KWORK_REPORT_RUNTIME:
1344 kwork->tp_handler = &report_ops;
1346 case KWORK_REPORT_LATENCY:
1347 kwork->tp_handler = &latency_ops;
1349 case KWORK_REPORT_TIMEHIST:
1350 kwork->tp_handler = &timehist_ops;
1353 pr_debug("Invalid report type %d\n", kwork->report);
1357 list_for_each_entry(class, &kwork->class_list, list)
1358 if ((class->class_init != NULL) &&
1359 (class->class_init(class, session) != 0))
1362 if (kwork->cpu_list != NULL) {
1363 ret = perf_session__cpu_bitmap(session,
1367 pr_err("Invalid cpu bitmap\n");
1372 if (kwork->time_str != NULL) {
1373 ret = perf_time__parse_str(&kwork->ptime, kwork->time_str);
1375 pr_err("Invalid time span\n");
1380 list_for_each_entry(evsel, &session->evlist->core.entries, core.node) {
1381 if (kwork->show_callchain && !evsel__has_callchain(evsel)) {
1382 pr_debug("Samples do not have callchains\n");
1383 kwork->show_callchain = 0;
1384 symbol_conf.use_callchain = 0;
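/*
 * Open the recorded data file, validate the configuration and replay
 * the trace through the per-class tracepoint handlers.
 */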
1391 static int perf_kwork__read_events(struct perf_kwork *kwork)
1394 struct perf_session *session = NULL;
1396 struct perf_data data = {
1398 .mode = PERF_DATA_MODE_READ,
1399 .force = kwork->force,
1402 session = perf_session__new(&data, &kwork->tool);
1403 if (IS_ERR(session)) {
1404 pr_debug("Error creating perf session\n");
1405 return PTR_ERR(session);
1408 symbol__init(&session->header.env);
1410 if (perf_kwork__check_config(kwork, session) != 0)
1413 if (session->tevent.pevent &&
1414 tep_set_function_resolver(session->tevent.pevent,
1415 machine__resolve_kernel_addr,
1416 &session->machines.host) < 0) {
1417 pr_err("Failed to set libtraceevent function resolver\n");
1421 if (kwork->report == KWORK_REPORT_TIMEHIST)
1422 timehist_print_header();
1424 ret = perf_session__process_events(session);
1426 pr_debug("Failed to process events, error %d\n", ret);
1430 kwork->nr_events = session->evlist->stats.nr_events[0];
1431 kwork->nr_lost_events = session->evlist->stats.total_lost;
1432 kwork->nr_lost_chunks = session->evlist->stats.nr_events[PERF_RECORD_LOST];
1435 perf_session__delete(session);
1439 static void process_skipped_events(struct perf_kwork *kwork,
1440 struct kwork_work *work)
1443 unsigned long long count;
1445 for (i = 0; i < KWORK_TRACE_MAX; i++) {
1446 count = nr_list_entry(&work->atom_list[i]);
1447 kwork->nr_skipped_events[i] += count;
1448 kwork->nr_skipped_events[KWORK_TRACE_MAX] += count;
1452 struct kwork_work *perf_kwork_add_work(struct perf_kwork *kwork,
1453 struct kwork_class *class,
1454 struct kwork_work *key)
1456 struct kwork_work *work = NULL;
1458 work = work_new(key);
1462 work_insert(&class->work_root, work, &kwork->cmp_id);
1466 static void sig_handler(int sig)
1469 * Simply capture termination signal so that
1470 * the program can continue after pause() returns
1472 pr_debug("Capture signal %d\n", sig);
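/*
 * BPF mode: trace in-kernel until Ctrl+C, then read the aggregated
 * results back instead of processing a recorded file.
 */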
1475 static int perf_kwork__report_bpf(struct perf_kwork *kwork)
1479 signal(SIGINT, sig_handler);
1480 signal(SIGTERM, sig_handler);
1482 ret = perf_kwork__trace_prepare_bpf(kwork);
1486 printf("Starting trace, hit <Ctrl+C> to stop and report\n");
1488 perf_kwork__trace_start();
1491 * a simple pause, wait here for stop signal
1495 perf_kwork__trace_finish();
1497 perf_kwork__report_read_bpf(kwork);
1499 perf_kwork__report_cleanup_bpf();
1504 static int perf_kwork__report(struct perf_kwork *kwork)
1507 struct rb_node *next;
1508 struct kwork_work *work;
1511 ret = perf_kwork__report_bpf(kwork);
1513 ret = perf_kwork__read_events(kwork);
1518 perf_kwork__sort(kwork);
1522 ret = report_print_header(kwork);
1523 next = rb_first_cached(&kwork->sorted_work_root);
1525 work = rb_entry(next, struct kwork_work, node);
1526 process_skipped_events(kwork, work);
1528 if (work->nr_atoms != 0) {
1529 report_print_work(kwork, work);
1530 if (kwork->summary) {
1531 kwork->all_runtime += work->total_runtime;
1532 kwork->all_count += work->nr_atoms;
1535 next = rb_next(next);
1537 print_separator(ret);
1539 if (kwork->summary) {
1540 print_summary(kwork);
1541 print_separator(ret);
1544 print_bad_events(kwork);
1545 print_skipped_events(kwork);
1551 typedef int (*tracepoint_handler)(struct perf_tool *tool,
1552 struct evsel *evsel,
1553 struct perf_sample *sample,
1554 struct machine *machine);
1556 static int perf_kwork__process_tracepoint_sample(struct perf_tool *tool,
1557 union perf_event *event __maybe_unused,
1558 struct perf_sample *sample,
1559 struct evsel *evsel,
1560 struct machine *machine)
1564 if (evsel->handler != NULL) {
1565 tracepoint_handler f = evsel->handler;
1567 err = f(tool, evsel, sample, machine);
1573 static int perf_kwork__timehist(struct perf_kwork *kwork)
1576 * event handlers for timehist option
1578 kwork->tool.comm = perf_event__process_comm;
1579 kwork->tool.exit = perf_event__process_exit;
1580 kwork->tool.fork = perf_event__process_fork;
1581 kwork->tool.attr = perf_event__process_attr;
1582 kwork->tool.tracing_data = perf_event__process_tracing_data;
1583 kwork->tool.build_id = perf_event__process_build_id;
1584 kwork->tool.ordered_events = true;
1585 kwork->tool.ordering_requires_timestamps = true;
1586 symbol_conf.use_callchain = kwork->show_callchain;
1588 if (symbol__validate_sym_arguments()) {
1589 pr_err("Failed to validate sym arguments\n");
1595 return perf_kwork__read_events(kwork);
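/*
 * Parse the -k/--kwork list into kwork->class_list; with no list
 * given, all supported classes (irq, softirq, workqueue) are enabled.
 */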
1598 static void setup_event_list(struct perf_kwork *kwork,
1599 const struct option *options,
1600 const char * const usage_msg[])
1603 struct kwork_class *class;
1604 char *tmp, *tok, *str;
1606 if (kwork->event_list_str == NULL)
1607 goto null_event_list_str;
1609 str = strdup(kwork->event_list_str);
1610 for (tok = strtok_r(str, ", ", &tmp);
1611 tok; tok = strtok_r(NULL, ", ", &tmp)) {
1612 for (i = 0; i < KWORK_CLASS_MAX; i++) {
1613 class = kwork_class_supported_list[i];
1614 if (strcmp(tok, class->name) == 0) {
1615 list_add_tail(&class->list, &kwork->class_list);
1619 if (i == KWORK_CLASS_MAX) {
1620 usage_with_options_msg(usage_msg, options,
1621 "Unknown --event key: `%s'", tok);
1626 null_event_list_str:
1628 * configure all kwork event classes if not specified
1630 if (list_empty(&kwork->class_list)) {
1631 for (i = 0; i < KWORK_CLASS_MAX; i++) {
1632 list_add_tail(&kwork_class_supported_list[i]->list,
1633 &kwork->class_list);
1637 pr_debug("Config event list:");
1638 list_for_each_entry(class, &kwork->class_list, list)
1639 pr_debug(" %s", class->name);
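/*
 * Build and run a 'perf record' command line that enables the
 * tracepoints of every selected class, forwarding the remaining
 * arguments to cmd_record().
 */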
1643 static int perf_kwork__record(struct perf_kwork *kwork,
1644 int argc, const char **argv)
1646 const char **rec_argv;
1647 unsigned int rec_argc, i, j;
1648 struct kwork_class *class;
1650 const char *const record_args[] = {
1658 rec_argc = ARRAY_SIZE(record_args) + argc - 1;
1660 list_for_each_entry(class, &kwork->class_list, list)
1661 rec_argc += 2 * class->nr_tracepoints;
1663 rec_argv = calloc(rec_argc + 1, sizeof(char *));
1664 if (rec_argv == NULL)
1667 for (i = 0; i < ARRAY_SIZE(record_args); i++)
1668 rec_argv[i] = strdup(record_args[i]);
1670 list_for_each_entry(class, &kwork->class_list, list) {
1671 for (j = 0; j < class->nr_tracepoints; j++) {
1672 rec_argv[i++] = strdup("-e");
1673 rec_argv[i++] = strdup(class->tp_handlers[j].name);
1677 for (j = 1; j < (unsigned int)argc; j++, i++)
1678 rec_argv[i] = argv[j];
1680 BUG_ON(i != rec_argc);
1682 pr_debug("record comm: ");
1683 for (j = 0; j < rec_argc; j++)
1684 pr_debug("%s ", rec_argv[j]);
1687 return cmd_record(i, rec_argv);
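/*
 * Entry point: dispatch to record/report/latency/timehist after
 * common option parsing; report and latency share perf_kwork__report()
 * with different sort orders and handlers.
 */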
1690 int cmd_kwork(int argc, const char **argv)
1692 static struct perf_kwork kwork = {
1693 .class_list = LIST_HEAD_INIT(kwork.class_list),
1695 .mmap = perf_event__process_mmap,
1696 .mmap2 = perf_event__process_mmap2,
1697 .sample = perf_kwork__process_tracepoint_sample,
1699 .atom_page_list = LIST_HEAD_INIT(kwork.atom_page_list),
1700 .sort_list = LIST_HEAD_INIT(kwork.sort_list),
1701 .cmp_id = LIST_HEAD_INIT(kwork.cmp_id),
1702 .sorted_work_root = RB_ROOT_CACHED,
1704 .profile_name = NULL,
1708 .event_list_str = NULL,
1711 .show_callchain = false,
1716 .nr_lost_chunks = 0,
1717 .nr_lost_events = 0,
1720 .nr_skipped_events = { 0 },
1722 static const char default_report_sort_order[] = "runtime, max, count";
1723 static const char default_latency_sort_order[] = "avg, max, count";
1724 const struct option kwork_options[] = {
1725 OPT_INCR('v', "verbose", &verbose,
1726 "be more verbose (show symbol address, etc)"),
1727 OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
1728 "dump raw trace in ASCII"),
1729 OPT_STRING('k', "kwork", &kwork.event_list_str, "kwork",
1730 "list of kwork to profile (irq, softirq, workqueue, etc)"),
1731 OPT_BOOLEAN('f', "force", &kwork.force, "don't complain, do it"),
1734 const struct option report_options[] = {
1735 OPT_STRING('s', "sort", &kwork.sort_order, "key[,key2...]",
1736 "sort by key(s): runtime, max, count"),
1737 OPT_STRING('C', "cpu", &kwork.cpu_list, "cpu",
1738 "list of cpus to profile"),
1739 OPT_STRING('n', "name", &kwork.profile_name, "name",
1740 "event name to profile"),
1741 OPT_STRING(0, "time", &kwork.time_str, "str",
1742 "Time span for analysis (start,stop)"),
1743 OPT_STRING('i', "input", &input_name, "file",
1745 OPT_BOOLEAN('S', "with-summary", &kwork.summary,
1746 "Show summary with statistics"),
1747 #ifdef HAVE_BPF_SKEL
1748 OPT_BOOLEAN('b', "use-bpf", &kwork.use_bpf,
1749 "Use BPF to measure kwork runtime"),
1751 OPT_PARENT(kwork_options)
1753 const struct option latency_options[] = {
1754 OPT_STRING('s', "sort", &kwork.sort_order, "key[,key2...]",
1755 "sort by key(s): avg, max, count"),
1756 OPT_STRING('C', "cpu", &kwork.cpu_list, "cpu",
1757 "list of cpus to profile"),
1758 OPT_STRING('n', "name", &kwork.profile_name, "name",
1759 "event name to profile"),
1760 OPT_STRING(0, "time", &kwork.time_str, "str",
1761 "Time span for analysis (start,stop)"),
1762 OPT_STRING('i', "input", &input_name, "file",
1764 #ifdef HAVE_BPF_SKEL
1765 OPT_BOOLEAN('b', "use-bpf", &kwork.use_bpf,
1766 "Use BPF to measure kwork latency"),
1768 OPT_PARENT(kwork_options)
1770 const struct option timehist_options[] = {
1771 OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
1772 "file", "vmlinux pathname"),
1773 OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name,
1774 "file", "kallsyms pathname"),
1775 OPT_BOOLEAN('g', "call-graph", &kwork.show_callchain,
1776 "Display call chains if present"),
1777 OPT_UINTEGER(0, "max-stack", &kwork.max_stack,
1778 "Maximum number of functions to display backtrace."),
1779 OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory",
1780 "Look for files with symbols relative to this directory"),
1781 OPT_STRING(0, "time", &kwork.time_str, "str",
1782 "Time span for analysis (start,stop)"),
1783 OPT_STRING('C', "cpu", &kwork.cpu_list, "cpu",
1784 "list of cpus to profile"),
1785 OPT_STRING('n', "name", &kwork.profile_name, "name",
1786 "event name to profile"),
1787 OPT_STRING('i', "input", &input_name, "file",
1789 OPT_PARENT(kwork_options)
1791 const char *kwork_usage[] = {
1795 const char * const report_usage[] = {
1796 "perf kwork report [<options>]",
1799 const char * const latency_usage[] = {
1800 "perf kwork latency [<options>]",
1803 const char * const timehist_usage[] = {
1804 "perf kwork timehist [<options>]",
1807 const char *const kwork_subcommands[] = {
1808 "record", "report", "latency", "timehist", NULL
1811 argc = parse_options_subcommand(argc, argv, kwork_options,
1812 kwork_subcommands, kwork_usage,
1813 PARSE_OPT_STOP_AT_NON_OPTION);
1815 usage_with_options(kwork_usage, kwork_options);
1817 setup_event_list(&kwork, kwork_options, kwork_usage);
1818 sort_dimension__add(&kwork, "id", &kwork.cmp_id);
1820 if (strlen(argv[0]) > 2 && strstarts("record", argv[0]))
1821 return perf_kwork__record(&kwork, argc, argv);
1822 else if (strlen(argv[0]) > 2 && strstarts("report", argv[0])) {
1823 kwork.sort_order = default_report_sort_order;
1825 argc = parse_options(argc, argv, report_options, report_usage, 0);
1827 usage_with_options(report_usage, report_options);
1829 kwork.report = KWORK_REPORT_RUNTIME;
1830 setup_sorting(&kwork, report_options, report_usage);
1831 return perf_kwork__report(&kwork);
1832 } else if (strlen(argv[0]) > 2 && strstarts("latency", argv[0])) {
1833 kwork.sort_order = default_latency_sort_order;
1835 argc = parse_options(argc, argv, latency_options, latency_usage, 0);
1837 usage_with_options(latency_usage, latency_options);
1839 kwork.report = KWORK_REPORT_LATENCY;
1840 setup_sorting(&kwork, latency_options, latency_usage);
1841 return perf_kwork__report(&kwork);
1842 } else if (strlen(argv[0]) > 2 && strstarts("timehist", argv[0])) {
1844 argc = parse_options(argc, argv, timehist_options, timehist_usage, 0);
1846 usage_with_options(timehist_usage, timehist_options);
1848 kwork.report = KWORK_REPORT_TIMEHIST;
1849 return perf_kwork__timehist(&kwork);
1851 usage_with_options(kwork_usage, kwork_options);