// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include "builtin.h"
#include "perf.h"

#include "util/evlist.h" // for struct evsel_str_handler
#include "util/evsel.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/header.h"
#include "util/target.h"
#include "util/callchain.h"
#include "util/lock-contention.h"
#include "util/bpf_skel/lock_data.h"

#include <subcmd/pager.h>
#include <subcmd/parse-options.h>
#include "util/trace-event.h"
#include "util/tracepoint.h"

#include "util/debug.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/data.h"
#include "util/string2.h"
#include "util/map.h"
#include "util/util.h"

#include <signal.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/prctl.h>
#include <semaphore.h>
#include <math.h>
#include <limits.h>

#include <linux/list.h>
#include <linux/hash.h>
#include <linux/kernel.h>
#include <linux/zalloc.h>
#include <linux/err.h>
#include <linux/stringify.h>
static struct perf_session *session;
static struct target target;
/* based on kernel/lockdep.c */
#define LOCKHASH_BITS		12
#define LOCKHASH_SIZE		(1UL << LOCKHASH_BITS)

static struct hlist_head lockhash_table[LOCKHASH_SIZE];

#define __lockhashfn(key)	hash_long((unsigned long)key, LOCKHASH_BITS)
#define lockhashentry(key)	(lockhash_table + __lockhashfn((key)))
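
/*
 * Note: the table is keyed by whatever the current aggregation mode uses as
 * the "key" -- a lock instance address, a tid, or a callchain id -- and each
 * of the 4096 buckets chains lock_stat entries through their hash_entry
 * member.
 */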

static struct rb_root thread_stats;

static bool combine_locks;
static bool show_thread_stats;
static bool show_lock_addrs;
static bool use_bpf;
static unsigned long bpf_map_entries = 10240;
static int max_stack_depth = CONTENTION_STACK_DEPTH;
static int stack_skip = CONTENTION_STACK_SKIP;
static int print_nr_entries = INT_MAX / 2;
static LIST_HEAD(callstack_filters);

struct callstack_filter {
	struct list_head list;
	char name[];
};

static struct lock_filter filters;

static enum lock_aggr_mode aggr_mode = LOCK_AGGR_ADDR;

static bool needs_callstack(void)
{
	return verbose > 0 || !list_empty(&callstack_filters);
}

static struct thread_stat *thread_stat_find(u32 tid)
{
	struct rb_node *node;
	struct thread_stat *st;

	node = thread_stats.rb_node;
	while (node) {
		st = container_of(node, struct thread_stat, rb);
		if (st->tid == tid)
			return st;
		else if (tid < st->tid)
			node = node->rb_left;
		else
			node = node->rb_right;
	}

	return NULL;
}

static void thread_stat_insert(struct thread_stat *new)
{
	struct rb_node **rb = &thread_stats.rb_node;
	struct rb_node *parent = NULL;
	struct thread_stat *p;

	while (*rb) {
		p = container_of(*rb, struct thread_stat, rb);
		parent = *rb;

		if (new->tid < p->tid)
			rb = &(*rb)->rb_left;
		else if (new->tid > p->tid)
			rb = &(*rb)->rb_right;
		else
			BUG_ON("inserting invalid thread_stat\n");
	}

	rb_link_node(&new->rb, parent, rb);
	rb_insert_color(&new->rb, &thread_stats);
}

static struct thread_stat *thread_stat_findnew_after_first(u32 tid)
{
	struct thread_stat *st;

	st = thread_stat_find(tid);
	if (st)
		return st;

	st = zalloc(sizeof(struct thread_stat));
	if (!st) {
		pr_err("memory allocation failed\n");
		return NULL;
	}

	st->tid = tid;
	INIT_LIST_HEAD(&st->seq_list);

	thread_stat_insert(st);

	return st;
}
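
/*
 * thread_stat_findnew is a function pointer: the very first call inserts
 * into the empty rb-tree without searching (the _first variant below), then
 * flips the pointer to the _after_first variant for all later lookups.
 */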
static struct thread_stat *thread_stat_findnew_first(u32 tid);
static struct thread_stat *(*thread_stat_findnew)(u32 tid) =
	thread_stat_findnew_first;

static struct thread_stat *thread_stat_findnew_first(u32 tid)
{
	struct thread_stat *st;

	st = zalloc(sizeof(struct thread_stat));
	if (!st) {
		pr_err("memory allocation failed\n");
		return NULL;
	}

	st->tid = tid;
	INIT_LIST_HEAD(&st->seq_list);

	rb_link_node(&st->rb, NULL, &thread_stats.rb_node);
	rb_insert_color(&st->rb, &thread_stats);

	thread_stat_findnew = thread_stat_findnew_after_first;
	return st;
}

/* build a simple key-compare function: returns whether one's member is bigger than two's */
#define SINGLE_KEY(member)						\
	static int lock_stat_key_ ## member(struct lock_stat *one,	\
					    struct lock_stat *two)	\
	{								\
		return one->member > two->member;			\
	}

SINGLE_KEY(nr_acquired)
SINGLE_KEY(nr_contended)
SINGLE_KEY(avg_wait_time)
SINGLE_KEY(wait_time_total)
SINGLE_KEY(wait_time_max)
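
/*
 * For reference, SINGLE_KEY(nr_acquired) expands to:
 *
 *	static int lock_stat_key_nr_acquired(struct lock_stat *one,
 *					     struct lock_stat *two)
 *	{
 *		return one->nr_acquired > two->nr_acquired;
 *	}
 */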

static int lock_stat_key_wait_time_min(struct lock_stat *one,
				       struct lock_stat *two)
{
	u64 s1 = one->wait_time_min;
	u64 s2 = two->wait_time_min;

	/* ULLONG_MAX means "never contended"; treat it as zero for sorting */
	if (s1 == ULLONG_MAX)
		s1 = 0;
	if (s2 == ULLONG_MAX)
		s2 = 0;
	return s1 > s2;
}

struct lock_key {
	/*
	 * name: the key name specified by the user;
	 *       it should be simpler than the raw member name,
	 *       e.g. nr_acquired -> acquired, wait_time_total -> wait_total
	 */
	const char *name;
	/* header: the string printed on the header line */
	const char *header;
	/* len: the printing width of the field */
	int len;
	/* key: a pointer to a function comparing two lock stats for sorting */
	int (*key)(struct lock_stat*, struct lock_stat*);
	/* print: a pointer to a function printing a given lock stat */
	void (*print)(struct lock_key*, struct lock_stat*);
	/* list: list entry to link this */
	struct list_head list;
};

static void lock_stat_key_print_time(unsigned long long nsec, int len)
{
	static const struct {
		float base;
		const char *unit;
	} table[] = {
		{ 1e9 * 3600, "h " },
		{ 1e9 * 60, "m " },
		{ 1e9, "s " },
		{ 1e6, "ms" },
		{ 1e3, "us" },
		{ 0, NULL },
	};

	for (int i = 0; table[i].unit; i++) {
		if (nsec < table[i].base)
			continue;

		pr_info("%*.2f %s", len - 3, nsec / table[i].base, table[i].unit);
		return;
	}

	pr_info("%*llu %s", len - 3, nsec, "ns");
}

#define PRINT_KEY(member)						\
static void lock_stat_key_print_ ## member(struct lock_key *key,	\
					   struct lock_stat *ls)	\
{									\
	pr_info("%*llu", key->len, (unsigned long long)ls->member);	\
}

#define PRINT_TIME(member)						\
static void lock_stat_key_print_ ## member(struct lock_key *key,	\
					   struct lock_stat *ls)	\
{									\
	lock_stat_key_print_time((unsigned long long)ls->member, key->len); \
}

PRINT_KEY(nr_acquired)
PRINT_KEY(nr_contended)
PRINT_TIME(avg_wait_time)
PRINT_TIME(wait_time_total)
PRINT_TIME(wait_time_max)

static void lock_stat_key_print_wait_time_min(struct lock_key *key,
					      struct lock_stat *ls)
{
	u64 wait_time = ls->wait_time_min;

	if (wait_time == ULLONG_MAX)
		wait_time = 0;

	lock_stat_key_print_time(wait_time, key->len);
}

static const char *sort_key = "acquired";

static int (*compare)(struct lock_stat *, struct lock_stat *);

static struct rb_root sorted; /* place to store intermediate data */
static struct rb_root result; /* place to store sorted data */

static LIST_HEAD(lock_keys);
static const char *output_fields;

#define DEF_KEY_LOCK(name, header, fn_suffix, len)			\
	{ #name, header, len, lock_stat_key_ ## fn_suffix, lock_stat_key_print_ ## fn_suffix, {} }
static struct lock_key report_keys[] = {
	DEF_KEY_LOCK(acquired, "acquired", nr_acquired, 10),
	DEF_KEY_LOCK(contended, "contended", nr_contended, 10),
	DEF_KEY_LOCK(avg_wait, "avg wait", avg_wait_time, 12),
	DEF_KEY_LOCK(wait_total, "total wait", wait_time_total, 12),
	DEF_KEY_LOCK(wait_max, "max wait", wait_time_max, 12),
	DEF_KEY_LOCK(wait_min, "min wait", wait_time_min, 12),

	/* more complicated comparisons should go here */
	{ }
};

static struct lock_key contention_keys[] = {
	DEF_KEY_LOCK(contended, "contended", nr_contended, 10),
	DEF_KEY_LOCK(wait_total, "total wait", wait_time_total, 12),
	DEF_KEY_LOCK(wait_max, "max wait", wait_time_max, 12),
	DEF_KEY_LOCK(wait_min, "min wait", wait_time_min, 12),
	DEF_KEY_LOCK(avg_wait, "avg wait", avg_wait_time, 12),

	/* more complicated comparisons should go here */
	{ }
};

static int select_key(bool contention)
{
	int i;
	struct lock_key *keys = report_keys;

	if (contention)
		keys = contention_keys;

	for (i = 0; keys[i].name; i++) {
		if (!strcmp(keys[i].name, sort_key)) {
			compare = keys[i].key;

			/* selected key should be in the output fields */
			if (list_empty(&keys[i].list))
				list_add_tail(&keys[i].list, &lock_keys);

			return 0;
		}
	}

	pr_err("Unknown compare key: %s\n", sort_key);
	return -1;
}

static int add_output_field(bool contention, char *name)
{
	int i;
	struct lock_key *keys = report_keys;

	if (contention)
		keys = contention_keys;

	for (i = 0; keys[i].name; i++) {
		if (strcmp(keys[i].name, name))
			continue;

		/* prevent double link */
		if (list_empty(&keys[i].list))
			list_add_tail(&keys[i].list, &lock_keys);

		return 0;
	}

	pr_err("Unknown output field: %s\n", name);
	return -1;
}

static int setup_output_field(bool contention, const char *str)
{
	char *tok, *tmp, *orig;
	int i, ret = 0;
	struct lock_key *keys = report_keys;

	if (contention)
		keys = contention_keys;

	/* no output field given: use all of them */
	if (str == NULL) {
		for (i = 0; keys[i].name; i++)
			list_add_tail(&keys[i].list, &lock_keys);
		return 0;
	}

	for (i = 0; keys[i].name; i++)
		INIT_LIST_HEAD(&keys[i].list);

	orig = tmp = strdup(str);
	if (orig == NULL)
		return -ENOMEM;

	while ((tok = strsep(&tmp, ",")) != NULL) {
		ret = add_output_field(contention, tok);
		if (ret < 0)
			break;
	}
	free(orig);

	return ret;
}
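
/*
 * For example, "perf lock report -F acquired,wait_max" reaches
 * setup_output_field(false, "acquired,wait_max") and links exactly those
 * two report_keys entries into lock_keys, in that order.
 */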

static void combine_lock_stats(struct lock_stat *st)
{
	struct rb_node **rb = &sorted.rb_node;
	struct rb_node *parent = NULL;
	struct lock_stat *p;
	int ret;

	while (*rb) {
		p = container_of(*rb, struct lock_stat, rb);
		parent = *rb;

		if (st->name && p->name)
			ret = strcmp(st->name, p->name);
		else
			ret = !!st->name - !!p->name;

		if (ret == 0) {
			p->nr_acquired += st->nr_acquired;
			p->nr_contended += st->nr_contended;
			p->wait_time_total += st->wait_time_total;

			if (p->nr_contended)
				p->avg_wait_time = p->wait_time_total / p->nr_contended;

			if (p->wait_time_min > st->wait_time_min)
				p->wait_time_min = st->wait_time_min;
			if (p->wait_time_max < st->wait_time_max)
				p->wait_time_max = st->wait_time_max;

			p->broken |= st->broken;
			st->combined = 1;
			return;
		}

		if (ret < 0)
			rb = &(*rb)->rb_left;
		else
			rb = &(*rb)->rb_right;
	}

	rb_link_node(&st->rb, parent, rb);
	rb_insert_color(&st->rb, &sorted);
}

static void insert_to_result(struct lock_stat *st,
			     int (*bigger)(struct lock_stat *, struct lock_stat *))
{
	struct rb_node **rb = &result.rb_node;
	struct rb_node *parent = NULL;
	struct lock_stat *p;

	if (combine_locks && st->combined)
		return;

	while (*rb) {
		p = container_of(*rb, struct lock_stat, rb);
		parent = *rb;

		if (bigger(st, p))
			rb = &(*rb)->rb_left;
		else
			rb = &(*rb)->rb_right;
	}

	rb_link_node(&st->rb, parent, rb);
	rb_insert_color(&st->rb, &result);
}

/* returns the leftmost element of result, and erases it */
static struct lock_stat *pop_from_result(void)
{
	struct rb_node *node = result.rb_node;

	if (!node)
		return NULL;

	while (node->rb_left)
		node = node->rb_left;

	rb_erase(node, &result);
	return container_of(node, struct lock_stat, rb);
}

static struct lock_stat *lock_stat_find(u64 addr)
{
	struct hlist_head *entry = lockhashentry(addr);
	struct lock_stat *ret;

	hlist_for_each_entry(ret, entry, hash_entry) {
		if (ret->addr == addr)
			return ret;
	}
	return NULL;
}

static struct lock_stat *lock_stat_findnew(u64 addr, const char *name, int flags)
{
	struct hlist_head *entry = lockhashentry(addr);
	struct lock_stat *ret, *new;

	hlist_for_each_entry(ret, entry, hash_entry) {
		if (ret->addr == addr)
			return ret;
	}

	new = zalloc(sizeof(struct lock_stat));
	if (!new)
		goto alloc_failed;

	new->addr = addr;
	new->name = strdup(name);
	if (!new->name) {
		free(new);
		goto alloc_failed;
	}

	new->flags = flags;
	new->wait_time_min = ULLONG_MAX;

	hlist_add_head(&new->hash_entry, entry);
	return new;

alloc_failed:
	pr_err("memory allocation failed\n");
	return NULL;
}

struct trace_lock_handler {
	/* used with CONFIG_LOCKDEP */
	int (*acquire_event)(struct evsel *evsel,
			     struct perf_sample *sample);

	/* used with CONFIG_LOCKDEP && CONFIG_LOCK_STAT */
	int (*acquired_event)(struct evsel *evsel,
			      struct perf_sample *sample);

	/* used with CONFIG_LOCKDEP && CONFIG_LOCK_STAT */
	int (*contended_event)(struct evsel *evsel,
			       struct perf_sample *sample);

	/* used with CONFIG_LOCKDEP */
	int (*release_event)(struct evsel *evsel,
			     struct perf_sample *sample);

	/* used when CONFIG_LOCKDEP is off */
	int (*contention_begin_event)(struct evsel *evsel,
				      struct perf_sample *sample);

	/* used when CONFIG_LOCKDEP is off */
	int (*contention_end_event)(struct evsel *evsel,
				    struct perf_sample *sample);
};

static struct lock_seq_stat *get_seq(struct thread_stat *ts, u64 addr)
{
	struct lock_seq_stat *seq;

	list_for_each_entry(seq, &ts->seq_list, list) {
		if (seq->addr == addr)
			return seq;
	}

	seq = zalloc(sizeof(struct lock_seq_stat));
	if (!seq) {
		pr_err("memory allocation failed\n");
		return NULL;
	}
	seq->state = SEQ_STATE_UNINITIALIZED;
	seq->addr = addr;

	list_add(&seq->list, &ts->seq_list);
	return seq;
}

enum broken_state {
	BROKEN_ACQUIRE,
	BROKEN_ACQUIRED,
	BROKEN_CONTENDED,
	BROKEN_RELEASE,
	BROKEN_MAX,
};

static int bad_hist[BROKEN_MAX];
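
/*
 * Each (thread, lock address) pair walks a small state machine:
 * UNINITIALIZED -> ACQUIRING -> (CONTENDED ->) ACQUIRED -> RELEASED.
 * An event that arrives in an impossible order marks the lock "broken"
 * and bumps the matching bad_hist[] slot above.
 */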

static int get_key_by_aggr_mode_simple(u64 *key, u64 addr, u32 tid)
{
	switch (aggr_mode) {
	case LOCK_AGGR_ADDR:
		*key = addr;
		break;
	case LOCK_AGGR_TASK:
		*key = tid;
		break;
	case LOCK_AGGR_CALLER:
	default:
		pr_err("Invalid aggregation mode: %d\n", aggr_mode);
		return -EINVAL;
	}
	return 0;
}
static u64 callchain_id(struct evsel *evsel, struct perf_sample *sample);

static int get_key_by_aggr_mode(u64 *key, u64 addr, struct evsel *evsel,
				struct perf_sample *sample)
{
	if (aggr_mode == LOCK_AGGR_CALLER) {
		*key = callchain_id(evsel, sample);
		return 0;
	}
	return get_key_by_aggr_mode_simple(key, addr, sample->tid);
}

static int report_lock_acquire_event(struct evsel *evsel,
				     struct perf_sample *sample)
{
	struct lock_stat *ls;
	struct thread_stat *ts;
	struct lock_seq_stat *seq;
	const char *name = evsel__strval(evsel, sample, "name");
	u64 addr = evsel__intval(evsel, sample, "lockdep_addr");
	int flag = evsel__intval(evsel, sample, "flags");
	u64 key;
	int ret;

	ret = get_key_by_aggr_mode_simple(&key, addr, sample->tid);
	if (ret < 0)
		return ret;

	ls = lock_stat_findnew(key, name, 0);
	if (!ls)
		return -ENOMEM;

	ts = thread_stat_findnew(sample->tid);
	if (!ts)
		return -ENOMEM;

	seq = get_seq(ts, addr);
	if (!seq)
		return -ENOMEM;

	switch (seq->state) {
	case SEQ_STATE_UNINITIALIZED:
	case SEQ_STATE_RELEASED:
		if (!flag) {
			seq->state = SEQ_STATE_ACQUIRING;
		} else {
			if (flag & TRY_LOCK)
				ls->nr_trylock++;
			if (flag & READ_LOCK)
				ls->nr_readlock++;
			seq->state = SEQ_STATE_READ_ACQUIRED;
			seq->read_count = 1;
			ls->nr_acquired++;
		}
		break;
	case SEQ_STATE_READ_ACQUIRED:
		if (flag & READ_LOCK) {
			seq->read_count++;
			ls->nr_acquired++;
			goto end;
		} else {
			goto broken;
		}
		break;
	case SEQ_STATE_ACQUIRED:
	case SEQ_STATE_ACQUIRING:
	case SEQ_STATE_CONTENDED:
broken:
		/* broken lock sequence */
		if (!ls->broken) {
			ls->broken = 1;
			bad_hist[BROKEN_ACQUIRE]++;
		}
		list_del_init(&seq->list);
		break;
	default:
		BUG_ON("Unknown state of lock sequence found!\n");
		break;
	}

	ls->nr_acquire++;
	seq->prev_event_time = sample->time;
end:
	return 0;
}

static int report_lock_acquired_event(struct evsel *evsel,
				      struct perf_sample *sample)
{
	struct lock_stat *ls;
	struct thread_stat *ts;
	struct lock_seq_stat *seq;
	u64 contended_term;
	const char *name = evsel__strval(evsel, sample, "name");
	u64 addr = evsel__intval(evsel, sample, "lockdep_addr");
	u64 key;
	int ret;

	ret = get_key_by_aggr_mode_simple(&key, addr, sample->tid);
	if (ret < 0)
		return ret;

	ls = lock_stat_findnew(key, name, 0);
	if (!ls)
		return -ENOMEM;

	ts = thread_stat_findnew(sample->tid);
	if (!ts)
		return -ENOMEM;

	seq = get_seq(ts, addr);
	if (!seq)
		return -ENOMEM;

	switch (seq->state) {
	case SEQ_STATE_UNINITIALIZED:
		/* orphan event, do nothing */
		return 0;
	case SEQ_STATE_ACQUIRING:
		break;
	case SEQ_STATE_CONTENDED:
		contended_term = sample->time - seq->prev_event_time;
		ls->wait_time_total += contended_term;
		if (contended_term < ls->wait_time_min)
			ls->wait_time_min = contended_term;
		if (ls->wait_time_max < contended_term)
			ls->wait_time_max = contended_term;
		break;
	case SEQ_STATE_RELEASED:
	case SEQ_STATE_ACQUIRED:
	case SEQ_STATE_READ_ACQUIRED:
		/* broken lock sequence */
		if (!ls->broken) {
			ls->broken = 1;
			bad_hist[BROKEN_ACQUIRED]++;
		}
		list_del_init(&seq->list);
		goto end;
	default:
		BUG_ON("Unknown state of lock sequence found!\n");
		break;
	}

	seq->state = SEQ_STATE_ACQUIRED;
	ls->nr_acquired++;
	ls->avg_wait_time = ls->nr_contended ? ls->wait_time_total/ls->nr_contended : 0;
	seq->prev_event_time = sample->time;
end:
	return 0;
}

static int report_lock_contended_event(struct evsel *evsel,
				       struct perf_sample *sample)
{
	struct lock_stat *ls;
	struct thread_stat *ts;
	struct lock_seq_stat *seq;
	const char *name = evsel__strval(evsel, sample, "name");
	u64 addr = evsel__intval(evsel, sample, "lockdep_addr");
	u64 key;
	int ret;

	ret = get_key_by_aggr_mode_simple(&key, addr, sample->tid);
	if (ret < 0)
		return ret;

	ls = lock_stat_findnew(key, name, 0);
	if (!ls)
		return -ENOMEM;

	ts = thread_stat_findnew(sample->tid);
	if (!ts)
		return -ENOMEM;

	seq = get_seq(ts, addr);
	if (!seq)
		return -ENOMEM;

	switch (seq->state) {
	case SEQ_STATE_UNINITIALIZED:
		/* orphan event, do nothing */
		return 0;
	case SEQ_STATE_ACQUIRING:
		break;
	case SEQ_STATE_RELEASED:
	case SEQ_STATE_ACQUIRED:
	case SEQ_STATE_READ_ACQUIRED:
	case SEQ_STATE_CONTENDED:
		/* broken lock sequence */
		if (!ls->broken) {
			ls->broken = 1;
			bad_hist[BROKEN_CONTENDED]++;
		}
		list_del_init(&seq->list);
		goto end;
	default:
		BUG_ON("Unknown state of lock sequence found!\n");
		break;
	}

	seq->state = SEQ_STATE_CONTENDED;
	ls->nr_contended++;
	ls->avg_wait_time = ls->wait_time_total/ls->nr_contended;
	seq->prev_event_time = sample->time;
end:
	return 0;
}

static int report_lock_release_event(struct evsel *evsel,
				     struct perf_sample *sample)
{
	struct lock_stat *ls;
	struct thread_stat *ts;
	struct lock_seq_stat *seq;
	const char *name = evsel__strval(evsel, sample, "name");
	u64 addr = evsel__intval(evsel, sample, "lockdep_addr");
	u64 key;
	int ret;

	ret = get_key_by_aggr_mode_simple(&key, addr, sample->tid);
	if (ret < 0)
		return ret;

	ls = lock_stat_findnew(key, name, 0);
	if (!ls)
		return -ENOMEM;

	ts = thread_stat_findnew(sample->tid);
	if (!ts)
		return -ENOMEM;

	seq = get_seq(ts, addr);
	if (!seq)
		return -ENOMEM;

	switch (seq->state) {
	case SEQ_STATE_UNINITIALIZED:
		goto end;
	case SEQ_STATE_ACQUIRED:
		break;
	case SEQ_STATE_READ_ACQUIRED:
		seq->read_count--;
		BUG_ON(seq->read_count < 0);
		if (seq->read_count) {
			ls->nr_release++;
			goto end;
		}
		break;
	case SEQ_STATE_ACQUIRING:
	case SEQ_STATE_CONTENDED:
	case SEQ_STATE_RELEASED:
		/* broken lock sequence */
		if (!ls->broken) {
			ls->broken = 1;
			bad_hist[BROKEN_RELEASE]++;
		}
		goto free_seq;
	default:
		BUG_ON("Unknown state of lock sequence found!\n");
		break;
	}

	ls->nr_release++;
free_seq:
	list_del_init(&seq->list);
	free(seq);
end:
	return 0;
}

static int get_symbol_name_offset(struct map *map, struct symbol *sym, u64 ip,
				  char *buf, int size)
{
	u64 offset;

	if (map == NULL || sym == NULL) {
		buf[0] = '\0';
		return 0;
	}

	offset = map->map_ip(map, ip) - sym->start;

	if (offset)
		return scnprintf(buf, size, "%s+%#lx", sym->name, offset);

	return strlcpy(buf, sym->name, size);
}

static int lock_contention_caller(struct evsel *evsel, struct perf_sample *sample,
				  char *buf, int size)
{
	struct thread *thread;
	struct callchain_cursor *cursor = &callchain_cursor;
	struct machine *machine = &session->machines.host;
	struct symbol *sym;
	int skip = 0;
	int ret;

	/* lock names will be replaced with task names later */
	if (show_thread_stats)
		return -1;

	thread = machine__findnew_thread(machine, -1, sample->pid);
	if (thread == NULL)
		return -1;

	/* use caller function name from the callchain */
	ret = thread__resolve_callchain(thread, cursor, evsel, sample,
					NULL, NULL, max_stack_depth);
	if (ret != 0) {
		thread__put(thread);
		return -1;
	}

	callchain_cursor_commit(cursor);
	thread__put(thread);

	while (true) {
		struct callchain_cursor_node *node;

		node = callchain_cursor_current(cursor);
		if (node == NULL)
			break;

		/* skip first few entries - for lock functions */
		if (++skip <= stack_skip)
			goto next;

		sym = node->ms.sym;
		if (sym && !machine__is_lock_function(machine, node->ip)) {
			get_symbol_name_offset(node->ms.map, sym, node->ip,
					       buf, size);
			return 0;
		}

next:
		callchain_cursor_advance(cursor);
	}
	return -1;
}
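
/*
 * Build a stable id for a contention callchain by XOR-ing the hashed IPs of
 * the entries below the lock functions; it serves as the lock_stat key in
 * LOCK_AGGR_CALLER mode.
 */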
static u64 callchain_id(struct evsel *evsel, struct perf_sample *sample)
{
	struct callchain_cursor *cursor = &callchain_cursor;
	struct machine *machine = &session->machines.host;
	struct thread *thread;
	u64 hash = 0;
	int skip = 0;
	int ret;

	thread = machine__findnew_thread(machine, -1, sample->pid);
	if (thread == NULL)
		return -1;

	/* use caller function name from the callchain */
	ret = thread__resolve_callchain(thread, cursor, evsel, sample,
					NULL, NULL, max_stack_depth);
	thread__put(thread);

	if (ret != 0)
		return -1;

	callchain_cursor_commit(cursor);

	while (true) {
		struct callchain_cursor_node *node;

		node = callchain_cursor_current(cursor);
		if (node == NULL)
			break;

		/* skip first few entries - for lock functions */
		if (++skip <= stack_skip)
			goto next;

		if (node->ms.sym && machine__is_lock_function(machine, node->ip))
			goto next;

		hash ^= hash_long((unsigned long)node->ip, 64);

next:
		callchain_cursor_advance(cursor);
	}
	return hash;
}

static u64 *get_callstack(struct perf_sample *sample, int max_stack)
{
	u64 *callstack;
	u64 i;
	int c;

	callstack = calloc(max_stack, sizeof(*callstack));
	if (callstack == NULL)
		return NULL;

	for (i = 0, c = 0; i < sample->callchain->nr && c < max_stack; i++) {
		u64 ip = sample->callchain->ips[i];

		/* skip PERF_CONTEXT_{KERNEL,USER,...} marker entries */
		if (ip >= PERF_CONTEXT_MAX)
			continue;

		callstack[c++] = ip;
	}
	return callstack;
}

static int report_lock_contention_begin_event(struct evsel *evsel,
					      struct perf_sample *sample)
{
	struct lock_stat *ls;
	struct thread_stat *ts;
	struct lock_seq_stat *seq;
	u64 addr = evsel__intval(evsel, sample, "lock_addr");
	unsigned int flags = evsel__intval(evsel, sample, "flags");
	u64 key;
	int i, ret;
	static bool kmap_loaded;
	struct machine *machine = &session->machines.host;
	struct map *kmap;
	struct symbol *sym;

	ret = get_key_by_aggr_mode(&key, addr, evsel, sample);
	if (ret < 0)
		return ret;

	if (!kmap_loaded) {
		unsigned long *addrs;

		/* make sure it loads the kernel map to find lock symbols */
		map__load(machine__kernel_map(machine));
		kmap_loaded = true;

		/* convert (kernel) symbols to addresses */
		for (i = 0; i < filters.nr_syms; i++) {
			sym = machine__find_kernel_symbol_by_name(machine,
								  filters.syms[i],
								  &kmap);
			if (sym == NULL) {
				pr_warning("ignore unknown symbol: %s\n",
					   filters.syms[i]);
				continue;
			}

			addrs = realloc(filters.addrs,
					(filters.nr_addrs + 1) * sizeof(*addrs));
			if (addrs == NULL) {
				pr_warning("memory allocation failure\n");
				return -ENOMEM;
			}

			addrs[filters.nr_addrs++] = kmap->unmap_ip(kmap, sym->start);
			filters.addrs = addrs;
		}
	}

	ls = lock_stat_find(key);
	if (!ls) {
		char buf[128];
		const char *name = "";

		switch (aggr_mode) {
		case LOCK_AGGR_ADDR:
			sym = machine__find_kernel_symbol(machine, key, &kmap);
			if (sym)
				name = sym->name;
			break;
		case LOCK_AGGR_CALLER:
			name = buf;
			if (lock_contention_caller(evsel, sample, buf, sizeof(buf)) < 0)
				name = "Unknown";
			break;
		case LOCK_AGGR_TASK:
		default:
			break;
		}

		ls = lock_stat_findnew(key, name, flags);
		if (!ls)
			return -ENOMEM;

		if (aggr_mode == LOCK_AGGR_CALLER && needs_callstack()) {
			ls->callstack = get_callstack(sample, max_stack_depth);
			if (ls->callstack == NULL)
				return -ENOMEM;
		}
	}

	if (filters.nr_types) {
		bool found = false;

		for (i = 0; i < filters.nr_types; i++) {
			if (flags == filters.types[i]) {
				found = true;
				break;
			}
		}

		if (!found)
			return 0;
	}

	if (filters.nr_addrs) {
		bool found = false;

		for (i = 0; i < filters.nr_addrs; i++) {
			if (addr == filters.addrs[i]) {
				found = true;
				break;
			}
		}

		if (!found)
			return 0;
	}

	ts = thread_stat_findnew(sample->tid);
	if (!ts)
		return -ENOMEM;

	seq = get_seq(ts, addr);
	if (!seq)
		return -ENOMEM;

	switch (seq->state) {
	case SEQ_STATE_UNINITIALIZED:
	case SEQ_STATE_ACQUIRED:
		break;
	case SEQ_STATE_CONTENDED:
		/*
		 * It can have nested contention begin with mutex spinning,
		 * then we would use the original contention begin event and
		 * ignore the second one.
		 */
		goto end;
	case SEQ_STATE_ACQUIRING:
	case SEQ_STATE_READ_ACQUIRED:
	case SEQ_STATE_RELEASED:
		/* broken lock sequence */
		if (!ls->broken) {
			ls->broken = 1;
			bad_hist[BROKEN_CONTENDED]++;
		}
		list_del_init(&seq->list);
		goto end;
	default:
		BUG_ON("Unknown state of lock sequence found!\n");
		break;
	}

	if (seq->state != SEQ_STATE_CONTENDED) {
		seq->state = SEQ_STATE_CONTENDED;
		seq->prev_event_time = sample->time;
		ls->nr_contended++;
	}
end:
	return 0;
}

static int report_lock_contention_end_event(struct evsel *evsel,
					    struct perf_sample *sample)
{
	struct lock_stat *ls;
	struct thread_stat *ts;
	struct lock_seq_stat *seq;
	u64 contended_term;
	u64 addr = evsel__intval(evsel, sample, "lock_addr");
	u64 key;
	int ret;

	ret = get_key_by_aggr_mode(&key, addr, evsel, sample);
	if (ret < 0)
		return ret;

	ls = lock_stat_find(key);
	if (!ls)
		return 0;

	ts = thread_stat_find(sample->tid);
	if (!ts)
		return 0;

	seq = get_seq(ts, addr);
	if (!seq)
		return -ENOMEM;

	switch (seq->state) {
	case SEQ_STATE_UNINITIALIZED:
		goto end;
	case SEQ_STATE_CONTENDED:
		contended_term = sample->time - seq->prev_event_time;
		ls->wait_time_total += contended_term;
		if (contended_term < ls->wait_time_min)
			ls->wait_time_min = contended_term;
		if (ls->wait_time_max < contended_term)
			ls->wait_time_max = contended_term;
		break;
	case SEQ_STATE_ACQUIRING:
	case SEQ_STATE_ACQUIRED:
	case SEQ_STATE_READ_ACQUIRED:
	case SEQ_STATE_RELEASED:
		/* broken lock sequence */
		if (!ls->broken) {
			ls->broken = 1;
			bad_hist[BROKEN_ACQUIRED]++;
		}
		list_del_init(&seq->list);
		goto end;
	default:
		BUG_ON("Unknown state of lock sequence found!\n");
		break;
	}

	seq->state = SEQ_STATE_ACQUIRED;
	ls->nr_acquired++;
	ls->avg_wait_time = ls->wait_time_total/ls->nr_acquired;
end:
	return 0;
}

/* lock oriented handlers */
/* TODO: handlers for CPU oriented, thread oriented */
static struct trace_lock_handler report_lock_ops = {
	.acquire_event		= report_lock_acquire_event,
	.acquired_event		= report_lock_acquired_event,
	.contended_event	= report_lock_contended_event,
	.release_event		= report_lock_release_event,
	.contention_begin_event	= report_lock_contention_begin_event,
	.contention_end_event	= report_lock_contention_end_event,
};

static struct trace_lock_handler contention_lock_ops = {
	.contention_begin_event = report_lock_contention_begin_event,
	.contention_end_event	= report_lock_contention_end_event,
};
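
/*
 * "perf lock report" can feed all six handlers from the lockdep tracepoints;
 * "perf lock contention" only ever sees the contention_begin/end pair, so
 * contention_lock_ops leaves the other callbacks NULL and the
 * evsel__process_*() wrappers below check for that.
 */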
static struct trace_lock_handler *trace_handler;

static int evsel__process_lock_acquire(struct evsel *evsel, struct perf_sample *sample)
{
	if (trace_handler->acquire_event)
		return trace_handler->acquire_event(evsel, sample);
	return 0;
}

static int evsel__process_lock_acquired(struct evsel *evsel, struct perf_sample *sample)
{
	if (trace_handler->acquired_event)
		return trace_handler->acquired_event(evsel, sample);
	return 0;
}

static int evsel__process_lock_contended(struct evsel *evsel, struct perf_sample *sample)
{
	if (trace_handler->contended_event)
		return trace_handler->contended_event(evsel, sample);
	return 0;
}

static int evsel__process_lock_release(struct evsel *evsel, struct perf_sample *sample)
{
	if (trace_handler->release_event)
		return trace_handler->release_event(evsel, sample);
	return 0;
}

static int evsel__process_contention_begin(struct evsel *evsel, struct perf_sample *sample)
{
	if (trace_handler->contention_begin_event)
		return trace_handler->contention_begin_event(evsel, sample);
	return 0;
}

static int evsel__process_contention_end(struct evsel *evsel, struct perf_sample *sample)
{
	if (trace_handler->contention_end_event)
		return trace_handler->contention_end_event(evsel, sample);
	return 0;
}

static void print_bad_events(int bad, int total)
{
	/* Output for debug; this has to be removed */
	int i;
	int broken = 0;
	const char *name[4] =
		{ "acquire", "acquired", "contended", "release" };

	for (i = 0; i < BROKEN_MAX; i++)
		broken += bad_hist[i];

	if (quiet || (broken == 0 && verbose <= 0))
		return;

	pr_info("\n=== output for debug ===\n\n");
	pr_info("bad: %d, total: %d\n", bad, total);
	pr_info("bad rate: %.2f %%\n", (double)bad / (double)total * 100);
	pr_info("histogram of events caused bad sequence\n");
	for (i = 0; i < BROKEN_MAX; i++)
		pr_info(" %10s: %d\n", name[i], bad_hist[i]);
}

/* TODO: various ways to print, coloring, nano or milli sec */
static void print_result(void)
{
	struct lock_stat *st;
	struct lock_key *key;
	char cut_name[20];
	int bad, total, printed;

	if (!quiet) {
		pr_info("%20s ", "Name");
		list_for_each_entry(key, &lock_keys, list)
			pr_info("%*s ", key->len, key->header);
		pr_info("\n\n");
	}

	bad = total = printed = 0;
	while ((st = pop_from_result())) {
		total++;
		if (st->broken)
			bad++;
		if (!st->nr_acquired)
			continue;

		bzero(cut_name, 20);

		if (strlen(st->name) < 20) {
			/* output raw name */
			const char *name = st->name;

			if (show_thread_stats) {
				struct thread *t;

				/* st->addr contains tid of thread */
				t = perf_session__findnew(session, st->addr);
				name = thread__comm_str(t);
			}

			pr_info("%20s ", name);
		} else {
			strncpy(cut_name, st->name, 16);
			cut_name[16] = '.';
			cut_name[17] = '.';
			cut_name[18] = '.';
			cut_name[19] = '\0';
			/* cut off name for saving output style */
			pr_info("%20s ", cut_name);
		}

		list_for_each_entry(key, &lock_keys, list) {
			key->print(key, st);
			pr_info(" ");
		}
		pr_info("\n");

		if (++printed >= print_nr_entries)
			break;
	}

	print_bad_events(bad, total);
}

static bool info_threads, info_map;

static void dump_threads(void)
{
	struct thread_stat *st;
	struct rb_node *node;
	struct thread *t;

	pr_info("%10s: comm\n", "Thread ID");

	node = rb_first(&thread_stats);
	while (node) {
		st = container_of(node, struct thread_stat, rb);
		t = perf_session__findnew(session, st->tid);
		pr_info("%10d: %s\n", st->tid, thread__comm_str(t));
		node = rb_next(node);
		thread__put(t);
	}
}

static int compare_maps(struct lock_stat *a, struct lock_stat *b)
{
	int ret;

	if (a->name && b->name)
		ret = strcmp(a->name, b->name);
	else
		ret = !!a->name - !!b->name;

	if (!ret)
		return a->addr < b->addr;
	else
		return ret < 0;
}

static void dump_map(void)
{
	unsigned int i;
	struct lock_stat *st;

	pr_info("Address of instance: name of class\n");
	for (i = 0; i < LOCKHASH_SIZE; i++) {
		hlist_for_each_entry(st, &lockhash_table[i], hash_entry) {
			insert_to_result(st, compare_maps);
		}
	}

	while ((st = pop_from_result()))
		pr_info(" %#llx: %s\n", (unsigned long long)st->addr, st->name);
}

static int dump_info(void)
{
	int rc = 0;

	if (info_threads)
		dump_threads();
	else if (info_map)
		dump_map();
	else {
		rc = -1;
		pr_err("Unknown type of information\n");
	}

	return rc;
}

static const struct evsel_str_handler lock_tracepoints[] = {
	{ "lock:lock_acquire",	 evsel__process_lock_acquire,   }, /* CONFIG_LOCKDEP */
	{ "lock:lock_acquired",	 evsel__process_lock_acquired,  }, /* CONFIG_LOCKDEP, CONFIG_LOCK_STAT */
	{ "lock:lock_contended", evsel__process_lock_contended, }, /* CONFIG_LOCKDEP, CONFIG_LOCK_STAT */
	{ "lock:lock_release",	 evsel__process_lock_release,   }, /* CONFIG_LOCKDEP */
};

static const struct evsel_str_handler contention_tracepoints[] = {
	{ "lock:contention_begin", evsel__process_contention_begin, },
	{ "lock:contention_end",   evsel__process_contention_end,   },
};

static int process_event_update(struct perf_tool *tool,
				union perf_event *event,
				struct evlist **pevlist)
{
	int ret;

	ret = perf_event__process_event_update(tool, event, pevlist);
	if (ret < 0)
		return ret;

	/* this can return -EEXIST since we call it for each evsel */
	perf_session__set_tracepoints_handlers(session, lock_tracepoints);
	perf_session__set_tracepoints_handlers(session, contention_tracepoints);
	return 0;
}

typedef int (*tracepoint_handler)(struct evsel *evsel,
				  struct perf_sample *sample);

static int process_sample_event(struct perf_tool *tool __maybe_unused,
				union perf_event *event,
				struct perf_sample *sample,
				struct evsel *evsel,
				struct machine *machine)
{
	int err = 0;
	struct thread *thread = machine__findnew_thread(machine, sample->pid,
							sample->tid);

	if (thread == NULL) {
		pr_debug("problem processing %d event, skipping it.\n",
			 event->header.type);
		return -1;
	}

	if (evsel->handler != NULL) {
		tracepoint_handler f = evsel->handler;
		err = f(evsel, sample);
	}

	thread__put(thread);

	return err;
}

static void combine_result(void)
{
	unsigned int i;
	struct lock_stat *st;

	if (!combine_locks)
		return;

	for (i = 0; i < LOCKHASH_SIZE; i++) {
		hlist_for_each_entry(st, &lockhash_table[i], hash_entry) {
			combine_lock_stats(st);
		}
	}
}

static void sort_result(void)
{
	unsigned int i;
	struct lock_stat *st;

	for (i = 0; i < LOCKHASH_SIZE; i++) {
		hlist_for_each_entry(st, &lockhash_table[i], hash_entry) {
			insert_to_result(st, compare);
		}
	}
}

static const struct {
	unsigned int flags;
	const char *name;
} lock_type_table[] = {
	{ 0,				"semaphore" },
	{ LCB_F_SPIN,			"spinlock" },
	{ LCB_F_SPIN | LCB_F_READ,	"rwlock:R" },
	{ LCB_F_SPIN | LCB_F_WRITE,	"rwlock:W"},
	{ LCB_F_READ,			"rwsem:R" },
	{ LCB_F_WRITE,			"rwsem:W" },
	{ LCB_F_RT,			"rtmutex" },
	{ LCB_F_RT | LCB_F_READ,	"rwlock-rt:R" },
	{ LCB_F_RT | LCB_F_WRITE,	"rwlock-rt:W"},
	{ LCB_F_PERCPU | LCB_F_READ,	"pcpu-sem:R" },
	{ LCB_F_PERCPU | LCB_F_WRITE,	"pcpu-sem:W" },
	{ LCB_F_MUTEX,			"mutex" },
	{ LCB_F_MUTEX | LCB_F_SPIN,	"mutex" },
	/* alias for get_type_flag() */
	{ LCB_F_MUTEX | LCB_F_SPIN,	"mutex-spin" },
};

static const char *get_type_str(unsigned int flags)
{
	for (unsigned int i = 0; i < ARRAY_SIZE(lock_type_table); i++) {
		if (lock_type_table[i].flags == flags)
			return lock_type_table[i].name;
	}
	return "unknown";
}

static unsigned int get_type_flag(const char *str)
{
	for (unsigned int i = 0; i < ARRAY_SIZE(lock_type_table); i++) {
		if (!strcmp(lock_type_table[i].name, str))
			return lock_type_table[i].flags;
	}
	return UINT_MAX;
}
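
/*
 * Note that plain "mutex" appears twice in lock_type_table (with and without
 * LCB_F_SPIN); get_type_flag() returns the first match, and parse_lock_type()
 * picks up the spinning variant separately through the "mutex-spin" alias.
 */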

static void lock_filter_finish(void)
{
	zfree(&filters.types);
	filters.nr_types = 0;

	zfree(&filters.addrs);
	filters.nr_addrs = 0;

	for (int i = 0; i < filters.nr_syms; i++)
		free(filters.syms[i]);

	zfree(&filters.syms);
	filters.nr_syms = 0;
}

static void sort_contention_result(void)
{
	sort_result();
}

static void print_contention_result(struct lock_contention *con)
{
	struct lock_stat *st;
	struct lock_key *key;
	int bad, total, printed;

	if (!quiet) {
		list_for_each_entry(key, &lock_keys, list)
			pr_info("%*s ", key->len, key->header);

		switch (aggr_mode) {
		case LOCK_AGGR_TASK:
			pr_info(" %10s %s\n\n", "pid", "comm");
			break;
		case LOCK_AGGR_CALLER:
			pr_info(" %10s %s\n\n", "type", "caller");
			break;
		case LOCK_AGGR_ADDR:
			pr_info(" %16s %s\n\n", "address", "symbol");
			break;
		default:
			break;
		}
	}

	bad = total = printed = 0;
	if (use_bpf)
		bad = bad_hist[BROKEN_CONTENDED];

	while ((st = pop_from_result())) {
		struct thread *t;
		int pid;

		total += use_bpf ? st->nr_contended : 1;
		if (st->broken)
			bad++;

		if (!st->wait_time_total)
			continue;

		if (aggr_mode == LOCK_AGGR_CALLER && !list_empty(&callstack_filters)) {
			bool found = false;
			struct map *kmap;
			struct symbol *sym;
			u64 ip;

			for (int i = 0; i < max_stack_depth; i++) {
				struct callstack_filter *filter;

				if (!st->callstack || !st->callstack[i])
					break;

				ip = st->callstack[i];
				sym = machine__find_kernel_symbol(con->machine, ip, &kmap);
				if (sym == NULL)
					continue;

				list_for_each_entry(filter, &callstack_filters, list) {
					if (strstr(sym->name, filter->name)) {
						found = true;
						break;
					}
				}
			}

			if (!found)
				continue;
		}

		list_for_each_entry(key, &lock_keys, list) {
			key->print(key, st);
			pr_info(" ");
		}

		switch (aggr_mode) {
		case LOCK_AGGR_CALLER:
			pr_info(" %10s %s\n", get_type_str(st->flags), st->name);
			break;
		case LOCK_AGGR_TASK:
			pid = st->addr;
			t = perf_session__findnew(session, pid);
			pr_info(" %10d %s\n", pid, thread__comm_str(t));
			break;
		case LOCK_AGGR_ADDR:
			pr_info(" %016llx %s\n", (unsigned long long)st->addr,
				st->name ? : "");
			break;
		default:
			break;
		}

		if (aggr_mode == LOCK_AGGR_CALLER && verbose > 0) {
			struct map *kmap;
			struct symbol *sym;
			char buf[128];
			u64 ip;

			for (int i = 0; i < max_stack_depth; i++) {
				if (!st->callstack || !st->callstack[i])
					break;

				ip = st->callstack[i];
				sym = machine__find_kernel_symbol(con->machine, ip, &kmap);
				get_symbol_name_offset(kmap, sym, ip, buf, sizeof(buf));
				pr_info("\t\t\t%#lx %s\n", (unsigned long)ip, buf);
			}
		}

		if (++printed >= print_nr_entries)
			break;
	}

	print_bad_events(bad, total);
}

static int __cmd_report(bool display_info)
{
	int err = -EINVAL;
	struct perf_tool eops = {
		.attr		 = perf_event__process_attr,
		.event_update	 = process_event_update,
		.sample		 = process_sample_event,
		.comm		 = perf_event__process_comm,
		.mmap		 = perf_event__process_mmap,
		.namespaces	 = perf_event__process_namespaces,
		.tracing_data	 = perf_event__process_tracing_data,
		.ordered_events	 = true,
	};
	struct perf_data data = {
		.path  = input_name,
		.mode  = PERF_DATA_MODE_READ,
		.force = force,
	};

	session = perf_session__new(&data, &eops);
	if (IS_ERR(session)) {
		pr_err("Initializing perf session failed\n");
		return PTR_ERR(session);
	}

	/* for lock function check */
	symbol_conf.sort_by_name = true;
	symbol_conf.allow_aliases = true;
	symbol__init(&session->header.env);

	if (!data.is_pipe) {
		if (!perf_session__has_traces(session, "lock record"))
			goto out_delete;

		if (perf_session__set_tracepoints_handlers(session, lock_tracepoints)) {
			pr_err("Initializing perf session tracepoint handlers failed\n");
			goto out_delete;
		}

		if (perf_session__set_tracepoints_handlers(session, contention_tracepoints)) {
			pr_err("Initializing perf session tracepoint handlers failed\n");
			goto out_delete;
		}
	}

	if (setup_output_field(false, output_fields))
		goto out_delete;

	if (select_key(false))
		goto out_delete;

	if (show_thread_stats)
		aggr_mode = LOCK_AGGR_TASK;

	err = perf_session__process_events(session);
	if (err)
		goto out_delete;

	setup_pager();
	if (display_info) /* used for info subcommand */
		err = dump_info();
	else {
		combine_result();
		sort_result();
		print_result();
	}

out_delete:
	perf_session__delete(session);
	return err;
}

static void sighandler(int sig __maybe_unused)
{
}

static int __cmd_contention(int argc, const char **argv)
{
	int err = -EINVAL;
	struct perf_tool eops = {
		.attr		 = perf_event__process_attr,
		.event_update	 = process_event_update,
		.sample		 = process_sample_event,
		.comm		 = perf_event__process_comm,
		.mmap		 = perf_event__process_mmap,
		.tracing_data	 = perf_event__process_tracing_data,
		.ordered_events	 = true,
	};
	struct perf_data data = {
		.path  = input_name,
		.mode  = PERF_DATA_MODE_READ,
		.force = force,
	};
	struct lock_contention con = {
		.target = &target,
		.result = &lockhash_table[0],
		.map_nr_entries = bpf_map_entries,
		.max_stack = max_stack_depth,
		.stack_skip = stack_skip,
		.filters = &filters,
		.save_callstack = needs_callstack(),
	};

	session = perf_session__new(use_bpf ? NULL : &data, &eops);
	if (IS_ERR(session)) {
		pr_err("Initializing perf session failed\n");
		return PTR_ERR(session);
	}

	con.machine = &session->machines.host;

	con.aggr_mode = aggr_mode = show_thread_stats ? LOCK_AGGR_TASK :
		show_lock_addrs ? LOCK_AGGR_ADDR : LOCK_AGGR_CALLER;

	/* for lock function check */
	symbol_conf.sort_by_name = true;
	symbol_conf.allow_aliases = true;
	symbol__init(&session->header.env);

	if (use_bpf) {
		err = target__validate(&target);
		if (err) {
			char errbuf[512];

			target__strerror(&target, err, errbuf, 512);
			pr_err("%s\n", errbuf);
			goto out_delete;
		}

		signal(SIGINT, sighandler);
		signal(SIGCHLD, sighandler);
		signal(SIGTERM, sighandler);

		con.evlist = evlist__new();
		if (con.evlist == NULL) {
			err = -ENOMEM;
			goto out_delete;
		}

		err = evlist__create_maps(con.evlist, &target);
		if (err < 0)
			goto out_delete;

		if (argc) {
			err = evlist__prepare_workload(con.evlist, &target,
						       argv, false, NULL);
			if (err < 0)
				goto out_delete;
		}

		if (lock_contention_prepare(&con) < 0) {
			pr_err("lock contention BPF setup failed\n");
			goto out_delete;
		}
	} else if (!data.is_pipe) {
		if (!perf_session__has_traces(session, "lock record"))
			goto out_delete;

		if (!evlist__find_evsel_by_str(session->evlist,
					       "lock:contention_begin")) {
			pr_err("lock contention evsel not found\n");
			goto out_delete;
		}

		if (perf_session__set_tracepoints_handlers(session,
						contention_tracepoints)) {
			pr_err("Initializing perf session tracepoint handlers failed\n");
			goto out_delete;
		}
	}

	if (setup_output_field(true, output_fields))
		goto out_delete;

	if (select_key(true))
		goto out_delete;

	if (use_bpf) {
		lock_contention_start();
		if (argc)
			evlist__start_workload(con.evlist);

		/* wait for signal */
		pause();

		lock_contention_stop();
		lock_contention_read(&con);

		/* abuse bad hist stats for lost entries */
		bad_hist[BROKEN_CONTENDED] = con.lost;
	} else {
		err = perf_session__process_events(session);
		if (err)
			goto out_delete;
	}

	setup_pager();

	sort_contention_result();
	print_contention_result(&con);

out_delete:
	lock_filter_finish();
	evlist__delete(con.evlist);
	lock_contention_finish();
	perf_session__delete(session);
	return err;
}

static int __cmd_record(int argc, const char **argv)
{
	const char *record_args[] = {
		"record", "-R", "-m", "1024", "-c", "1", "--synth", "task",
	};
	const char *callgraph_args[] = {
		"--call-graph", "fp," __stringify(CONTENTION_STACK_DEPTH),
	};
	unsigned int rec_argc, i, j, ret;
	unsigned int nr_tracepoints;
	unsigned int nr_callgraph_args = 0;
	const char **rec_argv;
	bool has_lock_stat = true;

	for (i = 0; i < ARRAY_SIZE(lock_tracepoints); i++) {
		if (!is_valid_tracepoint(lock_tracepoints[i].name)) {
			pr_debug("tracepoint %s is not enabled. "
				 "Are CONFIG_LOCKDEP and CONFIG_LOCK_STAT enabled?\n",
				 lock_tracepoints[i].name);
			has_lock_stat = false;
			break;
		}
	}

	if (has_lock_stat)
		goto setup_args;

	for (i = 0; i < ARRAY_SIZE(contention_tracepoints); i++) {
		if (!is_valid_tracepoint(contention_tracepoints[i].name)) {
			pr_err("tracepoint %s is not enabled.\n",
			       contention_tracepoints[i].name);
			return 1;
		}
	}

	nr_callgraph_args = ARRAY_SIZE(callgraph_args);

setup_args:
	rec_argc = ARRAY_SIZE(record_args) + nr_callgraph_args + argc - 1;

	if (has_lock_stat)
		nr_tracepoints = ARRAY_SIZE(lock_tracepoints);
	else
		nr_tracepoints = ARRAY_SIZE(contention_tracepoints);

	/* factor of 2 is for -e in front of each tracepoint */
	rec_argc += 2 * nr_tracepoints;

	rec_argv = calloc(rec_argc + 1, sizeof(char *));
	if (!rec_argv)
		return -ENOMEM;

	for (i = 0; i < ARRAY_SIZE(record_args); i++)
		rec_argv[i] = strdup(record_args[i]);

	for (j = 0; j < nr_tracepoints; j++) {
		const char *ev_name;

		if (has_lock_stat)
			ev_name = strdup(lock_tracepoints[j].name);
		else
			ev_name = strdup(contention_tracepoints[j].name);

		if (!ev_name)
			return -ENOMEM;

		rec_argv[i++] = "-e";
		rec_argv[i++] = ev_name;
	}

	for (j = 0; j < nr_callgraph_args; j++, i++)
		rec_argv[i] = callgraph_args[j];

	for (j = 1; j < (unsigned int)argc; j++, i++)
		rec_argv[i] = argv[j];

	BUG_ON(i != rec_argc);

	ret = cmd_record(i, rec_argv);
	free(rec_argv);
	return ret;
}
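
/*
 * Without CONFIG_LOCK_STAT the function above ends up running something
 * like (with the stack depth taken from CONTENTION_STACK_DEPTH):
 *
 *   perf record -R -m 1024 -c 1 --synth task \
 *       -e lock:contention_begin -e lock:contention_end \
 *       --call-graph fp,<CONTENTION_STACK_DEPTH> -- <workload>
 */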

static int parse_map_entry(const struct option *opt, const char *str,
			   int unset __maybe_unused)
{
	unsigned long *len = (unsigned long *)opt->value;
	unsigned long val;
	char *endptr;

	errno = 0;
	val = strtoul(str, &endptr, 0);
	if (*endptr != '\0' || errno != 0) {
		pr_err("invalid BPF map length: %s\n", str);
		return -1;
	}

	*len = val;
	return 0;
}

static int parse_max_stack(const struct option *opt, const char *str,
			   int unset __maybe_unused)
{
	unsigned long *len = (unsigned long *)opt->value;
	long val;
	char *endptr;

	errno = 0;
	val = strtol(str, &endptr, 0);
	if (*endptr != '\0' || errno != 0) {
		pr_err("invalid max stack depth: %s\n", str);
		return -1;
	}

	if (val < 0 || val > sysctl__max_stack()) {
		pr_err("invalid max stack depth: %ld\n", val);
		return -1;
	}

	*len = val;
	return 0;
}

static bool add_lock_type(unsigned int flags)
{
	unsigned int *tmp;

	tmp = realloc(filters.types, (filters.nr_types + 1) * sizeof(*filters.types));
	if (tmp == NULL)
		return false;

	tmp[filters.nr_types++] = flags;
	filters.types = tmp;
	return true;
}

static int parse_lock_type(const struct option *opt __maybe_unused, const char *str,
			   int unset __maybe_unused)
{
	char *s, *tmp, *tok;
	int ret = 0;

	s = strdup(str);
	if (s == NULL)
		return -1;

	for (tok = strtok_r(s, ", ", &tmp); tok; tok = strtok_r(NULL, ", ", &tmp)) {
		unsigned int flags = get_type_flag(tok);

		if (flags == UINT_MAX) {
			char buf[32];

			if (strchr(tok, ':'))
				continue;

			/* try :R and :W suffixes for rwlock, rwsem, ... */
			scnprintf(buf, sizeof(buf), "%s:R", tok);
			flags = get_type_flag(buf);
			if (flags != UINT_MAX) {
				if (!add_lock_type(flags)) {
					ret = -1;
					break;
				}
			}

			scnprintf(buf, sizeof(buf), "%s:W", tok);
			flags = get_type_flag(buf);
			if (flags != UINT_MAX) {
				if (!add_lock_type(flags)) {
					ret = -1;
					break;
				}
			}
			continue;
		}

		if (!add_lock_type(flags)) {
			ret = -1;
			break;
		}

		if (!strcmp(tok, "mutex")) {
			flags = get_type_flag("mutex-spin");
			if (flags != UINT_MAX) {
				if (!add_lock_type(flags)) {
					ret = -1;
					break;
				}
			}
		}
	}

	free(s);
	return ret;
}
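
/*
 * e.g. "-Y rwsem" has no exact table match, so both "rwsem:R" and "rwsem:W"
 * get added; "-Y mutex" adds the plain and the spinning variant via the
 * "mutex-spin" alias.
 */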

static bool add_lock_addr(unsigned long addr)
{
	unsigned long *tmp;

	tmp = realloc(filters.addrs, (filters.nr_addrs + 1) * sizeof(*filters.addrs));
	if (tmp == NULL) {
		pr_err("Memory allocation failure\n");
		return false;
	}

	tmp[filters.nr_addrs++] = addr;
	filters.addrs = tmp;
	return true;
}

static bool add_lock_sym(char *name)
{
	char **tmp;
	char *sym = strdup(name);

	if (sym == NULL) {
		pr_err("Memory allocation failure\n");
		return false;
	}

	tmp = realloc(filters.syms, (filters.nr_syms + 1) * sizeof(*filters.syms));
	if (tmp == NULL) {
		pr_err("Memory allocation failure\n");
		free(sym);
		return false;
	}

	tmp[filters.nr_syms++] = sym;
	filters.syms = tmp;
	return true;
}

static int parse_lock_addr(const struct option *opt __maybe_unused, const char *str,
			   int unset __maybe_unused)
{
	char *s, *tmp, *tok;
	int ret = 0;
	u64 addr;

	s = strdup(str);
	if (s == NULL)
		return -1;

	for (tok = strtok_r(s, ", ", &tmp); tok; tok = strtok_r(NULL, ", ", &tmp)) {
		char *end;

		addr = strtoul(tok, &end, 16);
		if (*end == '\0') {
			if (!add_lock_addr(addr)) {
				ret = -1;
				break;
			}
			continue;
		}

		/*
		 * At this moment, we don't have kernel symbols.  Save the symbols
		 * in a separate list and resolve them to addresses later.
		 */
		if (!add_lock_sym(tok)) {
			ret = -1;
			break;
		}
	}

	free(s);
	return ret;
}
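
/*
 * e.g. "-L 0xffffffff12345678,rcu_state" (a made-up address): hex strings
 * are consumed as addresses right away, anything else is queued as a symbol
 * name and resolved to a kernel address on the first contention event.
 */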

static int parse_call_stack(const struct option *opt __maybe_unused, const char *str,
			    int unset __maybe_unused)
{
	char *s, *tmp, *tok;
	int ret = 0;

	s = strdup(str);
	if (s == NULL)
		return -1;

	for (tok = strtok_r(s, ", ", &tmp); tok; tok = strtok_r(NULL, ", ", &tmp)) {
		struct callstack_filter *entry;

		entry = malloc(sizeof(*entry) + strlen(tok) + 1);
		if (entry == NULL) {
			pr_err("Memory allocation failure\n");
			free(s);
			return -1;
		}

		strcpy(entry->name, tok);
		list_add_tail(&entry->list, &callstack_filters);
	}

	free(s);
	return ret;
}
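
/*
 * e.g. "-S ep_poll" keeps only entries whose saved callstack contains a
 * symbol matching "ep_poll" as a substring (see the strstr() check in
 * print_contention_result()).
 */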

int cmd_lock(int argc, const char **argv)
{
	const struct option lock_options[] = {
	OPT_STRING('i', "input", &input_name, "file", "input file name"),
	OPT_INCR('v', "verbose", &verbose, "be more verbose (show symbol address, etc)"),
	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, "dump raw trace in ASCII"),
	OPT_BOOLEAN('f', "force", &force, "don't complain, do it"),
	OPT_STRING(0, "vmlinux", &symbol_conf.vmlinux_name,
		   "file", "vmlinux pathname"),
	OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name,
		   "file", "kallsyms pathname"),
	OPT_BOOLEAN('q', "quiet", &quiet, "Do not show any warnings or messages"),
	OPT_END()
	};

	const struct option info_options[] = {
	OPT_BOOLEAN('t', "threads", &info_threads,
		    "dump thread list in perf.data"),
	OPT_BOOLEAN('m', "map", &info_map,
		    "map of lock instances (address:name table)"),
	OPT_PARENT(lock_options)
	};

	const struct option report_options[] = {
	OPT_STRING('k', "key", &sort_key, "acquired",
		    "key for sorting (acquired / contended / avg_wait / wait_total / wait_max / wait_min)"),
	OPT_STRING('F', "field", &output_fields, NULL,
		    "output fields (acquired / contended / avg_wait / wait_total / wait_max / wait_min)"),
	/* TODO: type */
	OPT_BOOLEAN('c', "combine-locks", &combine_locks,
		    "combine locks in the same class"),
	OPT_BOOLEAN('t', "threads", &show_thread_stats,
		    "show per-thread lock stats"),
	OPT_INTEGER('E', "entries", &print_nr_entries, "display this many functions"),
	OPT_PARENT(lock_options)
	};

	struct option contention_options[] = {
	OPT_STRING('k', "key", &sort_key, "wait_total",
		    "key for sorting (contended / wait_total / wait_max / wait_min / avg_wait)"),
	OPT_STRING('F', "field", &output_fields, "contended,wait_total,wait_max,avg_wait",
		    "output fields (contended / wait_total / wait_max / wait_min / avg_wait)"),
	OPT_BOOLEAN('t', "threads", &show_thread_stats,
		    "show per-thread lock stats"),
	OPT_BOOLEAN('b', "use-bpf", &use_bpf, "use BPF program to collect lock contention stats"),
	OPT_BOOLEAN('a', "all-cpus", &target.system_wide,
		    "System-wide collection from all CPUs"),
	OPT_STRING('C', "cpu", &target.cpu_list, "cpu",
		    "List of cpus to monitor"),
	OPT_STRING('p', "pid", &target.pid, "pid",
		   "Trace on existing process id"),
	OPT_STRING(0, "tid", &target.tid, "tid",
		   "Trace on existing thread id (exclusive to --pid)"),
	OPT_CALLBACK(0, "map-nr-entries", &bpf_map_entries, "num",
		     "Max number of BPF map entries", parse_map_entry),
	OPT_CALLBACK(0, "max-stack", &max_stack_depth, "num",
		     "Set the maximum stack depth when collecting lock contention, "
		     "Default: " __stringify(CONTENTION_STACK_DEPTH), parse_max_stack),
	OPT_INTEGER(0, "stack-skip", &stack_skip,
		    "Set the number of stack depth to skip when finding a lock caller, "
		    "Default: " __stringify(CONTENTION_STACK_SKIP)),
	OPT_INTEGER('E', "entries", &print_nr_entries, "display this many functions"),
	OPT_BOOLEAN('l', "lock-addr", &show_lock_addrs, "show lock stats by address"),
	OPT_CALLBACK('Y', "type-filter", NULL, "FLAGS",
		     "Filter specific type of locks", parse_lock_type),
	OPT_CALLBACK('L', "lock-filter", NULL, "ADDRS/NAMES",
		     "Filter specific address/symbol of locks", parse_lock_addr),
	OPT_CALLBACK('S', "callstack-filter", NULL, "NAMES",
		     "Filter specific function in the callstack", parse_call_stack),
	OPT_PARENT(lock_options)
	};

	const char * const info_usage[] = {
		"perf lock info [<options>]",
		NULL
	};
	const char *const lock_subcommands[] = { "record", "report", "script",
						 "info", "contention", NULL };
	const char *lock_usage[] = {
		NULL,
		NULL
	};
	const char * const report_usage[] = {
		"perf lock report [<options>]",
		NULL
	};
	const char * const contention_usage[] = {
		"perf lock contention [<options>]",
		NULL
	};
	unsigned int i;
	int rc = 0;

	for (i = 0; i < LOCKHASH_SIZE; i++)
		INIT_HLIST_HEAD(lockhash_table + i);

	argc = parse_options_subcommand(argc, argv, lock_options, lock_subcommands,
					lock_usage, PARSE_OPT_STOP_AT_NON_OPTION);
	if (!argc)
		usage_with_options(lock_usage, lock_options);

	if (strlen(argv[0]) > 2 && strstarts("record", argv[0])) {
		return __cmd_record(argc, argv);
	} else if (strlen(argv[0]) > 2 && strstarts("report", argv[0])) {
		trace_handler = &report_lock_ops;
		if (argc) {
			argc = parse_options(argc, argv,
					     report_options, report_usage, 0);
			if (argc)
				usage_with_options(report_usage, report_options);
		}
		rc = __cmd_report(false);
	} else if (!strcmp(argv[0], "script")) {
		/* Aliased to 'perf script' */
		return cmd_script(argc, argv);
	} else if (!strcmp(argv[0], "info")) {
		if (argc) {
			argc = parse_options(argc, argv,
					     info_options, info_usage, 0);
			if (argc)
				usage_with_options(info_usage, info_options);
		}
		/* recycling report_lock_ops */
		trace_handler = &report_lock_ops;
		rc = __cmd_report(true);
	} else if (strlen(argv[0]) > 2 && strstarts("contention", argv[0])) {
		trace_handler = &contention_lock_ops;
		sort_key = "wait_total";
		output_fields = "contended,wait_total,wait_max,avg_wait";

#ifndef HAVE_BPF_SKEL
		set_option_nobuild(contention_options, 'b', "use-bpf",
				   "no BUILD_BPF_SKEL=1", false);
#endif
		if (argc) {
			argc = parse_options(argc, argv, contention_options,
					     contention_usage, 0);
		}

		if (show_thread_stats && show_lock_addrs) {
			pr_err("Cannot use thread and addr mode together\n");
			parse_options_usage(contention_usage, contention_options,
					    "threads", 0);
			parse_options_usage(NULL, contention_options,
					    "lock-addr", 0);
			return -1;
		}

		rc = __cmd_contention(argc, argv);
	} else {
		usage_with_options(lock_usage, lock_options);
	}

	return rc;
}
2340 usage_with_options(lock_usage, lock_options);