// SPDX-License-Identifier: GPL-2.0
#include "util/debug.h"
#include "util/evlist.h"
#include "util/machine.h"
#include "util/map.h"
#include "util/symbol.h"
#include "util/target.h"
#include "util/thread.h"
#include "util/thread_map.h"
#include "util/lock-contention.h"
#include <linux/zalloc.h>
#include <linux/string.h>
#include <bpf/bpf.h>

#include "bpf_skel/lock_contention.skel.h"
#include "bpf_skel/lock_data.h"

static struct lock_contention_bpf *skel;

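/*
 * Open the skeleton, size its maps from the command-line options, load
 * and attach it, then seed the filter maps with the requested CPUs,
 * tasks, lock types and lock addresses.
 */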
int lock_contention_prepare(struct lock_contention *con)
{
	int i, fd;
	int ncpus = 1, ntasks = 1, ntypes = 1, naddrs = 1;
	struct evlist *evlist = con->evlist;
	struct target *target = con->target;

	skel = lock_contention_bpf__open();
	if (!skel) {
		pr_err("Failed to open lock-contention BPF skeleton\n");
		return -1;
	}

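	/* map geometry must be fixed before lock_contention_bpf__load() */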
	bpf_map__set_value_size(skel->maps.stacks, con->max_stack * sizeof(u64));
	bpf_map__set_max_entries(skel->maps.lock_stat, con->map_nr_entries);
	bpf_map__set_max_entries(skel->maps.tstamp, con->map_nr_entries);

	if (con->aggr_mode == LOCK_AGGR_TASK)
		bpf_map__set_max_entries(skel->maps.task_data, con->map_nr_entries);
	else
		bpf_map__set_max_entries(skel->maps.task_data, 1);

	if (con->save_callstack)
		bpf_map__set_max_entries(skel->maps.stacks, con->map_nr_entries);
	else
		bpf_map__set_max_entries(skel->maps.stacks, 1);

	if (target__has_cpu(target))
		ncpus = perf_cpu_map__nr(evlist->core.user_requested_cpus);
	if (target__has_task(target))
		ntasks = perf_thread_map__nr(evlist->core.threads);
	if (con->filters->nr_types)
		ntypes = con->filters->nr_types;

	/* resolve lock name filters to addr */
	if (con->filters->nr_syms) {
		struct symbol *sym;
		struct map *kmap;
		unsigned long *addrs;

		for (i = 0; i < con->filters->nr_syms; i++) {
			sym = machine__find_kernel_symbol_by_name(con->machine,
								  con->filters->syms[i],
								  &kmap);
			if (sym == NULL) {
				pr_warning("ignore unknown symbol: %s\n",
					   con->filters->syms[i]);
				continue;
			}

			addrs = realloc(con->filters->addrs,
					(con->filters->nr_addrs + 1) * sizeof(*addrs));
			if (addrs == NULL) {
				pr_warning("memory allocation failure\n");
				continue;
			}

			addrs[con->filters->nr_addrs++] = kmap->unmap_ip(kmap, sym->start);
			con->filters->addrs = addrs;
		}
		naddrs = con->filters->nr_addrs;
	}

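	/* unused filter maps still keep a single dummy entry so they load */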
	bpf_map__set_max_entries(skel->maps.cpu_filter, ncpus);
	bpf_map__set_max_entries(skel->maps.task_filter, ntasks);
	bpf_map__set_max_entries(skel->maps.type_filter, ntypes);
	bpf_map__set_max_entries(skel->maps.addr_filter, naddrs);

	if (lock_contention_bpf__load(skel) < 0) {
		pr_err("Failed to load lock-contention BPF skeleton\n");
		return -1;
	}

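	/*
	 * The filter maps can only be populated after the load above:
	 * bpf_map__fd() is not valid until the maps have been created.
	 */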
	if (target__has_cpu(target)) {
		u32 cpu;
		u8 val = 1;

		skel->bss->has_cpu = 1;
		fd = bpf_map__fd(skel->maps.cpu_filter);

		for (i = 0; i < ncpus; i++) {
			cpu = perf_cpu_map__cpu(evlist->core.user_requested_cpus, i).cpu;
			bpf_map_update_elem(fd, &cpu, &val, BPF_ANY);
		}
	}

	if (target__has_task(target)) {
		u32 pid;
		u8 val = 1;

		skel->bss->has_task = 1;
		fd = bpf_map__fd(skel->maps.task_filter);

		for (i = 0; i < ntasks; i++) {
			pid = perf_thread_map__pid(evlist->core.threads, i);
			bpf_map_update_elem(fd, &pid, &val, BPF_ANY);
		}
	}

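	/* with no explicit target, filter on the forked workload, if any */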
	if (target__none(target) && evlist->workload.pid > 0) {
		u32 pid = evlist->workload.pid;
		u8 val = 1;

		skel->bss->has_task = 1;
		fd = bpf_map__fd(skel->maps.task_filter);
		bpf_map_update_elem(fd, &pid, &val, BPF_ANY);
	}

	if (con->filters->nr_types) {
		u8 val = 1;

		skel->bss->has_type = 1;
		fd = bpf_map__fd(skel->maps.type_filter);

		for (i = 0; i < con->filters->nr_types; i++)
			bpf_map_update_elem(fd, &con->filters->types[i], &val, BPF_ANY);
	}

	if (con->filters->nr_addrs) {
		u8 val = 1;

		skel->bss->has_addr = 1;
		fd = bpf_map__fd(skel->maps.addr_filter);

		for (i = 0; i < con->filters->nr_addrs; i++)
			bpf_map_update_elem(fd, &con->filters->addrs[i], &val, BPF_ANY);
	}

	/* these don't work well if in the rodata section */
	skel->bss->stack_skip = con->stack_skip;
	skel->bss->aggr_mode = con->aggr_mode;
	skel->bss->needs_callstack = con->save_callstack;

	lock_contention_bpf__attach(skel);
	return 0;
}

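/*
 * The attached BPF programs check the 'enabled' flag before recording
 * anything, so start/stop only flip the flag and leave them attached.
 */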
int lock_contention_start(void)
{
	skel->bss->enabled = 1;
	return 0;
}

int lock_contention_stop(void)
{
	skel->bss->enabled = 0;
	return 0;
}

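/*
 * Resolve a display name for a map entry: the task comm under
 * LOCK_AGGR_TASK, the lock's symbol under LOCK_AGGR_ADDR, and the first
 * caller outside the locking internals under LOCK_AGGR_CALLER.
 */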
static const char *lock_contention_get_name(struct lock_contention *con,
					    struct contention_key *key,
					    u64 *stack_trace)
{
	int idx = 0;
	u64 addr;
	const char *name = "";
	static char name_buf[KSYM_NAME_LEN];
	struct symbol *sym;
	struct map *kmap;
	struct machine *machine = con->machine;

	if (con->aggr_mode == LOCK_AGGR_TASK) {
		struct contention_task_data task;
		u32 pid = key->pid;
		int task_fd = bpf_map__fd(skel->maps.task_data);

		/* do not update idle comm which contains CPU number */
		if (pid) {
			struct thread *t = __machine__findnew_thread(machine, /*pid=*/-1, pid);

			if (t == NULL)
				return name;
			if (!bpf_map_lookup_elem(task_fd, &pid, &task) &&
			    thread__set_comm(t, task.comm, /*timestamp=*/0))
				name = task.comm;
		}
		return name;
	}

	if (con->aggr_mode == LOCK_AGGR_ADDR) {
		sym = machine__find_kernel_symbol(machine, key->lock_addr, &kmap);
		if (sym)
			name = sym->name;
		return name;
	}

	/* LOCK_AGGR_CALLER: skip lock internal functions */
	while (machine__is_lock_function(machine, stack_trace[idx]) &&
	       idx < con->max_stack - 1)
		idx++;

	addr = stack_trace[idx];
	sym = machine__find_kernel_symbol(machine, addr, &kmap);

	if (sym) {
		unsigned long offset;

		offset = kmap->map_ip(kmap, addr) - sym->start;

		if (offset == 0)
			return sym->name;

		snprintf(name_buf, sizeof(name_buf), "%s+%#lx", sym->name, offset);
	} else {
		snprintf(name_buf, sizeof(name_buf), "%#lx", (unsigned long)addr);
	}

	return name_buf;
}

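/*
 * Drain the BPF lock_stat map into perf's lock_stat list, merging each
 * entry with one already seen by a previous read when there is a match.
 */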
int lock_contention_read(struct lock_contention *con)
{
	int fd, stack, err = 0;
	struct contention_key *prev_key, key;
	struct contention_data data = {};
	struct lock_stat *st = NULL;
	struct machine *machine = con->machine;
	u64 *stack_trace;
	size_t stack_size = con->max_stack * sizeof(*stack_trace);

	fd = bpf_map__fd(skel->maps.lock_stat);
	stack = bpf_map__fd(skel->maps.stacks);

	con->lost = skel->bss->lost;

	stack_trace = zalloc(stack_size);
	if (stack_trace == NULL)
		return -1;

	if (con->aggr_mode == LOCK_AGGR_TASK) {
		struct thread *idle = __machine__findnew_thread(machine,
								/*pid=*/0,
								/*tid=*/0);
		thread__set_comm(idle, "swapper", /*timestamp=*/0);
	}

	/* make sure it loads the kernel map */
	map__load(maps__first(machine->kmaps));

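	/*
	 * Iterate every key in the lock_stat map: bpf_map_get_next_key()
	 * returns the first key when prev_key is NULL.
	 */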
	prev_key = NULL;
	while (!bpf_map_get_next_key(fd, prev_key, &key)) {
		s64 ls_key;
		const char *name;

		/* to handle errors in the loop body */
		err = -1;

		bpf_map_lookup_elem(fd, &key, &data);
		if (con->save_callstack) {
			bpf_map_lookup_elem(stack, &key.stack_id, stack_trace);

			if (!match_callstack_filter(machine, stack_trace))
				goto next;
		}

		switch (con->aggr_mode) {
		case LOCK_AGGR_CALLER:
			ls_key = key.stack_id;
			break;
		case LOCK_AGGR_TASK:
			ls_key = key.pid;
			break;
		case LOCK_AGGR_ADDR:
			ls_key = key.lock_addr;
			break;
		default:
			goto next;
		}

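		/* merge with an entry from a previous read, if any */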
		st = lock_stat_find(ls_key);
		if (st != NULL) {
			st->wait_time_total += data.total_time;
			if (st->wait_time_max < data.max_time)
				st->wait_time_max = data.max_time;
			if (st->wait_time_min > data.min_time)
				st->wait_time_min = data.min_time;

			st->nr_contended += data.count;
			if (st->nr_contended)
				st->avg_wait_time = st->wait_time_total / st->nr_contended;
			goto next;
		}

		name = lock_contention_get_name(con, &key, stack_trace);
		st = lock_stat_findnew(ls_key, name, data.flags);
		if (st == NULL)
			break;

		st->nr_contended = data.count;
		st->wait_time_total = data.total_time;
		st->wait_time_max = data.max_time;
		st->wait_time_min = data.min_time;

		if (data.count)
			st->avg_wait_time = data.total_time / data.count;

		if (con->save_callstack) {
			st->callstack = memdup(stack_trace, stack_size);
			if (st->callstack == NULL)
				break;
		}

next:
		prev_key = &key;

		/* we're fine now, reset the error */
		err = 0;
	}

	free(stack_trace);

	return err;
}

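/* disable the BPF programs and tear down the skeleton */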
int lock_contention_finish(void)
{
	if (skel) {
		skel->bss->enabled = 0;
		lock_contention_bpf__destroy(skel);
	}

	return 0;
}