// SPDX-License-Identifier: GPL-2.0
#include "util/bpf_counter.h"
#include "util/debug.h"
#include "util/evsel.h"
#include "util/evlist.h"
#include "util/off_cpu.h"
#include "util/perf-hooks.h"
#include "util/record.h"
#include "util/session.h"
#include "util/target.h"
#include "util/cpumap.h"
#include "util/thread_map.h"
#include "util/cgroup.h"
#include "util/strlist.h"
#include <bpf/bpf.h>

#include "bpf_skel/off_cpu.skel.h"
#define MAX_STACKS  32
#define MAX_PROC  4096

/* we don't need an actual timestamp, we just want the samples to sort last */
#define OFF_CPU_TIMESTAMP  (~0ull << 32)
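/*
 * With all upper 32 bits set, this value compares greater than any real
 * sample time, so the synthesized samples land after everything else.
 * The low 32 bits start at zero and are bumped once per sample in
 * off_cpu_write() below, keeping the synthesized samples in a stable
 * order among themselves.
 */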

static struct off_cpu_bpf *skel;

/* must match struct off_cpu_key in bpf_skel/off_cpu.bpf.c */
struct off_cpu_key {
	u32 pid;
	u32 tgid;
	u32 stack_id;
	u32 state;
	u64 cgroup_id;
};

union off_cpu_data {
	struct perf_event_header hdr;
	u64 array[1024 / sizeof(u64)];
};
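
/*
 * Scratch buffer used by off_cpu_write() to build each PERF_RECORD_SAMPLE
 * by hand: hdr overlays array[0] (struct perf_event_header is 8 bytes) and
 * the sample payload is filled in from array[1] onward.
 */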

static int off_cpu_config(struct evlist *evlist)
{
	struct evsel *evsel;
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_SOFTWARE,
		.config	= PERF_COUNT_SW_BPF_OUTPUT,
		.size	= sizeof(attr), /* to capture ABI version */
	};
	char *evname = strdup(OFFCPU_EVENT);

	if (evname == NULL)
		return -ENOMEM;

	evsel = evsel__new(&attr);
	if (!evsel) {
		free(evname);
		return -ENOMEM;
	}

	evsel->core.attr.freq = 1;
	evsel->core.attr.sample_period = 1;
	/* off-cpu analysis depends on stack trace */
	evsel->core.attr.sample_type = PERF_SAMPLE_CALLCHAIN;

	evlist__add(evlist, evsel);

	free(evsel->name);
	evsel->name = evname;

	return 0;
}

static void off_cpu_start(void *arg)
{
	struct evlist *evlist = arg;

	/* update the task filter for the given workload */
	if (!skel->bss->has_cpu && !skel->bss->has_task &&
	    perf_thread_map__pid(evlist->core.threads, 0) != -1) {
		int fd;
		u32 pid;
		u8 val = 1;

		skel->bss->has_task = 1;
		skel->bss->uses_tgid = 1;
		fd = bpf_map__fd(skel->maps.task_filter);
		pid = perf_thread_map__pid(evlist->core.threads, 0);
		bpf_map_update_elem(fd, &pid, &val, BPF_ANY);
	}

	skel->bss->enabled = 1;
}
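
/*
 * Neither of these is called directly: off_cpu_prepare() below registers
 * them as the "record_start" and "record_end" perf hooks, so they run when
 * recording actually starts and stops.
 */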

static void off_cpu_finish(void *arg __maybe_unused)
{
	skel->bss->enabled = 0;
	off_cpu_bpf__destroy(skel);
}

/* the v5.18 kernel added a prev_state arg, so the signature needs to be checked */
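/*
 * Pre-v5.18:  TP_PROTO(bool preempt, struct task_struct *prev,
 *                      struct task_struct *next)
 * v5.18+:     TP_PROTO(bool preempt, struct task_struct *prev,
 *                      struct task_struct *next, unsigned int prev_state)
 * When the new format is detected, has_prev_state tells the BPF program to
 * take the task state from the tracepoint argument instead of reading it
 * back from the task_struct.
 */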
static void check_sched_switch_args(void)
{
	const struct btf *btf = bpf_object__btf(skel->obj);
	const struct btf_type *t1, *t2, *t3;
	u32 type_id;

	type_id = btf__find_by_name_kind(btf, "btf_trace_sched_switch",
					 BTF_KIND_TYPEDEF);
	if ((s32)type_id < 0)
		return;

	t1 = btf__type_by_id(btf, type_id);
	if (t1 == NULL)
		return;

	t2 = btf__type_by_id(btf, t1->type);
	if (t2 == NULL || !btf_is_ptr(t2))
		return;

	t3 = btf__type_by_id(btf, t2->type);
	if (t3 && btf_is_func_proto(t3) && btf_vlen(t3) == 4) {
		/* new format: pass prev_state as 4th arg */
		skel->rodata->has_prev_state = true;
	}
}

int off_cpu_prepare(struct evlist *evlist, struct target *target,
		    struct record_opts *opts)
{
	int err, fd, i;
	int ncpus = 1, ntasks = 1, ncgrps = 1;
	struct strlist *pid_slist = NULL;
	struct str_node *pos;

	if (off_cpu_config(evlist) < 0) {
		pr_err("Failed to config off-cpu BPF event\n");
		return -1;
	}

	skel = off_cpu_bpf__open();
	if (!skel) {
		pr_err("Failed to open off-cpu BPF skeleton\n");
		return -1;
	}

	/* no need to set a cpu filter for system-wide mode */
	if (target->cpu_list) {
		ncpus = perf_cpu_map__nr(evlist->core.user_requested_cpus);
		bpf_map__set_max_entries(skel->maps.cpu_filter, ncpus);
	}

	if (target->pid) {
		pid_slist = strlist__new(target->pid, NULL);
		if (!pid_slist) {
			pr_err("Failed to create a strlist for pid\n");
			return -1;
		}

		/* count valid entries in the comma-separated pid list */
		ntasks = 0;
		strlist__for_each_entry(pos, pid_slist) {
			char *end_ptr;
			int pid = strtol(pos->s, &end_ptr, 10);

			if (pid == INT_MIN || pid == INT_MAX ||
			    (*end_ptr != '\0' && *end_ptr != ','))
				continue;

			ntasks++;
		}

		if (ntasks < MAX_PROC)
			ntasks = MAX_PROC;

		bpf_map__set_max_entries(skel->maps.task_filter, ntasks);
	} else if (target__has_task(target)) {
		ntasks = perf_thread_map__nr(evlist->core.threads);
		bpf_map__set_max_entries(skel->maps.task_filter, ntasks);
	} else if (target__none(target)) {
		bpf_map__set_max_entries(skel->maps.task_filter, MAX_PROC);
	}

	if (evlist__first(evlist)->cgrp) {
		ncgrps = evlist->core.nr_entries - 1; /* excluding a dummy */
		bpf_map__set_max_entries(skel->maps.cgroup_filter, ncgrps);

		if (!cgroup_is_v2("perf_event"))
			skel->rodata->uses_cgroup_v1 = true;
	}

	if (opts->record_cgroup) {
		skel->rodata->needs_cgroup = true;

		if (!cgroup_is_v2("perf_event"))
			skel->rodata->uses_cgroup_v1 = true;
	}

	set_max_rlimit();
	check_sched_switch_args();

	err = off_cpu_bpf__load(skel);
	if (err) {
		pr_err("Failed to load off-cpu skeleton\n");
		goto out;
	}

	if (target->cpu_list) {
		u32 cpu;
		u8 val = 1;

		skel->bss->has_cpu = 1;
		fd = bpf_map__fd(skel->maps.cpu_filter);

		for (i = 0; i < ncpus; i++) {
			cpu = perf_cpu_map__cpu(evlist->core.user_requested_cpus, i).cpu;
			bpf_map_update_elem(fd, &cpu, &val, BPF_ANY);
		}
	}

	if (target->pid) {
		u8 val = 1;

		skel->bss->has_task = 1;
		skel->bss->uses_tgid = 1;
		fd = bpf_map__fd(skel->maps.task_filter);

		strlist__for_each_entry(pos, pid_slist) {
			char *end_ptr;
			u32 tgid;
			int pid = strtol(pos->s, &end_ptr, 10);

			if (pid == INT_MIN || pid == INT_MAX ||
			    (*end_ptr != '\0' && *end_ptr != ','))
				continue;

			tgid = pid;
			bpf_map_update_elem(fd, &tgid, &val, BPF_ANY);
		}
	} else if (target__has_task(target)) {
		u32 pid;
		u8 val = 1;

		skel->bss->has_task = 1;
		fd = bpf_map__fd(skel->maps.task_filter);

		for (i = 0; i < ntasks; i++) {
			pid = perf_thread_map__pid(evlist->core.threads, i);
			bpf_map_update_elem(fd, &pid, &val, BPF_ANY);
		}
	}

	if (evlist__first(evlist)->cgrp) {
		struct evsel *evsel;
		u8 val = 1;

		skel->bss->has_cgroup = 1;
		fd = bpf_map__fd(skel->maps.cgroup_filter);

		evlist__for_each_entry(evlist, evsel) {
			struct cgroup *cgrp = evsel->cgrp;

			if (cgrp == NULL)
				continue;

			if (!cgrp->id && read_cgroup_id(cgrp) < 0) {
				pr_err("Failed to read cgroup id of %s\n",
				       cgrp->name);
				err = -1;
				goto out;
			}

			bpf_map_update_elem(fd, &cgrp->id, &val, BPF_ANY);
		}
	}

	err = off_cpu_bpf__attach(skel);
	if (err) {
		pr_err("Failed to attach off-cpu BPF skeleton\n");
		goto out;
	}

	if (perf_hooks__set_hook("record_start", off_cpu_start, evlist) ||
	    perf_hooks__set_hook("record_end", off_cpu_finish, evlist)) {
		pr_err("Failed to set off-cpu record hooks\n");
		err = -1;
		goto out;
	}

	return 0;

out:
	off_cpu_bpf__destroy(skel);
	return err;
}
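
/*
 * Drain the BPF maps and append the accumulated off-cpu time to the
 * perf.data file as synthesized PERF_RECORD_SAMPLEs.  perf record calls
 * this once at the end of the session; the return value is the number of
 * bytes written so the caller can account for them.
 */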
int off_cpu_write(struct perf_session *session)
{
	int bytes = 0, size;
	int fd, stack;
	u64 sample_type, val, sid = 0;
	struct evsel *evsel;
	struct perf_data_file *file = &session->data->file;
	struct off_cpu_key prev, key;
	union off_cpu_data data = {
		.hdr = {
			.type = PERF_RECORD_SAMPLE,
			.misc = PERF_RECORD_MISC_USER,
		},
	};
	u64 tstamp = OFF_CPU_TIMESTAMP;

	skel->bss->enabled = 0;

	evsel = evlist__find_evsel_by_str(session->evlist, OFFCPU_EVENT);
	if (evsel == NULL) {
		pr_err("%s evsel not found\n", OFFCPU_EVENT);
		return 0;
	}

	sample_type = evsel->core.attr.sample_type;

	if (sample_type & ~OFFCPU_SAMPLE_TYPES) {
		pr_err("unsupported sample type: %llx\n",
		       (unsigned long long)sample_type);
		return -1;
	}

	if (sample_type & (PERF_SAMPLE_ID | PERF_SAMPLE_IDENTIFIER)) {
		if (evsel->core.id)
			sid = evsel->core.id[0];
	}

	fd = bpf_map__fd(skel->maps.off_cpu);
	stack = bpf_map__fd(skel->maps.stacks);
	memset(&prev, 0, sizeof(prev));
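
	/*
	 * Iterate the whole off_cpu map: starting from a zeroed key that
	 * matches no entry, bpf_map_get_next_key() returns the first key,
	 * and each pass feeds the previous key back in until the kernel
	 * reports the end of the map.
	 */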
	while (!bpf_map_get_next_key(fd, &prev, &key)) {
		int n = 1; /* start from perf_event_header */
		int ip_pos = -1;

		bpf_map_lookup_elem(fd, &key, &val);

		if (sample_type & PERF_SAMPLE_IDENTIFIER)
			data.array[n++] = sid;
		if (sample_type & PERF_SAMPLE_IP) {
			ip_pos = n;
			data.array[n++] = 0; /* will be updated */
		}
		if (sample_type & PERF_SAMPLE_TID)
			data.array[n++] = (u64)key.pid << 32 | key.tgid;
		if (sample_type & PERF_SAMPLE_TIME)
			data.array[n++] = tstamp;
		if (sample_type & PERF_SAMPLE_ID)
			data.array[n++] = sid;
		if (sample_type & PERF_SAMPLE_CPU)
			data.array[n++] = 0;
		if (sample_type & PERF_SAMPLE_PERIOD)
			data.array[n++] = val;
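		/*
		 * The callchain is emitted as nr followed by nr entries,
		 * e.g. for a two-entry user stack:
		 *   array[n]     = 3                (PERF_CONTEXT_USER + 2 ips)
		 *   array[n + 1] = PERF_CONTEXT_USER
		 *   array[n + 2] = ip0
		 *   array[n + 3] = ip1
		 * The stacks map value is zero-filled past the last entry,
		 * which is what the length scan below relies on.
		 */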
		if (sample_type & PERF_SAMPLE_CALLCHAIN) {
			int len = 0;

			/* data.array[n] is callchain->nr (updated later) */
			data.array[n + 1] = PERF_CONTEXT_USER;
			data.array[n + 2] = 0;

			bpf_map_lookup_elem(stack, &key.stack_id, &data.array[n + 2]);
			while (data.array[n + 2 + len])
				len++;

			/* update the length of the callchain */
			data.array[n] = len + 1;

			/* update the sample ip with the first callchain entry */
			if (ip_pos >= 0)
				data.array[ip_pos] = data.array[n + 2];

			/* account for the callchain data in the array length */
			n += len + 2;
		}
		if (sample_type & PERF_SAMPLE_CGROUP)
			data.array[n++] = key.cgroup_id;

		size = n * sizeof(u64);
		data.hdr.size = size;
		bytes += size;

		if (perf_data_file__write(file, &data, size) < 0) {
			pr_err("failed to write perf data, error: %m\n");
			return bytes;
		}

		prev = key;
		/* increase the dummy timestamp so later samples stay sorted */
		tstamp++;
	}

	return bytes;
}