selftests/bpf: Test skipping stacktrace
authorNamhyung Kim <namhyung@kernel.org>
Mon, 14 Mar 2022 18:20:42 +0000 (11:20 -0700)
committerAlexei Starovoitov <ast@kernel.org>
Mon, 21 Mar 2022 02:16:50 +0000 (19:16 -0700)
Add a test case for stacktrace with skip > 0 using a small-sized
buffer.  Previously, the kernel didn't support skipping entries greater
than or equal to the size of the buffer and filled the skipped part with 0.

Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Yonghong Song <yhs@fb.com>
Link: https://lore.kernel.org/bpf/20220314182042.71025-2-namhyung@kernel.org
tools/testing/selftests/bpf/prog_tests/stacktrace_map_skip.c [new file with mode: 0644]
tools/testing/selftests/bpf/progs/stacktrace_map_skip.c [new file with mode: 0644]

diff --git a/tools/testing/selftests/bpf/prog_tests/stacktrace_map_skip.c b/tools/testing/selftests/bpf/prog_tests/stacktrace_map_skip.c
new file mode 100644 (file)
index 0000000..1932b1e
--- /dev/null
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+#include "stacktrace_map_skip.skel.h"
+
+#define TEST_STACK_DEPTH  2
+
+/* Verify that bpf_get_stackid() can skip as many entries as the whole
+ * buffer holds (TEST_STACK_DEPTH) without zero-filling the result.
+ * Also cross-checks the three maps the BPF program fills against each
+ * other, and checks the 'failed' flag the program sets on bad behavior.
+ */
+void test_stacktrace_map_skip(void)
+{
+       struct stacktrace_map_skip *skel;
+       int stackid_hmap_fd, stackmap_fd, stack_amap_fd;
+       int err, stack_trace_len;
+
+       skel = stacktrace_map_skip__open_and_load();
+       if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
+               return;
+
+       /* find map fds */
+       stackid_hmap_fd = bpf_map__fd(skel->maps.stackid_hmap);
+       if (!ASSERT_GE(stackid_hmap_fd, 0, "stackid_hmap fd"))
+               goto out;
+
+       stackmap_fd = bpf_map__fd(skel->maps.stackmap);
+       if (!ASSERT_GE(stackmap_fd, 0, "stackmap fd"))
+               goto out;
+
+       stack_amap_fd = bpf_map__fd(skel->maps.stack_amap);
+       if (!ASSERT_GE(stack_amap_fd, 0, "stack_amap fd"))
+               goto out;
+
+       /* restrict the BPF program to this test process only */
+       skel->bss->pid = getpid();
+
+       err = stacktrace_map_skip__attach(skel);
+       if (!ASSERT_OK(err, "skel_attach"))
+               goto out;
+
+       /* give some time for bpf program run */
+       sleep(1);
+
+       /* disable stack trace collection */
+       skel->bss->control = 1;
+
+       /* for every element in stackid_hmap, we can find a corresponding one
+        * in stackmap, and vice versa.
+        */
+       err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
+       if (!ASSERT_OK(err, "compare_map_keys stackid_hmap vs. stackmap"))
+               goto out;
+
+       err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
+       if (!ASSERT_OK(err, "compare_map_keys stackmap vs. stackid_hmap"))
+               goto out;
+
+       /* traces captured via bpf_get_stackid() and bpf_get_stack()
+        * should contain the same instruction pointers
+        */
+       stack_trace_len = TEST_STACK_DEPTH * sizeof(__u64);
+       err = compare_stack_ips(stackmap_fd, stack_amap_fd, stack_trace_len);
+       if (!ASSERT_OK(err, "compare_stack_ips stackmap vs. stack_amap"))
+               goto out;
+
+       /* nonzero 'failed' means the BPF side saw bad skip behavior */
+       if (!ASSERT_EQ(skel->bss->failed, 0, "skip_failed"))
+               goto out;
+
+out:
+       stacktrace_map_skip__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/progs/stacktrace_map_skip.c b/tools/testing/selftests/bpf/progs/stacktrace_map_skip.c
new file mode 100644 (file)
index 0000000..2eb297d
--- /dev/null
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+
+#define TEST_STACK_DEPTH       2
+#define TEST_MAX_ENTRIES       16384
+
+/* one stack trace: TEST_STACK_DEPTH instruction pointers */
+typedef __u64 stack_trace_t[TEST_STACK_DEPTH];
+
+/* stack_id -> trace, filled implicitly by bpf_get_stackid() */
+struct {
+       __uint(type, BPF_MAP_TYPE_STACK_TRACE);
+       __uint(max_entries, TEST_MAX_ENTRIES);
+       __type(key, __u32);
+       __type(value, stack_trace_t);
+} stackmap SEC(".maps");
+
+/* set of observed stack_ids; userspace compares its keys with stackmap */
+struct {
+       __uint(type, BPF_MAP_TYPE_HASH);
+       __uint(max_entries, TEST_MAX_ENTRIES);
+       __type(key, __u32);
+       __type(value, __u32);
+} stackid_hmap SEC(".maps");
+
+/* stack_id -> trace captured with bpf_get_stack(); userspace compares
+ * its contents against the corresponding stackmap entries
+ */
+struct {
+       __uint(type, BPF_MAP_TYPE_ARRAY);
+       __uint(max_entries, TEST_MAX_ENTRIES);
+       __type(key, __u32);
+       __type(value, stack_trace_t);
+} stack_amap SEC(".maps");
+
+int pid = 0;           /* target tgid, set by userspace before attach */
+int control = 0;       /* set nonzero by userspace to stop collection */
+int failed = 0;        /* set by the prog when skip is mishandled */
+
+/* sched_switch handler: collect the current kernel stack while asking
+ * bpf_get_stackid()/bpf_get_stack() to skip TEST_STACK_DEPTH entries,
+ * i.e. exactly as many entries as the output buffer holds.  Sets
+ * 'failed' to a nonzero code if the skip is not handled properly.
+ */
+SEC("tracepoint/sched/sched_switch")
+int oncpu(struct trace_event_raw_sched_switch *ctx)
+{
+       __u32 max_len = TEST_STACK_DEPTH * sizeof(__u64);
+       __u32 key = 0, val = 0;
+       __u64 *stack_p;
+
+       /* only trace the test process (pid is set by userspace) */
+       if (pid != (bpf_get_current_pid_tgid() >> 32))
+               return 0;
+
+       /* userspace raises 'control' once it has collected enough */
+       if (control)
+               return 0;
+
+       /* it should allow skipping whole buffer size entries */
+       key = bpf_get_stackid(ctx, &stackmap, TEST_STACK_DEPTH);
+       if ((int)key >= 0) {
+               /* The size of stackmap and stack_amap should be the same */
+               bpf_map_update_elem(&stackid_hmap, &key, &val, 0);
+               stack_p = bpf_map_lookup_elem(&stack_amap, &key);
+               if (stack_p) {
+                       bpf_get_stack(ctx, stack_p, max_len, TEST_STACK_DEPTH);
+                       /* it wrongly skipped all the entries and filled zero */
+                       if (stack_p[0] == 0)
+                               failed = 1;
+               }
+       } else {
+               /* old kernel doesn't support skipping that many entries */
+               failed = 2;
+       }
+
+       return 0;
+}
+
+char _license[] SEC("license") = "GPL";