From ed7c13776e20c74486b0939a3c1de984c5efb6aa Mon Sep 17 00:00:00 2001
From: Feng Zhou <zhoufeng.zf@bytedance.com>
Date: Wed, 11 May 2022 17:38:54 +0800
Subject: [PATCH] selftests/bpf: add test case for bpf_map_lookup_percpu_elem

test_progs: Test the new BPF helper bpf_map_lookup_percpu_elem.

Signed-off-by: Feng Zhou <zhoufeng.zf@bytedance.com>
Link: https://lore.kernel.org/r/20220511093854.411-3-zhoufeng.zf@bytedance.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
---
 .../bpf/prog_tests/map_lookup_percpu_elem.c   | 46 ++++++++++++++++++
 .../bpf/progs/test_map_lookup_percpu_elem.c   | 54 ++++++++++++++++++++++
 2 files changed, 100 insertions(+)
 create mode 100644 tools/testing/selftests/bpf/prog_tests/map_lookup_percpu_elem.c
 create mode 100644 tools/testing/selftests/bpf/progs/test_map_lookup_percpu_elem.c

diff --git a/tools/testing/selftests/bpf/prog_tests/map_lookup_percpu_elem.c b/tools/testing/selftests/bpf/prog_tests/map_lookup_percpu_elem.c
new file mode 100644
index 0000000..58b24c2
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/map_lookup_percpu_elem.c
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2022 Bytedance
+
+#include <test_progs.h>
+
+#include "test_map_lookup_percpu_elem.skel.h"
+
+#define TEST_VALUE 1
+
+void test_map_lookup_percpu_elem(void)
+{
+	struct test_map_lookup_percpu_elem *skel;
+	int key = 0, ret;
+	int nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
+	int *buf;
+
+	buf = (int *)malloc(nr_cpus*sizeof(int));
+	if (!ASSERT_OK_PTR(buf, "malloc"))
+		return;
+	memset(buf, 0, nr_cpus*sizeof(int));
+	buf[0] = TEST_VALUE;
+
+	skel = test_map_lookup_percpu_elem__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "test_map_lookup_percpu_elem__open_and_load"))
+		return;
+	ret = test_map_lookup_percpu_elem__attach(skel);
+	ASSERT_OK(ret, "test_map_lookup_percpu_elem__attach");
+
+	ret = bpf_map_update_elem(bpf_map__fd(skel->maps.percpu_array_map), &key, buf, 0);
+	ASSERT_OK(ret, "percpu_array_map update");
+
+	ret = bpf_map_update_elem(bpf_map__fd(skel->maps.percpu_hash_map), &key, buf, 0);
+	ASSERT_OK(ret, "percpu_hash_map update");
+
+	ret = bpf_map_update_elem(bpf_map__fd(skel->maps.percpu_lru_hash_map), &key, buf, 0);
+	ASSERT_OK(ret, "percpu_lru_hash_map update");
+
+	syscall(__NR_getuid);
+
+	ret = skel->bss->percpu_array_elem_val == TEST_VALUE &&
+	      skel->bss->percpu_hash_elem_val == TEST_VALUE &&
+	      skel->bss->percpu_lru_hash_elem_val == TEST_VALUE;
+	ASSERT_OK(!ret, "bpf_map_lookup_percpu_elem success");
+
+	test_map_lookup_percpu_elem__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/progs/test_map_lookup_percpu_elem.c b/tools/testing/selftests/bpf/progs/test_map_lookup_percpu_elem.c
new file mode 100644
index 0000000..5d4ef86
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_map_lookup_percpu_elem.c
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2022 Bytedance
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+
+int percpu_array_elem_val = 0;
+int percpu_hash_elem_val = 0;
+int percpu_lru_hash_elem_val = 0;
+
+struct {
+	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+	__uint(max_entries, 1);
+	__type(key, __u32);
+	__type(value, __u32);
+} percpu_array_map SEC(".maps");
+
+struct {
+	__uint(type, BPF_MAP_TYPE_PERCPU_HASH);
+	__uint(max_entries, 1);
+	__type(key, __u32);
+	__type(value, __u32);
+} percpu_hash_map SEC(".maps");
+
+struct {
+	__uint(type, BPF_MAP_TYPE_LRU_PERCPU_HASH);
+	__uint(max_entries, 1);
+	__type(key, __u32);
+	__type(value, __u32);
+} percpu_lru_hash_map SEC(".maps");
+
+SEC("tp/syscalls/sys_enter_getuid")
+int sysenter_getuid(const void *ctx)
+{
+	__u32 key = 0;
+	__u32 cpu = 0;
+	__u32 *value;
+
+	value = bpf_map_lookup_percpu_elem(&percpu_array_map, &key, cpu);
+	if (value)
+		percpu_array_elem_val = *value;
+
+	value = bpf_map_lookup_percpu_elem(&percpu_hash_map, &key, cpu);
+	if (value)
+		percpu_hash_elem_val = *value;
+
+	value = bpf_map_lookup_percpu_elem(&percpu_lru_hash_map, &key, cpu);
+	if (value)
+		percpu_lru_hash_elem_val = *value;
+
+	return 0;
+}
+
+char _license[] SEC("license") = "GPL";
-- 
2.7.4
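
For context, a minimal, hypothetical sketch of how bpf_map_lookup_percpu_elem() can be used beyond the single-CPU lookup exercised by the selftest above: aggregating a per-CPU counter across CPUs from inside a BPF program. The map name (counter_map), global (counter_total), program name, and the MAX_CPUS bound are illustrative assumptions and are not part of this patch.

// SPDX-License-Identifier: GPL-2.0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

/* Illustrative compile-time bound on the per-CPU walk; not from the patch. */
#define MAX_CPUS 128

struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, __u64);
} counter_map SEC(".maps");

__u64 counter_total = 0;

SEC("tp/syscalls/sys_enter_getuid")
int sum_percpu_counter(const void *ctx)
{
	__u32 key = 0, cpu;
	__u64 sum = 0;
	__u64 *val;

	/* The helper returns NULL for an out-of-range cpu index, so the
	 * bounded loop stops early on machines with fewer CPUs while the
	 * verifier still sees a finite bound.
	 */
	for (cpu = 0; cpu < MAX_CPUS; cpu++) {
		val = bpf_map_lookup_percpu_elem(&counter_map, &key, cpu);
		if (!val)
			break;
		sum += *val;
	}
	counter_total = sum;
	return 0;
}

char _license[] SEC("license") = "GPL";

Summing on the BPF side this way trades a bounded per-event loop for avoiding the user-space pattern of reading all per-CPU slots with one bpf_map_lookup_elem() call; which side aggregates is a design choice, not something the helper mandates.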