selftests/bpf: extend existing map resize tests for per-cpu use case
author: Andrii Nakryiko <andrii@kernel.org>
Tue, 11 Jul 2023 23:24:00 +0000 (16:24 -0700)
committer: Alexei Starovoitov <ast@kernel.org>
Wed, 12 Jul 2023 14:57:18 +0000 (07:57 -0700)
Add a per-cpu array resizing use case and demonstrate how
bpf_get_smp_processor_id() can be used to directly access proper data
with no extra checks.

Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/r/20230711232400.1658562-2-andrii@kernel.org
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
tools/testing/selftests/bpf/prog_tests/global_map_resize.c
tools/testing/selftests/bpf/progs/test_global_map_resize.c

index fd41425..56b5bae 100644 (file)
@@ -22,7 +22,7 @@ static void global_map_resize_bss_subtest(void)
        struct test_global_map_resize *skel;
        struct bpf_map *map;
        const __u32 desired_sz = sizeof(skel->bss->sum) + sysconf(_SC_PAGE_SIZE) * 2;
-       size_t array_len, actual_sz;
+       size_t array_len, actual_sz, new_sz;
 
        skel = test_global_map_resize__open();
        if (!ASSERT_OK_PTR(skel, "test_global_map_resize__open"))
@@ -42,6 +42,10 @@ static void global_map_resize_bss_subtest(void)
        if (!ASSERT_EQ(bpf_map__value_size(map), desired_sz, "resize"))
                goto teardown;
 
+       new_sz = sizeof(skel->data_percpu_arr->percpu_arr[0]) * libbpf_num_possible_cpus();
+       err = bpf_map__set_value_size(skel->maps.data_percpu_arr, new_sz);
+       ASSERT_OK(err, "percpu_arr_resize");
+
        /* set the expected number of elements based on the resized array */
        array_len = (desired_sz - sizeof(skel->bss->sum)) / sizeof(skel->bss->array[0]);
        if (!ASSERT_GT(array_len, 1, "array_len"))
@@ -84,11 +88,11 @@ teardown:
 
 static void global_map_resize_data_subtest(void)
 {
-       int err;
        struct test_global_map_resize *skel;
        struct bpf_map *map;
        const __u32 desired_sz = sysconf(_SC_PAGE_SIZE) * 2;
-       size_t array_len, actual_sz;
+       size_t array_len, actual_sz, new_sz;
+       int err;
 
        skel = test_global_map_resize__open();
        if (!ASSERT_OK_PTR(skel, "test_global_map_resize__open"))
@@ -108,6 +112,10 @@ static void global_map_resize_data_subtest(void)
        if (!ASSERT_EQ(bpf_map__value_size(map), desired_sz, "resize"))
                goto teardown;
 
+       new_sz = sizeof(skel->data_percpu_arr->percpu_arr[0]) * libbpf_num_possible_cpus();
+       err = bpf_map__set_value_size(skel->maps.data_percpu_arr, new_sz);
+       ASSERT_OK(err, "percpu_arr_resize");
+
        /* set the expected number of elements based on the resized array */
        array_len = (desired_sz - sizeof(skel->bss->sum)) / sizeof(skel->data_custom->my_array[0]);
        if (!ASSERT_GT(array_len, 1, "array_len"))
index 2588f23..1fbb73d 100644 (file)
@@ -29,13 +29,16 @@ int my_int SEC(".data.non_array");
 int my_array_first[1] SEC(".data.array_not_last");
 int my_int_last SEC(".data.array_not_last");
 
+int percpu_arr[1] SEC(".data.percpu_arr");
+
 SEC("tp/syscalls/sys_enter_getpid")
 int bss_array_sum(void *ctx)
 {
        if (pid != (bpf_get_current_pid_tgid() >> 32))
                return 0;
 
-       sum = 0;
+       /* this will be zero, we just rely on verifier not rejecting this */
+       sum = percpu_arr[bpf_get_smp_processor_id()];
 
        for (size_t i = 0; i < bss_array_len; ++i)
                sum += array[i];
@@ -49,7 +52,8 @@ int data_array_sum(void *ctx)
        if (pid != (bpf_get_current_pid_tgid() >> 32))
                return 0;
 
-       sum = 0;
+       /* this will be zero, we just rely on verifier not rejecting this */
+       sum = percpu_arr[bpf_get_smp_processor_id()];
 
        for (size_t i = 0; i < data_array_len; ++i)
                sum += my_array[i];