bpf: Reenable bpf_refcount_acquire
authorDave Marchevsky <davemarchevsky@fb.com>
Mon, 21 Aug 2023 19:33:08 +0000 (12:33 -0700)
committerAlexei Starovoitov <ast@kernel.org>
Fri, 25 Aug 2023 16:23:16 +0000 (09:23 -0700)
Now that all reported issues are fixed, bpf_refcount_acquire can be
turned back on. Also reenable all bpf_refcount-related tests which were
disabled.

This is a revert of:
 * commit f3514a5d6740 ("selftests/bpf: Disable newly-added 'owner' field test until refcount re-enabled")
 * commit 7deca5eae833 ("bpf: Disable bpf_refcount_acquire kfunc calls until race conditions are fixed")

Signed-off-by: Dave Marchevsky <davemarchevsky@fb.com>
Acked-by: Yonghong Song <yonghong.song@linux.dev>
Link: https://lore.kernel.org/r/20230821193311.3290257-5-davemarchevsky@fb.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
kernel/bpf/verifier.c
tools/testing/selftests/bpf/prog_tests/refcounted_kptr.c

index 5a61089..b875f51 100644 (file)
@@ -11217,10 +11217,7 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
                                verbose(env, "arg#%d doesn't point to a type with bpf_refcount field\n", i);
                                return -EINVAL;
                        }
-                       if (rec->refcount_off >= 0) {
-                               verbose(env, "bpf_refcount_acquire calls are disabled for now\n");
-                               return -EINVAL;
-                       }
+
                        meta->arg_btf = reg->btf;
                        meta->arg_btf_id = reg->btf_id;
                        break;
index 7423983..d6bd5e1 100644 (file)
@@ -9,12 +9,38 @@
 
 void test_refcounted_kptr(void)
 {
+       RUN_TESTS(refcounted_kptr);
 }
 
 void test_refcounted_kptr_fail(void)
 {
+       RUN_TESTS(refcounted_kptr_fail);
 }
 
 void test_refcounted_kptr_wrong_owner(void)
 {
+       LIBBPF_OPTS(bpf_test_run_opts, opts,
+                   .data_in = &pkt_v4,
+                   .data_size_in = sizeof(pkt_v4),
+                   .repeat = 1,
+       );
+       struct refcounted_kptr *skel;
+       int ret;
+
+       skel = refcounted_kptr__open_and_load();
+       if (!ASSERT_OK_PTR(skel, "refcounted_kptr__open_and_load"))
+               return;
+
+       ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.rbtree_wrong_owner_remove_fail_a1), &opts);
+       ASSERT_OK(ret, "rbtree_wrong_owner_remove_fail_a1");
+       ASSERT_OK(opts.retval, "rbtree_wrong_owner_remove_fail_a1 retval");
+
+       ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.rbtree_wrong_owner_remove_fail_b), &opts);
+       ASSERT_OK(ret, "rbtree_wrong_owner_remove_fail_b");
+       ASSERT_OK(opts.retval, "rbtree_wrong_owner_remove_fail_b retval");
+
+       ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.rbtree_wrong_owner_remove_fail_a2), &opts);
+       ASSERT_OK(ret, "rbtree_wrong_owner_remove_fail_a2");
+       ASSERT_OK(opts.retval, "rbtree_wrong_owner_remove_fail_a2 retval");
+       refcounted_kptr__destroy(skel);
 }