selftests/bpf: Add rbtree selftests
authorDave Marchevsky <davemarchevsky@fb.com>
Tue, 14 Feb 2023 00:40:16 +0000 (16:40 -0800)
committerAlexei Starovoitov <ast@kernel.org>
Tue, 14 Feb 2023 03:40:53 +0000 (19:40 -0800)
This patch adds selftests exercising the logic changed/added in the
previous patches in the series. A variety of successful and unsuccessful
rbtree usages are validated:

Success:
  * Add some nodes, let map_value bpf_rb_root destructor clean them
    up
  * Add some nodes, remove one using the non-owning ref leftover by
    successful rbtree_add() call
  * Add some nodes, remove one using the non-owning ref returned by
    rbtree_first() call

Failure:
  * BTF where bpf_rb_root owns bpf_list_node should fail to load
  * BTF where node of type X is added to tree containing nodes of type Y
    should fail to load
  * No calling rbtree api functions in 'less' callback for rbtree_add
  * No releasing lock in 'less' callback for rbtree_add
  * No removing a node which hasn't been added to any tree
  * No adding a node which has already been added to a tree
  * No escaping of non-owning references past their lock's
    critical section
  * No escaping of non-owning references past other invalidation points
    (rbtree_remove)

These tests mostly focus on rbtree-specific additions, but some of the
failure cases revalidate scenarios common to both linked_list and rbtree
which are covered in the former's tests. Better to be a bit redundant in
case linked_list and rbtree semantics deviate over time.

Signed-off-by: Dave Marchevsky <davemarchevsky@fb.com>
Link: https://lore.kernel.org/r/20230214004017.2534011-8-davemarchevsky@fb.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
tools/testing/selftests/bpf/prog_tests/rbtree.c [new file with mode: 0644]
tools/testing/selftests/bpf/progs/rbtree.c [new file with mode: 0644]
tools/testing/selftests/bpf/progs/rbtree_btf_fail__add_wrong_type.c [new file with mode: 0644]
tools/testing/selftests/bpf/progs/rbtree_btf_fail__wrong_node_type.c [new file with mode: 0644]
tools/testing/selftests/bpf/progs/rbtree_fail.c [new file with mode: 0644]

diff --git a/tools/testing/selftests/bpf/prog_tests/rbtree.c b/tools/testing/selftests/bpf/prog_tests/rbtree.c
new file mode 100644 (file)
index 0000000..156fa95
--- /dev/null
@@ -0,0 +1,117 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+#include <test_progs.h>
+#include <network_helpers.h>
+
+#include "rbtree.skel.h"
+#include "rbtree_fail.skel.h"
+#include "rbtree_btf_fail__wrong_node_type.skel.h"
+#include "rbtree_btf_fail__add_wrong_type.skel.h"
+
+/* Run the rbtree_add_nodes BPF program once against a dummy IPv4 packet and
+ * verify it returned 0 and that its 'less' comparison callback ran
+ * (the prog sets less_callback_ran = 1).
+ */
+static void test_rbtree_add_nodes(void)
+{
+       LIBBPF_OPTS(bpf_test_run_opts, opts,
+                   .data_in = &pkt_v4,
+                   .data_size_in = sizeof(pkt_v4),
+                   .repeat = 1,
+       );
+       struct rbtree *skel;
+       int ret;
+
+       skel = rbtree__open_and_load();
+       if (!ASSERT_OK_PTR(skel, "rbtree__open_and_load"))
+               return;
+
+       ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.rbtree_add_nodes), &opts);
+       ASSERT_OK(ret, "rbtree_add_nodes run");
+       ASSERT_OK(opts.retval, "rbtree_add_nodes retval");
+       ASSERT_EQ(skel->data->less_callback_ran, 1, "rbtree_add_nodes less_callback_ran");
+
+       rbtree__destroy(skel);
+}
+
+/* Run the rbtree_add_and_remove BPF program once and verify it removed the
+ * node with key 5 (the prog records the removed node's key in removed_key).
+ */
+static void test_rbtree_add_and_remove(void)
+{
+       LIBBPF_OPTS(bpf_test_run_opts, opts,
+                   .data_in = &pkt_v4,
+                   .data_size_in = sizeof(pkt_v4),
+                   .repeat = 1,
+       );
+       struct rbtree *skel;
+       int ret;
+
+       skel = rbtree__open_and_load();
+       if (!ASSERT_OK_PTR(skel, "rbtree__open_and_load"))
+               return;
+
+       ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.rbtree_add_and_remove), &opts);
+       ASSERT_OK(ret, "rbtree_add_and_remove");
+       ASSERT_OK(opts.retval, "rbtree_add_and_remove retval");
+       ASSERT_EQ(skel->data->removed_key, 5, "rbtree_add_and_remove first removed key");
+
+       rbtree__destroy(skel);
+}
+
+/* Run the rbtree_first_and_remove BPF program once. The prog inserts nodes
+ * with (key,data) = (3,4), (5,6), (1,2); rbtree_first() should yield the
+ * key-1 node (data 2), removing it should record removed_key = 1, and a
+ * second rbtree_first() should yield the key-3 node (data 4).
+ */
+static void test_rbtree_first_and_remove(void)
+{
+       LIBBPF_OPTS(bpf_test_run_opts, opts,
+                   .data_in = &pkt_v4,
+                   .data_size_in = sizeof(pkt_v4),
+                   .repeat = 1,
+       );
+       struct rbtree *skel;
+       int ret;
+
+       skel = rbtree__open_and_load();
+       if (!ASSERT_OK_PTR(skel, "rbtree__open_and_load"))
+               return;
+
+       ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.rbtree_first_and_remove), &opts);
+       ASSERT_OK(ret, "rbtree_first_and_remove");
+       ASSERT_OK(opts.retval, "rbtree_first_and_remove retval");
+       ASSERT_EQ(skel->data->first_data[0], 2, "rbtree_first_and_remove first rbtree_first()");
+       ASSERT_EQ(skel->data->removed_key, 1, "rbtree_first_and_remove first removed key");
+       ASSERT_EQ(skel->data->first_data[1], 4, "rbtree_first_and_remove second rbtree_first()");
+
+       rbtree__destroy(skel);
+}
+
+/* Top-level entry point: dispatch the three success-case subtests. */
+void test_rbtree_success(void)
+{
+       if (test__start_subtest("rbtree_add_nodes"))
+               test_rbtree_add_nodes();
+       if (test__start_subtest("rbtree_add_and_remove"))
+               test_rbtree_add_and_remove();
+       if (test__start_subtest("rbtree_first_and_remove"))
+               test_rbtree_first_and_remove();
+}
+
+/* Generate a test that open_and_loads rbtree_btf_fail__<suffix>.bpf.o and
+ * expects the load to FAIL (bad BTF). On unexpected success, destroy the
+ * skeleton so it isn't leaked.
+ */
+#define BTF_FAIL_TEST(suffix)                                                                  \
+void test_rbtree_btf_fail__##suffix(void)                                                      \
+{                                                                                              \
+       struct rbtree_btf_fail__##suffix *skel;                                                 \
+                                                                                               \
+       skel = rbtree_btf_fail__##suffix##__open_and_load();                                    \
+       if (!ASSERT_ERR_PTR(skel,                                                               \
+                           "rbtree_btf_fail__" #suffix "__open_and_load unexpected success"))  \
+               rbtree_btf_fail__##suffix##__destroy(skel);                                     \
+}
+
+/* Run one generated BTF-failure test under its own subtest name. */
+#define RUN_BTF_FAIL_TEST(suffix)                              \
+       if (test__start_subtest("rbtree_btf_fail__" #suffix))   \
+               test_rbtree_btf_fail__##suffix();
+
+BTF_FAIL_TEST(wrong_node_type);
+BTF_FAIL_TEST(add_wrong_type);
+
+void test_rbtree_btf_fail(void)
+{
+       RUN_BTF_FAIL_TEST(wrong_node_type);
+       RUN_BTF_FAIL_TEST(add_wrong_type);
+}
+
+/* RUN_TESTS drives every __failure/__msg-annotated prog in rbtree_fail.c. */
+void test_rbtree_fail(void)
+{
+       RUN_TESTS(rbtree_fail);
+}
diff --git a/tools/testing/selftests/bpf/progs/rbtree.c b/tools/testing/selftests/bpf/progs/rbtree.c
new file mode 100644 (file)
index 0000000..e5db1a4
--- /dev/null
@@ -0,0 +1,176 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+#include "bpf_experimental.h"
+
+/* Tree node: ordered by 'key' (see less()); 'data' is an arbitrary payload.
+ * 'node' links this struct into groot (see __contains below).
+ */
+struct node_data {
+       long key;
+       long data;
+       struct bpf_rb_node node;
+};
+
+/* Result slots read by prog_tests/rbtree.c; -1 means "not written". */
+long less_callback_ran = -1;
+long removed_key = -1;
+long first_data[2] = {-1, -1};
+
+#define private(name) SEC(".data." #name) __hidden __attribute__((aligned(8)))
+/* glock protects groot; both live in the same ".data.A" section. */
+private(A) struct bpf_spin_lock glock;
+private(A) struct bpf_rb_root groot __contains(node_data, node);
+
+/* rbtree_add() comparator: orders nodes by ascending key. Also records
+ * that it ran so userspace can assert the callback was invoked.
+ */
+static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b)
+{
+       struct node_data *node_a;
+       struct node_data *node_b;
+
+       node_a = container_of(a, struct node_data, node);
+       node_b = container_of(b, struct node_data, node);
+       less_callback_ran = 1;
+
+       return node_a->key < node_b->key;
+}
+
+/* Allocate and insert three nodes (keys 5, 1, 3) into the global tree,
+ * returning 0 on success or a nonzero step number on allocation failure.
+ *
+ * NOTE(review): the 'root' and 'lock' parameters are unused -- the body
+ * operates on the globals groot/glock directly. Presumably intentional
+ * (params kept for readability at the call site) -- confirm.
+ */
+static long __add_three(struct bpf_rb_root *root, struct bpf_spin_lock *lock)
+{
+       struct node_data *n, *m;
+
+       n = bpf_obj_new(typeof(*n));
+       if (!n)
+               return 1;
+       n->key = 5;
+
+       m = bpf_obj_new(typeof(*m));
+       if (!m) {
+               bpf_obj_drop(n);
+               return 2;
+       }
+       m->key = 1;
+
+       bpf_spin_lock(&glock);
+       bpf_rbtree_add(&groot, &n->node, less);
+       bpf_rbtree_add(&groot, &m->node, less);
+       bpf_spin_unlock(&glock);
+
+       /* Third node added in a separate critical section. */
+       n = bpf_obj_new(typeof(*n));
+       if (!n)
+               return 3;
+       n->key = 3;
+
+       bpf_spin_lock(&glock);
+       bpf_rbtree_add(&groot, &n->node, less);
+       bpf_spin_unlock(&glock);
+       return 0;
+}
+
+/* Success case: add nodes and let the map_value bpf_rb_root destructor
+ * clean them up when the map is torn down.
+ */
+SEC("tc")
+long rbtree_add_nodes(void *ctx)
+{
+       return __add_three(&groot, &glock);
+}
+
+/* Success case: add two nodes (keys 5 and 3), then remove the key-5 node
+ * using the non-owning reference left over from its successful
+ * bpf_rbtree_add() call, recording its key in removed_key for userspace.
+ * Returns 0 on success, 1 on allocation failure.
+ */
+SEC("tc")
+long rbtree_add_and_remove(void *ctx)
+{
+       struct bpf_rb_node *res = NULL;
+       /* Both pointers must start NULL: if the first bpf_obj_new() fails we
+        * jump to err_out before 'm' is assigned, and err_out reads both.
+        */
+       struct node_data *n = NULL, *m = NULL;
+
+       n = bpf_obj_new(typeof(*n));
+       if (!n)
+               goto err_out;
+       n->key = 5;
+
+       m = bpf_obj_new(typeof(*m));
+       if (!m)
+               goto err_out;
+       m->key = 3;
+
+       bpf_spin_lock(&glock);
+       bpf_rbtree_add(&groot, &n->node, less);
+       bpf_rbtree_add(&groot, &m->node, less);
+       /* n was just added, so this remove uses the non-owning ref to it. */
+       res = bpf_rbtree_remove(&groot, &n->node);
+       bpf_spin_unlock(&glock);
+
+       /* res is an owning ref to the removed node; caller must drop it. */
+       n = container_of(res, struct node_data, node);
+       removed_key = n->key;
+
+       bpf_obj_drop(n);
+
+       return 0;
+err_out:
+       /* Only reached before either node is added to the tree. */
+       if (n)
+               bpf_obj_drop(n);
+       if (m)
+               bpf_obj_drop(m);
+       return 1;
+}
+
+/* Success case: add nodes with (key,data) = (3,4), (5,6), (1,2); read the
+ * tree minimum via rbtree_first() (key 1 -> first_data[0] = 2), remove it
+ * through that non-owning ref (removed_key = 1), then read the new minimum
+ * (key 3 -> first_data[1] = 4). Returns 0 on success, nonzero step on error.
+ */
+SEC("tc")
+long rbtree_first_and_remove(void *ctx)
+{
+       struct bpf_rb_node *res = NULL;
+       struct node_data *n, *m, *o;
+
+       n = bpf_obj_new(typeof(*n));
+       if (!n)
+               return 1;
+       n->key = 3;
+       n->data = 4;
+
+       m = bpf_obj_new(typeof(*m));
+       if (!m)
+               goto err_out;
+       m->key = 5;
+       m->data = 6;
+
+       o = bpf_obj_new(typeof(*o));
+       if (!o)
+               goto err_out;
+       o->key = 1;
+       o->data = 2;
+
+       bpf_spin_lock(&glock);
+       bpf_rbtree_add(&groot, &n->node, less);
+       bpf_rbtree_add(&groot, &m->node, less);
+       bpf_rbtree_add(&groot, &o->node, less);
+
+       res = bpf_rbtree_first(&groot);
+       if (!res) {
+               bpf_spin_unlock(&glock);
+               return 2;
+       }
+
+       /* res is a non-owning ref to the leftmost (smallest-key) node. */
+       o = container_of(res, struct node_data, node);
+       first_data[0] = o->data;
+
+       res = bpf_rbtree_remove(&groot, &o->node);
+       bpf_spin_unlock(&glock);
+
+       /* After remove, res is an owning ref; record key, then drop it. */
+       o = container_of(res, struct node_data, node);
+       removed_key = o->key;
+
+       bpf_obj_drop(o);
+
+       bpf_spin_lock(&glock);
+       res = bpf_rbtree_first(&groot);
+       if (!res) {
+               bpf_spin_unlock(&glock);
+               return 3;
+       }
+
+       o = container_of(res, struct node_data, node);
+       first_data[1] = o->data;
+       bpf_spin_unlock(&glock);
+
+       return 0;
+err_out:
+       /* Only reached before any node is added to the tree. */
+       if (n)
+               bpf_obj_drop(n);
+       if (m)
+               bpf_obj_drop(m);
+       return 1;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/rbtree_btf_fail__add_wrong_type.c b/tools/testing/selftests/bpf/progs/rbtree_btf_fail__add_wrong_type.c
new file mode 100644 (file)
index 0000000..60079b2
--- /dev/null
@@ -0,0 +1,52 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+#include "bpf_experimental.h"
+
+/* Type groot's __contains annotation declares (see below). */
+struct node_data {
+       int key;
+       int data;
+       struct bpf_rb_node node;
+};
+
+/* Same fields but different layout ('node' at a different offset) --
+ * adding one of these to groot should be rejected.
+ */
+struct node_data2 {
+       int key;
+       struct bpf_rb_node node;
+       int data;
+};
+
+/* Comparator over node_data2: ascending key order. */
+static bool less2(struct bpf_rb_node *a, const struct bpf_rb_node *b)
+{
+       struct node_data2 *node_a;
+       struct node_data2 *node_b;
+
+       node_a = container_of(a, struct node_data2, node);
+       node_b = container_of(b, struct node_data2, node);
+
+       return node_a->key < node_b->key;
+}
+
+#define private(name) SEC(".data." #name) __hidden __attribute__((aligned(8)))
+private(A) struct bpf_spin_lock glock;
+/* groot is declared to contain node_data, NOT node_data2. */
+private(A) struct bpf_rb_root groot __contains(node_data, node);
+
+/* Attempts to add a node_data2 to a tree whose __contains annotation names
+ * node_data -- loading this object is expected to fail.
+ */
+SEC("tc")
+long rbtree_api_add__add_wrong_type(void *ctx)
+{
+       struct node_data2 *n;
+
+       n = bpf_obj_new(typeof(*n));
+       if (!n)
+               return 1;
+
+       bpf_spin_lock(&glock);
+       bpf_rbtree_add(&groot, &n->node, less2);
+       bpf_spin_unlock(&glock);
+       return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/rbtree_btf_fail__wrong_node_type.c b/tools/testing/selftests/bpf/progs/rbtree_btf_fail__wrong_node_type.c
new file mode 100644 (file)
index 0000000..340f97d
--- /dev/null
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+#include "bpf_experimental.h"
+
+/* BTF load should fail as bpf_rb_root __contains this type and points to
+ * 'node', but 'node' is not a bpf_rb_node
+ */
+struct node_data {
+       int key;
+       int data;
+       struct bpf_list_node node;      /* wrong type on purpose */
+};
+
+/* Comparator: ascending key order.
+ * NOTE(review): not referenced by the prog below (which only calls
+ * rbtree_first()); presumably kept to mirror the sibling btf_fail test.
+ */
+static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b)
+{
+       struct node_data *node_a;
+       struct node_data *node_b;
+
+       node_a = container_of(a, struct node_data, node);
+       node_b = container_of(b, struct node_data, node);
+
+       return node_a->key < node_b->key;
+}
+
+#define private(name) SEC(".data." #name) __hidden __attribute__((aligned(8)))
+private(A) struct bpf_spin_lock glock;
+private(A) struct bpf_rb_root groot __contains(node_data, node);
+
+/* Never expected to run: the object should fail to load because node_data's
+ * 'node' member is a bpf_list_node, not the bpf_rb_node __contains requires.
+ */
+SEC("tc")
+long rbtree_api_add__wrong_node_type(void *ctx)
+{
+       struct node_data *n;
+
+       n = bpf_obj_new(typeof(*n));
+       if (!n)
+               return 1;
+
+       bpf_spin_lock(&glock);
+       bpf_rbtree_first(&groot);
+       bpf_spin_unlock(&glock);
+       return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/rbtree_fail.c b/tools/testing/selftests/bpf/progs/rbtree_fail.c
new file mode 100644 (file)
index 0000000..bf3cba1
--- /dev/null
@@ -0,0 +1,322 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+#include "bpf_experimental.h"
+#include "bpf_misc.h"
+
+/* Tree node used by all failure-case progs below. */
+struct node_data {
+       long key;
+       long data;
+       struct bpf_rb_node node;
+};
+
+#define private(name) SEC(".data." #name) __hidden __attribute__((aligned(8)))
+/* One lock, two roots: groot2 exists for the multiple-trees failure case. */
+private(A) struct bpf_spin_lock glock;
+private(A) struct bpf_rb_root groot __contains(node_data, node);
+private(A) struct bpf_rb_root groot2 __contains(node_data, node);
+
+/* Well-behaved comparator: ascending key order, no side effects. */
+static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b)
+{
+       struct node_data *node_a;
+       struct node_data *node_b;
+
+       node_a = container_of(a, struct node_data, node);
+       node_b = container_of(b, struct node_data, node);
+
+       return node_a->key < node_b->key;
+}
+
+/* rbtree_add() without holding glock must be rejected by the verifier. */
+SEC("?tc")
+__failure __msg("bpf_spin_lock at off=16 must be held for bpf_rb_root")
+long rbtree_api_nolock_add(void *ctx)
+{
+       struct node_data *n;
+
+       n = bpf_obj_new(typeof(*n));
+       if (!n)
+               return 1;
+
+       bpf_rbtree_add(&groot, &n->node, less);
+       return 0;
+}
+
+/* rbtree_remove() after the critical section has ended must be rejected. */
+SEC("?tc")
+__failure __msg("bpf_spin_lock at off=16 must be held for bpf_rb_root")
+long rbtree_api_nolock_remove(void *ctx)
+{
+       struct node_data *n;
+
+       n = bpf_obj_new(typeof(*n));
+       if (!n)
+               return 1;
+
+       bpf_spin_lock(&glock);
+       bpf_rbtree_add(&groot, &n->node, less);
+       bpf_spin_unlock(&glock);
+
+       /* Lock no longer held here. */
+       bpf_rbtree_remove(&groot, &n->node);
+       return 0;
+}
+
+/* rbtree_first() without holding glock must be rejected. */
+SEC("?tc")
+__failure __msg("bpf_spin_lock at off=16 must be held for bpf_rb_root")
+long rbtree_api_nolock_first(void *ctx)
+{
+       bpf_rbtree_first(&groot);
+       return 0;
+}
+
+/* rbtree_remove() requires a non-owning ref (i.e. a node known to be in a
+ * tree); passing an owning ref to a never-added node must be rejected.
+ */
+SEC("?tc")
+__failure __msg("rbtree_remove node input must be non-owning ref")
+long rbtree_api_remove_unadded_node(void *ctx)
+{
+       struct node_data *n, *m;
+       struct bpf_rb_node *res;
+
+       n = bpf_obj_new(typeof(*n));
+       if (!n)
+               return 1;
+
+       m = bpf_obj_new(typeof(*m));
+       if (!m) {
+               bpf_obj_drop(n);
+               return 1;
+       }
+
+       bpf_spin_lock(&glock);
+       bpf_rbtree_add(&groot, &n->node, less);
+
+       /* This remove should pass verifier */
+       res = bpf_rbtree_remove(&groot, &n->node);
+       n = container_of(res, struct node_data, node);
+
+       /* This remove shouldn't, m isn't in an rbtree */
+       res = bpf_rbtree_remove(&groot, &m->node);
+       m = container_of(res, struct node_data, node);
+       bpf_spin_unlock(&glock);
+
+       if (n)
+               bpf_obj_drop(n);
+       if (m)
+               bpf_obj_drop(m);
+       return 0;
+}
+
+/* rbtree_remove() returns an owning reference; failing to bpf_obj_drop() it
+ * must be flagged as an unreleased reference.
+ * NOTE: the expected message pins alloc_insn=11, which depends on this
+ * function's exact instruction layout -- do not reorder/restyle the body.
+ */
+SEC("?tc")
+__failure __msg("Unreleased reference id=2 alloc_insn=11")
+long rbtree_api_remove_no_drop(void *ctx)
+{
+       struct bpf_rb_node *res;
+       struct node_data *n;
+
+       bpf_spin_lock(&glock);
+       res = bpf_rbtree_first(&groot);
+       if (!res)
+               goto unlock_err;
+
+       res = bpf_rbtree_remove(&groot, res);
+
+       n = container_of(res, struct node_data, node);
+       bpf_spin_unlock(&glock);
+
+       /* bpf_obj_drop(n) is missing here */
+       return 0;
+
+unlock_err:
+       bpf_spin_unlock(&glock);
+       return 1;
+}
+
+/* After a successful rbtree_add(), the caller's ref to n is non-owning, so
+ * a second add (to any tree) no longer has the owning/allocated-object
+ * argument bpf_rbtree_add expects and must be rejected.
+ */
+SEC("?tc")
+__failure __msg("arg#1 expected pointer to allocated object")
+long rbtree_api_add_to_multiple_trees(void *ctx)
+{
+       struct node_data *n;
+
+       n = bpf_obj_new(typeof(*n));
+       if (!n)
+               return 1;
+
+       bpf_spin_lock(&glock);
+       bpf_rbtree_add(&groot, &n->node, less);
+
+       /* This add should fail since n already in groot's tree */
+       bpf_rbtree_add(&groot2, &n->node, less);
+       bpf_spin_unlock(&glock);
+       return 0;
+}
+
+/* Non-owning refs created by rbtree_add() must not escape their lock's
+ * critical section: using one in a later critical section must be rejected.
+ */
+SEC("?tc")
+__failure __msg("rbtree_remove node input must be non-owning ref")
+long rbtree_api_add_release_unlock_escape(void *ctx)
+{
+       struct node_data *n;
+
+       n = bpf_obj_new(typeof(*n));
+       if (!n)
+               return 1;
+
+       bpf_spin_lock(&glock);
+       bpf_rbtree_add(&groot, &n->node, less);
+       bpf_spin_unlock(&glock);
+
+       bpf_spin_lock(&glock);
+       /* After add() in previous critical section, n should be
+        * release_on_unlock and released after previous spin_unlock,
+        * so should not be possible to use it here
+        */
+       bpf_rbtree_remove(&groot, &n->node);
+       bpf_spin_unlock(&glock);
+       return 0;
+}
+
+/* rbtree_remove() must invalidate ALL non-owning refs, including aliases
+ * the verifier can't prove point to the removed node.
+ * NOTE(review): the early 'return 1' paths below exit with glock still
+ * held -- presumably acceptable for a __failure prog whose expected error
+ * is the second remove; confirm the verifier reports that error first.
+ */
+SEC("?tc")
+__failure __msg("rbtree_remove node input must be non-owning ref")
+long rbtree_api_release_aliasing(void *ctx)
+{
+       struct node_data *n, *m, *o;
+       struct bpf_rb_node *res;
+
+       n = bpf_obj_new(typeof(*n));
+       if (!n)
+               return 1;
+
+       bpf_spin_lock(&glock);
+       bpf_rbtree_add(&groot, &n->node, less);
+       bpf_spin_unlock(&glock);
+
+       bpf_spin_lock(&glock);
+
+       /* m and o point to the same node,
+        * but verifier doesn't know this
+        */
+       res = bpf_rbtree_first(&groot);
+       if (!res)
+               return 1;
+       o = container_of(res, struct node_data, node);
+
+       res = bpf_rbtree_first(&groot);
+       if (!res)
+               return 1;
+       m = container_of(res, struct node_data, node);
+
+       bpf_rbtree_remove(&groot, &m->node);
+       /* This second remove shouldn't be possible. Retval of previous
+        * remove returns owning reference to m, which is the same
+        * node o's non-owning ref is pointing at
+        *
+        * In order to preserve property
+        *   * owning ref must not be in rbtree
+        *   * non-owning ref must be in rbtree
+        *
+        * o's ref must be invalidated after previous remove. Otherwise
+        * we'd have non-owning ref to node that isn't in rbtree, and
+        * verifier wouldn't be able to use type system to prevent remove
+        * of ref that already isn't in any tree. Would have to do runtime
+        * checks in that case.
+        */
+       bpf_rbtree_remove(&groot, &o->node);
+
+       bpf_spin_unlock(&glock);
+       return 0;
+}
+
+/* Non-owning refs from rbtree_first() must not escape their critical
+ * section either.
+ * NOTE(review): if res is NULL, n is used below uninitialized; presumably
+ * irrelevant since the verifier rejects the prog regardless -- confirm.
+ */
+SEC("?tc")
+__failure __msg("rbtree_remove node input must be non-owning ref")
+long rbtree_api_first_release_unlock_escape(void *ctx)
+{
+       struct bpf_rb_node *res;
+       struct node_data *n;
+
+       bpf_spin_lock(&glock);
+       res = bpf_rbtree_first(&groot);
+       if (res)
+               n = container_of(res, struct node_data, node);
+       bpf_spin_unlock(&glock);
+
+       bpf_spin_lock(&glock);
+       /* After first() in previous critical section, n should be
+        * release_on_unlock and released after previous spin_unlock,
+        * so should not be possible to use it here
+        */
+       bpf_rbtree_remove(&groot, &n->node);
+       bpf_spin_unlock(&glock);
+       return 0;
+}
+
+/* Bad comparator: calls rbtree_add() from inside the 'less' callback. */
+static bool less__bad_fn_call_add(struct bpf_rb_node *a, const struct bpf_rb_node *b)
+{
+       struct node_data *node_a;
+       struct node_data *node_b;
+
+       node_a = container_of(a, struct node_data, node);
+       node_b = container_of(b, struct node_data, node);
+       bpf_rbtree_add(&groot, &node_a->node, less);
+
+       return node_a->key < node_b->key;
+}
+
+/* Bad comparator: calls rbtree_remove() from inside the 'less' callback. */
+static bool less__bad_fn_call_remove(struct bpf_rb_node *a, const struct bpf_rb_node *b)
+{
+       struct node_data *node_a;
+       struct node_data *node_b;
+
+       node_a = container_of(a, struct node_data, node);
+       node_b = container_of(b, struct node_data, node);
+       bpf_rbtree_remove(&groot, &node_a->node);
+
+       return node_a->key < node_b->key;
+}
+
+/* Bad comparator: calls rbtree_first() and then releases glock from inside
+ * the 'less' callback.
+ */
+static bool less__bad_fn_call_first_unlock_after(struct bpf_rb_node *a, const struct bpf_rb_node *b)
+{
+       struct node_data *node_a;
+       struct node_data *node_b;
+
+       node_a = container_of(a, struct node_data, node);
+       node_b = container_of(b, struct node_data, node);
+       bpf_rbtree_first(&groot);
+       bpf_spin_unlock(&glock);
+
+       return node_a->key < node_b->key;
+}
+
+/* Shared driver: add a single fresh node using the given comparator.
+ * Must be __always_inline so each caller's prog embeds its own cb.
+ */
+static __always_inline
+long add_with_cb(bool (cb)(struct bpf_rb_node *a, const struct bpf_rb_node *b))
+{
+       struct node_data *n;
+
+       n = bpf_obj_new(typeof(*n));
+       if (!n)
+               return 1;
+
+       bpf_spin_lock(&glock);
+       bpf_rbtree_add(&groot, &n->node, cb);
+       bpf_spin_unlock(&glock);
+       return 0;
+}
+
+/* Each prog below drives add_with_cb() with one misbehaving comparator and
+ * expects the verifier rejection named in its __msg annotation.
+ */
+SEC("?tc")
+__failure __msg("arg#1 expected pointer to allocated object")
+long rbtree_api_add_bad_cb_bad_fn_call_add(void *ctx)
+{
+       return add_with_cb(less__bad_fn_call_add);
+}
+
+SEC("?tc")
+__failure __msg("rbtree_remove not allowed in rbtree cb")
+long rbtree_api_add_bad_cb_bad_fn_call_remove(void *ctx)
+{
+       return add_with_cb(less__bad_fn_call_remove);
+}
+
+SEC("?tc")
+__failure __msg("can't spin_{lock,unlock} in rbtree cb")
+long rbtree_api_add_bad_cb_bad_fn_call_first_unlock_after(void *ctx)
+{
+       return add_with_cb(less__bad_fn_call_first_unlock_after);
+}
+
+char _license[] SEC("license") = "GPL";