bpf: Add new bpf_task_storage_delete proto with no deadlock detection
authorMartin KaFai Lau <martin.lau@kernel.org>
Tue, 25 Oct 2022 18:45:22 +0000 (11:45 -0700)
committerAlexei Starovoitov <ast@kernel.org>
Wed, 26 Oct 2022 06:11:46 +0000 (23:11 -0700)
The bpf_lsm and bpf_iter programs do not recur in a way that would cause a deadlock.
The situation is similar to the bpf_pid_task_storage_delete_elem()
which is called from the syscall map_delete_elem.  It does not need
deadlock detection.  Otherwise, it will cause unnecessary failure
when calling the bpf_task_storage_delete() helper.

This patch adds bpf_task_storage_delete proto that does not do deadlock
detection.  It will be used by bpf_lsm and bpf_iter programs.

Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>
Link: https://lore.kernel.org/r/20221025184524.3526117-8-martin.lau@linux.dev
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
include/linux/bpf.h
kernel/bpf/bpf_task_storage.c
kernel/trace/bpf_trace.c

index ef3f98a..a5dbac8 100644 (file)
@@ -2522,6 +2522,7 @@ extern const struct bpf_func_proto bpf_get_socket_ptr_cookie_proto;
 extern const struct bpf_func_proto bpf_task_storage_get_recur_proto;
 extern const struct bpf_func_proto bpf_task_storage_get_proto;
 extern const struct bpf_func_proto bpf_task_storage_delete_recur_proto;
+extern const struct bpf_func_proto bpf_task_storage_delete_proto;
 extern const struct bpf_func_proto bpf_for_each_map_elem_proto;
 extern const struct bpf_func_proto bpf_btf_find_by_name_kind_proto;
 extern const struct bpf_func_proto bpf_sk_setsockopt_proto;
index f3f79b6..ba3fe72 100644 (file)
@@ -311,6 +311,25 @@ BPF_CALL_2(bpf_task_storage_delete_recur, struct bpf_map *, map, struct task_str
        return ret;
 }
 
+/* Non-recur variant of bpf_task_storage_delete_recur().  Per the commit
+ * message, this proto is selected for bpf_lsm and bpf_iter programs, which
+ * cannot recur into this helper, so no deadlock detection is performed.
+ */
+BPF_CALL_2(bpf_task_storage_delete, struct bpf_map *, map, struct task_struct *,
+          task)
+{
+       int ret;
+
+       /* Caller must hold an RCU read-side (or equivalent) lock. */
+       WARN_ON_ONCE(!bpf_rcu_lock_held());
+       if (!task)
+               return -EINVAL;
+
+       bpf_task_storage_lock();
+       /* This helper must only be called from places where the lifetime of the task
+        * is guaranteed. Either by being refcounted or by being protected
+        * by an RCU read-side critical section.
+        */
+       /* true: the storage lock was taken unconditionally above, unlike the
+        * recur variant which uses trylock-based deadlock detection —
+        * NOTE(review): confirm against task_storage_delete()'s third param.
+        */
+       ret = task_storage_delete(task, map, true);
+       bpf_task_storage_unlock();
+       return ret;
+}
+
 static int notsupp_get_next_key(struct bpf_map *map, void *key, void *next_key)
 {
        return -ENOTSUPP;
@@ -382,3 +401,12 @@ const struct bpf_func_proto bpf_task_storage_delete_recur_proto = {
        .arg2_type = ARG_PTR_TO_BTF_ID,
        .arg2_btf_id = &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
 };
+
+/* Proto for the no-deadlock-detection bpf_task_storage_delete() helper.
+ * Chosen by bpf_tracing_func_proto() when bpf_prog_check_recur(prog) is
+ * false; otherwise the _recur proto (with detection) is used instead.
+ */
+const struct bpf_func_proto bpf_task_storage_delete_proto = {
+       .func = bpf_task_storage_delete,
+       .gpl_only = false,
+       .ret_type = RET_INTEGER,
+       .arg1_type = ARG_CONST_MAP_PTR,
+       .arg2_type = ARG_PTR_TO_BTF_ID,
+       .arg2_btf_id = &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
+};
index e9759b0..eed1bd9 100644 (file)
@@ -1493,7 +1493,9 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
                        return &bpf_task_storage_get_recur_proto;
                return &bpf_task_storage_get_proto;
        case BPF_FUNC_task_storage_delete:
-               return &bpf_task_storage_delete_recur_proto;
+               if (bpf_prog_check_recur(prog))
+                       return &bpf_task_storage_delete_recur_proto;
+               return &bpf_task_storage_delete_proto;
        case BPF_FUNC_for_each_map_elem:
                return &bpf_for_each_map_elem_proto;
        case BPF_FUNC_snprintf: