bpf: Add new bpf_task_storage_get proto with no deadlock detection
authorMartin KaFai Lau <martin.lau@kernel.org>
Tue, 25 Oct 2022 18:45:20 +0000 (11:45 -0700)
committerAlexei Starovoitov <ast@kernel.org>
Wed, 26 Oct 2022 06:11:46 +0000 (23:11 -0700)
The bpf_lsm and bpf_iter programs do not recur in a way that causes a deadlock.
The situation is similar to the bpf_pid_task_storage_lookup_elem()
which is called from the syscall map_lookup_elem.  It does not need
deadlock detection.  Otherwise, it will cause unnecessary failure
when calling the bpf_task_storage_get() helper.

This patch adds bpf_task_storage_get proto that does not do deadlock
detection.  It will be used by bpf_lsm and bpf_iter programs.

Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>
Link: https://lore.kernel.org/r/20221025184524.3526117-6-martin.lau@linux.dev
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
include/linux/bpf.h
kernel/bpf/bpf_task_storage.c
kernel/trace/bpf_trace.c

index b04fe3f..ef3f98a 100644 (file)
@@ -2520,6 +2520,7 @@ extern const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto;
 extern const struct bpf_func_proto bpf_sock_from_file_proto;
 extern const struct bpf_func_proto bpf_get_socket_ptr_cookie_proto;
 extern const struct bpf_func_proto bpf_task_storage_get_recur_proto;
+extern const struct bpf_func_proto bpf_task_storage_get_proto;
 extern const struct bpf_func_proto bpf_task_storage_delete_recur_proto;
 extern const struct bpf_func_proto bpf_for_each_map_elem_proto;
 extern const struct bpf_func_proto bpf_btf_find_by_name_kind_proto;
index bc52bc8..c3a841b 100644 (file)
@@ -269,6 +269,23 @@ BPF_CALL_5(bpf_task_storage_get_recur, struct bpf_map *, map, struct task_struct
        return (unsigned long)data;
 }
 
+/* *gfp_flags* is a hidden argument provided by the verifier */
+BPF_CALL_5(bpf_task_storage_get, struct bpf_map *, map, struct task_struct *,
+          task, void *, value, u64, flags, gfp_t, gfp_flags)
+{
+       void *data;
+
+       WARN_ON_ONCE(!bpf_rcu_lock_held());
+       if (flags & ~BPF_LOCAL_STORAGE_GET_F_CREATE || !task)
+               return (unsigned long)NULL;
+
+       bpf_task_storage_lock();
+       data = __bpf_task_storage_get(map, task, value, flags,
+                                     gfp_flags, true);
+       bpf_task_storage_unlock();
+       return (unsigned long)data;
+}
+
 BPF_CALL_2(bpf_task_storage_delete_recur, struct bpf_map *, map, struct task_struct *,
           task)
 {
@@ -342,6 +359,17 @@ const struct bpf_func_proto bpf_task_storage_get_recur_proto = {
        .arg4_type = ARG_ANYTHING,
 };
 
+const struct bpf_func_proto bpf_task_storage_get_proto = {
+       .func = bpf_task_storage_get,
+       .gpl_only = false,
+       .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
+       .arg1_type = ARG_CONST_MAP_PTR,
+       .arg2_type = ARG_PTR_TO_BTF_ID,
+       .arg2_btf_id = &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
+       .arg3_type = ARG_PTR_TO_MAP_VALUE_OR_NULL,
+       .arg4_type = ARG_ANYTHING,
+};
+
 const struct bpf_func_proto bpf_task_storage_delete_recur_proto = {
        .func = bpf_task_storage_delete_recur,
        .gpl_only = false,
index 83b9b9a..e9759b0 100644 (file)
@@ -6,6 +6,7 @@
 #include <linux/types.h>
 #include <linux/slab.h>
 #include <linux/bpf.h>
+#include <linux/bpf_verifier.h>
 #include <linux/bpf_perf_event.h>
 #include <linux/btf.h>
 #include <linux/filter.h>
@@ -1488,7 +1489,9 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
        case BPF_FUNC_this_cpu_ptr:
                return &bpf_this_cpu_ptr_proto;
        case BPF_FUNC_task_storage_get:
-               return &bpf_task_storage_get_recur_proto;
+               if (bpf_prog_check_recur(prog))
+                       return &bpf_task_storage_get_recur_proto;
+               return &bpf_task_storage_get_proto;
        case BPF_FUNC_task_storage_delete:
                return &bpf_task_storage_delete_recur_proto;
        case BPF_FUNC_for_each_map_elem: