perf env: Avoid recursively taking env->bpf_progs.lock
Author:     Ian Rogers <irogers@google.com>
AuthorDate: Thu, 7 Dec 2023 01:46:55 +0000 (17:46 -0800)
Commit:     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
CommitDate: Thu, 25 Jan 2024 23:35:54 +0000 (15:35 -0800)
[ Upstream commit 9c51f8788b5d4e9f46afbcf563255cfd355690b3 ]

Add variants of perf_env__insert_bpf_prog_info(), perf_env__insert_btf()
and perf_env__find_btf() prefixed with __ to indicate that
env->bpf_progs.lock is assumed to be held.

Call these variants when the lock is held to avoid recursively taking it
and potentially having a thread deadlock with itself.
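
For illustration, a minimal user-space sketch of the resulting pattern
(hypothetical names; a pthread rwlock stands in for the rwsem guarding
env->bpf_progs - this is not the perf code itself):

  /*
   * Illustrative sketch only: a locked wrapper plus a __ variant that
   * assumes the lock is already held, so a caller that holds the lock
   * never re-acquires it. Names and types here are hypothetical.
   */
  #include <pthread.h>
  #include <stdio.h>

  struct env {
          pthread_rwlock_t lock;  /* stands in for env->bpf_progs.lock */
          int value;              /* stands in for the rb trees */
  };

  /* Assumes env->lock is held by the caller. */
  static int __env_find(struct env *env)
  {
          return env->value;
  }

  /* Takes the lock itself; for callers that do not hold it. */
  static int env_find(struct env *env)
  {
          int res;

          pthread_rwlock_rdlock(&env->lock);
          res = __env_find(env);
          pthread_rwlock_unlock(&env->lock);
          return res;
  }

  /*
   * A dump routine that already holds the lock must call the __ variant;
   * calling env_find() here would take env->lock a second time from the
   * same thread, which is the recursive acquisition this patch avoids.
   */
  static void env_dump(struct env *env)
  {
          pthread_rwlock_rdlock(&env->lock);
          printf("value=%d\n", __env_find(env));
          pthread_rwlock_unlock(&env->lock);
  }

  int main(void)
  {
          struct env env = { .lock = PTHREAD_RWLOCK_INITIALIZER, .value = 1 };

          env_dump(&env);
          return 0;
  }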

Fixes: f8dfeae009effc0b ("perf bpf: Show more BPF program info in print_bpf_prog_info()")
Signed-off-by: Ian Rogers <irogers@google.com>
Acked-by: Jiri Olsa <jolsa@kernel.org>
Acked-by: Song Liu <song@kernel.org>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Huacai Chen <chenhuacai@kernel.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: K Prateek Nayak <kprateek.nayak@amd.com>
Cc: Kan Liang <kan.liang@linux.intel.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Ming Wang <wangming01@loongson.cn>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ravi Bangoria <ravi.bangoria@amd.com>
Link: https://lore.kernel.org/r/20231207014655.1252484-1-irogers@google.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
tools/perf/util/bpf-event.c
tools/perf/util/bpf-event.h
tools/perf/util/env.c
tools/perf/util/env.h
tools/perf/util/header.c

diff --git a/tools/perf/util/bpf-event.c b/tools/perf/util/bpf-event.c
index 38fcf3b..b00b5a2 100644
--- a/tools/perf/util/bpf-event.c
+++ b/tools/perf/util/bpf-event.c
@@ -542,9 +542,9 @@ int evlist__add_bpf_sb_event(struct evlist *evlist, struct perf_env *env)
        return evlist__add_sb_event(evlist, &attr, bpf_event__sb_cb, env);
 }
 
-void bpf_event__print_bpf_prog_info(struct bpf_prog_info *info,
-                                   struct perf_env *env,
-                                   FILE *fp)
+void __bpf_event__print_bpf_prog_info(struct bpf_prog_info *info,
+                                     struct perf_env *env,
+                                     FILE *fp)
 {
        __u32 *prog_lens = (__u32 *)(uintptr_t)(info->jited_func_lens);
        __u64 *prog_addrs = (__u64 *)(uintptr_t)(info->jited_ksyms);
@@ -560,7 +560,7 @@ void bpf_event__print_bpf_prog_info(struct bpf_prog_info *info,
        if (info->btf_id) {
                struct btf_node *node;
 
-               node = perf_env__find_btf(env, info->btf_id);
+               node = __perf_env__find_btf(env, info->btf_id);
                if (node)
                        btf = btf__new((__u8 *)(node->data),
                                       node->data_size);
diff --git a/tools/perf/util/bpf-event.h b/tools/perf/util/bpf-event.h
index 1bcbd4f..e2f0420 100644
--- a/tools/perf/util/bpf-event.h
+++ b/tools/perf/util/bpf-event.h
@@ -33,9 +33,9 @@ struct btf_node {
 int machine__process_bpf(struct machine *machine, union perf_event *event,
                         struct perf_sample *sample);
 int evlist__add_bpf_sb_event(struct evlist *evlist, struct perf_env *env);
-void bpf_event__print_bpf_prog_info(struct bpf_prog_info *info,
-                                   struct perf_env *env,
-                                   FILE *fp);
+void __bpf_event__print_bpf_prog_info(struct bpf_prog_info *info,
+                                     struct perf_env *env,
+                                     FILE *fp);
 #else
 static inline int machine__process_bpf(struct machine *machine __maybe_unused,
                                       union perf_event *event __maybe_unused,
@@ -50,9 +50,9 @@ static inline int evlist__add_bpf_sb_event(struct evlist *evlist __maybe_unused,
        return 0;
 }
 
-static inline void bpf_event__print_bpf_prog_info(struct bpf_prog_info *info __maybe_unused,
-                                                 struct perf_env *env __maybe_unused,
-                                                 FILE *fp __maybe_unused)
+static inline void __bpf_event__print_bpf_prog_info(struct bpf_prog_info *info __maybe_unused,
+                                                   struct perf_env *env __maybe_unused,
+                                                   FILE *fp __maybe_unused)
 {
 
 }
diff --git a/tools/perf/util/env.c b/tools/perf/util/env.c
index a164164..d2c7b6e 100644
--- a/tools/perf/util/env.c
+++ b/tools/perf/util/env.c
@@ -23,12 +23,18 @@ struct perf_env perf_env;
 void perf_env__insert_bpf_prog_info(struct perf_env *env,
                                    struct bpf_prog_info_node *info_node)
 {
+       down_write(&env->bpf_progs.lock);
+       __perf_env__insert_bpf_prog_info(env, info_node);
+       up_write(&env->bpf_progs.lock);
+}
+
+void __perf_env__insert_bpf_prog_info(struct perf_env *env, struct bpf_prog_info_node *info_node)
+{
        __u32 prog_id = info_node->info_linear->info.id;
        struct bpf_prog_info_node *node;
        struct rb_node *parent = NULL;
        struct rb_node **p;
 
-       down_write(&env->bpf_progs.lock);
        p = &env->bpf_progs.infos.rb_node;
 
        while (*p != NULL) {
@@ -40,15 +46,13 @@ void perf_env__insert_bpf_prog_info(struct perf_env *env,
                        p = &(*p)->rb_right;
                } else {
                        pr_debug("duplicated bpf prog info %u\n", prog_id);
-                       goto out;
+                       return;
                }
        }
 
        rb_link_node(&info_node->rb_node, parent, p);
        rb_insert_color(&info_node->rb_node, &env->bpf_progs.infos);
        env->bpf_progs.infos_cnt++;
-out:
-       up_write(&env->bpf_progs.lock);
 }
 
 struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env,
@@ -78,13 +82,21 @@ out:
 
 bool perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node)
 {
+       bool ret;
+
+       down_write(&env->bpf_progs.lock);
+       ret = __perf_env__insert_btf(env, btf_node);
+       up_write(&env->bpf_progs.lock);
+       return ret;
+}
+
+bool __perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node)
+{
        struct rb_node *parent = NULL;
        __u32 btf_id = btf_node->id;
        struct btf_node *node;
        struct rb_node **p;
-       bool ret = true;
 
-       down_write(&env->bpf_progs.lock);
        p = &env->bpf_progs.btfs.rb_node;
 
        while (*p != NULL) {
@@ -96,25 +108,31 @@ bool perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node)
                        p = &(*p)->rb_right;
                } else {
                        pr_debug("duplicated btf %u\n", btf_id);
-                       ret = false;
-                       goto out;
+                       return false;
                }
        }
 
        rb_link_node(&btf_node->rb_node, parent, p);
        rb_insert_color(&btf_node->rb_node, &env->bpf_progs.btfs);
        env->bpf_progs.btfs_cnt++;
-out:
-       up_write(&env->bpf_progs.lock);
-       return ret;
+       return true;
 }
 
 struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id)
 {
+       struct btf_node *res;
+
+       down_read(&env->bpf_progs.lock);
+       res = __perf_env__find_btf(env, btf_id);
+       up_read(&env->bpf_progs.lock);
+       return res;
+}
+
+struct btf_node *__perf_env__find_btf(struct perf_env *env, __u32 btf_id)
+{
        struct btf_node *node = NULL;
        struct rb_node *n;
 
-       down_read(&env->bpf_progs.lock);
        n = env->bpf_progs.btfs.rb_node;
 
        while (n) {
@@ -124,13 +142,9 @@ struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id)
                else if (btf_id > node->id)
                        n = n->rb_right;
                else
-                       goto out;
+                       return node;
        }
-       node = NULL;
-
-out:
-       up_read(&env->bpf_progs.lock);
-       return node;
+       return NULL;
 }
 
 /* purge data in bpf_progs.infos tree */
diff --git a/tools/perf/util/env.h b/tools/perf/util/env.h
index 4566c51..359eff5 100644
--- a/tools/perf/util/env.h
+++ b/tools/perf/util/env.h
@@ -164,12 +164,16 @@ const char *perf_env__raw_arch(struct perf_env *env);
 int perf_env__nr_cpus_avail(struct perf_env *env);
 
 void perf_env__init(struct perf_env *env);
+void __perf_env__insert_bpf_prog_info(struct perf_env *env,
+                                     struct bpf_prog_info_node *info_node);
 void perf_env__insert_bpf_prog_info(struct perf_env *env,
                                    struct bpf_prog_info_node *info_node);
 struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env,
                                                        __u32 prog_id);
 bool perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node);
+bool __perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node);
 struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id);
+struct btf_node *__perf_env__find_btf(struct perf_env *env, __u32 btf_id);
 
 int perf_env__numa_node(struct perf_env *env, struct perf_cpu cpu);
 char *perf_env__find_pmu_cap(struct perf_env *env, const char *pmu_name,
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index f6035c2..1482567 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -1849,8 +1849,8 @@ static void print_bpf_prog_info(struct feat_fd *ff, FILE *fp)
                node = rb_entry(next, struct bpf_prog_info_node, rb_node);
                next = rb_next(&node->rb_node);
 
-               bpf_event__print_bpf_prog_info(&node->info_linear->info,
-                                              env, fp);
+               __bpf_event__print_bpf_prog_info(&node->info_linear->info,
+                                                env, fp);
        }
 
        up_read(&env->bpf_progs.lock);
@@ -3177,7 +3177,7 @@ static int process_bpf_prog_info(struct feat_fd *ff, void *data __maybe_unused)
                /* after reading from file, translate offset to address */
                bpil_offs_to_addr(info_linear);
                info_node->info_linear = info_linear;
-               perf_env__insert_bpf_prog_info(env, info_node);
+               __perf_env__insert_bpf_prog_info(env, info_node);
        }
 
        up_write(&env->bpf_progs.lock);
@@ -3224,7 +3224,7 @@ static int process_bpf_btf(struct feat_fd *ff, void *data __maybe_unused)
                if (__do_read(ff, node->data, data_size))
                        goto out;
 
-               perf_env__insert_btf(env, node);
+               __perf_env__insert_btf(env, node);
                node = NULL;
        }