perf env: Avoid recursively taking env->bpf_progs.lock
[platform/kernel/linux-starfive.git] tools/perf/util/header.c
index 99c61cd..1482567 100644
@@ -1444,7 +1444,9 @@ static int build_mem_topology(struct memory_node **nodesp, u64 *cntp)
                        nodes = new_nodes;
                        size += 4;
                }
-               ret = memory_node__read(&nodes[cnt++], idx);
+               ret = memory_node__read(&nodes[cnt], idx);
+               if (!ret)
+                       cnt += 1;
        }
 out:
        closedir(dir);
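
This hunk makes build_mem_topology() advance the node count only when memory_node__read() succeeds; with the old nodes[cnt++], a failed read still claimed a slot and left a junk entry inside the counted range. A compact sketch of the count-on-success pattern, with read_entry() as a hypothetical stand-in for memory_node__read():

#include <stddef.h>

/* Hypothetical stand-ins for memory_node and memory_node__read(). */
struct entry { int value; };

/* Pretend every odd index fails to read. */
static int read_entry(struct entry *e, int idx)
{
	if (idx & 1)
		return -1;
	e->value = idx;
	return 0;
}

/*
 * Advance cnt only after read_entry() succeeds.  With the old
 * nodes[cnt++]-style post-increment, a failed read still consumed
 * a slot, so callers iterating [0, cnt) saw an uninitialized entry.
 */
static size_t fill_entries(struct entry *entries, int max)
{
	size_t cnt = 0;

	for (int idx = 0; idx < max; idx++) {
		if (!read_entry(&entries[cnt], idx))
			cnt++;
	}
	return cnt;
}

int main(void)
{
	struct entry entries[8];

	/* even indices 0, 2, 4, 6 succeed */
	return fill_entries(entries, 8) == 4 ? 0 : 1;
}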
@@ -1847,8 +1849,8 @@ static void print_bpf_prog_info(struct feat_fd *ff, FILE *fp)
                node = rb_entry(next, struct bpf_prog_info_node, rb_node);
                next = rb_next(&node->rb_node);
 
-               bpf_event__print_bpf_prog_info(&node->info_linear->info,
-                                              env, fp);
+               __bpf_event__print_bpf_prog_info(&node->info_linear->info,
+                                                env, fp);
        }
 
        up_read(&env->bpf_progs.lock);
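
This is the hunk the subject line is about: print_bpf_prog_info() runs with env->bpf_progs.lock already held for reading, so it must call the double-underscore variant that expects a held lock instead of the wrapper that takes the lock itself. A minimal sketch of that locked/__lockless pairing, written directly against pthreads (tools/perf backs its rwsem shims with a pthread rwlock; all names here are illustrative, not the perf API):

#include <pthread.h>
#include <stdio.h>

struct progs {
	pthread_rwlock_t lock;
	int count;
};

/* Double-underscore variant: caller must already hold progs->lock. */
static void __progs_print(struct progs *p, FILE *fp)
{
	fprintf(fp, "# bpf progs: %d\n", p->count);
}

/* Locking wrapper for callers that hold no lock. */
static void progs_print(struct progs *p, FILE *fp)
{
	pthread_rwlock_rdlock(&p->lock);
	__progs_print(p, fp);
	pthread_rwlock_unlock(&p->lock);
}

/*
 * Mirrors print_bpf_prog_info(): take the read lock once, then call
 * the __ variant per node.  Calling progs_print() here instead would
 * rdlock a lock this thread already holds, which can deadlock once a
 * writer is queued between the two acquisitions.
 */
static void progs_dump(struct progs *p, FILE *fp)
{
	pthread_rwlock_rdlock(&p->lock);
	__progs_print(p, fp);
	pthread_rwlock_unlock(&p->lock);
}

int main(void)
{
	struct progs p = { .lock = PTHREAD_RWLOCK_INITIALIZER, .count = 3 };

	progs_print(&p, stdout);
	progs_dump(&p, stdout);
	return 0;
}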
@@ -3175,7 +3177,7 @@ static int process_bpf_prog_info(struct feat_fd *ff, void *data __maybe_unused)
                /* after reading from file, translate offset to address */
                bpil_offs_to_addr(info_linear);
                info_node->info_linear = info_linear;
-               perf_env__insert_bpf_prog_info(env, info_node);
+               __perf_env__insert_bpf_prog_info(env, info_node);
        }
 
        up_write(&env->bpf_progs.lock);
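
The write side follows the same convention: process_bpf_prog_info() here, and process_bpf_btf() in the next hunk, hold down_write(&env->bpf_progs.lock) around their whole read loop, so each insert switches to the __perf_env__insert_*() variant that assumes the lock is held rather than re-acquiring it. A write-side sketch under the same illustrative naming:

#include <pthread.h>

struct table {
	pthread_rwlock_t lock;
	int nr_entries;
};

/* __ variant: caller already holds table->lock for writing. */
static void __table_insert(struct table *t)
{
	t->nr_entries++;
}

/* Locking wrapper for callers that hold no lock. */
static void table_insert(struct table *t)
{
	pthread_rwlock_wrlock(&t->lock);
	__table_insert(t);
	pthread_rwlock_unlock(&t->lock);
}

/*
 * Bulk load, shaped like process_bpf_prog_info()/process_bpf_btf():
 * one wrlock around the whole loop, __ variant per element.  Using
 * table_insert() inside the loop would try to wrlock a lock this
 * thread already owns and deadlock.
 */
static void table_load(struct table *t, int n)
{
	pthread_rwlock_wrlock(&t->lock);
	while (n-- > 0)
		__table_insert(t);
	pthread_rwlock_unlock(&t->lock);
}

int main(void)
{
	struct table t = { .lock = PTHREAD_RWLOCK_INITIALIZER };

	table_insert(&t);
	table_load(&t, 7);
	return t.nr_entries == 8 ? 0 : 1;
}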
@@ -3222,7 +3224,7 @@ static int process_bpf_btf(struct feat_fd *ff, void *data __maybe_unused)
                if (__do_read(ff, node->data, data_size))
                        goto out;
 
-               perf_env__insert_btf(env, node);
+               __perf_env__insert_btf(env, node);
                node = NULL;
        }
 
@@ -4361,9 +4363,10 @@ size_t perf_event__fprintf_event_update(union perf_event *event, FILE *fp)
                ret += fprintf(fp, "... ");
 
                map = cpu_map__new_data(&ev->cpus.cpus);
-               if (map)
+               if (map) {
                        ret += cpu_map__fprintf(map, fp);
-               else
+                       perf_cpu_map__put(map);
+               } else
                        ret += fprintf(fp, "failed to get cpus\n");
                break;
        default:
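
In perf_event__fprintf_event_update(), cpu_map__new_data() hands back a reference that the caller owns; the old code printed the map and leaked it. The fix drops the reference with perf_cpu_map__put() once printing is done. The ownership rule in miniature, using a toy refcounted object rather than the real perf_cpu_map internals:

#include <stdio.h>
#include <stdlib.h>

struct map {
	int refcnt;
	int nr;
};

/* Constructor returns with one reference owned by the caller. */
static struct map *map_new(int nr)
{
	struct map *m = malloc(sizeof(*m));

	if (m) {
		m->refcnt = 1;
		m->nr = nr;
	}
	return m;
}

/* Drop one reference; free on the last put. */
static void map_put(struct map *m)
{
	if (m && --m->refcnt == 0)
		free(m);
}

int main(void)
{
	struct map *m = map_new(4);

	if (m) {
		printf("%d cpus\n", m->nr);
		map_put(m);	/* the fix: drop the constructor's reference */
	} else {
		printf("failed to get cpus\n");
	}
	return 0;
}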
@@ -4378,7 +4381,8 @@ int perf_event__process_attr(struct perf_tool *tool __maybe_unused,
                             union perf_event *event,
                             struct evlist **pevlist)
 {
-       u32 i, ids, n_ids;
+       u32 i, n_ids;
+       u64 *ids;
        struct evsel *evsel;
        struct evlist *evlist = *pevlist;
 
@@ -4394,9 +4398,8 @@ int perf_event__process_attr(struct perf_tool *tool __maybe_unused,
 
        evlist__add(evlist, evsel);
 
-       ids = event->header.size;
-       ids -= (void *)&event->attr.id - (void *)event;
-       n_ids = ids / sizeof(u64);
+       n_ids = event->header.size - sizeof(event->header) - event->attr.attr.size;
+       n_ids = n_ids / sizeof(u64);
        /*
         * We don't have the cpu and thread maps on the header, so
         * for allocating the perf_sample_id table we fake 1 cpu and
@@ -4405,8 +4408,9 @@ int perf_event__process_attr(struct perf_tool *tool __maybe_unused,
        if (perf_evsel__alloc_id(&evsel->core, 1, n_ids))
                return -ENOMEM;
 
+       ids = perf_record_header_attr_id(event);
        for (i = 0; i < n_ids; i++) {
-               perf_evlist__id_add(&evlist->core, &evsel->core, 0, i, event->attr.id[i]);
+               perf_evlist__id_add(&evlist->core, &evsel->core, 0, i, ids[i]);
        }
 
        return 0;
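
The last three hunks fix perf_event__process_attr() for records written by a different perf version: struct perf_event_attr grows over time, so the id array cannot be assumed to sit at the compile-time offset of event->attr.id. The new code subtracts the header and the on-disk attr.size from the record size to get the number of ids, and perf_record_header_attr_id() points just past the on-disk attr. A self-contained model of that arithmetic, with struct fake_attr standing in for the real (much larger) perf_event_attr:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;
typedef uint32_t u32;
typedef uint16_t u16;

struct perf_event_header {
	u32 type;
	u16 misc;
	u16 size;		/* size of the whole record in bytes */
};

/* Stand-in for struct perf_event_attr: only the leading fields
 * matter here; the real struct has grown release by release. */
struct fake_attr {
	u32 type;
	u32 size;		/* on-disk size of the attr, which may
				 * differ from sizeof() in this build */
};

struct attr_event {
	struct perf_event_header header;
	struct fake_attr attr;
	/* u64 id[] follows at (void *)&attr + attr.size */
};

/* Same idea as perf_record_header_attr_id(): the id array starts
 * right after the on-disk attr, not at a compile-time offset. */
static u64 *attr_event_ids(struct attr_event *ev)
{
	return (void *)&ev->attr + ev->attr.size;
}

/* The new n_ids computation from the hunk above. */
static u32 attr_event_nr_ids(struct attr_event *ev)
{
	return (ev->header.size - sizeof(ev->header) - ev->attr.size) /
	       sizeof(u64);
}

int main(void)
{
	struct {
		struct attr_event ev;
		u64 id[2];
	} rec = {
		.ev.header.size = sizeof(rec),
		.ev.attr.size = sizeof(struct fake_attr),
		.id = { 11, 22 },
	};

	printf("n_ids=%u first id=%llu\n", attr_event_nr_ids(&rec.ev),
	       (unsigned long long)attr_event_ids(&rec.ev)[0]);
	return 0;
}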