perf env: Avoid recursively taking env->bpf_progs.lock
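
The hunks below that touch env->bpf_progs (print_bpf_prog_info,
process_bpf_prog_info and process_bpf_btf) already hold
env->bpf_progs.lock at their call sites, so going through the helpers
that take the lock themselves acquired it twice on the same thread. The
fix routes those call sites to double-underscore variants that assume
the lock is already held. A minimal sketch of that split, with
pthread_rwlock_t standing in for perf's rwsem and all names and bodies
illustrative only:

    #include <pthread.h>

    struct bpf_prog_info_node;      /* opaque in this sketch */

    struct env_sketch {
            pthread_rwlock_t bpf_progs_lock;
            /* ... rb-tree of bpf_prog_info_node entries ... */
    };

    /* Caller must already hold bpf_progs_lock for writing. */
    void __insert_bpf_prog_info(struct env_sketch *env,
                                struct bpf_prog_info_node *node)
    {
            (void)env;
            (void)node;
            /* ... rb-tree insertion, no locking here ... */
    }

    /* Wrapper for callers that do not hold the lock. */
    void insert_bpf_prog_info(struct env_sketch *env,
                              struct bpf_prog_info_node *node)
    {
            pthread_rwlock_wrlock(&env->bpf_progs_lock);
            __insert_bpf_prog_info(env, node);
            pthread_rwlock_unlock(&env->bpf_progs_lock);
    }
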
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 52fbf52..1482567 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -456,6 +456,8 @@ static int write_cpudesc(struct feat_fd *ff,
 #define CPUINFO_PROC   { "Processor", }
 #elif defined(__xtensa__)
 #define CPUINFO_PROC   { "core ID", }
+#elif defined(__loongarch__)
+#define CPUINFO_PROC   { "Model Name", }
 #else
 #define CPUINFO_PROC   { "model name", }
 #endif
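
write_cpudesc() scans /proc/cpuinfo for the first line whose key matches
one of the CPUINFO_PROC strings, defaulting to "model name"; LoongArch
capitalizes the field as "Model Name", hence the new branch. A runnable
illustration of that kind of key match (not the real write_cpudesc()):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            const char *key = "Model Name"; /* LoongArch, per this hunk */
            char line[4096];
            FILE *f = fopen("/proc/cpuinfo", "r");

            if (!f)
                    return 1;
            while (fgets(line, sizeof(line), f)) {
                    if (!strncmp(line, key, strlen(key))) {
                            fputs(line, stdout); /* first matching line */
                            break;
                    }
            }
            fclose(f);
            return 0;
    }
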
@@ -746,20 +748,14 @@ static int write_pmu_mappings(struct feat_fd *ff,
         * Do a first pass to count number of pmu to avoid lseek so this
         * works in pipe mode as well.
         */
-       while ((pmu = perf_pmus__scan(pmu))) {
-               if (!pmu->name)
-                       continue;
+       while ((pmu = perf_pmus__scan(pmu)))
                pmu_num++;
-       }
 
        ret = do_write(ff, &pmu_num, sizeof(pmu_num));
        if (ret < 0)
                return ret;
 
        while ((pmu = perf_pmus__scan(pmu))) {
-               if (!pmu->name)
-                       continue;
-
                ret = do_write(ff, &pmu->type, sizeof(pmu->type));
                if (ret < 0)
                        return ret;
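
The comment kept at the top of this hunk is the key constraint: the
record starts with a count, and pipe output cannot lseek back to patch
it in afterwards, so the PMU list is scanned twice, once to count and
once to write. The dropped !pmu->name tests are dead code once every PMU
returned by perf_pmus__scan() has a name, which the PMU rework in this
series appears to guarantee. A stand-alone sketch of the count-then-write
pattern, with scan() as a stand-in iterator:

    #include <stdint.h>
    #include <stdio.h>

    struct pmu_s { uint32_t type; };

    static struct pmu_s pmus[] = { { 4 }, { 8 }, { 10 } };

    /* Stand-in for perf_pmus__scan(): NULL starts, NULL ends. */
    struct pmu_s *scan(struct pmu_s *prev)
    {
            size_t n = sizeof(pmus) / sizeof(pmus[0]);

            if (!prev)
                    return &pmus[0];
            return (size_t)(prev - pmus) + 1 < n ? prev + 1 : NULL;
    }

    int main(void)
    {
            struct pmu_s *pmu = NULL;
            uint32_t pmu_num = 0;

            /* First pass: count, so the length goes out up front. */
            while ((pmu = scan(pmu)))
                    pmu_num++;
            fwrite(&pmu_num, sizeof(pmu_num), 1, stdout);

            /* Second pass: the payload itself. */
            while ((pmu = scan(pmu)))
                    fwrite(&pmu->type, sizeof(pmu->type), 1, stdout);
            return 0;
    }
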
@@ -1448,7 +1444,9 @@ static int build_mem_topology(struct memory_node **nodesp, u64 *cntp)
                        nodes = new_nodes;
                        size += 4;
                }
-               ret = memory_node__read(&nodes[cnt++], idx);
+               ret = memory_node__read(&nodes[cnt], idx);
+               if (!ret)
+                       cnt += 1;
        }
 out:
        closedir(dir);
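
The node is still read into nodes[cnt], but the slot is now only counted
when memory_node__read() succeeds, so a failed read no longer leaves a
half-initialized entry in the array. The shape of the fix, with
read_node() as a stand-in for memory_node__read():

    #include <errno.h>

    struct node_s { int idx; };

    int read_node(struct node_s *n, int idx)
    {
            if (idx < 0)
                    return -EINVAL; /* *n left untouched on failure */
            n->idx = idx;
            return 0;
    }

    int collect(struct node_s *nodes, const int *ids, int n_ids)
    {
            int cnt = 0;

            for (int i = 0; i < n_ids; i++) {
                    /* Fill nodes[cnt], but only count it on success. */
                    if (!read_node(&nodes[cnt], ids[i]))
                            cnt += 1;
            }
            return cnt;     /* number of valid entries */
    }
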
@@ -1605,8 +1603,15 @@ static int write_pmu_caps(struct feat_fd *ff,
        int ret;
 
        while ((pmu = perf_pmus__scan(pmu))) {
-               if (!pmu->name || !strcmp(pmu->name, "cpu") ||
-                   perf_pmu__caps_parse(pmu) <= 0)
+               if (!strcmp(pmu->name, "cpu")) {
+                       /*
+                        * The "cpu" PMU is special and covered by
+                        * HEADER_CPU_PMU_CAPS. Note, core PMUs are
+                        * counted/written here for ARM, s390 and Intel hybrid.
+                        */
+                       continue;
+               }
+               if (perf_pmu__caps_parse(pmu) <= 0)
                        continue;
                nr_pmu++;
        }
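
HEADER_CPU_PMU_CAPS already carries the "cpu" PMU's capabilities, so
this feature must skip it or those caps would be emitted twice; the old
!pmu->name test goes away for the same reason as in write_pmu_mappings()
above. Reduced to its logic, the counting pass looks like this, with
caps() standing in for perf_pmu__caps_parse(), whose return value is
taken here to be the number of capabilities found (<= 0: nothing to
write):

    #include <string.h>

    struct pmu_s { const char *name; int ncaps; };

    int caps(const struct pmu_s *p)
    {
            return p->ncaps;
    }

    int count_cap_pmus(const struct pmu_s *pmus, int n)
    {
            int nr_pmu = 0;

            for (int i = 0; i < n; i++) {
                    /* "cpu" caps travel in HEADER_CPU_PMU_CAPS. */
                    if (!strcmp(pmus[i].name, "cpu"))
                            continue;
                    if (caps(&pmus[i]) <= 0) /* nothing to write */
                            continue;
                    nr_pmu++;
            }
            return nr_pmu;
    }
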
@@ -1619,23 +1624,17 @@ static int write_pmu_caps(struct feat_fd *ff,
                return 0;
 
        /*
-        * Write hybrid pmu caps first to maintain compatibility with
-        * older perf tool.
+        * Note: older perf tools assume core PMUs come first; this is a
+        * property of perf_pmus__scan.
         */
-       if (perf_pmus__num_core_pmus() > 1) {
-               pmu = NULL;
-               while ((pmu = perf_pmus__scan_core(pmu))) {
-                       ret = __write_pmu_caps(ff, pmu, true);
-                       if (ret < 0)
-                               return ret;
-               }
-       }
-
        pmu = NULL;
        while ((pmu = perf_pmus__scan(pmu))) {
-               if (pmu->is_core || !pmu->nr_caps)
+               if (!strcmp(pmu->name, "cpu")) {
+                       /* Skip as above. */
+                       continue;
+               }
+               if (perf_pmu__caps_parse(pmu) <= 0)
                        continue;
-
                ret = __write_pmu_caps(ff, pmu, true);
                if (ret < 0)
                        return ret;
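
The removed block wrote core-PMU caps first by hand on hybrid systems;
the replacement leans on the ordering guarantee named in the new
comment: perf_pmus__scan() yields core PMUs before the others, so
writing in scan order keeps core caps first for older readers. A sketch
of an iterator with that property (the two lists and scan() are
stand-ins):

    #include <stddef.h>

    struct pmu_s { const char *name; int is_core; };

    static struct pmu_s core_pmus[]  = { { "cpu_atom", 1 }, { "cpu_core", 1 } };
    static struct pmu_s other_pmus[] = { { "uncore_imc", 0 } };

    /* Core PMUs first, then the rest; *it persists across calls. */
    const struct pmu_s *scan(size_t *it)
    {
            size_t nc = sizeof(core_pmus) / sizeof(core_pmus[0]);
            size_t no = sizeof(other_pmus) / sizeof(other_pmus[0]);
            size_t i = (*it)++;

            if (i < nc)
                    return &core_pmus[i];
            if (i < nc + no)
                    return &other_pmus[i - nc];
            return NULL;
    }
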
@@ -1850,8 +1849,8 @@ static void print_bpf_prog_info(struct feat_fd *ff, FILE *fp)
                node = rb_entry(next, struct bpf_prog_info_node, rb_node);
                next = rb_next(&node->rb_node);
 
-               bpf_event__print_bpf_prog_info(&node->info_linear->info,
-                                              env, fp);
+               __bpf_event__print_bpf_prog_info(&node->info_linear->info,
+                                                env, fp);
        }
 
        up_read(&env->bpf_progs.lock);
@@ -3178,7 +3177,7 @@ static int process_bpf_prog_info(struct feat_fd *ff, void *data __maybe_unused)
                /* after reading from file, translate offset to address */
                bpil_offs_to_addr(info_linear);
                info_node->info_linear = info_linear;
-               perf_env__insert_bpf_prog_info(env, info_node);
+               __perf_env__insert_bpf_prog_info(env, info_node);
        }
 
        up_write(&env->bpf_progs.lock);
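
This is the recursion from the subject line: process_bpf_prog_info()
holds env->bpf_progs.lock for writing across the whole loop (the
up_write() above is its release), and perf_env__insert_bpf_prog_info()
took the same lock again; the __ variant expects the caller to hold it.
Why re-taking it is fatal, shown with a userspace stand-in (POSIX leaves
recursive write-locking to block or fail with EDEADLK):

    #include <pthread.h>
    #include <stdio.h>

    int main(void)
    {
            pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;

            pthread_rwlock_wrlock(&lock);       /* like down_write() */
            puts("write lock held");
            /* pthread_rwlock_wrlock(&lock); */ /* would self-deadlock */
            pthread_rwlock_unlock(&lock);
            return 0;
    }
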
@@ -3225,7 +3224,7 @@ static int process_bpf_btf(struct feat_fd *ff, void *data __maybe_unused)
                if (__do_read(ff, node->data, data_size))
                        goto out;
 
-               perf_env__insert_btf(env, node);
+               __perf_env__insert_btf(env, node);
                node = NULL;
        }
 
@@ -4364,9 +4363,10 @@ size_t perf_event__fprintf_event_update(union perf_event *event, FILE *fp)
                ret += fprintf(fp, "... ");
 
                map = cpu_map__new_data(&ev->cpus.cpus);
-               if (map)
+               if (map) {
                        ret += cpu_map__fprintf(map, fp);
-               else
+                       perf_cpu_map__put(map);
+               } else
                        ret += fprintf(fp, "failed to get cpus\n");
                break;
        default:
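
cpu_map__new_data() hands back a map the caller owns a reference to; the
old code printed it and dropped the pointer, leaking the map, so the fix
pairs the constructor with perf_cpu_map__put() once printing is done.
The ownership pattern, with hypothetical stand-ins for the perf_cpu_map
API:

    #include <stdlib.h>

    struct map_s { int refcnt; };

    struct map_s *map_new(void)     /* caller owns one reference */
    {
            struct map_s *m = calloc(1, sizeof(*m));

            if (m)
                    m->refcnt = 1;
            return m;
    }

    void map_put(struct map_s *m)   /* drop that reference */
    {
            if (m && --m->refcnt == 0)
                    free(m);
    }

    void print_and_release(void)
    {
            struct map_s *m = map_new();

            if (m) {
                    /* ... print the map ... */
                    map_put(m);     /* without this, the map leaks */
            }
    }
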
@@ -4381,7 +4381,8 @@ int perf_event__process_attr(struct perf_tool *tool __maybe_unused,
                             union perf_event *event,
                             struct evlist **pevlist)
 {
-       u32 i, ids, n_ids;
+       u32 i, n_ids;
+       u64 *ids;
        struct evsel *evsel;
        struct evlist *evlist = *pevlist;
 
@@ -4397,9 +4398,8 @@ int perf_event__process_attr(struct perf_tool *tool __maybe_unused,
 
        evlist__add(evlist, evsel);
 
-       ids = event->header.size;
-       ids -= (void *)&event->attr.id - (void *)event;
-       n_ids = ids / sizeof(u64);
+       n_ids = event->header.size - sizeof(event->header) - event->attr.attr.size;
+       n_ids = n_ids / sizeof(u64);
        /*
         * We don't have the cpu and thread maps on the header, so
         * for allocating the perf_sample_id table we fake 1 cpu and
@@ -4408,8 +4408,9 @@ int perf_event__process_attr(struct perf_tool *tool __maybe_unused,
        if (perf_evsel__alloc_id(&evsel->core, 1, n_ids))
                return -ENOMEM;
 
+       ids = perf_record_header_attr_id(event);
        for (i = 0; i < n_ids; i++) {
-               perf_evlist__id_add(&evlist->core, &evsel->core, 0, i, event->attr.id[i]);
+               perf_evlist__id_add(&evlist->core, &evsel->core, 0, i, ids[i]);
        }
 
        return 0;
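
A PERF_RECORD_HEADER_ATTR event is a header, then a perf_event_attr of
attr.attr.size bytes, then an array of u64 ids. The old code located the
ids at the fixed offset of event->attr.id, i.e. it assumed the attr on
disk is as large as the tool's own struct; data recorded by an older or
newer perf breaks that assumption. The new code derives both the count
and (via libperf's perf_record_header_attr_id()) the array start from
attr.attr.size. The arithmetic, over a simplified record layout:

    #include <stdint.h>

    struct hdr_s  { uint32_t type; uint16_t misc, size; };
    struct attr_s { uint32_t type, size; /* more fields on disk */ };
    struct rec_s  { struct hdr_s header; struct attr_s attr; /* ids follow */ };

    /* ids start right after the attr, whatever size it had on disk. */
    static inline uint64_t *record_ids(struct rec_s *ev)
    {
            return (uint64_t *)((char *)&ev->attr + ev->attr.size);
    }

    static inline uint32_t record_n_ids(const struct rec_s *ev)
    {
            return (ev->header.size - sizeof(ev->header) - ev->attr.size)
                    / sizeof(uint64_t);
    }
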