perf cpumap: Remove cpu_map__cpu(), use libperf function
authorIan Rogers <irogers@google.com>
Wed, 5 Jan 2022 06:13:20 +0000 (22:13 -0800)
committerArnaldo Carvalho de Melo <acme@redhat.com>
Wed, 12 Jan 2022 17:28:22 +0000 (14:28 -0300)
Switch the remaining few users of cpu_map__cpu() to perf_cpu_map__cpu()
and remove the function.
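
The libperf function is a drop-in replacement: same arguments, same return value, only the perf_ prefix changes. As a minimal sketch of the surviving API (a hypothetical standalone program against <perf/cpumap.h>, not code from this patch):

#include <stdio.h>
#include <perf/cpumap.h>

int main(void)
{
	/* Parse a CPU list; passing NULL would mean all online CPUs. */
	struct perf_cpu_map *cpus = perf_cpu_map__new("0-3");
	int idx;

	if (!cpus)
		return 1;

	/* perf_cpu_map__cpu() replaces the removed cpu_map__cpu(). */
	for (idx = 0; idx < perf_cpu_map__nr(cpus); idx++)
		printf("cpu %d\n", perf_cpu_map__cpu(cpus, idx));

	perf_cpu_map__put(cpus);
	return 0;
}

Built against libperf (e.g. cc demo.c -lperf), this prints each CPU in index order.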

Reviewed-by: James Clark <james.clark@arm.com>
Signed-off-by: Ian Rogers <irogers@google.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: John Garry <john.garry@huawei.com>
Cc: Kajol Jain <kjain@linux.ibm.com>
Cc: Kan Liang <kan.liang@linux.intel.com>
Cc: Leo Yan <leo.yan@linaro.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Mathieu Poirier <mathieu.poirier@linaro.org>
Cc: Mike Leach <mike.leach@linaro.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Paul Clarke <pc@us.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Riccardo Mancini <rickyman7@gmail.com>
Cc: Stephane Eranian <eranian@google.com>
Cc: Suzuki Poulouse <suzuki.poulose@arm.com>
Cc: Vineet Singh <vineet.singh@intel.com>
Cc: coresight@lists.linaro.org
Cc: linux-arm-kernel@lists.infradead.org
Cc: zhengjun.xing@intel.com
Link: https://lore.kernel.org/r/20220105061351.120843-18-irogers@google.com
[ Did the conversion to perf_ftrace__latency_prepare_bpf() as well, used when building with BUILD_BPF_SKEL=1 ]
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
tools/perf/builtin-ftrace.c
tools/perf/util/bpf_ftrace.c
tools/perf/util/cpumap.c
tools/perf/util/cpumap.h

diff --git a/tools/perf/builtin-ftrace.c b/tools/perf/builtin-ftrace.c
index 2b54e2d..f16c39a 100644
@@ -281,7 +281,7 @@ static int set_tracing_cpumask(struct perf_cpu_map *cpumap)
        int ret;
        int last_cpu;
 
-       last_cpu = cpu_map__cpu(cpumap, cpumap->nr - 1);
+       last_cpu = perf_cpu_map__cpu(cpumap, cpumap->nr - 1);
        mask_size = last_cpu / 4 + 2; /* one more byte for EOS */
        mask_size += last_cpu / 32; /* ',' is needed for every 32th cpus */
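
As a worked example of that sizing (numbers chosen for illustration, not taken from the patch): with last_cpu = 255 the hex mask needs 255/4 + 2 = 65 bytes (64 hex digits plus the EOS byte) and 255/32 = 7 more for the commas, i.e. a 72-byte buffer:

#include <stdio.h>

int main(void)
{
	/* Illustrative sizing only; 255 is an arbitrary last CPU number. */
	int last_cpu = 255;
	int mask_size = last_cpu / 4 + 2;	/* 63 + 2: 64 hex digits plus EOS */

	mask_size += last_cpu / 32;		/* plus 7 commas, one per 32 CPUs */
	printf("%d\n", mask_size);		/* prints 72 */
	return 0;
}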
 
diff --git a/tools/perf/util/bpf_ftrace.c b/tools/perf/util/bpf_ftrace.c
index f00a2de..28dc4c6 100644
@@ -63,7 +63,7 @@ int perf_ftrace__latency_prepare_bpf(struct perf_ftrace *ftrace)
                fd = bpf_map__fd(skel->maps.cpu_filter);
 
                for (i = 0; i < ncpus; i++) {
-                       cpu = cpu_map__cpu(ftrace->evlist->core.cpus, i);
+                       cpu = perf_cpu_map__cpu(ftrace->evlist->core.cpus, i);
                        bpf_map_update_elem(fd, &cpu, &val, BPF_ANY);
                }
        }
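
For context, the cpu_filter map filled by this loop is consumed on the BPF side, which drops events from CPUs that were never inserted. A sketch of that consumer (simplified, with an assumed map shape matching the u32 keys used above; perf's real skeleton lives in util/bpf_skel/func_latency.bpf.c):

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

/* Keyed by CPU number; user space resizes and fills it before load. */
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, __u8);
} cpu_filter SEC(".maps");

/* Return true only when the current CPU was put into the map. */
static __always_inline bool cpu_allowed(void)
{
	__u32 cpu = bpf_get_smp_processor_id();

	return bpf_map_lookup_elem(&cpu_filter, &cpu) != NULL;
}

char LICENSE[] SEC("license") = "GPL";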
diff --git a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c
index e0d7f1d..32f9fc2 100644
@@ -485,11 +485,6 @@ bool cpu_map__has(struct perf_cpu_map *cpus, int cpu)
        return perf_cpu_map__idx(cpus, cpu) != -1;
 }
 
-int cpu_map__cpu(struct perf_cpu_map *cpus, int idx)
-{
-       return cpus->map[idx];
-}
-
 size_t cpu_map__snprint(struct perf_cpu_map *map, char *buf, size_t size)
 {
        int i, cpu, start = -1;
@@ -547,7 +542,7 @@ size_t cpu_map__snprint_mask(struct perf_cpu_map *map, char *buf, size_t size)
        int i, cpu;
        char *ptr = buf;
        unsigned char *bitmap;
-       int last_cpu = cpu_map__cpu(map, map->nr - 1);
+       int last_cpu = perf_cpu_map__cpu(map, map->nr - 1);
 
        if (buf == NULL)
                return 0;
@@ -559,7 +554,7 @@ size_t cpu_map__snprint_mask(struct perf_cpu_map *map, char *buf, size_t size)
        }
 
        for (i = 0; i < map->nr; i++) {
-               cpu = cpu_map__cpu(map, i);
+               cpu = perf_cpu_map__cpu(map, i);
                bitmap[cpu / 8] |= 1 << (cpu % 8);
        }
 
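To see what that fill loop produces (example CPU set assumed, not from the patch): CPU 10 lands in bitmap[10 / 8] = bitmap[1] with bit 10 % 8 = 2 set, so a map holding CPUs {0, 10} gives bitmap[0] = 0x01 and bitmap[1] = 0x04:

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* Worked example of the fill loop above for CPUs {0, 10}. */
	unsigned char bitmap[2];
	int cpus[] = { 0, 10 };
	size_t i;

	memset(bitmap, 0, sizeof(bitmap));
	for (i = 0; i < sizeof(cpus) / sizeof(cpus[0]); i++)
		bitmap[cpus[i] / 8] |= 1 << (cpus[i] % 8);

	printf("%#x %#x\n", bitmap[0], bitmap[1]);	/* prints 0x1 0x4 */
	return 0;
}
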
diff --git a/tools/perf/util/cpumap.h b/tools/perf/util/cpumap.h
index a053bf3..87545bc 100644
@@ -80,7 +80,6 @@ int cpu_map__build_map(struct perf_cpu_map *cpus, struct cpu_aggr_map **res,
                       struct aggr_cpu_id (*f)(int cpu, void *data),
                       void *data);
 
-int cpu_map__cpu(struct perf_cpu_map *cpus, int idx);
 bool cpu_map__has(struct perf_cpu_map *cpus, int cpu);
 
 bool aggr_cpu_id__equal(const struct aggr_cpu_id *a, const struct aggr_cpu_id *b);