perf cpumap: Move 'has' function to libperf
authorIan Rogers <irogers@google.com>
Wed, 5 Jan 2022 06:13:23 +0000 (22:13 -0800)
committerArnaldo Carvalho de Melo <acme@redhat.com>
Wed, 12 Jan 2022 17:28:22 +0000 (14:28 -0300)
Make the cpu map argument const for consistency with the rest of the
API. Modify cpu_map__idx accordingly.

Reviewed-by: James Clark <james.clark@arm.com>
Signed-off-by: Ian Rogers <irogers@google.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: John Garry <john.garry@huawei.com>
Cc: Kajol Jain <kjain@linux.ibm.com>
Cc: Kan Liang <kan.liang@linux.intel.com>
Cc: Leo Yan <leo.yan@linaro.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Mathieu Poirier <mathieu.poirier@linaro.org>
Cc: Mike Leach <mike.leach@linaro.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Paul Clarke <pc@us.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Riccardo Mancini <rickyman7@gmail.com>
Cc: Stephane Eranian <eranian@google.com>
Cc: Suzuki K Poulose <suzuki.poulose@arm.com>
Cc: Vineet Singh <vineet.singh@intel.com>
Cc: coresight@lists.linaro.org
Cc: linux-arm-kernel@lists.infradead.org
Cc: zhengjun.xing@intel.com
Link: https://lore.kernel.org/r/20220105061351.120843-21-irogers@google.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
tools/lib/perf/Documentation/libperf.txt
tools/lib/perf/cpumap.c
tools/lib/perf/include/internal/cpumap.h
tools/lib/perf/include/perf/cpumap.h
tools/lib/perf/libperf.map
tools/perf/arch/arm/util/cs-etm.c
tools/perf/builtin-sched.c
tools/perf/tests/topology.c
tools/perf/util/cpumap.c
tools/perf/util/cpumap.h
tools/perf/util/cputopo.c

index 63ae5e0..faef9ba 100644 (file)
@@ -48,6 +48,7 @@ SYNOPSIS
   int perf_cpu_map__nr(const struct perf_cpu_map *cpus);
   bool perf_cpu_map__empty(const struct perf_cpu_map *map);
   int perf_cpu_map__max(struct perf_cpu_map *map);
+  bool perf_cpu_map__has(const struct perf_cpu_map *map, int cpu);
 
   #define perf_cpu_map__for_each_cpu(cpu, idx, cpus)
 --
index adaad3d..3c36a06 100644 (file)
@@ -268,7 +268,7 @@ bool perf_cpu_map__empty(const struct perf_cpu_map *map)
        return map ? map->map[0] == -1 : true;
 }
 
-int perf_cpu_map__idx(struct perf_cpu_map *cpus, int cpu)
+int perf_cpu_map__idx(const struct perf_cpu_map *cpus, int cpu)
 {
        int low = 0, high = cpus->nr;
 
@@ -288,6 +288,11 @@ int perf_cpu_map__idx(struct perf_cpu_map *cpus, int cpu)
        return -1;
 }
 
+bool perf_cpu_map__has(const struct perf_cpu_map *cpus, int cpu)
+{
+       return perf_cpu_map__idx(cpus, cpu) != -1;
+}
+
 int perf_cpu_map__max(struct perf_cpu_map *map)
 {
        // cpu_map__trim_new() qsort()s it, cpu_map__default_new() sorts it as well.
index 4054169..71a31ed 100644 (file)
@@ -23,6 +23,6 @@ struct perf_cpu_map {
 #define MAX_NR_CPUS    2048
 #endif
 
-int perf_cpu_map__idx(struct perf_cpu_map *cpus, int cpu);
+int perf_cpu_map__idx(const struct perf_cpu_map *cpus, int cpu);
 
 #endif /* __LIBPERF_INTERNAL_CPUMAP_H */
index 7c27766..3f1c0af 100644 (file)
@@ -20,6 +20,7 @@ LIBPERF_API int perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx);
 LIBPERF_API int perf_cpu_map__nr(const struct perf_cpu_map *cpus);
 LIBPERF_API bool perf_cpu_map__empty(const struct perf_cpu_map *map);
 LIBPERF_API int perf_cpu_map__max(struct perf_cpu_map *map);
+LIBPERF_API bool perf_cpu_map__has(const struct perf_cpu_map *map, int cpu);
 
 #define perf_cpu_map__for_each_cpu(cpu, idx, cpus)             \
        for ((idx) = 0, (cpu) = perf_cpu_map__cpu(cpus, idx);   \
index 5979bf9..93696af 100644 (file)
@@ -10,6 +10,7 @@ LIBPERF_0.0.1 {
                perf_cpu_map__cpu;
                perf_cpu_map__empty;
                perf_cpu_map__max;
+               perf_cpu_map__has;
                perf_thread_map__new_dummy;
                perf_thread_map__set_pid;
                perf_thread_map__comm;
index 8a3d54a..129c027 100644 (file)
@@ -204,8 +204,8 @@ static int cs_etm_set_option(struct auxtrace_record *itr,
 
        /* Set option of each CPU we have */
        for (i = 0; i < cpu__max_cpu(); i++) {
-               if (!cpu_map__has(event_cpus, i) ||
-                   !cpu_map__has(online_cpus, i))
+               if (!perf_cpu_map__has(event_cpus, i) ||
+                   !perf_cpu_map__has(online_cpus, i))
                        continue;
 
                if (option & BIT(ETM_OPT_CTXTID)) {
@@ -523,8 +523,8 @@ cs_etm_info_priv_size(struct auxtrace_record *itr __maybe_unused,
        /* cpu map is not empty, we have specific CPUs to work with */
        if (!perf_cpu_map__empty(event_cpus)) {
                for (i = 0; i < cpu__max_cpu(); i++) {
-                       if (!cpu_map__has(event_cpus, i) ||
-                           !cpu_map__has(online_cpus, i))
+                       if (!perf_cpu_map__has(event_cpus, i) ||
+                           !perf_cpu_map__has(online_cpus, i))
                                continue;
 
                        if (cs_etm_is_ete(itr, i))
@@ -537,7 +537,7 @@ cs_etm_info_priv_size(struct auxtrace_record *itr __maybe_unused,
        } else {
                /* get configuration for all CPUs in the system */
                for (i = 0; i < cpu__max_cpu(); i++) {
-                       if (!cpu_map__has(online_cpus, i))
+                       if (!perf_cpu_map__has(online_cpus, i))
                                continue;
 
                        if (cs_etm_is_ete(itr, i))
@@ -722,8 +722,8 @@ static int cs_etm_info_fill(struct auxtrace_record *itr,
        } else {
                /* Make sure all specified CPUs are online */
                for (i = 0; i < perf_cpu_map__nr(event_cpus); i++) {
-                       if (cpu_map__has(event_cpus, i) &&
-                           !cpu_map__has(online_cpus, i))
+                       if (perf_cpu_map__has(event_cpus, i) &&
+                           !perf_cpu_map__has(online_cpus, i))
                                return -EINVAL;
                }
 
@@ -744,7 +744,7 @@ static int cs_etm_info_fill(struct auxtrace_record *itr,
        offset = CS_ETM_SNAPSHOT + 1;
 
        for (i = 0; i < cpu__max_cpu() && offset < priv_size; i++)
-               if (cpu_map__has(cpu_map, i))
+               if (perf_cpu_map__has(cpu_map, i))
                        cs_etm_get_metadata(i, &offset, itr, info);
 
        perf_cpu_map__put(online_cpus);
index 4527f63..9da1da4 100644 (file)
@@ -1617,10 +1617,10 @@ static int map_switch_event(struct perf_sched *sched, struct evsel *evsel,
                if (curr_thread && thread__has_color(curr_thread))
                        pid_color = COLOR_PIDS;
 
-               if (sched->map.cpus && !cpu_map__has(sched->map.cpus, cpu))
+               if (sched->map.cpus && !perf_cpu_map__has(sched->map.cpus, cpu))
                        continue;
 
-               if (sched->map.color_cpus && cpu_map__has(sched->map.color_cpus, cpu))
+               if (sched->map.color_cpus && perf_cpu_map__has(sched->map.color_cpus, cpu))
                        cpu_color = COLOR_CPUS;
 
                if (cpu != this_cpu)
@@ -1639,7 +1639,7 @@ static int map_switch_event(struct perf_sched *sched, struct evsel *evsel,
                        color_fprintf(stdout, color, "   ");
        }
 
-       if (sched->map.cpus && !cpu_map__has(sched->map.cpus, this_cpu))
+       if (sched->map.cpus && !perf_cpu_map__has(sched->map.cpus, this_cpu))
                goto out;
 
        timestamp__scnprintf_usec(timestamp, stimestamp, sizeof(stimestamp));
index 0cb7b01..cb29ea7 100644 (file)
@@ -112,7 +112,7 @@ static int check_cpu_topology(char *path, struct perf_cpu_map *map)
        TEST_ASSERT_VAL("Session header CPU map not set", session->header.env.cpu);
 
        for (i = 0; i < session->header.env.nr_cpus_avail; i++) {
-               if (!cpu_map__has(map, i))
+               if (!perf_cpu_map__has(map, i))
                        continue;
                pr_debug("CPU %d, core %d, socket %d\n", i,
                         session->header.env.cpu[i].core_id,
index 19e502c..f1d76a8 100644 (file)
@@ -463,11 +463,6 @@ int cpu__setup_cpunode_map(void)
        return 0;
 }
 
-bool cpu_map__has(struct perf_cpu_map *cpus, int cpu)
-{
-       return perf_cpu_map__idx(cpus, cpu) != -1;
-}
-
 size_t cpu_map__snprint(struct perf_cpu_map *map, char *buf, size_t size)
 {
        int i, cpu, start = -1;
index ecd6582..32b8b51 100644 (file)
@@ -78,8 +78,6 @@ struct cpu_aggr_map *cpu_aggr_map__new(const struct perf_cpu_map *cpus,
                                       aggr_cpu_id_get_t get_id,
                                       void *data);
 
-bool cpu_map__has(struct perf_cpu_map *cpus, int cpu);
-
 bool aggr_cpu_id__equal(const struct aggr_cpu_id *a, const struct aggr_cpu_id *b);
 bool aggr_cpu_id__is_empty(const struct aggr_cpu_id *a);
 struct aggr_cpu_id aggr_cpu_id__empty(void);
index 51b429c..8affb37 100644 (file)
@@ -218,7 +218,7 @@ struct cpu_topology *cpu_topology__new(void)
        tp->core_cpus_list = addr;
 
        for (i = 0; i < nr; i++) {
-               if (!cpu_map__has(map, i))
+               if (!perf_cpu_map__has(map, i))
                        continue;
 
                ret = build_cpu_topology(tp, i);