libperf: Introduce perf_evlist_mmap_ops::idx callback
author Jiri Olsa <jolsa@kernel.org>
Mon, 7 Oct 2019 12:53:23 +0000 (14:53 +0200)
committer Arnaldo Carvalho de Melo <acme@redhat.com>
Thu, 10 Oct 2019 15:20:08 +0000 (12:20 -0300)
Add the perf_evlist_mmap_ops::idx callback, to be called from
mmap_per_cpu() and mmap_per_thread() with the current cpu or thread
index.

It's needed by the current aux code: perf will use this callback to set
the aux index.

Signed-off-by: Jiri Olsa <jolsa@kernel.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Michael Petlan <mpetlan@redhat.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: http://lore.kernel.org/lkml/20191007125344.14268-16-jolsa@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
tools/perf/lib/evlist.c
tools/perf/lib/include/internal/evlist.h

tools/perf/lib/evlist.c
index 88d63f5..3832d3e 100644
@@ -426,7 +426,8 @@ mmap_per_evsel(struct perf_evlist *evlist, int idx,
 }
 
 static int
-mmap_per_thread(struct perf_evlist *evlist, struct perf_mmap_param *mp)
+mmap_per_thread(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
+               struct perf_mmap_param *mp)
 {
        int thread;
        int nr_threads = perf_thread_map__nr(evlist->threads);
@@ -435,6 +436,9 @@ mmap_per_thread(struct perf_evlist *evlist, struct perf_mmap_param *mp)
                int output = -1;
                int output_overwrite = -1;
 
+               if (ops->idx)
+                       ops->idx(evlist, mp, thread, false);
+
                if (mmap_per_evsel(evlist, thread, mp, 0, thread,
                                   &output, &output_overwrite))
                        goto out_unmap;
@@ -448,7 +452,8 @@ out_unmap:
 }
 
 static int
-mmap_per_cpu(struct perf_evlist *evlist, struct perf_mmap_param *mp)
+mmap_per_cpu(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
+            struct perf_mmap_param *mp)
 {
        int nr_threads = perf_thread_map__nr(evlist->threads);
        int nr_cpus    = perf_cpu_map__nr(evlist->cpus);
@@ -458,6 +463,9 @@ mmap_per_cpu(struct perf_evlist *evlist, struct perf_mmap_param *mp)
                int output = -1;
                int output_overwrite = -1;
 
+               if (ops->idx)
+                       ops->idx(evlist, mp, cpu, true);
+
                for (thread = 0; thread < nr_threads; thread++) {
                        if (mmap_per_evsel(evlist, cpu, mp, cpu,
                                           thread, &output, &output_overwrite))
@@ -496,15 +504,15 @@ int perf_evlist__mmap_ops(struct perf_evlist *evlist,
        }
 
        if (perf_cpu_map__empty(cpus))
-               return mmap_per_thread(evlist, mp);
+               return mmap_per_thread(evlist, ops, mp);
 
-       return mmap_per_cpu(evlist, mp);
+       return mmap_per_cpu(evlist, ops, mp);
 }
 
 int perf_evlist__mmap(struct perf_evlist *evlist, int pages)
 {
        struct perf_mmap_param mp;
-       struct perf_evlist_mmap_ops ops;
+       struct perf_evlist_mmap_ops ops = { 0 };
 
        evlist->mmap_len = (pages + 1) * page_size;
        mp.mask = evlist->mmap_len - page_size - 1;
tools/perf/lib/include/internal/evlist.h
index e5f092f..053f620 100644
@@ -27,7 +27,11 @@ struct perf_evlist {
        struct perf_mmap        *mmap_ovw;
 };
 
+typedef void
+(*perf_evlist_mmap__cb_idx_t)(struct perf_evlist*, struct perf_mmap_param*, int, bool);
+
 struct perf_evlist_mmap_ops {
+       perf_evlist_mmap__cb_idx_t      idx;
 };
 
 int perf_evlist__alloc_pollfd(struct perf_evlist *evlist);
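
For context, here is a minimal sketch of how a libperf user such as perf
could hook the new callback. The names evlist__mmap_cb_idx and
do_evlist_mmap are illustrative only and not part of this patch, and the
include lines assume libperf's internal evlist/mmap headers provide
struct perf_mmap_param and the perf_evlist__mmap_ops() declaration.

#include <stdbool.h>
#include <internal/evlist.h>	/* struct perf_evlist, struct perf_evlist_mmap_ops */
#include <internal/mmap.h>	/* struct perf_mmap_param */

/* Hypothetical callback: mmap_per_cpu()/mmap_per_thread() invoke it with the
 * current cpu or thread index (per_cpu tells which) before mapping it. */
static void evlist__mmap_cb_idx(struct perf_evlist *evlist,
				struct perf_mmap_param *mp,
				int idx, bool per_cpu)
{
	/* e.g. stash idx so later per-evsel mmap setup (aux) can use it */
}

/* Hypothetical caller: zero-initialize the ops so unset callbacks stay NULL
 * (ops->idx is checked before being called), then pass them to
 * perf_evlist__mmap_ops() together with the mmap parameters. */
static int do_evlist_mmap(struct perf_evlist *evlist, struct perf_mmap_param *mp)
{
	struct perf_evlist_mmap_ops ops = {
		.idx = evlist__mmap_cb_idx,
	};

	return perf_evlist__mmap_ops(evlist, &ops, mp);
}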