From: Jiri Olsa
Date: Sun, 25 Oct 2015 14:51:18 +0000 (+0100)
Subject: perf stat: Cache aggregated map entries in extra cpumap
X-Git-Tag: v4.4-rc1~155^2~2^2~5
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=1e5a29318ba8506f52a8d727b5c6a53143f9882a;p=platform%2Fkernel%2Flinux-exynos.git

perf stat: Cache aggregated map entries in extra cpumap

Currently, any time we need to access the socket or core id for a given
cpu, we access the sysfs topology file.

Add a cpus_aggr_map cpu_map to cache those entries.

Signed-off-by: Jiri Olsa
Tested-by: Kan Liang
Cc: David Ahern
Cc: Namhyung Kim
Cc: Peter Zijlstra
Link: http://lkml.kernel.org/r/1445784728-21732-3-git-send-email-jolsa@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo
---

diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 91e793a..2f438f7 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -100,6 +100,8 @@ static struct target target = {
 	.uid	= UINT_MAX,
 };
 
+typedef int (*aggr_get_id_t)(struct cpu_map *m, int cpu);
+
 static int			run_count			= 1;
 static bool			no_inherit			= false;
 static volatile pid_t		child_pid			= -1;
@@ -119,7 +121,7 @@ static unsigned int		unit_width			= 4; /* strlen("unit") */
 static bool			forever				= false;
 static struct timespec		ref_time;
 static struct cpu_map		*aggr_map;
-static int			(*aggr_get_id)(struct cpu_map *m, int cpu);
+static aggr_get_id_t		aggr_get_id;
 
 static volatile int done = 0;
 
@@ -954,22 +956,63 @@ static int perf_stat__get_core(struct cpu_map *map, int cpu)
 	return cpu_map__get_core(map, cpu, NULL);
 }
 
+static int cpu_map__get_max(struct cpu_map *map)
+{
+	int i, max = -1;
+
+	for (i = 0; i < map->nr; i++) {
+		if (map->map[i] > max)
+			max = map->map[i];
+	}
+
+	return max;
+}
+
+static struct cpu_map *cpus_aggr_map;
+
+static int perf_stat__get_aggr(aggr_get_id_t get_id, struct cpu_map *map, int idx)
+{
+	int cpu;
+
+	if (idx >= map->nr)
+		return -1;
+
+	cpu = map->map[idx];
+
+	if (cpus_aggr_map->map[cpu] == -1)
+		cpus_aggr_map->map[cpu] = get_id(map, idx);
+
+	return cpus_aggr_map->map[cpu];
+}
+
+static int perf_stat__get_socket_cached(struct cpu_map *map, int idx)
+{
+	return perf_stat__get_aggr(perf_stat__get_socket, map, idx);
+}
+
+static int perf_stat__get_core_cached(struct cpu_map *map, int idx)
+{
+	return perf_stat__get_aggr(perf_stat__get_core, map, idx);
+}
+
 static int perf_stat_init_aggr_mode(void)
 {
+	int nr;
+
 	switch (stat_config.aggr_mode) {
 	case AGGR_SOCKET:
 		if (cpu_map__build_socket_map(evsel_list->cpus, &aggr_map)) {
 			perror("cannot build socket map");
 			return -1;
 		}
-		aggr_get_id = perf_stat__get_socket;
+		aggr_get_id = perf_stat__get_socket_cached;
 		break;
 	case AGGR_CORE:
 		if (cpu_map__build_core_map(evsel_list->cpus, &aggr_map)) {
 			perror("cannot build core map");
 			return -1;
 		}
-		aggr_get_id = perf_stat__get_core;
+		aggr_get_id = perf_stat__get_core_cached;
 		break;
 	case AGGR_NONE:
 	case AGGR_GLOBAL:
@@ -978,7 +1021,15 @@ static int perf_stat_init_aggr_mode(void)
 	default:
 		break;
 	}
-	return 0;
+
+	/*
+	 * The evsel_list->cpus is the base we operate on,
+	 * taking the highest cpu number to be the size of
+	 * the aggregation translate cpumap.
+	 */
+	nr = cpu_map__get_max(evsel_list->cpus);
+	cpus_aggr_map = cpu_map__empty_new(nr + 1);
+	return cpus_aggr_map ? 0 : -ENOMEM;
 }
 
 /*
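
As an aside for readers following the patch outside the perf tree, below is a minimal standalone C sketch of the same lazy caching idea, under clearly simplified assumptions: struct cpu_map is reduced to a bare array of cpu ids, and slow_get_socket() with its 4-cpus-per-socket rule is a hypothetical stand-in for perf_stat__get_socket() and the underlying sysfs topology read; none of these stand-in names are part of the patch. As in the patch, the cache is sized by the highest cpu id (mirroring cpu_map__get_max() feeding cpu_map__empty_new(nr + 1)), every slot starts at -1, and a slot is filled the first time its cpu is resolved.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical, stripped-down stand-in for perf's struct cpu_map. */
struct cpu_map {
	int nr;
	int map[];	/* cpu ids, possibly sparse */
};

typedef int (*aggr_get_id_t)(struct cpu_map *m, int idx);

/* Stand-in for the expensive per-cpu lookup (a sysfs read in real perf). */
static int slow_get_socket(struct cpu_map *m, int idx)
{
	printf("slow lookup for cpu %d\n", m->map[idx]);
	return m->map[idx] / 4;		/* pretend there are 4 cpus per socket */
}

/* Cache indexed by cpu id; -1 means "not resolved yet". */
static int *cpus_aggr_map;

static int cpu_map_get_max(struct cpu_map *m)
{
	int i, max = -1;

	for (i = 0; i < m->nr; i++)
		if (m->map[i] > max)
			max = m->map[i];
	return max;
}

static int get_aggr_cached(aggr_get_id_t get_id, struct cpu_map *m, int idx)
{
	int cpu;

	if (idx >= m->nr)
		return -1;

	cpu = m->map[idx];
	if (cpus_aggr_map[cpu] == -1)		/* first access: do the slow lookup */
		cpus_aggr_map[cpu] = get_id(m, idx);

	return cpus_aggr_map[cpu];		/* later accesses: cache hit */
}

int main(void)
{
	struct cpu_map *m = malloc(sizeof(*m) + 3 * sizeof(int));
	int i, idx, nr;

	/* cpus 0, 1 and 5: sparse, so the cache is sized by the max id. */
	m->nr = 3;
	m->map[0] = 0;
	m->map[1] = 1;
	m->map[2] = 5;

	nr = cpu_map_get_max(m) + 1;
	cpus_aggr_map = malloc(nr * sizeof(int));
	for (i = 0; i < nr; i++)
		cpus_aggr_map[i] = -1;

	/* Two passes over the map: only the first one triggers slow lookups. */
	for (i = 0; i < 2; i++)
		for (idx = 0; idx < m->nr; idx++)
			printf("cpu %d -> socket %d\n", m->map[idx],
			       get_aggr_cached(slow_get_socket, m, idx));

	free(cpus_aggr_map);
	free(m);
	return 0;
}

Running the sketch prints the "slow lookup" message at most once per cpu; the second pass is served entirely from cpus_aggr_map, which is the effect the patch is after: the per-cpu topology lookup happens at most once instead of on every aggregation.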