// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <linux/err.h>
#include <inttypes.h>
#include <math.h>
#include <string.h>
#include "counts.h"
#include "cpumap.h"
#include "debug.h"
#include "header.h"
#include "stat.h"
#include "session.h"
#include "target.h"
#include "evlist.h"
#include "evsel.h"
#include "thread_map.h"
#include "util/hashmap.h"
#include <linux/zalloc.h>
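
/*
 * Welford's online algorithm: each call folds one sample into the
 * running mean and M2 (the sum of squared deviations from the mean)
 * in O(1) space, so the variance M2/(n - 1) can be derived later
 * without keeping the raw samples around.
 */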
void update_stats(struct stats *stats, u64 val)
{
	double delta;

	stats->n++;
	delta = val - stats->mean;
	stats->mean += delta / stats->n;
	stats->M2 += delta*(val - stats->mean);

	if (val > stats->max)
		stats->max = val;

	if (val < stats->min)
		stats->min = val;
}
double avg_stats(struct stats *stats)
{
	return stats->mean;
}
/*
 * http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
 *
 *       (\Sum n_i^2) - ((\Sum n_i)^2)/n
 * s^2 = -------------------------------
 *                  n - 1
 *
 * http://en.wikipedia.org/wiki/Stddev
 *
 * The std dev of the mean is related to the std dev by:
 *
 *             s
 * s_mean = -------
 *          sqrt(n)
 *
 */
double stddev_stats(struct stats *stats)
{
	double variance, variance_mean;

	if (stats->n < 2)
		return 0.0;

	variance = stats->M2 / (stats->n - 1);
	variance_mean = variance / stats->n;

	return sqrt(variance_mean);
}
double rel_stddev_stats(double stddev, double avg)
{
	double pct = 0.0;

	if (avg)
		pct = 100.0 * stddev/avg;

	return pct;
}
bool __perf_stat_evsel__is(struct evsel *evsel, enum perf_stat_evsel_id id)
{
	struct perf_stat_evsel *ps = evsel->stats;

	return ps->id == id;
}
#define ID(id, name) [PERF_STAT_EVSEL_ID__##id] = #name
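/*
 * ID() pastes the enum suffix and stringifies the event name, e.g.
 * ID(APERF, msr/aperf/) expands to
 * [PERF_STAT_EVSEL_ID__APERF] = "msr/aperf/".
 */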
static const char *id_str[PERF_STAT_EVSEL_ID__MAX] = {
	ID(NONE, x),
	ID(CYCLES_IN_TX, cpu/cycles-t/),
	ID(TRANSACTION_START, cpu/tx-start/),
	ID(ELISION_START, cpu/el-start/),
	ID(CYCLES_IN_TX_CP, cpu/cycles-ct/),
	ID(TOPDOWN_TOTAL_SLOTS, topdown-total-slots),
	ID(TOPDOWN_SLOTS_ISSUED, topdown-slots-issued),
	ID(TOPDOWN_SLOTS_RETIRED, topdown-slots-retired),
	ID(TOPDOWN_FETCH_BUBBLES, topdown-fetch-bubbles),
	ID(TOPDOWN_RECOVERY_BUBBLES, topdown-recovery-bubbles),
	ID(TOPDOWN_RETIRING, topdown-retiring),
	ID(TOPDOWN_BAD_SPEC, topdown-bad-spec),
	ID(TOPDOWN_FE_BOUND, topdown-fe-bound),
	ID(TOPDOWN_BE_BOUND, topdown-be-bound),
	ID(TOPDOWN_HEAVY_OPS, topdown-heavy-ops),
	ID(TOPDOWN_BR_MISPREDICT, topdown-br-mispredict),
	ID(TOPDOWN_FETCH_LAT, topdown-fetch-lat),
	ID(TOPDOWN_MEM_BOUND, topdown-mem-bound),
	ID(SMI_NUM, msr/smi/),
	ID(APERF, msr/aperf/),
};
#undef ID
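
/*
 * Map an evsel to one of the ids above: match either the exact event
 * name, or the id string as a substring together with the evsel's PMU
 * name (e.g. a PMU-prefixed spelling such as "cpu_core/topdown-retiring/";
 * the example name is illustrative).
 */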
static void perf_stat_evsel_id_init(struct evsel *evsel)
{
	struct perf_stat_evsel *ps = evsel->stats;
	int i;

	/* ps->id is 0 hence PERF_STAT_EVSEL_ID__NONE by default */

	for (i = 0; i < PERF_STAT_EVSEL_ID__MAX; i++) {
		if (!strcmp(evsel__name(evsel), id_str[i]) ||
		    (strstr(evsel__name(evsel), id_str[i]) && evsel->pmu_name
		     && strstr(evsel__name(evsel), evsel->pmu_name))) {
			ps->id = i;
			break;
		}
	}
}
static void evsel__reset_aggr_stats(struct evsel *evsel)
{
	struct perf_stat_evsel *ps = evsel->stats;
	struct perf_stat_aggr *aggr = ps->aggr;

	if (aggr)
		memset(aggr, 0, sizeof(*aggr) * ps->nr_aggr);
}
static void evsel__reset_stat_priv(struct evsel *evsel)
{
	struct perf_stat_evsel *ps = evsel->stats;

	init_stats(&ps->res_stats);
	evsel__reset_aggr_stats(evsel);
}
static int evsel__alloc_aggr_stats(struct evsel *evsel, int nr_aggr)
{
	struct perf_stat_evsel *ps = evsel->stats;

	if (ps == NULL)
		return 0;

	ps->nr_aggr = nr_aggr;
	ps->aggr = calloc(nr_aggr, sizeof(*ps->aggr));
	if (ps->aggr == NULL)
		return -ENOMEM;

	return 0;
}
int evlist__alloc_aggr_stats(struct evlist *evlist, int nr_aggr)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel__alloc_aggr_stats(evsel, nr_aggr) < 0)
			return -1;
	}
	return 0;
}
static int evsel__alloc_stat_priv(struct evsel *evsel, int nr_aggr)
{
	struct perf_stat_evsel *ps;

	ps = zalloc(sizeof(*ps));
	if (ps == NULL)
		return -ENOMEM;

	evsel->stats = ps;

	if (nr_aggr && evsel__alloc_aggr_stats(evsel, nr_aggr) < 0) {
		evsel->stats = NULL;
		free(ps);
		return -ENOMEM;
	}

	perf_stat_evsel_id_init(evsel);
	evsel__reset_stat_priv(evsel);
	return 0;
}
static void evsel__free_stat_priv(struct evsel *evsel)
{
	struct perf_stat_evsel *ps = evsel->stats;

	if (ps) {
		zfree(&ps->aggr);
		zfree(&ps->group_data);
	}
	zfree(&evsel->stats);
}
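
/*
 * prev_raw_counts keeps the previous raw readings so that later reads
 * can be turned into deltas (see evsel__compute_deltas()), e.g. for
 * interval output.
 */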
static int evsel__alloc_prev_raw_counts(struct evsel *evsel)
{
	int cpu_map_nr = evsel__nr_cpus(evsel);
	int nthreads = perf_thread_map__nr(evsel->core.threads);
	struct perf_counts *counts;

	counts = perf_counts__new(cpu_map_nr, nthreads);
	if (counts)
		evsel->prev_raw_counts = counts;

	return counts ? 0 : -ENOMEM;
}
static void evsel__free_prev_raw_counts(struct evsel *evsel)
{
	perf_counts__delete(evsel->prev_raw_counts);
	evsel->prev_raw_counts = NULL;
}
static void evsel__reset_prev_raw_counts(struct evsel *evsel)
{
	if (evsel->prev_raw_counts)
		perf_counts__reset(evsel->prev_raw_counts);
}
static int evsel__alloc_stats(struct evsel *evsel, int nr_aggr, bool alloc_raw)
{
	if (evsel__alloc_stat_priv(evsel, nr_aggr) < 0 ||
	    evsel__alloc_counts(evsel) < 0 ||
	    (alloc_raw && evsel__alloc_prev_raw_counts(evsel) < 0))
		return -ENOMEM;

	return 0;
}
int evlist__alloc_stats(struct perf_stat_config *config,
			struct evlist *evlist, bool alloc_raw)
{
	struct evsel *evsel;
	int nr_aggr = 0;

	if (config && config->aggr_map)
		nr_aggr = config->aggr_map->nr;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel__alloc_stats(evsel, nr_aggr, alloc_raw))
			goto out_free;
	}

	return 0;

out_free:
	evlist__free_stats(evlist);
	return -1;
}
void evlist__free_stats(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		evsel__free_stat_priv(evsel);
		evsel__free_counts(evsel);
		evsel__free_prev_raw_counts(evsel);
	}
}
void evlist__reset_stats(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		evsel__reset_stat_priv(evsel);
		evsel__reset_counts(evsel);
	}
}
void evlist__reset_aggr_stats(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel)
		evsel__reset_aggr_stats(evsel);
}
void evlist__reset_prev_raw_counts(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel)
		evsel__reset_prev_raw_counts(evsel);
}
static void evsel__copy_prev_raw_counts(struct evsel *evsel)
{
	int idx, nthreads = perf_thread_map__nr(evsel->core.threads);

	for (int thread = 0; thread < nthreads; thread++) {
		perf_cpu_map__for_each_idx(idx, evsel__cpus(evsel)) {
			*perf_counts(evsel->counts, idx, thread) =
				*perf_counts(evsel->prev_raw_counts, idx, thread);
		}
	}
}
void evlist__copy_prev_raw_counts(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel)
		evsel__copy_prev_raw_counts(evsel);
}
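
/*
 * The per-package mask is keyed by a 64-bit value with the die id in
 * the upper 32 bits and the socket id in the lower 32 bits (see
 * check_per_pkg() below), so hashing the low half of the key is
 * sufficient.
 */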
static size_t pkg_id_hash(long __key, void *ctx __maybe_unused)
{
	uint64_t *key = (uint64_t *) __key;

	return *key & 0xffffffff;
}
static bool pkg_id_equal(long __key1, long __key2, void *ctx __maybe_unused)
{
	uint64_t *key1 = (uint64_t *) __key1;
	uint64_t *key2 = (uint64_t *) __key2;

	return *key1 == *key2;
}
static int check_per_pkg(struct evsel *counter, struct perf_counts_values *vals,
			 int cpu_map_idx, bool *skip)
{
	struct hashmap *mask = counter->per_pkg_mask;
	struct perf_cpu_map *cpus = evsel__cpus(counter);
	struct perf_cpu cpu = perf_cpu_map__cpu(cpus, cpu_map_idx);
	int s, d, ret = 0;
	uint64_t *key;

	*skip = false;

	if (!counter->per_pkg)
		return 0;

	if (perf_cpu_map__empty(cpus))
		return 0;

	if (!mask) {
		mask = hashmap__new(pkg_id_hash, pkg_id_equal, NULL);
		if (IS_ERR(mask))
			return -ENOMEM;

		counter->per_pkg_mask = mask;
	}

	/*
	 * We do not consider an event that has not run as a good
	 * instance to mark a package as used (skip=1). Otherwise
	 * we may run into a situation where the first CPU in a package
	 * is not running anything, yet the second is, and this function
	 * would mark the package as used after the first CPU and would
	 * not read the values from the second CPU.
	 */
	if (!(vals->run && vals->ena))
		return 0;

	s = cpu__get_socket_id(cpu);
	if (s < 0)
		return -1;

	/*
	 * On a multi-die system, die_id > 0; on a no-die system, die_id = 0.
	 * We use a hashmap keyed on (socket, die) to track used pairs.
	 */
	d = cpu__get_die_id(cpu);
	if (d < 0)
		return -1;

	key = malloc(sizeof(*key));
	if (!key)
		return -ENOMEM;

	*key = (uint64_t)d << 32 | s;
	if (hashmap__find(mask, key, NULL)) {
		*skip = true;
		free(key);
	} else
		ret = hashmap__add(mask, key, 1);

	return ret;
}
static bool evsel__count_has_error(struct evsel *evsel,
				   struct perf_counts_values *count,
				   struct perf_stat_config *config)
{
	/* the evsel failed already */
	if (evsel->err || evsel->counts->scaled == -1)
		return true;

	/* this is meaningful for CPU aggregation modes only */
	if (config->aggr_mode == AGGR_GLOBAL)
		return false;

	/* it's considered ok when it actually ran */
	if (count->ena != 0 && count->run != 0)
		return false;

	return true;
}
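
/*
 * Fold one raw reading into the stats: drop duplicate per-package
 * values, turn the raw reading into a delta against the previous one
 * (unless the event is a snapshot) and scale it using the
 * enabled/running times to compensate for multiplexing, then add the
 * result to the per-thread or per-aggregation-id bucket.
 */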
static int
process_counter_values(struct perf_stat_config *config, struct evsel *evsel,
		       int cpu_map_idx, int thread,
		       struct perf_counts_values *count)
{
	struct perf_stat_evsel *ps = evsel->stats;
	static struct perf_counts_values zero;
	bool skip = false;

	if (check_per_pkg(evsel, count, cpu_map_idx, &skip)) {
		pr_err("failed to read per-pkg counter\n");
		return -1;
	}

	if (skip)
		count = &zero;

	if (!evsel->snapshot)
		evsel__compute_deltas(evsel, cpu_map_idx, thread, count);
	perf_counts_values__scale(count, config->scale, NULL);

	if (config->aggr_mode == AGGR_THREAD) {
		struct perf_counts_values *aggr_counts = &ps->aggr[thread].counts;

		/*
		 * Skip zero values when --per-thread is enabled globally,
		 * otherwise the output has too many zeros.
		 */
		if (count->val == 0 && config->system_wide)
			return 0;

		ps->aggr[thread].nr++;

		aggr_counts->val += count->val;
		aggr_counts->ena += count->ena;
		aggr_counts->run += count->run;
		return 0;
	}

	if (ps->aggr) {
		struct perf_cpu cpu = perf_cpu_map__cpu(evsel->core.cpus, cpu_map_idx);
		struct aggr_cpu_id aggr_id = config->aggr_get_id(config, cpu);
		struct perf_stat_aggr *ps_aggr;
		int i;

		for (i = 0; i < ps->nr_aggr; i++) {
			if (!aggr_cpu_id__equal(&aggr_id, &config->aggr_map->map[i]))
				continue;

			ps_aggr = &ps->aggr[i];
			ps_aggr->nr++;

			/*
			 * When any result is bad, zero them all to give
			 * consistent output in interval mode. But per-task
			 * counters can have 0 enabled time when some tasks
			 * are idle.
			 */
			if (evsel__count_has_error(evsel, count, config) && !ps_aggr->failed) {
				ps_aggr->counts.val = 0;
				ps_aggr->counts.ena = 0;
				ps_aggr->counts.run = 0;
				ps_aggr->failed = true;
			}

			if (!ps_aggr->failed) {
				ps_aggr->counts.val += count->val;
				ps_aggr->counts.ena += count->ena;
				ps_aggr->counts.run += count->run;
			}
			break;
		}
	}

	return 0;
}
static int process_counter_maps(struct perf_stat_config *config,
				struct evsel *counter)
{
	int nthreads = perf_thread_map__nr(counter->core.threads);
	int ncpus = evsel__nr_cpus(counter);
	int idx, thread;

	for (thread = 0; thread < nthreads; thread++) {
		for (idx = 0; idx < ncpus; idx++) {
			if (process_counter_values(config, counter, idx, thread,
						   perf_counts(counter->counts, idx, thread)))
				return -1;
		}
	}

	return 0;
}
int perf_stat_process_counter(struct perf_stat_config *config,
			      struct evsel *counter)
{
	struct perf_stat_evsel *ps = counter->stats;
	u64 *count;
	int ret;

	if (counter->per_pkg)
		evsel__zero_per_pkg(counter);

	ret = process_counter_maps(config, counter);
	if (ret)
		return ret;

	if (config->aggr_mode != AGGR_GLOBAL)
		return 0;

	/*
	 * GLOBAL aggregation mode has a single set of aggr counts,
	 * so we can use ps->aggr[0] as the actual output.
	 */
	count = ps->aggr[0].counts.values;
	update_stats(&ps->res_stats, *count);

	if (verbose > 0) {
		fprintf(config->output, "%s: %" PRIu64 " %" PRIu64 " %" PRIu64 "\n",
			evsel__name(counter), count[0], count[1], count[2]);
	}

	return 0;
}
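
/*
 * Alias merging: the same logical event may be opened once per PMU
 * instance (e.g. uncore boxes or hybrid core PMUs); their aggregated
 * counts are summed into the first evsel so they print as one row.
 */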
static int evsel__merge_aggr_counters(struct evsel *evsel, struct evsel *alias)
{
	struct perf_stat_evsel *ps_a = evsel->stats;
	struct perf_stat_evsel *ps_b = alias->stats;
	int i;

	if (ps_a->aggr == NULL && ps_b->aggr == NULL)
		return 0;

	if (ps_a->nr_aggr != ps_b->nr_aggr) {
		pr_err("Unmatched aggregation mode between aliases\n");
		return -1;
	}

	for (i = 0; i < ps_a->nr_aggr; i++) {
		struct perf_counts_values *aggr_counts_a = &ps_a->aggr[i].counts;
		struct perf_counts_values *aggr_counts_b = &ps_b->aggr[i].counts;

		/* NB: don't increase aggr.nr for aliases */

		aggr_counts_a->val += aggr_counts_b->val;
		aggr_counts_a->ena += aggr_counts_b->ena;
		aggr_counts_a->run += aggr_counts_b->run;
	}

	return 0;
}
/* events should have the same name, scale, unit, cgroup but on different PMUs */
static bool evsel__is_alias(struct evsel *evsel_a, struct evsel *evsel_b)
{
	if (strcmp(evsel__name(evsel_a), evsel__name(evsel_b)))
		return false;

	if (evsel_a->scale != evsel_b->scale)
		return false;

	if (evsel_a->cgrp != evsel_b->cgrp)
		return false;

	if (strcmp(evsel_a->unit, evsel_b->unit))
		return false;

	if (evsel__is_clock(evsel_a) != evsel__is_clock(evsel_b))
		return false;

	/* an alias must be on a different PMU */
	return !!strcmp(evsel_a->pmu_name, evsel_b->pmu_name);
}
static void evsel__merge_aliases(struct evsel *evsel)
{
	struct evlist *evlist = evsel->evlist;
	struct evsel *alias;

	alias = list_prepare_entry(evsel, &(evlist->core.entries), core.node);
	list_for_each_entry_continue(alias, &evlist->core.entries, core.node) {
		/* Merge the same events on different PMUs. */
		if (evsel__is_alias(evsel, alias)) {
			evsel__merge_aggr_counters(evsel, alias);
			alias->merged_stat = true;
		}
	}
}
static bool evsel__should_merge_hybrid(const struct evsel *evsel,
				       const struct perf_stat_config *config)
{
	return config->hybrid_merge && evsel__is_hybrid(evsel);
}
static void evsel__merge_stats(struct evsel *evsel, struct perf_stat_config *config)
{
	/* this evsel is already merged */
	if (evsel->merged_stat)
		return;

	if (evsel->auto_merge_stats || evsel__should_merge_hybrid(evsel, config))
		evsel__merge_aliases(evsel);
}
/* merge the same uncore and hybrid events if requested */
void perf_stat_merge_counters(struct perf_stat_config *config, struct evlist *evlist)
{
	struct evsel *evsel;

	if (config->no_merge)
		return;

	evlist__for_each_entry(evlist, evsel)
		evsel__merge_stats(evsel, config);
}
static void evsel__update_percore_stats(struct evsel *evsel, struct aggr_cpu_id *core_id)
{
	struct perf_stat_evsel *ps = evsel->stats;
	struct perf_counts_values counts = { 0, };
	struct aggr_cpu_id id;
	struct perf_cpu cpu;
	int idx;

	/* collect per-core counts */
	perf_cpu_map__for_each_cpu(cpu, idx, evsel->core.cpus) {
		struct perf_stat_aggr *aggr = &ps->aggr[idx];

		id = aggr_cpu_id__core(cpu, NULL);
		if (!aggr_cpu_id__equal(core_id, &id))
			continue;

		counts.val += aggr->counts.val;
		counts.ena += aggr->counts.ena;
		counts.run += aggr->counts.run;
	}

	/* update aggregated per-core counts for each CPU */
	perf_cpu_map__for_each_cpu(cpu, idx, evsel->core.cpus) {
		struct perf_stat_aggr *aggr = &ps->aggr[idx];

		id = aggr_cpu_id__core(cpu, NULL);
		if (!aggr_cpu_id__equal(core_id, &id))
			continue;

		aggr->counts.val = counts.val;
		aggr->counts.ena = counts.ena;
		aggr->counts.run = counts.run;

		aggr->used = true;
	}
}
/* we have an aggr_map for cpu, but want to aggregate the counters per-core */
static void evsel__process_percore(struct evsel *evsel)
{
	struct perf_stat_evsel *ps = evsel->stats;
	struct aggr_cpu_id core_id;
	struct perf_cpu cpu;
	int idx;

	if (!evsel->percore)
		return;

	perf_cpu_map__for_each_cpu(cpu, idx, evsel->core.cpus) {
		struct perf_stat_aggr *aggr = &ps->aggr[idx];

		if (aggr->used)
			continue;

		core_id = aggr_cpu_id__core(cpu, NULL);
		evsel__update_percore_stats(evsel, &core_id);
	}
}
/* process cpu stats on per-core events */
void perf_stat_process_percore(struct perf_stat_config *config, struct evlist *evlist)
{
	struct evsel *evsel;

	if (config->aggr_mode != AGGR_NONE)
		return;

	evlist__for_each_entry(evlist, evsel)
		evsel__process_percore(evsel);
}
static void evsel__update_shadow_stats(struct evsel *evsel)
{
	struct perf_stat_evsel *ps = evsel->stats;
	int i;

	if (ps->aggr == NULL)
		return;

	for (i = 0; i < ps->nr_aggr; i++) {
		struct perf_counts_values *aggr_counts = &ps->aggr[i].counts;

		perf_stat__update_shadow_stats(evsel, aggr_counts->val, i, &rt_stat);
	}
}
void perf_stat_process_shadow_stats(struct perf_stat_config *config __maybe_unused,
				    struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel)
		evsel__update_shadow_stats(evsel);
}
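
/*
 * Counter values recorded into a perf.data file (perf stat record)
 * come back as PERF_RECORD_STAT events; route each one to the owning
 * evsel's counts at the matching CPU/thread slot.
 */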
int perf_event__process_stat_event(struct perf_session *session,
				   union perf_event *event)
{
	struct perf_counts_values count, *ptr;
	struct perf_record_stat *st = &event->stat;
	struct evsel *counter;
	int cpu_map_idx;

	count.val = st->val;
	count.ena = st->ena;
	count.run = st->run;

	counter = evlist__id2evsel(session->evlist, st->id);
	if (!counter) {
		pr_err("Failed to resolve counter for stat event.\n");
		return -EINVAL;
	}
	cpu_map_idx = perf_cpu_map__idx(evsel__cpus(counter), (struct perf_cpu){.cpu = st->cpu});
	if (cpu_map_idx == -1) {
		pr_err("Invalid CPU %d for event %s.\n", st->cpu, evsel__name(counter));
		return -EINVAL;
	}
	ptr = perf_counts(counter->counts, cpu_map_idx, st->thread);
	if (ptr == NULL) {
		pr_err("Failed to find perf count for CPU %d thread %d on event %s.\n",
			st->cpu, st->thread, evsel__name(counter));
		return -EINVAL;
	}

	*ptr = count;
	counter->supported = true;
	return 0;
}
size_t perf_event__fprintf_stat(union perf_event *event, FILE *fp)
{
	struct perf_record_stat *st = (struct perf_record_stat *)event;
	size_t ret;

	ret  = fprintf(fp, "\n... id %" PRI_lu64 ", cpu %d, thread %d\n",
		       st->id, st->cpu, st->thread);
	ret += fprintf(fp, "... value %" PRI_lu64 ", enabled %" PRI_lu64 ", running %" PRI_lu64 "\n",
		       st->val, st->ena, st->run);

	return ret;
}
size_t perf_event__fprintf_stat_round(union perf_event *event, FILE *fp)
{
	struct perf_record_stat_round *rd = (struct perf_record_stat_round *)event;
	size_t ret;

	ret = fprintf(fp, "\n... time %" PRI_lu64 ", type %s\n", rd->time,
		      rd->type == PERF_STAT_ROUND_TYPE__FINAL ? "FINAL" : "INTERVAL");

	return ret;
}
size_t perf_event__fprintf_stat_config(union perf_event *event, FILE *fp)
{
	struct perf_stat_config sc;
	size_t ret;

	perf_event__read_stat_config(&sc, &event->stat_config);

	ret  = fprintf(fp, "\n");
	ret += fprintf(fp, "... aggr_mode %d\n", sc.aggr_mode);
	ret += fprintf(fp, "... scale     %d\n", sc.scale);
	ret += fprintf(fp, "... interval  %u\n", sc.interval);

	return ret;
}
int create_perf_stat_counter(struct evsel *evsel,
			     struct perf_stat_config *config,
			     struct target *target,
			     int cpu_map_idx)
{
	struct perf_event_attr *attr = &evsel->core.attr;
	struct evsel *leader = evsel__leader(evsel);

	attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
			    PERF_FORMAT_TOTAL_TIME_RUNNING;

	/*
	 * The event is part of a non-trivial group, so enable the group
	 * read (for the leader) and ID retrieval for all members.
	 */
	if (leader->core.nr_members > 1)
		attr->read_format |= PERF_FORMAT_ID|PERF_FORMAT_GROUP;
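	/*
	 * With PERF_FORMAT_GROUP, a single read() on the leader returns
	 * all members; given the flags set here the layout is:
	 * { u64 nr; u64 time_enabled; u64 time_running;
	 *   { u64 value; u64 id; } cntr[nr]; }
	 */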

	attr->inherit = !config->no_inherit && list_empty(&evsel->bpf_counter_list);

	/*
	 * Some events get initialized with sample_(period/type) set,
	 * like tracepoints. Clear them up for counting.
	 */
	attr->sample_period = 0;

	if (config->identifier)
		attr->sample_type = PERF_SAMPLE_IDENTIFIER;

	if (config->all_user) {
		attr->exclude_kernel = 1;
		attr->exclude_user   = 0;
	}

	if (config->all_kernel) {
		attr->exclude_kernel = 0;
		attr->exclude_user   = 1;
	}

	/*
	 * Disable all counters initially; they will be enabled either
	 * manually by us or by the kernel via enable_on_exec later.
	 */
	if (evsel__is_group_leader(evsel)) {
		attr->disabled = 1;

		/*
		 * In case of initial_delay we enable the tracee events
		 * manually.
		 */
		if (target__none(target) && !config->initial_delay)
			attr->enable_on_exec = 1;
	}

	if (target__has_cpu(target) && !target__has_per_thread(target))
		return evsel__open_per_cpu(evsel, evsel__cpus(evsel), cpu_map_idx);

	return evsel__open_per_thread(evsel, evsel->core.threads);
}