 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * Released under the GPL v2. (and only v2, not any later version)

#include <linux/bitops.h>
#include "thread_map.h"
#include "../../../include/linux/hw_breakpoint.h"
#include "../../include/linux/perf_event.h"
#include "perf_regs.h"

#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
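
/*
 * FD() resolves to the lvalue holding the file descriptor for a given
 * (cpu, thread) slot in the evsel's fd xyarray; -1 marks a slot that is
 * not open.
 */
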
static int __perf_evsel__sample_size(u64 sample_type)
	u64 mask = sample_type & PERF_SAMPLE_MASK;

	for (i = 0; i < 64; i++) {
		if (mask & (1ULL << i))

void hists__init(struct hists *hists)
	memset(hists, 0, sizeof(*hists));
	hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
	hists->entries_in = &hists->entries_in_array[0];
	hists->entries_collapsed = RB_ROOT;
	hists->entries = RB_ROOT;
	pthread_mutex_init(&hists->lock, NULL);

void perf_evsel__init(struct perf_evsel *evsel,
		      struct perf_event_attr *attr, int idx)
	INIT_LIST_HEAD(&evsel->node);
	hists__init(&evsel->hists);
	evsel->sample_size = __perf_evsel__sample_size(attr->sample_type);

struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx)
	struct perf_evsel *evsel = zalloc(sizeof(*evsel));

	perf_evsel__init(evsel, attr, idx);
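
/*
 * A minimal allocation sketch (illustrative only, error handling elided):
 *
 *	struct perf_event_attr attr = {
 *		.type	= PERF_TYPE_HARDWARE,
 *		.config	= PERF_COUNT_HW_CPU_CYCLES,
 *	};
 *	struct perf_evsel *evsel = perf_evsel__new(&attr, 0);
 */
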
const char *perf_evsel__hw_names[PERF_COUNT_HW_MAX] = {
	"stalled-cycles-frontend",
	"stalled-cycles-backend",

static const char *__perf_evsel__hw_name(u64 config)
	if (config < PERF_COUNT_HW_MAX && perf_evsel__hw_names[config])
		return perf_evsel__hw_names[config];

	return "unknown-hardware";

static int perf_evsel__add_modifiers(struct perf_evsel *evsel, char *bf, size_t size)
	struct perf_event_attr *attr = &evsel->attr;
	bool exclude_guest_default = false;

#define MOD_PRINT(context, mod)	do {					\
		if (!attr->exclude_##context) {				\
			if (!colon) colon = ++r;			\
			r += scnprintf(bf + r, size - r, "%c", mod);	\

	if (attr->exclude_kernel || attr->exclude_user || attr->exclude_hv) {
		MOD_PRINT(kernel, 'k');
		MOD_PRINT(user, 'u');
		exclude_guest_default = true;

	if (attr->precise_ip) {
		r += scnprintf(bf + r, size - r, "%.*s", attr->precise_ip, "ppp");
		exclude_guest_default = true;

	if (attr->exclude_host || attr->exclude_guest == exclude_guest_default) {
		MOD_PRINT(host, 'H');
		MOD_PRINT(guest, 'G');

static int perf_evsel__hw_name(struct perf_evsel *evsel, char *bf, size_t size)
	int r = scnprintf(bf, size, "%s", __perf_evsel__hw_name(evsel->attr.config));

	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
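
/*
 * perf_evsel__add_modifiers() appends the event modifier suffix after a
 * colon: a letter for each of user/kernel/hv that is NOT excluded (once
 * any exclude bit is set), up to three 'p's for the precise_ip level, and
 * 'H'/'G' for host/guest, yielding names like "instructions:u" or
 * "cycles:pp".
 */
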
const char *perf_evsel__sw_names[PERF_COUNT_SW_MAX] = {

static const char *__perf_evsel__sw_name(u64 config)
	if (config < PERF_COUNT_SW_MAX && perf_evsel__sw_names[config])
		return perf_evsel__sw_names[config];
	return "unknown-software";

static int perf_evsel__sw_name(struct perf_evsel *evsel, char *bf, size_t size)
	int r = scnprintf(bf, size, "%s", __perf_evsel__sw_name(evsel->attr.config));

	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);

static int __perf_evsel__bp_name(char *bf, size_t size, u64 addr, u64 type)
	r = scnprintf(bf, size, "mem:0x%" PRIx64 ":", addr);

	if (type & HW_BREAKPOINT_R)
		r += scnprintf(bf + r, size - r, "r");

	if (type & HW_BREAKPOINT_W)
		r += scnprintf(bf + r, size - r, "w");

	if (type & HW_BREAKPOINT_X)
		r += scnprintf(bf + r, size - r, "x");
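
/*
 * The result is a spec such as "mem:0x4711:rw": the breakpoint address in
 * hex followed by one letter per requested access type.
 */
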
static int perf_evsel__bp_name(struct perf_evsel *evsel, char *bf, size_t size)
	struct perf_event_attr *attr = &evsel->attr;
	int r = __perf_evsel__bp_name(bf, size, attr->bp_addr, attr->bp_type);

	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);

const char *perf_evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX]
				[PERF_EVSEL__MAX_ALIASES] = {
 { "L1-dcache",	"l1-d",		"l1d",		"L1-data",		},
 { "L1-icache",	"l1-i",		"l1i",		"L1-instruction",	},
 { "dTLB",	"d-tlb",	"Data-TLB",				},
 { "iTLB",	"i-tlb",	"Instruction-TLB",			},
 { "branch",	"branches",	"bpu",		"btb",		"bpc",	},

const char *perf_evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX]
				   [PERF_EVSEL__MAX_ALIASES] = {
 { "load",	"loads",	"read",					},
 { "store",	"stores",	"write",				},
 { "prefetch",	"prefetches",	"speculative-read", "speculative-load",	},

const char *perf_evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX]
				       [PERF_EVSEL__MAX_ALIASES] = {
 { "refs",	"Reference",	"ops",		"access",		},
 { "misses",	"miss",						},

#define C(x)		PERF_COUNT_HW_CACHE_##x
#define CACHE_READ	(1 << C(OP_READ))
#define CACHE_WRITE	(1 << C(OP_WRITE))
#define CACHE_PREFETCH	(1 << C(OP_PREFETCH))
#define COP(x)		(1 << x)

 * cache operation stat
 * L1I : Read and prefetch only
 * ITLB and BPU : Read-only

static unsigned long perf_evsel__hw_cache_stat[C(MAX)] = {
 [C(L1D)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(L1I)]	= (CACHE_READ | CACHE_PREFETCH),
 [C(LL)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(DTLB)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(ITLB)]	= (CACHE_READ),
 [C(BPU)]	= (CACHE_READ),
 [C(NODE)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
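
/*
 * Each entry above is a bitmask of the operations valid for that cache
 * level, tested with COP(): e.g. only CACHE_READ is set for C(ITLB), so a
 * write op on the ITLB is rejected as invalid.
 */
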
bool perf_evsel__is_cache_op_valid(u8 type, u8 op)
	if (perf_evsel__hw_cache_stat[type] & COP(op))
		return true;	/* valid */

	return false;	/* invalid */

int __perf_evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result,
					    char *bf, size_t size)
		return scnprintf(bf, size, "%s-%s-%s", perf_evsel__hw_cache[type][0],
				 perf_evsel__hw_cache_op[op][0],
				 perf_evsel__hw_cache_result[result][0]);

	return scnprintf(bf, size, "%s-%s", perf_evsel__hw_cache[type][0],
			 perf_evsel__hw_cache_op[op][1]);
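
/*
 * This renders either the full three-part name, e.g.
 * "L1-dcache-load-misses", or the shorter two-part form built from the
 * plural op alias, e.g. "L1-dcache-loads", when the result needs no
 * spelling out.
 */
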
static int __perf_evsel__hw_cache_name(u64 config, char *bf, size_t size)
	u8 op, result, type = (config >>  0) & 0xff;
	const char *err = "unknown-ext-hardware-cache-type";

	if (type >= PERF_COUNT_HW_CACHE_MAX)

	op = (config >>  8) & 0xff;
	err = "unknown-ext-hardware-cache-op";
	if (op >= PERF_COUNT_HW_CACHE_OP_MAX)

	result = (config >> 16) & 0xff;
	err = "unknown-ext-hardware-cache-result";
	if (result >= PERF_COUNT_HW_CACHE_RESULT_MAX)

	err = "invalid-cache";
	if (!perf_evsel__is_cache_op_valid(type, op))

	return __perf_evsel__hw_cache_type_op_res_name(type, op, result, bf, size);

	return scnprintf(bf, size, "%s", err);
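
/*
 * Per the perf_event ABI, the extended cache config packs the cache type
 * in bits 0-7, the operation in bits 8-15 and the result in bits 16-23;
 * e.g. L1D read misses are C(L1D) | (C(OP_READ) << 8) |
 * (C(RESULT_MISS) << 16).
 */
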
static int perf_evsel__hw_cache_name(struct perf_evsel *evsel, char *bf, size_t size)
	int ret = __perf_evsel__hw_cache_name(evsel->attr.config, bf, size);
	return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);

static int perf_evsel__raw_name(struct perf_evsel *evsel, char *bf, size_t size)
	int ret = scnprintf(bf, size, "raw 0x%" PRIx64, evsel->attr.config);
	return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);

const char *perf_evsel__name(struct perf_evsel *evsel)
	switch (evsel->attr.type) {
		perf_evsel__raw_name(evsel, bf, sizeof(bf));

	case PERF_TYPE_HARDWARE:
		perf_evsel__hw_name(evsel, bf, sizeof(bf));

	case PERF_TYPE_HW_CACHE:
		perf_evsel__hw_cache_name(evsel, bf, sizeof(bf));

	case PERF_TYPE_SOFTWARE:
		perf_evsel__sw_name(evsel, bf, sizeof(bf));

	case PERF_TYPE_TRACEPOINT:
		scnprintf(bf, sizeof(bf), "%s", "unknown tracepoint");

	case PERF_TYPE_BREAKPOINT:
		perf_evsel__bp_name(evsel, bf, sizeof(bf));

		scnprintf(bf, sizeof(bf), "unknown attr type: %d",

	evsel->name = strdup(bf);

	return evsel->name ?: "unknown";
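
/*
 * The name is built on first use and cached in evsel->name; if strdup()
 * fails we fall back to the literal "unknown".
 */
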
void perf_evsel__config(struct perf_evsel *evsel, struct perf_record_opts *opts,
			struct perf_evsel *first)
	struct perf_event_attr *attr = &evsel->attr;
	int track = !evsel->idx; /* only the first counter needs these */

	attr->sample_id_all = opts->sample_id_all_missing ? 0 : 1;
	attr->inherit	    = !opts->no_inherit;
	attr->read_format   = PERF_FORMAT_TOTAL_TIME_ENABLED |
			      PERF_FORMAT_TOTAL_TIME_RUNNING |

	attr->sample_type  |= PERF_SAMPLE_IP | PERF_SAMPLE_TID;

	 * We default some events to a sample period of 1, but keep
	 * that a weak assumption the user can override.
	if (!attr->sample_period || (opts->user_freq != UINT_MAX &&
				     opts->user_interval != ULLONG_MAX)) {
			attr->sample_type	|= PERF_SAMPLE_PERIOD;
			attr->sample_freq	= opts->freq;
			attr->sample_period	= opts->default_interval;

	if (opts->no_samples)
		attr->sample_freq = 0;

	if (opts->inherit_stat)
		attr->inherit_stat = 1;

	if (opts->sample_address) {
		attr->sample_type |= PERF_SAMPLE_ADDR;
		attr->mmap_data = track;

	if (opts->call_graph) {
		attr->sample_type |= PERF_SAMPLE_CALLCHAIN;

		if (opts->call_graph == CALLCHAIN_DWARF) {
			attr->sample_type |= PERF_SAMPLE_REGS_USER |
					     PERF_SAMPLE_STACK_USER;
			attr->sample_regs_user = PERF_REGS_MASK;
			attr->sample_stack_user = opts->stack_dump_size;
			attr->exclude_callchain_user = 1;

	if (perf_target__has_cpu(&opts->target))
		attr->sample_type |= PERF_SAMPLE_CPU;

		attr->sample_type |= PERF_SAMPLE_PERIOD;

	if (!opts->sample_id_all_missing &&
	    (opts->sample_time || !opts->no_inherit ||
	     perf_target__has_cpu(&opts->target)))
		attr->sample_type |= PERF_SAMPLE_TIME;

	if (opts->raw_samples) {
		attr->sample_type |= PERF_SAMPLE_TIME;
		attr->sample_type |= PERF_SAMPLE_RAW;
		attr->sample_type |= PERF_SAMPLE_CPU;

	if (opts->no_delay) {
		attr->wakeup_events = 1;

	if (opts->branch_stack) {
		attr->sample_type |= PERF_SAMPLE_BRANCH_STACK;
		attr->branch_sample_type = opts->branch_stack;

	if (perf_target__none(&opts->target) &&
	    (!opts->group || evsel == first)) {
		attr->enable_on_exec = 1;
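		/*
		 * The counters then stay disabled until the workload
		 * exec()s, so perf's own startup does not get measured.
		 */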

int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
	evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));
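
	/*
	 * Initialize every slot to -1, the "not opened" sentinel that
	 * __perf_evsel__read_on_cpu() and __perf_evsel__read() test for.
	 */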
	for (cpu = 0; cpu < ncpus; cpu++) {
		for (thread = 0; thread < nthreads; thread++) {
			FD(evsel, cpu, thread) = -1;

	return evsel->fd != NULL ? 0 : -ENOMEM;

int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
	evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));
	if (evsel->sample_id == NULL)

	evsel->id = zalloc(ncpus * nthreads * sizeof(u64));
	if (evsel->id == NULL) {
		xyarray__delete(evsel->sample_id);
		evsel->sample_id = NULL;

int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus)
	evsel->counts = zalloc((sizeof(*evsel->counts) +
				(ncpus * sizeof(struct perf_counts_values))));
	return evsel->counts != NULL ? 0 : -ENOMEM;

void perf_evsel__free_fd(struct perf_evsel *evsel)
	xyarray__delete(evsel->fd);

void perf_evsel__free_id(struct perf_evsel *evsel)
	xyarray__delete(evsel->sample_id);
	evsel->sample_id = NULL;

void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
	for (cpu = 0; cpu < ncpus; cpu++)
		for (thread = 0; thread < nthreads; ++thread) {
			close(FD(evsel, cpu, thread));
			FD(evsel, cpu, thread) = -1;

void perf_evsel__exit(struct perf_evsel *evsel)
	assert(list_empty(&evsel->node));
	xyarray__delete(evsel->fd);
	xyarray__delete(evsel->sample_id);

void perf_evsel__delete(struct perf_evsel *evsel)
	perf_evsel__exit(evsel);
	close_cgroup(evsel->cgrp);
	free(evsel->group_name);

int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
			      int cpu, int thread, bool scale)
	struct perf_counts_values count;
	size_t nv = scale ? 3 : 1;

	if (FD(evsel, cpu, thread) < 0)

	if (evsel->counts == NULL && perf_evsel__alloc_counts(evsel, cpu + 1) < 0)

	if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) < 0)

		else if (count.run < count.ena)
			count.val = (u64)((double)count.val * count.ena / count.run + 0.5);
		count.ena = count.run = 0;

	evsel->counts->cpu[cpu] = count;
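
/*
 * When the counter was multiplexed (run < ena), the raw value is scaled
 * up by ena/run, rounded to nearest, to estimate what a fully-scheduled
 * counter would have read.
 */
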
int __perf_evsel__read(struct perf_evsel *evsel,
		       int ncpus, int nthreads, bool scale)
	size_t nv = scale ? 3 : 1;
	struct perf_counts_values *aggr = &evsel->counts->aggr, count;

	aggr->val = aggr->ena = aggr->run = 0;

	for (cpu = 0; cpu < ncpus; cpu++) {
		for (thread = 0; thread < nthreads; thread++) {
			if (FD(evsel, cpu, thread) < 0)

			if (readn(FD(evsel, cpu, thread),
				  &count, nv * sizeof(u64)) < 0)

			aggr->val += count.val;

			aggr->ena += count.ena;
			aggr->run += count.run;

	evsel->counts->scaled = 0;

		if (aggr->run == 0) {
			evsel->counts->scaled = -1;

		if (aggr->run < aggr->ena) {
			evsel->counts->scaled = 1;
			aggr->val = (u64)((double)aggr->val * aggr->ena / aggr->run + 0.5);

		aggr->ena = aggr->run = 0;

static int get_group_fd(struct perf_evsel *evsel, int cpu, int thread)
	struct perf_evsel *leader = evsel->leader;

	 * Leader must already be processed/open.

	fd = FD(leader, cpu, thread);

static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
			      struct thread_map *threads)
	unsigned long flags = 0;

	if (evsel->fd == NULL &&
	    perf_evsel__alloc_fd(evsel, cpus->nr, threads->nr) < 0)

		flags = PERF_FLAG_PID_CGROUP;
		pid = evsel->cgrp->fd;

	for (cpu = 0; cpu < cpus->nr; cpu++) {

		for (thread = 0; thread < threads->nr; thread++) {

				pid = threads->map[thread];

			group_fd = get_group_fd(evsel, cpu, thread);

			FD(evsel, cpu, thread) = sys_perf_event_open(&evsel->attr,

			if (FD(evsel, cpu, thread) < 0) {
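				/*
				 * Open failed: fall through to the unwind
				 * below, which closes everything opened so
				 * far and resets the slots to -1.
				 */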
		while (--thread >= 0) {
			close(FD(evsel, cpu, thread));
			FD(evsel, cpu, thread) = -1;

		thread = threads->nr;
	} while (--cpu >= 0);

void perf_evsel__close(struct perf_evsel *evsel, int ncpus, int nthreads)
	if (evsel->fd == NULL)

	perf_evsel__close_fd(evsel, ncpus, nthreads);
	perf_evsel__free_fd(evsel);

	struct thread_map map;
} empty_thread_map = {

int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
		     struct thread_map *threads)
		/* Work around old compiler warnings about strict aliasing */
		cpus = &empty_cpu_map.map;

		threads = &empty_thread_map.map;

	return __perf_evsel__open(evsel, cpus, threads);

int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
			     struct cpu_map *cpus)
	return __perf_evsel__open(evsel, cpus, &empty_thread_map.map);

int perf_evsel__open_per_thread(struct perf_evsel *evsel,
				struct thread_map *threads)
	return __perf_evsel__open(evsel, &empty_cpu_map.map, threads);
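
/*
 * A minimal lifecycle sketch (illustrative only; "attr" and a populated
 * "threads" map are assumed, error handling elided):
 *
 *	struct perf_evsel *evsel = perf_evsel__new(&attr, 0);
 *
 *	perf_evsel__open_per_thread(evsel, threads);
 *	__perf_evsel__read_on_cpu(evsel, 0, 0, true);
 *	perf_evsel__close(evsel, 1, threads->nr);
 *	perf_evsel__delete(evsel);
 */
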
static int perf_event__parse_id_sample(const union perf_event *event, u64 type,
				       struct perf_sample *sample,
	const u64 *array = event->sample.array;

	array += ((event->header.size -
		   sizeof(event->header)) / sizeof(u64)) - 1;
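
	/*
	 * The id sample is appended at the tail of non-sample events, so
	 * position at the last u64 and walk backwards, peeling fields off
	 * in reverse of their on-disk order.
	 */
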
	if (type & PERF_SAMPLE_CPU) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);

		sample->cpu = u.val32[0];

	if (type & PERF_SAMPLE_STREAM_ID) {
		sample->stream_id = *array;

	if (type & PERF_SAMPLE_ID) {

	if (type & PERF_SAMPLE_TIME) {
		sample->time = *array;

	if (type & PERF_SAMPLE_TID) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);

		sample->pid = u.val32[0];
		sample->tid = u.val32[1];

static bool sample_overlap(const union perf_event *event,
			   const void *offset, u64 size)
	const void *base = event;
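
	/*
	 * Reject any field that would extend past the end of the event as
	 * declared by header.size, i.e. a malformed or truncated record.
	 */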
	if (offset + size > base + event->header.size)

int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
			     struct perf_sample *data, bool swapped)
	u64 type = evsel->attr.sample_type;
	u64 regs_user = evsel->attr.sample_regs_user;

	 * used for cross-endian analysis. See git commit 65014ab3
	 * for why this goofiness is needed.

	memset(data, 0, sizeof(*data));
	data->cpu = data->pid = data->tid = -1;
	data->stream_id = data->id = data->time = -1ULL;

	if (event->header.type != PERF_RECORD_SAMPLE) {
		if (!evsel->attr.sample_id_all)
		return perf_event__parse_id_sample(event, type, data, swapped);

	array = event->sample.array;

	if (evsel->sample_size + sizeof(event->header) > event->header.size)

	if (type & PERF_SAMPLE_IP) {
		data->ip = event->ip.ip;

	if (type & PERF_SAMPLE_TID) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);

		data->pid = u.val32[0];
		data->tid = u.val32[1];

	if (type & PERF_SAMPLE_TIME) {

	if (type & PERF_SAMPLE_ADDR) {

	if (type & PERF_SAMPLE_ID) {

	if (type & PERF_SAMPLE_STREAM_ID) {
		data->stream_id = *array;

	if (type & PERF_SAMPLE_CPU) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);

		data->cpu = u.val32[0];

	if (type & PERF_SAMPLE_PERIOD) {
		data->period = *array;

	if (type & PERF_SAMPLE_READ) {
		fprintf(stderr, "PERF_SAMPLE_READ is unsupported for now\n");

	if (type & PERF_SAMPLE_CALLCHAIN) {
		if (sample_overlap(event, array, sizeof(data->callchain->nr)))

		data->callchain = (struct ip_callchain *)array;

		if (sample_overlap(event, array, data->callchain->nr))

		array += 1 + data->callchain->nr;
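
	/*
	 * A callchain is a u64 count followed by that many instruction
	 * pointers, hence the 1 + nr stride.
	 */
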
	if (type & PERF_SAMPLE_RAW) {
		if (WARN_ONCE(swapped,
			      "Endianness of raw data not corrected!\n")) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);

		if (sample_overlap(event, array, sizeof(u32)))

		data->raw_size = u.val32[0];
		pdata = (void *) array + sizeof(u32);

		if (sample_overlap(event, pdata, data->raw_size))

		data->raw_data = (void *) pdata;

		array = (void *)array + data->raw_size + sizeof(u32);
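
	/*
	 * Raw data is a u32 size followed by the payload; advancing by
	 * raw_size + sizeof(u32) skips both. The kernel sizes the record
	 * so that the next field stays u64-aligned.
	 */
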
	if (type & PERF_SAMPLE_BRANCH_STACK) {
		data->branch_stack = (struct branch_stack *)array;

		sz = data->branch_stack->nr * sizeof(struct branch_entry);

	if (type & PERF_SAMPLE_REGS_USER) {
		/* First u64 tells us if we have any regs in sample. */
		u64 avail = *array++;

			data->user_regs.regs = (u64 *)array;
			array += hweight_long(regs_user);
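
	/*
	 * The kernel records one u64 per bit set in attr.sample_regs_user,
	 * so the register block is hweight_long(regs_user) words long.
	 */
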
	if (type & PERF_SAMPLE_STACK_USER) {
		data->user_stack.offset = ((char *)(array - 1)

			data->user_stack.size = 0;

			data->user_stack.data = (char *)array;
			array += size / sizeof(*array);
			data->user_stack.size = *array;
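
	/*
	 * A user stack dump is laid out as: requested size, the stack
	 * bytes, then a trailing u64 holding the size actually captured.
	 */
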
int perf_event__synthesize_sample(union perf_event *event, u64 type,
				  const struct perf_sample *sample,

	 * used for cross-endian analysis. See git commit 65014ab3
	 * for why this goofiness is needed.

	array = event->sample.array;

	if (type & PERF_SAMPLE_IP) {
		event->ip.ip = sample->ip;

	if (type & PERF_SAMPLE_TID) {
		u.val32[0] = sample->pid;
		u.val32[1] = sample->tid;

			 * Inverse of what is done in perf_evsel__parse_sample

			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
			u.val64 = bswap_64(u.val64);

	if (type & PERF_SAMPLE_TIME) {
		*array = sample->time;

	if (type & PERF_SAMPLE_ADDR) {
		*array = sample->addr;

	if (type & PERF_SAMPLE_ID) {

	if (type & PERF_SAMPLE_STREAM_ID) {
		*array = sample->stream_id;

	if (type & PERF_SAMPLE_CPU) {
		u.val32[0] = sample->cpu;

			 * Inverse of what is done in perf_evsel__parse_sample

			u.val32[0] = bswap_32(u.val32[0]);
			u.val64 = bswap_64(u.val64);

	if (type & PERF_SAMPLE_PERIOD) {
		*array = sample->period;