/*
 * intel_pt.c: Intel Processor Trace support
 * Copyright (c) 2013-2015, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/kernel.h>
#include <linux/types.h>

#include "thread-stack.h"
#include "callchain.h"

#include "intel-pt-decoder/intel-pt-log.h"
#include "intel-pt-decoder/intel-pt-decoder.h"
#include "intel-pt-decoder/intel-pt-insn-decoder.h"
#include "intel-pt-decoder/intel-pt-pkt-decoder.h"

#define MAX_TIMESTAMP (~0ULL)

struct intel_pt {
	struct auxtrace auxtrace;
	struct auxtrace_queues queues;
	struct auxtrace_heap heap;
	u32 auxtrace_type;
	struct perf_session *session;
	struct machine *machine;
	struct perf_evsel *switch_evsel;
	struct thread *unknown_thread;
	bool timeless_decoding;
	bool sampling_mode;
	bool snapshot_mode;
	bool per_cpu_mmaps;
	bool have_tsc;
	bool data_queued;
	bool est_tsc;
	bool sync_switch;
	bool mispred_all;
	int have_sched_switch;
	u32 pmu_type;
	u64 kernel_start;
	u64 switch_ip;
	u64 ptss_ip;

	struct perf_tsc_conversion tc;
	bool cap_user_time_zero;

	struct itrace_synth_opts synth_opts;

	bool sample_instructions;
	u64 instructions_sample_type;
	u64 instructions_id;

	bool sample_branches;
	u32 branches_filter;
	u64 branches_sample_type;
	u64 branches_id;

	bool sample_transactions;
	u64 transactions_sample_type;
	u64 transactions_id;

	bool sample_ptwrites;
	u64 ptwrites_sample_type;
	u64 ptwrites_id;

	bool sample_pwr_events;
	u64 pwr_events_sample_type;
	u64 cbr_id;
	u64 mwait_id;
	u64 pwre_id;
	u64 exstop_id;
	u64 pwrx_id;

	bool synth_needs_swap;

	u64 tsc_bit;
	u64 mtc_bit;
	u64 mtc_freq_bits;
	u64 tsc_ctc_ratio_n;
	u64 tsc_ctc_ratio_d;
	u64 cyc_bit;
	u64 noretcomp_bit;
	unsigned max_non_turbo_ratio;
	unsigned cbr2khz;

	unsigned long num_events;

	char *filter;
	struct addr_filters filts;
};

enum switch_state {
	INTEL_PT_SS_NOT_TRACING,
	INTEL_PT_SS_UNKNOWN,
	INTEL_PT_SS_TRACING,
	INTEL_PT_SS_EXPECTING_SWITCH_EVENT,
	INTEL_PT_SS_EXPECTING_SWITCH_IP,
};

struct intel_pt_queue {
	struct intel_pt *pt;
	unsigned int queue_nr;
	struct auxtrace_buffer *buffer;
	void *decoder;
	const struct intel_pt_state *state;
	struct ip_callchain *chain;
	struct branch_stack *last_branch;
	struct branch_stack *last_branch_rb;
	size_t last_branch_pos;
	union perf_event *event_buf;
	bool on_heap;
	bool stop;
	bool step_through_buffers;
	bool use_buffer_pid_tid;
	bool sync_switch;
	pid_t pid, tid;
	int cpu;
	int switch_state;
	pid_t next_tid;
	struct thread *thread;
	bool exclude_kernel;
	bool have_sample;
	u64 time;
	u64 timestamp;
	u32 flags;
	u16 insn_len;
	u64 last_insn_cnt;
	char insn[INTEL_PT_INSN_BUF_SZ];
};

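/*
 * Hex-dump trace data, decoding and printing one packet per line
 * (used for 'perf report -D' / 'perf script -D' style dumping).
 */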
static void intel_pt_dump(struct intel_pt *pt __maybe_unused,
			  unsigned char *buf, size_t len)
{
	struct intel_pt_pkt packet;
	size_t pos = 0;
	int ret, pkt_len, i;
	char desc[INTEL_PT_PKT_DESC_MAX];
	const char *color = PERF_COLOR_BLUE;

	color_fprintf(stdout, color,
		      ". ... Intel Processor Trace data: size %zu bytes\n",
		      len);

	while (len) {
		ret = intel_pt_get_packet(buf, len, &packet);
		if (ret > 0)
			pkt_len = ret;
		else
			pkt_len = 1;
		printf(".");
		color_fprintf(stdout, color, "  %08x: ", pos);
		for (i = 0; i < pkt_len; i++)
			color_fprintf(stdout, color, " %02x", buf[i]);
		for (; i < 16; i++)
			color_fprintf(stdout, color, "   ");
		if (ret > 0) {
			ret = intel_pt_pkt_desc(&packet, desc,
						INTEL_PT_PKT_DESC_MAX);
			if (ret > 0)
				color_fprintf(stdout, color, " %s\n", desc);
		} else {
			color_fprintf(stdout, color, " Bad packet!\n");
			break;
		}
		pos += pkt_len;
		buf += pkt_len;
		len -= pkt_len;
	}
}

static void intel_pt_dump_event(struct intel_pt *pt, unsigned char *buf,
				size_t len)
{
	printf(".\n");
	intel_pt_dump(pt, buf, len);
}

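/*
 * In snapshot mode successive buffers can overlap; trim buffer 'b' so that
 * it starts where its overlap with buffer 'a' ends.
 */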
static int intel_pt_do_fix_overlap(struct intel_pt *pt, struct auxtrace_buffer *a,
				   struct auxtrace_buffer *b)
{
	bool consecutive = false;
	void *start;

	start = intel_pt_find_overlap(a->data, a->size, b->data, b->size,
				      pt->have_tsc, &consecutive);
	if (!start)
		return -EINVAL;
	b->use_size = b->data + b->size - start;
	b->use_data = start;
	if (b->use_size && consecutive)
		b->consecutive = true;
	return 0;
}

static void intel_pt_use_buffer_pid_tid(struct intel_pt_queue *ptq,
					struct auxtrace_queue *queue,
					struct auxtrace_buffer *buffer)
{
	if (queue->cpu == -1 && buffer->cpu != -1)
		ptq->cpu = buffer->cpu;

	ptq->pid = buffer->pid;
	ptq->tid = buffer->tid;

	intel_pt_log("queue %u cpu %d pid %d tid %d\n",
		     ptq->queue_nr, ptq->cpu, ptq->pid, ptq->tid);

	thread__zput(ptq->thread);

	if (ptq->tid != -1) {
		if (ptq->pid != -1)
			ptq->thread = machine__findnew_thread(ptq->pt->machine,
							      ptq->pid,
							      ptq->tid);
		else
			ptq->thread = machine__find_thread(ptq->pt->machine, -1,
							   ptq->tid);
	}
}

/* This function assumes data is processed sequentially only */
static int intel_pt_get_trace(struct intel_pt_buffer *b, void *data)
{
	struct intel_pt_queue *ptq = data;
	struct auxtrace_buffer *buffer = ptq->buffer, *old_buffer = buffer;
	struct auxtrace_queue *queue;

	if (ptq->stop) {
		b->len = 0;
		return 0;
	}

	queue = &ptq->pt->queues.queue_array[ptq->queue_nr];
next:
	buffer = auxtrace_buffer__next(queue, buffer);
	if (!buffer) {
		if (old_buffer)
			auxtrace_buffer__drop_data(old_buffer);
		b->len = 0;
		return 0;
	}

	ptq->buffer = buffer;

	if (!buffer->data) {
		int fd = perf_data_file__fd(ptq->pt->session->file);

		buffer->data = auxtrace_buffer__get_data(buffer, fd);
		if (!buffer->data)
			return -ENOMEM;
	}

	if (ptq->pt->snapshot_mode && !buffer->consecutive && old_buffer &&
	    intel_pt_do_fix_overlap(ptq->pt, old_buffer, buffer))
		return -ENOMEM;

	if (buffer->use_data) {
		b->len = buffer->use_size;
		b->buf = buffer->use_data;
	} else {
		b->len = buffer->size;
		b->buf = buffer->data;
	}
	b->ref_timestamp = buffer->reference;

	/*
	 * If in snapshot mode and the buffer has no usable data, get next
	 * buffer and again check overlap against old_buffer.
	 */
	if (ptq->pt->snapshot_mode && !b->len)
		goto next;

	if (old_buffer)
		auxtrace_buffer__drop_data(old_buffer);

	if (!old_buffer || ptq->pt->sampling_mode || (ptq->pt->snapshot_mode &&
						      !buffer->consecutive)) {
		b->consecutive = false;
		b->trace_nr = buffer->buffer_nr + 1;
	} else {
		b->consecutive = true;
	}

	if (ptq->use_buffer_pid_tid && (ptq->pid != buffer->pid ||
					ptq->tid != buffer->tid))
		intel_pt_use_buffer_pid_tid(ptq, queue, buffer);

	if (ptq->step_through_buffers)
		ptq->stop = true;

	if (!b->len)
		return intel_pt_get_trace(b, data);

	return 0;
}

struct intel_pt_cache_entry {
	struct auxtrace_cache_entry	entry;
	u64				insn_cnt;
	u64				byte_cnt;
	enum intel_pt_insn_op		op;
	enum intel_pt_insn_branch	branch;
	int				length;
	int32_t				rel;
	char				insn[INTEL_PT_INSN_BUF_SZ];
};

static int intel_pt_config_div(const char *var, const char *value, void *data)
{
	int *d = data;
	long val;

	if (!strcmp(var, "intel-pt.cache-divisor")) {
		val = strtol(value, NULL, 0);
		if (val > 0 && val <= INT_MAX)
			*d = val;
	}

	return 0;
}

static int intel_pt_cache_divisor(void)
{
	static int d;

	if (d)
		return d;

	perf_config(intel_pt_config_div, &d);

	if (!d)
		d = 64;

	return d;
}

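/*
 * Size the per-DSO instruction cache as a number of hash bits derived from
 * the DSO data size scaled down by the configurable divisor, clamped to the
 * range 2^10 to 2^21 entries.
 */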
static unsigned int intel_pt_cache_size(struct dso *dso,
					struct machine *machine)
{
	off_t size;

	size = dso__data_size(dso, machine);
	size /= intel_pt_cache_divisor();
	if (size < 1000)
		return 10;
	if (size > (1 << 21))
		return 21;
	return 32 - __builtin_clz(size);
}

static struct auxtrace_cache *intel_pt_cache(struct dso *dso,
					     struct machine *machine)
{
	struct auxtrace_cache *c;
	unsigned int bits;

	if (dso->auxtrace_cache)
		return dso->auxtrace_cache;

	bits = intel_pt_cache_size(dso, machine);

	/* Ignoring cache creation failure */
	c = auxtrace_cache__new(bits, sizeof(struct intel_pt_cache_entry), 200);

	dso->auxtrace_cache = c;

	return c;
}

static int intel_pt_cache_add(struct dso *dso, struct machine *machine,
			      u64 offset, u64 insn_cnt, u64 byte_cnt,
			      struct intel_pt_insn *intel_pt_insn)
{
	struct auxtrace_cache *c = intel_pt_cache(dso, machine);
	struct intel_pt_cache_entry *e;
	int err;

	if (!c)
		return -ENOMEM;

	e = auxtrace_cache__alloc_entry(c);
	if (!e)
		return -ENOMEM;

	e->insn_cnt = insn_cnt;
	e->byte_cnt = byte_cnt;
	e->op = intel_pt_insn->op;
	e->branch = intel_pt_insn->branch;
	e->length = intel_pt_insn->length;
	e->rel = intel_pt_insn->rel;
	memcpy(e->insn, intel_pt_insn->buf, INTEL_PT_INSN_BUF_SZ);

	err = auxtrace_cache__add(c, offset, &e->entry);
	if (err)
		auxtrace_cache__free_entry(c, e);

	return err;
}

static struct intel_pt_cache_entry *
intel_pt_cache_lookup(struct dso *dso, struct machine *machine, u64 offset)
{
	struct auxtrace_cache *c = intel_pt_cache(dso, machine);

	if (!c)
		return NULL;

	return auxtrace_cache__lookup(dso->auxtrace_cache, offset);
}

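/*
 * Decoder callback: walk object code from *ip, instruction by instruction,
 * until a branch, 'to_ip' or 'max_insn_cnt' is reached.  Walked ranges are
 * cached per DSO so the same code does not have to be decoded repeatedly.
 */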
static int intel_pt_walk_next_insn(struct intel_pt_insn *intel_pt_insn,
				   uint64_t *insn_cnt_ptr, uint64_t *ip,
				   uint64_t to_ip, uint64_t max_insn_cnt,
				   void *data)
{
	struct intel_pt_queue *ptq = data;
	struct machine *machine = ptq->pt->machine;
	struct thread *thread;
	struct addr_location al;
	unsigned char buf[INTEL_PT_INSN_BUF_SZ];
	ssize_t len;
	int x86_64;
	u8 cpumode;
	u64 offset, start_offset, start_ip;
	u64 insn_cnt = 0;
	bool one_map = true;

	intel_pt_insn->length = 0;

	if (to_ip && *ip == to_ip)
		goto out_no_cache;

	if (*ip >= ptq->pt->kernel_start)
		cpumode = PERF_RECORD_MISC_KERNEL;
	else
		cpumode = PERF_RECORD_MISC_USER;

	thread = ptq->thread;
	if (!thread) {
		if (cpumode != PERF_RECORD_MISC_KERNEL)
			return -EINVAL;
		thread = ptq->pt->unknown_thread;
	}

	while (1) {
		thread__find_addr_map(thread, cpumode, MAP__FUNCTION, *ip, &al);
		if (!al.map || !al.map->dso)
			return -EINVAL;

		if (al.map->dso->data.status == DSO_DATA_STATUS_ERROR &&
		    dso__data_status_seen(al.map->dso,
					  DSO_DATA_STATUS_SEEN_ITRACE))
			return -ENOENT;

		offset = al.map->map_ip(al.map, *ip);

		if (!to_ip && one_map) {
			struct intel_pt_cache_entry *e;

			e = intel_pt_cache_lookup(al.map->dso, machine, offset);
			if (e &&
			    (!max_insn_cnt || e->insn_cnt <= max_insn_cnt)) {
				*insn_cnt_ptr = e->insn_cnt;
				*ip += e->byte_cnt;
				intel_pt_insn->op = e->op;
				intel_pt_insn->branch = e->branch;
				intel_pt_insn->length = e->length;
				intel_pt_insn->rel = e->rel;
				memcpy(intel_pt_insn->buf, e->insn,
				       INTEL_PT_INSN_BUF_SZ);
				intel_pt_log_insn_no_data(intel_pt_insn, *ip);
				return 0;
			}
		}

		start_offset = offset;
		start_ip = *ip;

		/* Load maps to ensure dso->is_64_bit has been updated */
		map__load(al.map);

		x86_64 = al.map->dso->is_64_bit;

		while (1) {
			len = dso__data_read_offset(al.map->dso, machine,
						    offset, buf,
						    INTEL_PT_INSN_BUF_SZ);
			if (len <= 0)
				return -EINVAL;

			if (intel_pt_get_insn(buf, len, x86_64, intel_pt_insn))
				return -EINVAL;

			intel_pt_log_insn(intel_pt_insn, *ip);

			insn_cnt += 1;

			if (intel_pt_insn->branch != INTEL_PT_BR_NO_BRANCH)
				goto out;

			if (max_insn_cnt && insn_cnt >= max_insn_cnt)
				goto out_no_cache;

			*ip += intel_pt_insn->length;

			if (to_ip && *ip == to_ip)
				goto out_no_cache;

			if (*ip >= al.map->end)
				break;

			offset += intel_pt_insn->length;
		}
		one_map = false;
	}
out:
	*insn_cnt_ptr = insn_cnt;

	if (!one_map)
		goto out_no_cache;

	/*
	 * Didn't lookup in the 'to_ip' case, so do it now to prevent duplicate
	 * entries.
	 */
	if (to_ip) {
		struct intel_pt_cache_entry *e;

		e = intel_pt_cache_lookup(al.map->dso, machine, start_offset);
		if (e)
			return 0;
	}

	/* Ignore cache errors */
	intel_pt_cache_add(al.map->dso, machine, start_offset, insn_cnt,
			   *ip - start_ip, intel_pt_insn);

	return 0;

out_no_cache:
	*insn_cnt_ptr = insn_cnt;
	return 0;
}

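/*
 * Check a TIP.PGD destination against the address filters.  Returns true if
 * tracing stopped there: either a tracestop region was hit, or filter
 * regions exist and none of them were hit.
 */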
static bool intel_pt_match_pgd_ip(struct intel_pt *pt, uint64_t ip,
				  uint64_t offset, const char *filename)
{
	struct addr_filter *filt;
	bool have_filter   = false;
	bool hit_tracestop = false;
	bool hit_filter    = false;

	list_for_each_entry(filt, &pt->filts.head, list) {
		if (filt->start)
			have_filter = true;

		if ((filename && !filt->filename) ||
		    (!filename && filt->filename) ||
		    (filename && strcmp(filename, filt->filename)))
			continue;

		if (!(offset >= filt->addr && offset < filt->addr + filt->size))
			continue;

		intel_pt_log("TIP.PGD ip %#"PRIx64" offset %#"PRIx64" in %s hit filter: %s offset %#"PRIx64" size %#"PRIx64"\n",
			     ip, offset, filename ? filename : "[kernel]",
			     filt->start ? "filter" : "stop",
			     filt->addr, filt->size);

		if (filt->start)
			hit_filter = true;
		else
			hit_tracestop = true;
	}

	if (!hit_tracestop && !hit_filter)
		intel_pt_log("TIP.PGD ip %#"PRIx64" offset %#"PRIx64" in %s is not in a filter region\n",
			     ip, offset, filename ? filename : "[kernel]");

	return hit_tracestop || (have_filter && !hit_filter);
}

static int __intel_pt_pgd_ip(uint64_t ip, void *data)
{
	struct intel_pt_queue *ptq = data;
	struct thread *thread;
	struct addr_location al;
	u8 cpumode;
	u64 offset;

	if (ip >= ptq->pt->kernel_start)
		return intel_pt_match_pgd_ip(ptq->pt, ip, ip, NULL);

	cpumode = PERF_RECORD_MISC_USER;

	thread = ptq->thread;
	if (!thread)
		return -EINVAL;

	thread__find_addr_map(thread, cpumode, MAP__FUNCTION, ip, &al);
	if (!al.map || !al.map->dso)
		return -EINVAL;

	offset = al.map->map_ip(al.map, ip);

	return intel_pt_match_pgd_ip(ptq->pt, ip, offset,
				     al.map->dso->long_name);
}

static bool intel_pt_pgd_ip(uint64_t ip, void *data)
{
	return __intel_pt_pgd_ip(ip, data) > 0;
}

static bool intel_pt_get_config(struct intel_pt *pt,
				struct perf_event_attr *attr, u64 *config)
{
	if (attr->type == pt->pmu_type) {
		if (config)
			*config = attr->config;
		return true;
	}

	return false;
}

static bool intel_pt_exclude_kernel(struct intel_pt *pt)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->attr, NULL) &&
		    !evsel->attr.exclude_kernel)
			return false;
	}
	return true;
}

static bool intel_pt_return_compression(struct intel_pt *pt)
{
	struct perf_evsel *evsel;
	u64 config;

	if (!pt->noretcomp_bit)
		return true;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->attr, &config) &&
		    (config & pt->noretcomp_bit))
			return false;
	}
	return true;
}

static bool intel_pt_branch_enable(struct intel_pt *pt)
{
	struct perf_evsel *evsel;
	u64 config;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->attr, &config) &&
		    (config & 1) && !(config & 0x2000))
			return true;
	}
	return false;
}

static unsigned int intel_pt_mtc_period(struct intel_pt *pt)
{
	struct perf_evsel *evsel;
	unsigned int shift;
	u64 config;

	if (!pt->mtc_freq_bits)
		return 0;

	for (shift = 0, config = pt->mtc_freq_bits; !(config & 1); shift++)
		config >>= 1;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->attr, &config))
			return (config & pt->mtc_freq_bits) >> shift;
	}
	return 0;
}

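/*
 * Decoding is "timeless" when timestamps cannot be used: either TSC packets
 * were not enabled in the trace, or the sampled events do not record time.
 */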
static bool intel_pt_timeless_decoding(struct intel_pt *pt)
{
	struct perf_evsel *evsel;
	bool timeless_decoding = true;
	u64 config;

	if (!pt->tsc_bit || !pt->cap_user_time_zero)
		return true;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (!(evsel->attr.sample_type & PERF_SAMPLE_TIME))
			return true;
		if (intel_pt_get_config(pt, &evsel->attr, &config)) {
			if (config & pt->tsc_bit)
				timeless_decoding = false;
			else
				return true;
		}
	}
	return timeless_decoding;
}

static bool intel_pt_tracing_kernel(struct intel_pt *pt)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->attr, NULL) &&
		    !evsel->attr.exclude_kernel)
			return true;
	}
	return false;
}

static bool intel_pt_have_tsc(struct intel_pt *pt)
{
	struct perf_evsel *evsel;
	bool have_tsc = false;
	u64 config;

	if (!pt->tsc_bit)
		return false;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->attr, &config)) {
			if (config & pt->tsc_bit)
				have_tsc = true;
			else
				return false;
		}
	}
	return have_tsc;
}

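/*
 * Convert nanoseconds to TSC ticks, the inverse of tsc_to_perf_time().
 * Splitting into quotient and remainder by time_mult avoids overflow when
 * shifting.
 */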
static u64 intel_pt_ns_to_ticks(const struct intel_pt *pt, u64 ns)
{
	u64 quot, rem;

	quot = ns / pt->tc.time_mult;
	rem  = ns % pt->tc.time_mult;
	return (quot << pt->tc.time_shift) + (rem << pt->tc.time_shift) /
		pt->tc.time_mult;
}

static struct intel_pt_queue *intel_pt_alloc_queue(struct intel_pt *pt,
						   unsigned int queue_nr)
{
	struct intel_pt_params params = { .get_trace = 0, };
	struct intel_pt_queue *ptq;

	ptq = zalloc(sizeof(struct intel_pt_queue));
	if (!ptq)
		return NULL;

	if (pt->synth_opts.callchain) {
		size_t sz = sizeof(struct ip_callchain);

		sz += pt->synth_opts.callchain_sz * sizeof(u64);
		ptq->chain = zalloc(sz);
		if (!ptq->chain)
			goto out_free;
	}

	if (pt->synth_opts.last_branch) {
		size_t sz = sizeof(struct branch_stack);

		sz += pt->synth_opts.last_branch_sz *
		      sizeof(struct branch_entry);
		ptq->last_branch = zalloc(sz);
		if (!ptq->last_branch)
			goto out_free;
		ptq->last_branch_rb = zalloc(sz);
		if (!ptq->last_branch_rb)
			goto out_free;
	}

	ptq->event_buf = malloc(PERF_SAMPLE_MAX_SIZE);
	if (!ptq->event_buf)
		goto out_free;

	ptq->pt = pt;
	ptq->queue_nr = queue_nr;
	ptq->exclude_kernel = intel_pt_exclude_kernel(pt);
	ptq->pid = -1;
	ptq->tid = -1;
	ptq->cpu = -1;
	ptq->next_tid = -1;

	params.get_trace = intel_pt_get_trace;
	params.walk_insn = intel_pt_walk_next_insn;
	params.data = ptq;
	params.return_compression = intel_pt_return_compression(pt);
	params.branch_enable = intel_pt_branch_enable(pt);
	params.max_non_turbo_ratio = pt->max_non_turbo_ratio;
	params.mtc_period = intel_pt_mtc_period(pt);
	params.tsc_ctc_ratio_n = pt->tsc_ctc_ratio_n;
	params.tsc_ctc_ratio_d = pt->tsc_ctc_ratio_d;

	if (pt->filts.cnt > 0)
		params.pgd_ip = intel_pt_pgd_ip;

	if (pt->synth_opts.instructions) {
		if (pt->synth_opts.period) {
			switch (pt->synth_opts.period_type) {
			case PERF_ITRACE_PERIOD_INSTRUCTIONS:
				params.period_type =
						INTEL_PT_PERIOD_INSTRUCTIONS;
				params.period = pt->synth_opts.period;
				break;
			case PERF_ITRACE_PERIOD_TICKS:
				params.period_type = INTEL_PT_PERIOD_TICKS;
				params.period = pt->synth_opts.period;
				break;
			case PERF_ITRACE_PERIOD_NANOSECS:
				params.period_type = INTEL_PT_PERIOD_TICKS;
				params.period = intel_pt_ns_to_ticks(pt,
							pt->synth_opts.period);
				break;
			default:
				break;
			}
		}

		if (!params.period) {
			params.period_type = INTEL_PT_PERIOD_INSTRUCTIONS;
			params.period = 1;
		}
	}

	ptq->decoder = intel_pt_decoder_new(&params);
	if (!ptq->decoder)
		goto out_free;

	return ptq;

out_free:
	zfree(&ptq->event_buf);
	zfree(&ptq->last_branch);
	zfree(&ptq->last_branch_rb);
	zfree(&ptq->chain);
	free(ptq);
	return NULL;
}

static void intel_pt_free_queue(void *priv)
{
	struct intel_pt_queue *ptq = priv;

	if (!ptq)
		return;
	thread__zput(ptq->thread);
	intel_pt_decoder_free(ptq->decoder);
	zfree(&ptq->event_buf);
	zfree(&ptq->last_branch);
	zfree(&ptq->last_branch_rb);
	zfree(&ptq->chain);
	free(ptq);
}

static void intel_pt_set_pid_tid_cpu(struct intel_pt *pt,
				     struct auxtrace_queue *queue)
{
	struct intel_pt_queue *ptq = queue->priv;

	if (queue->tid == -1 || pt->have_sched_switch) {
		ptq->tid = machine__get_current_tid(pt->machine, ptq->cpu);
		thread__zput(ptq->thread);
	}

	if (!ptq->thread && ptq->tid != -1)
		ptq->thread = machine__find_thread(pt->machine, -1, ptq->tid);

	if (ptq->thread) {
		ptq->pid = ptq->thread->pid_;
		if (queue->cpu == -1)
			ptq->cpu = ptq->thread->cpu;
	}
}

static void intel_pt_sample_flags(struct intel_pt_queue *ptq)
{
	if (ptq->state->flags & INTEL_PT_ABORT_TX) {
		ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_TX_ABORT;
	} else if (ptq->state->flags & INTEL_PT_ASYNC) {
		if (ptq->state->to_ip)
			ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL |
				     PERF_IP_FLAG_ASYNC |
				     PERF_IP_FLAG_INTERRUPT;
		else
			ptq->flags = PERF_IP_FLAG_BRANCH |
				     PERF_IP_FLAG_TRACE_END;
		ptq->insn_len = 0;
	} else {
		if (ptq->state->from_ip)
			ptq->flags = intel_pt_insn_type(ptq->state->insn_op);
		else
			ptq->flags = PERF_IP_FLAG_BRANCH |
				     PERF_IP_FLAG_TRACE_BEGIN;
		if (ptq->state->flags & INTEL_PT_IN_TX)
			ptq->flags |= PERF_IP_FLAG_IN_TX;
		ptq->insn_len = ptq->state->insn_len;
		memcpy(ptq->insn, ptq->state->insn, INTEL_PT_INSN_BUF_SZ);
	}
}

static int intel_pt_setup_queue(struct intel_pt *pt,
				struct auxtrace_queue *queue,
				unsigned int queue_nr)
{
	struct intel_pt_queue *ptq = queue->priv;

	if (list_empty(&queue->head))
		return 0;

	if (!ptq) {
		ptq = intel_pt_alloc_queue(pt, queue_nr);
		if (!ptq)
			return -ENOMEM;
		queue->priv = ptq;

		if (queue->cpu != -1)
			ptq->cpu = queue->cpu;
		ptq->tid = queue->tid;

		if (pt->sampling_mode) {
			if (pt->timeless_decoding)
				ptq->step_through_buffers = true;
			if (pt->timeless_decoding || !pt->have_sched_switch)
				ptq->use_buffer_pid_tid = true;
		}

		ptq->sync_switch = pt->sync_switch;
	}

	if (!ptq->on_heap &&
	    (!ptq->sync_switch ||
	     ptq->switch_state != INTEL_PT_SS_EXPECTING_SWITCH_EVENT)) {
		const struct intel_pt_state *state;
		int ret;

		if (pt->timeless_decoding)
			return 0;

		intel_pt_log("queue %u getting timestamp\n", queue_nr);
		intel_pt_log("queue %u decoding cpu %d pid %d tid %d\n",
			     queue_nr, ptq->cpu, ptq->pid, ptq->tid);
		while (1) {
			state = intel_pt_decode(ptq->decoder);
			if (state->err) {
				if (state->err == INTEL_PT_ERR_NODATA) {
					intel_pt_log("queue %u has no timestamp\n",
						     queue_nr);
					return 0;
				}
				continue;
			}
			if (state->timestamp)
				break;
		}

		ptq->timestamp = state->timestamp;
		intel_pt_log("queue %u timestamp 0x%" PRIx64 "\n",
			     queue_nr, ptq->timestamp);
		ptq->state = state;
		ptq->have_sample = true;
		intel_pt_sample_flags(ptq);
		ret = auxtrace_heap__add(&pt->heap, queue_nr, ptq->timestamp);
		if (ret)
			return ret;
		ptq->on_heap = true;
	}

	return 0;
}

static int intel_pt_setup_queues(struct intel_pt *pt)
{
	unsigned int i;
	int ret;

	for (i = 0; i < pt->queues.nr_queues; i++) {
		ret = intel_pt_setup_queue(pt, &pt->queues.queue_array[i], i);
		if (ret)
			return ret;
	}
	return 0;
}

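/*
 * Flatten the last-branch ring buffer into the branch stack used for a
 * sample.  The ring is ordered newest-first starting at last_branch_pos, so
 * the copy is done in two chunks to handle wrap-around.
 */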
static inline void intel_pt_copy_last_branch_rb(struct intel_pt_queue *ptq)
{
	struct branch_stack *bs_src = ptq->last_branch_rb;
	struct branch_stack *bs_dst = ptq->last_branch;
	size_t nr = 0;

	bs_dst->nr = bs_src->nr;

	if (!bs_src->nr)
		return;

	nr = ptq->pt->synth_opts.last_branch_sz - ptq->last_branch_pos;
	memcpy(&bs_dst->entries[0],
	       &bs_src->entries[ptq->last_branch_pos],
	       sizeof(struct branch_entry) * nr);

	if (bs_src->nr >= ptq->pt->synth_opts.last_branch_sz) {
		memcpy(&bs_dst->entries[nr],
		       &bs_src->entries[0],
		       sizeof(struct branch_entry) * ptq->last_branch_pos);
	}
}

static inline void intel_pt_reset_last_branch_rb(struct intel_pt_queue *ptq)
{
	ptq->last_branch_pos = 0;
	ptq->last_branch_rb->nr = 0;
}

static void intel_pt_update_last_branch_rb(struct intel_pt_queue *ptq)
{
	const struct intel_pt_state *state = ptq->state;
	struct branch_stack *bs = ptq->last_branch_rb;
	struct branch_entry *be;

	if (!ptq->last_branch_pos)
		ptq->last_branch_pos = ptq->pt->synth_opts.last_branch_sz;

	ptq->last_branch_pos -= 1;

	be              = &bs->entries[ptq->last_branch_pos];
	be->from        = state->from_ip;
	be->to          = state->to_ip;
	be->flags.abort = !!(state->flags & INTEL_PT_ABORT_TX);
	be->flags.in_tx = !!(state->flags & INTEL_PT_IN_TX);
	/* No support for mispredict */
	be->flags.mispred = ptq->pt->mispred_all;

	if (bs->nr < ptq->pt->synth_opts.last_branch_sz)
		bs->nr += 1;
}

static inline bool intel_pt_skip_event(struct intel_pt *pt)
{
	return pt->synth_opts.initial_skip &&
	       pt->num_events++ < pt->synth_opts.initial_skip;
}

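/*
 * Fill in the sample fields shared by all synthesized events; the "b"
 * variant is the minimal preparation used for branch samples.
 */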
static void intel_pt_prep_b_sample(struct intel_pt *pt,
				   struct intel_pt_queue *ptq,
				   union perf_event *event,
				   struct perf_sample *sample)
{
	event->sample.header.type = PERF_RECORD_SAMPLE;
	event->sample.header.misc = PERF_RECORD_MISC_USER;
	event->sample.header.size = sizeof(struct perf_event_header);

	if (!pt->timeless_decoding)
		sample->time = tsc_to_perf_time(ptq->timestamp, &pt->tc);

	sample->cpumode = PERF_RECORD_MISC_USER;
	sample->ip = ptq->state->from_ip;
	sample->pid = ptq->pid;
	sample->tid = ptq->tid;
	sample->addr = ptq->state->to_ip;
	sample->period = 1;
	sample->cpu = ptq->cpu;
	sample->flags = ptq->flags;
	sample->insn_len = ptq->insn_len;
	memcpy(sample->insn, ptq->insn, INTEL_PT_INSN_BUF_SZ);
}

static int intel_pt_inject_event(union perf_event *event,
				 struct perf_sample *sample, u64 type,
				 bool swapped)
{
	event->header.size = perf_event__sample_event_size(sample, type, 0);
	return perf_event__synthesize_sample(event, type, 0, sample, swapped);
}

static inline int intel_pt_opt_inject(struct intel_pt *pt,
				      union perf_event *event,
				      struct perf_sample *sample, u64 type)
{
	if (!pt->synth_opts.inject)
		return 0;

	return intel_pt_inject_event(event, sample, type, pt->synth_needs_swap);
}

static int intel_pt_deliver_synth_b_event(struct intel_pt *pt,
					  union perf_event *event,
					  struct perf_sample *sample, u64 type)
{
	int ret;

	ret = intel_pt_opt_inject(pt, event, sample, type);
	if (ret)
		return ret;

	ret = perf_session__deliver_synth_event(pt->session, event, sample);
	if (ret)
		pr_err("Intel PT: failed to deliver event, error %d\n", ret);

	return ret;
}

static int intel_pt_synth_branch_sample(struct intel_pt_queue *ptq)
{
	struct intel_pt *pt = ptq->pt;
	union perf_event *event = ptq->event_buf;
	struct perf_sample sample = { .ip = 0, };
	struct dummy_branch_stack {
		u64			nr;
		struct branch_entry	entries;
	} dummy_bs;

	if (pt->branches_filter && !(pt->branches_filter & ptq->flags))
		return 0;

	if (intel_pt_skip_event(pt))
		return 0;

	intel_pt_prep_b_sample(pt, ptq, event, &sample);

	sample.id = ptq->pt->branches_id;
	sample.stream_id = ptq->pt->branches_id;

	/*
	 * perf report cannot handle events without a branch stack when using
	 * SORT_MODE__BRANCH so make a dummy one.
	 */
	if (pt->synth_opts.last_branch && sort__mode == SORT_MODE__BRANCH) {
		dummy_bs = (struct dummy_branch_stack){
			.nr = 1,
			.entries = {
				.from = sample.ip,
				.to = sample.addr,
			},
		};
		sample.branch_stack = (struct branch_stack *)&dummy_bs;
	}

	return intel_pt_deliver_synth_b_event(pt, event, &sample,
					      pt->branches_sample_type);
}

static void intel_pt_prep_sample(struct intel_pt *pt,
				 struct intel_pt_queue *ptq,
				 union perf_event *event,
				 struct perf_sample *sample)
{
	intel_pt_prep_b_sample(pt, ptq, event, sample);

	if (pt->synth_opts.callchain) {
		thread_stack__sample(ptq->thread, ptq->chain,
				     pt->synth_opts.callchain_sz, sample->ip);
		sample->callchain = ptq->chain;
	}

	if (pt->synth_opts.last_branch) {
		intel_pt_copy_last_branch_rb(ptq);
		sample->branch_stack = ptq->last_branch;
	}
}

static inline int intel_pt_deliver_synth_event(struct intel_pt *pt,
					       struct intel_pt_queue *ptq,
					       union perf_event *event,
					       struct perf_sample *sample,
					       u64 type)
{
	int ret;

	ret = intel_pt_deliver_synth_b_event(pt, event, sample, type);

	if (pt->synth_opts.last_branch)
		intel_pt_reset_last_branch_rb(ptq);

	return ret;
}

static int intel_pt_synth_instruction_sample(struct intel_pt_queue *ptq)
{
	struct intel_pt *pt = ptq->pt;
	union perf_event *event = ptq->event_buf;
	struct perf_sample sample = { .ip = 0, };

	if (intel_pt_skip_event(pt))
		return 0;

	intel_pt_prep_sample(pt, ptq, event, &sample);

	sample.id = ptq->pt->instructions_id;
	sample.stream_id = ptq->pt->instructions_id;
	sample.period = ptq->state->tot_insn_cnt - ptq->last_insn_cnt;

	ptq->last_insn_cnt = ptq->state->tot_insn_cnt;

	return intel_pt_deliver_synth_event(pt, ptq, event, &sample,
					    pt->instructions_sample_type);
}

static int intel_pt_synth_transaction_sample(struct intel_pt_queue *ptq)
{
	struct intel_pt *pt = ptq->pt;
	union perf_event *event = ptq->event_buf;
	struct perf_sample sample = { .ip = 0, };

	if (intel_pt_skip_event(pt))
		return 0;

	intel_pt_prep_sample(pt, ptq, event, &sample);

	sample.id = ptq->pt->transactions_id;
	sample.stream_id = ptq->pt->transactions_id;

	return intel_pt_deliver_synth_event(pt, ptq, event, &sample,
					    pt->transactions_sample_type);
}

static void intel_pt_prep_p_sample(struct intel_pt *pt,
				   struct intel_pt_queue *ptq,
				   union perf_event *event,
				   struct perf_sample *sample)
{
	intel_pt_prep_sample(pt, ptq, event, sample);

	/*
	 * Zero IP is used to mean "trace start" but that is not the case for
	 * power or PTWRITE events with no IP, so clear the flags.
	 */
	if (!sample->ip)
		sample->flags = 0;
}

static int intel_pt_synth_ptwrite_sample(struct intel_pt_queue *ptq)
{
	struct intel_pt *pt = ptq->pt;
	union perf_event *event = ptq->event_buf;
	struct perf_sample sample = { .ip = 0, };
	struct perf_synth_intel_ptwrite raw;

	if (intel_pt_skip_event(pt))
		return 0;

	intel_pt_prep_p_sample(pt, ptq, event, &sample);

	sample.id = ptq->pt->ptwrites_id;
	sample.stream_id = ptq->pt->ptwrites_id;

	raw.flags = 0;
	raw.ip = !!(ptq->state->flags & INTEL_PT_FUP_IP);
	raw.payload = cpu_to_le64(ptq->state->ptw_payload);

	sample.raw_size = perf_synth__raw_size(raw);
	sample.raw_data = perf_synth__raw_data(&raw);

	return intel_pt_deliver_synth_event(pt, ptq, event, &sample,
					    pt->ptwrites_sample_type);
}

static int intel_pt_synth_cbr_sample(struct intel_pt_queue *ptq)
{
	struct intel_pt *pt = ptq->pt;
	union perf_event *event = ptq->event_buf;
	struct perf_sample sample = { .ip = 0, };
	struct perf_synth_intel_cbr raw;
	u32 flags;

	if (intel_pt_skip_event(pt))
		return 0;

	intel_pt_prep_p_sample(pt, ptq, event, &sample);

	sample.id = ptq->pt->cbr_id;
	sample.stream_id = ptq->pt->cbr_id;

	flags = (u16)ptq->state->cbr_payload | (pt->max_non_turbo_ratio << 16);
	raw.flags = cpu_to_le32(flags);
	raw.freq = cpu_to_le32(raw.cbr * pt->cbr2khz);
	raw.reserved3 = 0;

	sample.raw_size = perf_synth__raw_size(raw);
	sample.raw_data = perf_synth__raw_data(&raw);

	return intel_pt_deliver_synth_event(pt, ptq, event, &sample,
					    pt->pwr_events_sample_type);
}

static int intel_pt_synth_mwait_sample(struct intel_pt_queue *ptq)
{
	struct intel_pt *pt = ptq->pt;
	union perf_event *event = ptq->event_buf;
	struct perf_sample sample = { .ip = 0, };
	struct perf_synth_intel_mwait raw;

	if (intel_pt_skip_event(pt))
		return 0;

	intel_pt_prep_p_sample(pt, ptq, event, &sample);

	sample.id = ptq->pt->mwait_id;
	sample.stream_id = ptq->pt->mwait_id;

	raw.reserved = 0;
	raw.payload = cpu_to_le64(ptq->state->mwait_payload);

	sample.raw_size = perf_synth__raw_size(raw);
	sample.raw_data = perf_synth__raw_data(&raw);

	return intel_pt_deliver_synth_event(pt, ptq, event, &sample,
					    pt->pwr_events_sample_type);
}

static int intel_pt_synth_pwre_sample(struct intel_pt_queue *ptq)
{
	struct intel_pt *pt = ptq->pt;
	union perf_event *event = ptq->event_buf;
	struct perf_sample sample = { .ip = 0, };
	struct perf_synth_intel_pwre raw;

	if (intel_pt_skip_event(pt))
		return 0;

	intel_pt_prep_p_sample(pt, ptq, event, &sample);

	sample.id = ptq->pt->pwre_id;
	sample.stream_id = ptq->pt->pwre_id;

	raw.reserved = 0;
	raw.payload = cpu_to_le64(ptq->state->pwre_payload);

	sample.raw_size = perf_synth__raw_size(raw);
	sample.raw_data = perf_synth__raw_data(&raw);

	return intel_pt_deliver_synth_event(pt, ptq, event, &sample,
					    pt->pwr_events_sample_type);
}

static int intel_pt_synth_exstop_sample(struct intel_pt_queue *ptq)
{
	struct intel_pt *pt = ptq->pt;
	union perf_event *event = ptq->event_buf;
	struct perf_sample sample = { .ip = 0, };
	struct perf_synth_intel_exstop raw;

	if (intel_pt_skip_event(pt))
		return 0;

	intel_pt_prep_p_sample(pt, ptq, event, &sample);

	sample.id = ptq->pt->exstop_id;
	sample.stream_id = ptq->pt->exstop_id;

	raw.flags = 0;
	raw.ip = !!(ptq->state->flags & INTEL_PT_FUP_IP);

	sample.raw_size = perf_synth__raw_size(raw);
	sample.raw_data = perf_synth__raw_data(&raw);

	return intel_pt_deliver_synth_event(pt, ptq, event, &sample,
					    pt->pwr_events_sample_type);
}

static int intel_pt_synth_pwrx_sample(struct intel_pt_queue *ptq)
{
	struct intel_pt *pt = ptq->pt;
	union perf_event *event = ptq->event_buf;
	struct perf_sample sample = { .ip = 0, };
	struct perf_synth_intel_pwrx raw;

	if (intel_pt_skip_event(pt))
		return 0;

	intel_pt_prep_p_sample(pt, ptq, event, &sample);

	sample.id = ptq->pt->pwrx_id;
	sample.stream_id = ptq->pt->pwrx_id;

	raw.reserved = 0;
	raw.payload = cpu_to_le64(ptq->state->pwrx_payload);

	sample.raw_size = perf_synth__raw_size(raw);
	sample.raw_data = perf_synth__raw_data(&raw);

	return intel_pt_deliver_synth_event(pt, ptq, event, &sample,
					    pt->pwr_events_sample_type);
}

static int intel_pt_synth_error(struct intel_pt *pt, int code, int cpu,
				pid_t pid, pid_t tid, u64 ip)
{
	union perf_event event;
	char msg[MAX_AUXTRACE_ERROR_MSG];
	int err;

	intel_pt__strerror(code, msg, MAX_AUXTRACE_ERROR_MSG);

	auxtrace_synth_error(&event.auxtrace_error, PERF_AUXTRACE_ERROR_ITRACE,
			     code, cpu, pid, tid, ip, msg);

	err = perf_session__deliver_synth_event(pt->session, &event, NULL);
	if (err)
		pr_err("Intel Processor Trace: failed to deliver error event, error %d\n",
		       err);

	return err;
}

static int intel_pt_next_tid(struct intel_pt *pt, struct intel_pt_queue *ptq)
{
	struct auxtrace_queue *queue;
	pid_t tid = ptq->next_tid;
	int err;

	if (tid == -1)
		return 0;

	intel_pt_log("switch: cpu %d tid %d\n", ptq->cpu, tid);

	err = machine__set_current_tid(pt->machine, ptq->cpu, -1, tid);

	queue = &pt->queues.queue_array[ptq->queue_nr];
	intel_pt_set_pid_tid_cpu(pt, queue);

	ptq->next_tid = -1;

	return err;
}

static inline bool intel_pt_is_switch_ip(struct intel_pt_queue *ptq, u64 ip)
{
	struct intel_pt *pt = ptq->pt;

	return ip == pt->switch_ip &&
	       (ptq->flags & PERF_IP_FLAG_BRANCH) &&
	       !(ptq->flags & (PERF_IP_FLAG_CONDITIONAL | PERF_IP_FLAG_ASYNC |
			       PERF_IP_FLAG_INTERRUPT | PERF_IP_FLAG_TX_ABORT));
}

#define INTEL_PT_PWR_EVT (INTEL_PT_MWAIT_OP | INTEL_PT_PWR_ENTRY | \
			  INTEL_PT_EX_STOP | INTEL_PT_PWR_EXIT | \
			  INTEL_PT_CBR_CHG)

static int intel_pt_sample(struct intel_pt_queue *ptq)
{
	const struct intel_pt_state *state = ptq->state;
	struct intel_pt *pt = ptq->pt;
	int err;

	if (!ptq->have_sample)
		return 0;

	ptq->have_sample = false;

	if (pt->sample_pwr_events && (state->type & INTEL_PT_PWR_EVT)) {
		if (state->type & INTEL_PT_CBR_CHG) {
			err = intel_pt_synth_cbr_sample(ptq);
			if (err)
				return err;
		}
		if (state->type & INTEL_PT_MWAIT_OP) {
			err = intel_pt_synth_mwait_sample(ptq);
			if (err)
				return err;
		}
		if (state->type & INTEL_PT_PWR_ENTRY) {
			err = intel_pt_synth_pwre_sample(ptq);
			if (err)
				return err;
		}
		if (state->type & INTEL_PT_EX_STOP) {
			err = intel_pt_synth_exstop_sample(ptq);
			if (err)
				return err;
		}
		if (state->type & INTEL_PT_PWR_EXIT) {
			err = intel_pt_synth_pwrx_sample(ptq);
			if (err)
				return err;
		}
	}

	if (pt->sample_instructions && (state->type & INTEL_PT_INSTRUCTION)) {
		err = intel_pt_synth_instruction_sample(ptq);
		if (err)
			return err;
	}

	if (pt->sample_transactions && (state->type & INTEL_PT_TRANSACTION)) {
		err = intel_pt_synth_transaction_sample(ptq);
		if (err)
			return err;
	}

	if (pt->sample_ptwrites && (state->type & INTEL_PT_PTW)) {
		err = intel_pt_synth_ptwrite_sample(ptq);
		if (err)
			return err;
	}

	if (!(state->type & INTEL_PT_BRANCH))
		return 0;

	if (pt->synth_opts.callchain || pt->synth_opts.thread_stack)
		thread_stack__event(ptq->thread, ptq->flags, state->from_ip,
				    state->to_ip, ptq->insn_len,
				    state->trace_nr);
	else
		thread_stack__set_trace_nr(ptq->thread, state->trace_nr);

	if (pt->sample_branches) {
		err = intel_pt_synth_branch_sample(ptq);
		if (err)
			return err;
	}

	if (pt->synth_opts.last_branch)
		intel_pt_update_last_branch_rb(ptq);

	if (!ptq->sync_switch)
		return 0;

	if (intel_pt_is_switch_ip(ptq, state->to_ip)) {
		switch (ptq->switch_state) {
		case INTEL_PT_SS_NOT_TRACING:
		case INTEL_PT_SS_UNKNOWN:
		case INTEL_PT_SS_EXPECTING_SWITCH_IP:
			err = intel_pt_next_tid(pt, ptq);
			if (err)
				return err;
			ptq->switch_state = INTEL_PT_SS_TRACING;
			break;
		default:
			ptq->switch_state = INTEL_PT_SS_EXPECTING_SWITCH_EVENT;
			return 1;
		}
	} else if (!state->to_ip) {
		ptq->switch_state = INTEL_PT_SS_NOT_TRACING;
	} else if (ptq->switch_state == INTEL_PT_SS_NOT_TRACING) {
		ptq->switch_state = INTEL_PT_SS_UNKNOWN;
	} else if (ptq->switch_state == INTEL_PT_SS_UNKNOWN &&
		   state->to_ip == pt->ptss_ip &&
		   (ptq->flags & PERF_IP_FLAG_CALL)) {
		ptq->switch_state = INTEL_PT_SS_TRACING;
	}

	return 0;
}

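/*
 * Locate the kernel's __switch_to symbol, and the sched_switch tracepoint
 * or callback address, so the decoder can recognize context switches in the
 * trace itself.
 */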
static u64 intel_pt_switch_ip(struct intel_pt *pt, u64 *ptss_ip)
{
	struct machine *machine = pt->machine;
	struct map *map;
	struct symbol *sym, *start;
	u64 ip, switch_ip = 0;
	const char *ptss;

	if (ptss_ip)
		*ptss_ip = 0;

	map = machine__kernel_map(machine);
	if (!map)
		return 0;

	if (map__load(map))
		return 0;

	start = dso__first_symbol(map->dso, MAP__FUNCTION);

	for (sym = start; sym; sym = dso__next_symbol(sym)) {
		if (sym->binding == STB_GLOBAL &&
		    !strcmp(sym->name, "__switch_to")) {
			ip = map->unmap_ip(map, sym->start);
			if (ip >= map->start && ip < map->end) {
				switch_ip = ip;
				break;
			}
		}
	}

	if (!switch_ip || !ptss_ip)
		return 0;

	if (pt->have_sched_switch == 1)
		ptss = "perf_trace_sched_switch";
	else
		ptss = "__perf_event_task_sched_out";

	for (sym = start; sym; sym = dso__next_symbol(sym)) {
		if (!strcmp(sym->name, ptss)) {
			ip = map->unmap_ip(map, sym->start);
			if (ip >= map->start && ip < map->end) {
				*ptss_ip = ip;
				break;
			}
		}
	}

	return switch_ip;
}

static void intel_pt_enable_sync_switch(struct intel_pt *pt)
{
	unsigned int i;

	pt->sync_switch = true;

	for (i = 0; i < pt->queues.nr_queues; i++) {
		struct auxtrace_queue *queue = &pt->queues.queue_array[i];
		struct intel_pt_queue *ptq = queue->priv;

		if (ptq)
			ptq->sync_switch = true;
	}
}

static int intel_pt_run_decoder(struct intel_pt_queue *ptq, u64 *timestamp)
{
	const struct intel_pt_state *state = ptq->state;
	struct intel_pt *pt = ptq->pt;
	int err;

	if (!pt->kernel_start) {
		pt->kernel_start = machine__kernel_start(pt->machine);
		if (pt->per_cpu_mmaps &&
		    (pt->have_sched_switch == 1 || pt->have_sched_switch == 3) &&
		    !pt->timeless_decoding && intel_pt_tracing_kernel(pt) &&
		    !pt->sampling_mode) {
			pt->switch_ip = intel_pt_switch_ip(pt, &pt->ptss_ip);
			if (pt->switch_ip) {
				intel_pt_log("switch_ip: %"PRIx64" ptss_ip: %"PRIx64"\n",
					     pt->switch_ip, pt->ptss_ip);
				intel_pt_enable_sync_switch(pt);
			}
		}
	}

	intel_pt_log("queue %u decoding cpu %d pid %d tid %d\n",
		     ptq->queue_nr, ptq->cpu, ptq->pid, ptq->tid);
	while (1) {
		err = intel_pt_sample(ptq);
		if (err)
			return err;

		state = intel_pt_decode(ptq->decoder);
		if (state->err) {
			if (state->err == INTEL_PT_ERR_NODATA)
				return 1;
			if (ptq->sync_switch &&
			    state->from_ip >= pt->kernel_start) {
				ptq->sync_switch = false;
				intel_pt_next_tid(pt, ptq);
			}
			if (pt->synth_opts.errors) {
				err = intel_pt_synth_error(pt, state->err,
							   ptq->cpu, ptq->pid,
							   ptq->tid,
							   state->from_ip);
				if (err)
					return err;
			}
			continue;
		}

		ptq->state = state;
		ptq->have_sample = true;
		intel_pt_sample_flags(ptq);

		/* Use estimated TSC upon return to user space */
		if (pt->est_tsc &&
		    (state->from_ip >= pt->kernel_start || !state->from_ip) &&
		    state->to_ip && state->to_ip < pt->kernel_start) {
			intel_pt_log("TSC %"PRIx64" est. TSC %"PRIx64"\n",
				     state->timestamp, state->est_timestamp);
			ptq->timestamp = state->est_timestamp;
		/* Use estimated TSC in unknown switch state */
		} else if (ptq->sync_switch &&
			   ptq->switch_state == INTEL_PT_SS_UNKNOWN &&
			   intel_pt_is_switch_ip(ptq, state->to_ip) &&
			   ptq->next_tid == -1) {
			intel_pt_log("TSC %"PRIx64" est. TSC %"PRIx64"\n",
				     state->timestamp, state->est_timestamp);
			ptq->timestamp = state->est_timestamp;
		} else if (state->timestamp > ptq->timestamp) {
			ptq->timestamp = state->timestamp;
		}

		if (!pt->timeless_decoding && ptq->timestamp >= *timestamp) {
			*timestamp = ptq->timestamp;
			return 0;
		}
	}
	return 0;
}

static inline int intel_pt_update_queues(struct intel_pt *pt)
{
	if (pt->queues.new_data) {
		pt->queues.new_data = false;
		return intel_pt_setup_queues(pt);
	}
	return 0;
}

static int intel_pt_process_queues(struct intel_pt *pt, u64 timestamp)
{
	unsigned int queue_nr;
	u64 ts;
	int ret;

	while (1) {
		struct auxtrace_queue *queue;
		struct intel_pt_queue *ptq;

		if (!pt->heap.heap_cnt)
			return 0;

		if (pt->heap.heap_array[0].ordinal >= timestamp)
			return 0;

		queue_nr = pt->heap.heap_array[0].queue_nr;
		queue = &pt->queues.queue_array[queue_nr];
		ptq = queue->priv;

		intel_pt_log("queue %u processing 0x%" PRIx64 " to 0x%" PRIx64 "\n",
			     queue_nr, pt->heap.heap_array[0].ordinal,
			     timestamp);

		auxtrace_heap__pop(&pt->heap);

		if (pt->heap.heap_cnt) {
			ts = pt->heap.heap_array[0].ordinal + 1;
			if (ts > timestamp)
				ts = timestamp;
		} else {
			ts = timestamp;
		}

		intel_pt_set_pid_tid_cpu(pt, queue);

		ret = intel_pt_run_decoder(ptq, &ts);

		if (ret < 0) {
			auxtrace_heap__add(&pt->heap, queue_nr, ts);
			return ret;
		}

		if (!ret) {
			ret = auxtrace_heap__add(&pt->heap, queue_nr, ts);
			if (ret < 0)
				return ret;
		} else {
			ptq->on_heap = false;
		}
	}

	return 0;
}

static int intel_pt_process_timeless_queues(struct intel_pt *pt, pid_t tid,
					    u64 time_)
{
	struct auxtrace_queues *queues = &pt->queues;
	unsigned int i;
	u64 ts = 0;

	for (i = 0; i < queues->nr_queues; i++) {
		struct auxtrace_queue *queue = &pt->queues.queue_array[i];
		struct intel_pt_queue *ptq = queue->priv;

		if (ptq && (tid == -1 || ptq->tid == tid)) {
			ptq->time = time_;
			intel_pt_set_pid_tid_cpu(pt, queue);
			intel_pt_run_decoder(ptq, &ts);
		}
	}
	return 0;
}

static int intel_pt_lost(struct intel_pt *pt, struct perf_sample *sample)
{
	return intel_pt_synth_error(pt, INTEL_PT_ERR_LOST, sample->cpu,
				    sample->pid, sample->tid, 0);
}

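/*
 * Map a CPU number to its queue.  Queue i is usually for CPU i, so try that
 * first, then search downwards, and finally upwards.
 */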
static struct intel_pt_queue *intel_pt_cpu_to_ptq(struct intel_pt *pt, int cpu)
{
	unsigned i, j;

	if (cpu < 0 || !pt->queues.nr_queues)
		return NULL;

	if ((unsigned)cpu >= pt->queues.nr_queues)
		i = pt->queues.nr_queues - 1;
	else
		i = cpu;

	if (pt->queues.queue_array[i].cpu == cpu)
		return pt->queues.queue_array[i].priv;

	for (j = 0; i > 0; j++) {
		if (pt->queues.queue_array[--i].cpu == cpu)
			return pt->queues.queue_array[i].priv;
	}

	for (; j < pt->queues.nr_queues; j++) {
		if (pt->queues.queue_array[j].cpu == cpu)
			return pt->queues.queue_array[j].priv;
	}

	return NULL;
}

static int intel_pt_sync_switch(struct intel_pt *pt, int cpu, pid_t tid,
				u64 timestamp)
{
	struct intel_pt_queue *ptq;
	int err;

	if (!pt->sync_switch)
		return 1;

	ptq = intel_pt_cpu_to_ptq(pt, cpu);
	if (!ptq || !ptq->sync_switch)
		return 1;

	switch (ptq->switch_state) {
	case INTEL_PT_SS_NOT_TRACING:
		ptq->next_tid = -1;
		break;
	case INTEL_PT_SS_UNKNOWN:
	case INTEL_PT_SS_TRACING:
		ptq->next_tid = tid;
		ptq->switch_state = INTEL_PT_SS_EXPECTING_SWITCH_IP;
		return 0;
	case INTEL_PT_SS_EXPECTING_SWITCH_EVENT:
		if (!ptq->on_heap) {
			ptq->timestamp = perf_time_to_tsc(timestamp,
							  &pt->tc);
			err = auxtrace_heap__add(&pt->heap, ptq->queue_nr,
						 ptq->timestamp);
			if (err)
				return err;
			ptq->on_heap = true;
		}
		ptq->switch_state = INTEL_PT_SS_TRACING;
		break;
	case INTEL_PT_SS_EXPECTING_SWITCH_IP:
		ptq->next_tid = tid;
		intel_pt_log("ERROR: cpu %d expecting switch ip\n", cpu);
		break;
	default:
		break;
	}

	return 1;
}

static int intel_pt_process_switch(struct intel_pt *pt,
				   struct perf_sample *sample)
{
	struct perf_evsel *evsel;
	pid_t tid;
	int cpu, ret;

	evsel = perf_evlist__id2evsel(pt->session->evlist, sample->id);
	if (evsel != pt->switch_evsel)
		return 0;

	tid = perf_evsel__intval(evsel, sample, "next_pid");
	cpu = sample->cpu;

	intel_pt_log("sched_switch: cpu %d tid %d time %"PRIu64" tsc %#"PRIx64"\n",
		     cpu, tid, sample->time, perf_time_to_tsc(sample->time,
		     &pt->tc));

	ret = intel_pt_sync_switch(pt, cpu, tid, sample->time);
	if (ret <= 0)
		return ret;

	return machine__set_current_tid(pt->machine, cpu, -1, tid);
}

static int intel_pt_context_switch(struct intel_pt *pt, union perf_event *event,
				   struct perf_sample *sample)
{
	bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT;
	pid_t pid, tid;
	int cpu, ret;

	cpu = sample->cpu;

	if (pt->have_sched_switch == 3) {
		if (!out)
			return 0;
		if (event->header.type != PERF_RECORD_SWITCH_CPU_WIDE) {
			pr_err("Expecting CPU-wide context switch event\n");
			return -EINVAL;
		}
		pid = event->context_switch.next_prev_pid;
		tid = event->context_switch.next_prev_tid;
	} else {
		if (out)
			return 0;
		pid = sample->pid;
		tid = sample->tid;
	}

	if (tid == -1) {
		pr_err("context_switch event has no tid\n");
		return -EINVAL;
	}

	intel_pt_log("context_switch: cpu %d pid %d tid %d time %"PRIu64" tsc %#"PRIx64"\n",
		     cpu, pid, tid, sample->time, perf_time_to_tsc(sample->time,
		     &pt->tc));

	ret = intel_pt_sync_switch(pt, cpu, tid, sample->time);
	if (ret <= 0)
		return ret;

	return machine__set_current_tid(pt->machine, cpu, pid, tid);
}

static int intel_pt_process_itrace_start(struct intel_pt *pt,
					 union perf_event *event,
					 struct perf_sample *sample)
{
	if (!pt->per_cpu_mmaps)
		return 0;

	intel_pt_log("itrace_start: cpu %d pid %d tid %d time %"PRIu64" tsc %#"PRIx64"\n",
		     sample->cpu, event->itrace_start.pid,
		     event->itrace_start.tid, sample->time,
		     perf_time_to_tsc(sample->time, &pt->tc));

	return machine__set_current_tid(pt->machine, sample->cpu,
					event->itrace_start.pid,
					event->itrace_start.tid);
}

static int intel_pt_process_event(struct perf_session *session,
				  union perf_event *event,
				  struct perf_sample *sample,
				  struct perf_tool *tool)
{
	struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
					   auxtrace);
	u64 timestamp;
	int err = 0;

	if (dump_trace)
		return 0;

	if (!tool->ordered_events) {
		pr_err("Intel Processor Trace requires ordered events\n");
		return -EINVAL;
	}

	if (sample->time && sample->time != (u64)-1)
		timestamp = perf_time_to_tsc(sample->time, &pt->tc);
	else
		timestamp = 0;

	if (timestamp || pt->timeless_decoding) {
		err = intel_pt_update_queues(pt);
		if (err)
			return err;
	}

	if (pt->timeless_decoding) {
		if (event->header.type == PERF_RECORD_EXIT) {
			err = intel_pt_process_timeless_queues(pt,
							       event->fork.tid,
							       sample->time);
		}
	} else if (timestamp) {
		err = intel_pt_process_queues(pt, timestamp);
	}
	if (err)
		return err;

	if (event->header.type == PERF_RECORD_AUX &&
	    (event->aux.flags & PERF_AUX_FLAG_TRUNCATED) &&
	    pt->synth_opts.errors) {
		err = intel_pt_lost(pt, sample);
		if (err)
			return err;
	}

	if (pt->switch_evsel && event->header.type == PERF_RECORD_SAMPLE)
		err = intel_pt_process_switch(pt, sample);
	else if (event->header.type == PERF_RECORD_ITRACE_START)
		err = intel_pt_process_itrace_start(pt, event, sample);
	else if (event->header.type == PERF_RECORD_SWITCH ||
		 event->header.type == PERF_RECORD_SWITCH_CPU_WIDE)
		err = intel_pt_context_switch(pt, event, sample);

	intel_pt_log("event %s (%u): cpu %d time %"PRIu64" tsc %#"PRIx64"\n",
		     perf_event__name(event->header.type), event->header.type,
		     sample->cpu, sample->time, timestamp);

	return err;
}

static int intel_pt_flush(struct perf_session *session, struct perf_tool *tool)
{
	struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
					   auxtrace);
	int ret;

	if (dump_trace)
		return 0;

	if (!tool->ordered_events)
		return -EINVAL;

	ret = intel_pt_update_queues(pt);
	if (ret < 0)
		return ret;

	if (pt->timeless_decoding)
		return intel_pt_process_timeless_queues(pt, -1,
							MAX_TIMESTAMP - 1);

	return intel_pt_process_queues(pt, MAX_TIMESTAMP);
}

static void intel_pt_free_events(struct perf_session *session)
{
	struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
					   auxtrace);
	struct auxtrace_queues *queues = &pt->queues;
	unsigned int i;

	for (i = 0; i < queues->nr_queues; i++) {
		intel_pt_free_queue(queues->queue_array[i].priv);
		queues->queue_array[i].priv = NULL;
	}
	intel_pt_log_disable();
	auxtrace_queues__free(queues);
}

static void intel_pt_free(struct perf_session *session)
{
	struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
					   auxtrace);

	auxtrace_heap__free(&pt->heap);
	intel_pt_free_events(session);
	session->auxtrace = NULL;
	thread__put(pt->unknown_thread);
	addr_filters__exit(&pt->filts);
	zfree(&pt->filter);
	free(pt);
}

static int intel_pt_process_auxtrace_event(struct perf_session *session,
					   union perf_event *event,
					   struct perf_tool *tool __maybe_unused)
{
	struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
					   auxtrace);

	if (pt->sampling_mode)
		return 0;

	if (!pt->data_queued) {
		struct auxtrace_buffer *buffer;
		off_t data_offset;
		int fd = perf_data_file__fd(session->file);
		int err;

		if (perf_data_file__is_pipe(session->file)) {
			data_offset = 0;
		} else {
			data_offset = lseek(fd, 0, SEEK_CUR);
			if (data_offset == -1)
				return -errno;
		}

		err = auxtrace_queues__add_event(&pt->queues, session, event,
						 data_offset, &buffer);
		if (err)
			return err;

		/* Dump here now we have copied a piped trace out of the pipe */
		if (dump_trace) {
			if (auxtrace_buffer__get_data(buffer, fd)) {
				intel_pt_dump_event(pt, buffer->data,
						    buffer->size);
				auxtrace_buffer__put_data(buffer);
			}
		}
	}

	return 0;
}

struct intel_pt_synth {
	struct perf_tool dummy_tool;
	struct perf_session *session;
};

static int intel_pt_event_synth(struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample __maybe_unused,
				struct machine *machine __maybe_unused)
{
	struct intel_pt_synth *intel_pt_synth =
			container_of(tool, struct intel_pt_synth, dummy_tool);

	return perf_session__deliver_synth_event(intel_pt_synth->session, event,
						 NULL);
}

static int intel_pt_synth_event(struct perf_session *session, const char *name,
				struct perf_event_attr *attr, u64 id)
{
	struct intel_pt_synth intel_pt_synth;
	int err;

	pr_debug("Synthesizing '%s' event with id %" PRIu64 " sample type %#" PRIx64 "\n",
		 name, id, (u64)attr->sample_type);

	memset(&intel_pt_synth, 0, sizeof(struct intel_pt_synth));
	intel_pt_synth.session = session;

	err = perf_event__synthesize_attr(&intel_pt_synth.dummy_tool, attr, 1,
					  &id, intel_pt_event_synth);
	if (err)
		pr_err("%s: failed to synthesize '%s' event type\n",
		       __func__, name);

	return err;
}

static void intel_pt_set_event_name(struct perf_evlist *evlist, u64 id,
				    const char *name)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->id && evsel->id[0] == id) {
			if (evsel->name)
				zfree(&evsel->name);
			evsel->name = strdup(name);
			break;
		}
	}
}

static struct perf_evsel *intel_pt_evsel(struct intel_pt *pt,
					 struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->attr.type == pt->pmu_type && evsel->ids)
			return evsel;
	}

	return NULL;
}

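/*
 * Create the attributes and ids for the events synthesized from the trace
 * (branches, instructions, transactions, ptwrite and power events), basing
 * them on the Intel PT evsel that recorded the trace.
 */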
static int intel_pt_synth_events(struct intel_pt *pt,
				 struct perf_session *session)
{
	struct perf_evlist *evlist = session->evlist;
	struct perf_evsel *evsel = intel_pt_evsel(pt, evlist);
	struct perf_event_attr attr;
	u64 id;
	int err;

	if (!evsel) {
		pr_debug("There are no selected events with Intel Processor Trace data\n");
		return 0;
	}

	memset(&attr, 0, sizeof(struct perf_event_attr));
	attr.size = sizeof(struct perf_event_attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.sample_type = evsel->attr.sample_type & PERF_SAMPLE_MASK;
	attr.sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID |
			    PERF_SAMPLE_PERIOD;
	if (pt->timeless_decoding)
		attr.sample_type &= ~(u64)PERF_SAMPLE_TIME;
	else
		attr.sample_type |= PERF_SAMPLE_TIME;
	if (!pt->per_cpu_mmaps)
		attr.sample_type &= ~(u64)PERF_SAMPLE_CPU;
	attr.exclude_user = evsel->attr.exclude_user;
	attr.exclude_kernel = evsel->attr.exclude_kernel;
	attr.exclude_hv = evsel->attr.exclude_hv;
	attr.exclude_host = evsel->attr.exclude_host;
	attr.exclude_guest = evsel->attr.exclude_guest;
	attr.sample_id_all = evsel->attr.sample_id_all;
	attr.read_format = evsel->attr.read_format;

	id = evsel->id[0] + 1000000000;
	if (!id)
		id = 1;

	if (pt->synth_opts.branches) {
		attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
		attr.sample_period = 1;
		attr.sample_type |= PERF_SAMPLE_ADDR;
		err = intel_pt_synth_event(session, "branches", &attr, id);
		if (err)
			return err;
		pt->sample_branches = true;
		pt->branches_sample_type = attr.sample_type;
		pt->branches_id = id;
		id += 1;
		attr.sample_type &= ~(u64)PERF_SAMPLE_ADDR;
	}

	if (pt->synth_opts.callchain)
		attr.sample_type |= PERF_SAMPLE_CALLCHAIN;
	if (pt->synth_opts.last_branch)
		attr.sample_type |= PERF_SAMPLE_BRANCH_STACK;

	if (pt->synth_opts.instructions) {
		attr.config = PERF_COUNT_HW_INSTRUCTIONS;
		if (pt->synth_opts.period_type == PERF_ITRACE_PERIOD_NANOSECS)
			attr.sample_period =
				intel_pt_ns_to_ticks(pt, pt->synth_opts.period);
		else
			attr.sample_period = pt->synth_opts.period;
		err = intel_pt_synth_event(session, "instructions", &attr, id);
		if (err)
			return err;
		pt->sample_instructions = true;
		pt->instructions_sample_type = attr.sample_type;
		pt->instructions_id = id;
		id += 1;
	}

	attr.sample_type &= ~(u64)PERF_SAMPLE_PERIOD;
	attr.sample_period = 1;

	if (pt->synth_opts.transactions) {
		attr.config = PERF_COUNT_HW_INSTRUCTIONS;
		err = intel_pt_synth_event(session, "transactions", &attr, id);
		if (err)
			return err;
		pt->sample_transactions = true;
		pt->transactions_sample_type = attr.sample_type;
		pt->transactions_id = id;
		intel_pt_set_event_name(evlist, id, "transactions");
		id += 1;
	}

	attr.type = PERF_TYPE_SYNTH;
	attr.sample_type |= PERF_SAMPLE_RAW;

	if (pt->synth_opts.ptwrites) {
		attr.config = PERF_SYNTH_INTEL_PTWRITE;
		err = intel_pt_synth_event(session, "ptwrite", &attr, id);
		if (err)
			return err;
		pt->sample_ptwrites = true;
		pt->ptwrites_sample_type = attr.sample_type;
		pt->ptwrites_id = id;
		intel_pt_set_event_name(evlist, id, "ptwrite");
		id += 1;
	}

	if (pt->synth_opts.pwr_events) {
		pt->sample_pwr_events = true;
		pt->pwr_events_sample_type = attr.sample_type;

		attr.config = PERF_SYNTH_INTEL_CBR;
		err = intel_pt_synth_event(session, "cbr", &attr, id);
		if (err)
			return err;
		pt->cbr_id = id;
		intel_pt_set_event_name(evlist, id, "cbr");
		id += 1;
	}

	if (pt->synth_opts.pwr_events && (evsel->attr.config & 0x10)) {
		attr.config = PERF_SYNTH_INTEL_MWAIT;
		err = intel_pt_synth_event(session, "mwait", &attr, id);
		if (err)
			return err;
		pt->mwait_id = id;
		intel_pt_set_event_name(evlist, id, "mwait");
		id += 1;

		attr.config = PERF_SYNTH_INTEL_PWRE;
		err = intel_pt_synth_event(session, "pwre", &attr, id);
		if (err)
			return err;
		pt->pwre_id = id;
		intel_pt_set_event_name(evlist, id, "pwre");
		id += 1;

		attr.config = PERF_SYNTH_INTEL_EXSTOP;
		err = intel_pt_synth_event(session, "exstop", &attr, id);
		if (err)
			return err;
		pt->exstop_id = id;
		intel_pt_set_event_name(evlist, id, "exstop");
		id += 1;

		attr.config = PERF_SYNTH_INTEL_PWRX;
		err = intel_pt_synth_event(session, "pwrx", &attr, id);
		if (err)
			return err;
		pt->pwrx_id = id;
		intel_pt_set_event_name(evlist, id, "pwrx");
		id += 1;
	}

	pt->synth_needs_swap = evsel->needs_swap;

	return 0;
}

static struct perf_evsel *intel_pt_find_sched_switch(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry_reverse(evlist, evsel) {
		const char *name = perf_evsel__name(evsel);

		if (!strcmp(name, "sched:sched_switch"))
			return evsel;
	}

	return NULL;
}

static bool intel_pt_find_switch(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->attr.context_switch)
			return true;
	}

	return false;
}

static int intel_pt_perf_config(const char *var, const char *value, void *data)
{
	struct intel_pt *pt = data;

	if (!strcmp(var, "intel-pt.mispred-all"))
		pt->mispred_all = perf_config_bool(var, value);

	return 0;
}

static const char * const intel_pt_info_fmts[] = {
	[INTEL_PT_PMU_TYPE]		= "  PMU Type            %"PRId64"\n",
	[INTEL_PT_TIME_SHIFT]		= "  Time Shift          %"PRIu64"\n",
	[INTEL_PT_TIME_MULT]		= "  Time Multiplier     %"PRIu64"\n",
	[INTEL_PT_TIME_ZERO]		= "  Time Zero           %"PRIu64"\n",
	[INTEL_PT_CAP_USER_TIME_ZERO]	= "  Cap Time Zero       %"PRId64"\n",
	[INTEL_PT_TSC_BIT]		= "  TSC bit             %#"PRIx64"\n",
	[INTEL_PT_NORETCOMP_BIT]	= "  NoRETComp bit       %#"PRIx64"\n",
	[INTEL_PT_HAVE_SCHED_SWITCH]	= "  Have sched_switch   %"PRId64"\n",
	[INTEL_PT_SNAPSHOT_MODE]	= "  Snapshot mode       %"PRId64"\n",
	[INTEL_PT_PER_CPU_MMAPS]	= "  Per-cpu maps        %"PRId64"\n",
	[INTEL_PT_MTC_BIT]		= "  MTC bit             %#"PRIx64"\n",
	[INTEL_PT_TSC_CTC_N]		= "  TSC:CTC numerator   %"PRIu64"\n",
	[INTEL_PT_TSC_CTC_D]		= "  TSC:CTC denominator %"PRIu64"\n",
	[INTEL_PT_CYC_BIT]		= "  CYC bit             %#"PRIx64"\n",
	[INTEL_PT_MAX_NONTURBO_RATIO]	= "  Max non-turbo ratio %"PRIu64"\n",
	[INTEL_PT_FILTER_STR_LEN]	= "  Filter string len.  %"PRIu64"\n",
};

static void intel_pt_print_info(u64 *arr, int start, int finish)
{
	int i;

	if (!dump_trace)
		return;

	for (i = start; i <= finish; i++)
		fprintf(stdout, intel_pt_info_fmts[i], arr[i]);
}

static void intel_pt_print_info_str(const char *name, const char *str)
{
	if (!dump_trace)
		return;

	fprintf(stdout, "  %-20s%s\n", name, str ? str : "");
}

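/*
 * The auxtrace info event has grown over time; check the header size to see
 * whether a given priv[] index was written by the recording perf.
 */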
static bool intel_pt_has(struct auxtrace_info_event *auxtrace_info, int pos)
{
	return auxtrace_info->header.size >=
		sizeof(struct auxtrace_info_event) + (sizeof(u64) * (pos + 1));
}

int intel_pt_process_auxtrace_info(union perf_event *event,
				   struct perf_session *session)
{
	struct auxtrace_info_event *auxtrace_info = &event->auxtrace_info;
	size_t min_sz = sizeof(u64) * INTEL_PT_PER_CPU_MMAPS;
	struct intel_pt *pt;
	void *info_end;
	u64 *info;
	int err;

	if (auxtrace_info->header.size < sizeof(struct auxtrace_info_event) +
					min_sz)
		return -EINVAL;

	pt = zalloc(sizeof(struct intel_pt));
	if (!pt)
		return -ENOMEM;

	addr_filters__init(&pt->filts);

	err = perf_config(intel_pt_perf_config, pt);
	if (err)
		goto err_free;

	err = auxtrace_queues__init(&pt->queues);
	if (err)
		goto err_free;

	intel_pt_log_set_name(INTEL_PT_PMU_NAME);

	pt->session = session;
	pt->machine = &session->machines.host; /* No kvm support */
	pt->auxtrace_type = auxtrace_info->type;
	pt->pmu_type = auxtrace_info->priv[INTEL_PT_PMU_TYPE];
	pt->tc.time_shift = auxtrace_info->priv[INTEL_PT_TIME_SHIFT];
	pt->tc.time_mult = auxtrace_info->priv[INTEL_PT_TIME_MULT];
	pt->tc.time_zero = auxtrace_info->priv[INTEL_PT_TIME_ZERO];
	pt->cap_user_time_zero = auxtrace_info->priv[INTEL_PT_CAP_USER_TIME_ZERO];
	pt->tsc_bit = auxtrace_info->priv[INTEL_PT_TSC_BIT];
	pt->noretcomp_bit = auxtrace_info->priv[INTEL_PT_NORETCOMP_BIT];
	pt->have_sched_switch = auxtrace_info->priv[INTEL_PT_HAVE_SCHED_SWITCH];
	pt->snapshot_mode = auxtrace_info->priv[INTEL_PT_SNAPSHOT_MODE];
	pt->per_cpu_mmaps = auxtrace_info->priv[INTEL_PT_PER_CPU_MMAPS];
	intel_pt_print_info(&auxtrace_info->priv[0], INTEL_PT_PMU_TYPE,
			    INTEL_PT_PER_CPU_MMAPS);

	if (intel_pt_has(auxtrace_info, INTEL_PT_CYC_BIT)) {
		pt->mtc_bit = auxtrace_info->priv[INTEL_PT_MTC_BIT];
		pt->mtc_freq_bits = auxtrace_info->priv[INTEL_PT_MTC_FREQ_BITS];
		pt->tsc_ctc_ratio_n = auxtrace_info->priv[INTEL_PT_TSC_CTC_N];
		pt->tsc_ctc_ratio_d = auxtrace_info->priv[INTEL_PT_TSC_CTC_D];
		pt->cyc_bit = auxtrace_info->priv[INTEL_PT_CYC_BIT];
		intel_pt_print_info(&auxtrace_info->priv[0], INTEL_PT_MTC_BIT,
				    INTEL_PT_CYC_BIT);
	}

	if (intel_pt_has(auxtrace_info, INTEL_PT_MAX_NONTURBO_RATIO)) {
		pt->max_non_turbo_ratio =
			auxtrace_info->priv[INTEL_PT_MAX_NONTURBO_RATIO];
		intel_pt_print_info(&auxtrace_info->priv[0],
				    INTEL_PT_MAX_NONTURBO_RATIO,
				    INTEL_PT_MAX_NONTURBO_RATIO);
	}

	info = &auxtrace_info->priv[INTEL_PT_FILTER_STR_LEN] + 1;
	info_end = (void *)info + auxtrace_info->header.size;

	if (intel_pt_has(auxtrace_info, INTEL_PT_FILTER_STR_LEN)) {
		size_t len;

		len = auxtrace_info->priv[INTEL_PT_FILTER_STR_LEN];
		intel_pt_print_info(&auxtrace_info->priv[0],
				    INTEL_PT_FILTER_STR_LEN,
				    INTEL_PT_FILTER_STR_LEN);
		if (len) {
			const char *filter = (const char *)info;

			len = roundup(len + 1, 8);
			info += len >> 3;
			if ((void *)info > info_end) {
				pr_err("%s: bad filter string length\n", __func__);
				err = -EINVAL;
				goto err_free_queues;
			}
			pt->filter = memdup(filter, len);
			if (!pt->filter) {
				err = -ENOMEM;
				goto err_free_queues;
			}
			if (session->header.needs_swap)
				mem_bswap_64(pt->filter, len);
			if (pt->filter[len - 1]) {
				pr_err("%s: filter string not null terminated\n", __func__);
				err = -EINVAL;
				goto err_free_queues;
			}
			err = addr_filters__parse_bare_filter(&pt->filts,
							      filter);
			if (err)
				goto err_free_queues;
		}
		intel_pt_print_info_str("Filter string", pt->filter);
	}

	pt->timeless_decoding = intel_pt_timeless_decoding(pt);
	pt->have_tsc = intel_pt_have_tsc(pt);
	pt->sampling_mode = false;
	pt->est_tsc = !pt->timeless_decoding;

	pt->unknown_thread = thread__new(999999999, 999999999);
	if (!pt->unknown_thread) {
		err = -ENOMEM;
		goto err_free_queues;
	}

	/*
	 * Since this thread will not be kept in any rbtree nor in a
	 * list, initialize its list node so that at thread__put() the
	 * current thread lifetime assumption is kept and we don't segfault
	 * at list_del_init().
	 */
	INIT_LIST_HEAD(&pt->unknown_thread->node);

	err = thread__set_comm(pt->unknown_thread, "unknown", 0);
	if (err)
		goto err_delete_thread;
	if (thread__init_map_groups(pt->unknown_thread, pt->machine)) {
		err = -ENOMEM;
		goto err_delete_thread;
	}

	pt->auxtrace.process_event = intel_pt_process_event;
	pt->auxtrace.process_auxtrace_event = intel_pt_process_auxtrace_event;
	pt->auxtrace.flush_events = intel_pt_flush;
	pt->auxtrace.free_events = intel_pt_free_events;
	pt->auxtrace.free = intel_pt_free;
	session->auxtrace = &pt->auxtrace;

	if (dump_trace)
		return 0;

	if (pt->have_sched_switch == 1) {
		pt->switch_evsel = intel_pt_find_sched_switch(session->evlist);
		if (!pt->switch_evsel) {
			pr_err("%s: missing sched_switch event\n", __func__);
			err = -EINVAL;
			goto err_delete_thread;
		}
	} else if (pt->have_sched_switch == 2 &&
		   !intel_pt_find_switch(session->evlist)) {
		pr_err("%s: missing context_switch attribute flag\n", __func__);
		err = -EINVAL;
		goto err_delete_thread;
	}

	if (session->itrace_synth_opts && session->itrace_synth_opts->set) {
		pt->synth_opts = *session->itrace_synth_opts;
	} else {
		itrace_synth_opts__set_default(&pt->synth_opts);
		if (use_browser != -1) {
			pt->synth_opts.branches = false;
			pt->synth_opts.callchain = true;
		}
		if (session->itrace_synth_opts)
			pt->synth_opts.thread_stack =
				session->itrace_synth_opts->thread_stack;
	}

	if (pt->synth_opts.log)
		intel_pt_log_enable();

	/* Maximum non-turbo ratio is TSC freq / 100 MHz */
	if (pt->tc.time_mult) {
		u64 tsc_freq = intel_pt_ns_to_ticks(pt, 1000000000);

		if (!pt->max_non_turbo_ratio)
			pt->max_non_turbo_ratio =
					(tsc_freq + 50000000) / 100000000;
		intel_pt_log("TSC frequency %"PRIu64"\n", tsc_freq);
		intel_pt_log("Maximum non-turbo ratio %u\n",
			     pt->max_non_turbo_ratio);
		pt->cbr2khz = tsc_freq / pt->max_non_turbo_ratio / 1000;
	}

	if (pt->synth_opts.calls)
		pt->branches_filter |= PERF_IP_FLAG_CALL | PERF_IP_FLAG_ASYNC |
				       PERF_IP_FLAG_TRACE_END;
	if (pt->synth_opts.returns)
		pt->branches_filter |= PERF_IP_FLAG_RETURN |
				       PERF_IP_FLAG_TRACE_BEGIN;

	if (pt->synth_opts.callchain && !symbol_conf.use_callchain) {
		symbol_conf.use_callchain = true;
		if (callchain_register_param(&callchain_param) < 0) {
			symbol_conf.use_callchain = false;
			pt->synth_opts.callchain = false;
		}
	}

	err = intel_pt_synth_events(pt, session);
	if (err)
		goto err_delete_thread;

	err = auxtrace_queues__process_index(&pt->queues, session);
	if (err)
		goto err_delete_thread;

	if (pt->queues.populated)
		pt->data_queued = true;

	if (pt->timeless_decoding)
		pr_debug2("Intel PT decoding without timestamps\n");

	return 0;

err_delete_thread:
	thread__zput(pt->unknown_thread);
err_free_queues:
	intel_pt_log_disable();
	auxtrace_queues__free(&pt->queues);
	session->auxtrace = NULL;
err_free:
	addr_filters__exit(&pt->filts);
	zfree(&pt->filter);
	free(pt);
	return err;
}