uint64_t ctc_delta;
uint64_t cycle_cnt;
uint64_t cyc_ref_timestamp;
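+ /* First timestamp, provided via params or intel_pt_set_first_timestamp() */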
+ uint64_t first_timestamp;
uint32_t last_mtc;
uint32_t tsc_ctc_ratio_n;
uint32_t tsc_ctc_ratio_d;
decoder->branch_enable = params->branch_enable;
decoder->hop = params->quick >= 1;
decoder->leap = params->quick >= 2;
+ decoder->first_timestamp = params->first_timestamp;
decoder->flags = params->flags;
return decoder;
}
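+/* Allow the first timestamp to be set (or updated) after the decoder is created */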
+void intel_pt_set_first_timestamp(struct intel_pt_decoder *decoder,
+ uint64_t first_timestamp)
+{
+ decoder->first_timestamp = first_timestamp;
+}
+
static void intel_pt_pop_blk(struct intel_pt_stack *stack)
{
struct intel_pt_blk *blk = stack->blk;
void *data;
bool return_compression;
bool branch_enable;
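+ /* First timestamp, if already known when the decoder is created (else zero) */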
+ uint64_t first_timestamp;
uint64_t ctl;
uint64_t period;
enum intel_pt_period_type period_type;
int intel_pt__strerror(int code, char *buf, size_t buflen);
+void intel_pt_set_first_timestamp(struct intel_pt_decoder *decoder,
+ uint64_t first_timestamp);
+
#endif
u64 kernel_start;
u64 switch_ip;
u64 ptss_ip;
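+ /* Earliest timestamp seen while processing events; zero until known */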
+ u64 first_timestamp;
struct perf_tsc_conversion tc;
bool cap_user_time_zero;
params.tsc_ctc_ratio_n = pt->tsc_ctc_ratio_n;
params.tsc_ctc_ratio_d = pt->tsc_ctc_ratio_d;
params.quick = pt->synth_opts.quick;
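+ /* May still be zero here; updated later via intel_pt_set_first_timestamp() */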
+ params.first_timestamp = pt->first_timestamp;
if (pt->filts.cnt > 0)
params.pgd_ip = intel_pt_pgd_ip;
free(ptq);
}
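+/*
+ * The first timestamp is known only once the first timestamped event is
+ * processed, so record it on struct intel_pt and pass it on to any per-queue
+ * decoders that already exist.
+ */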
+static void intel_pt_first_timestamp(struct intel_pt *pt, u64 timestamp)
+{
+ unsigned int i;
+
+ pt->first_timestamp = timestamp;
+
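+ /* Also update decoders created before the first timestamp was known */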
+ for (i = 0; i < pt->queues.nr_queues; i++) {
+ struct auxtrace_queue *queue = &pt->queues.queue_array[i];
+ struct intel_pt_queue *ptq = queue->priv;
+
+ if (ptq && ptq->decoder)
+ intel_pt_set_first_timestamp(ptq->decoder, timestamp);
+ }
+}
+
static void intel_pt_set_pid_tid_cpu(struct intel_pt *pt,
struct auxtrace_queue *queue)
{
sample->time);
}
} else if (timestamp) {
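+ /* First timestamped event: record the timestamp and update the decoders */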
+ if (!pt->first_timestamp)
+ intel_pt_first_timestamp(pt, timestamp);
err = intel_pt_process_queues(pt, timestamp);
}
if (err)