From c8f9eb0d6ebaa768c9f6eb2ee21b01d74230934d Mon Sep 17 00:00:00 2001
From: Peter Zijlstra <peterz@infradead.org>
Date: Thu, 16 Jul 2020 13:11:29 +0800
Subject: [PATCH] arm64: perf: Add cap_user_time_short

This completes the ARM64 cap_user_time support.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Leo Yan <leo.yan@linaro.org>
Link: https://lore.kernel.org/r/20200716051130.4359-7-leo.yan@linaro.org
Signed-off-by: Will Deacon <will@kernel.org>
---
 arch/arm64/kernel/perf_event.c | 12 +++++++-----
 1 file changed, 7 insertions(+), 5 deletions(-)

diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index 674edc7..fdb6029 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -1177,6 +1177,7 @@ void arch_perf_update_userpage(struct perf_event *event,
 
 	userpg->cap_user_time = 0;
 	userpg->cap_user_time_zero = 0;
+	userpg->cap_user_time_short = 0;
 
 	do {
 		rd = sched_clock_read_begin(&seq);
@@ -1187,13 +1188,13 @@ void arch_perf_update_userpage(struct perf_event *event,
 		userpg->time_mult = rd->mult;
 		userpg->time_shift = rd->shift;
 		userpg->time_zero = rd->epoch_ns;
+		userpg->time_cycles = rd->epoch_cyc;
+		userpg->time_mask = rd->sched_clock_mask;
 
 		/*
-		 * This isn't strictly correct, the ARM64 counter can be
-		 * 'short' and then we get funnies when it wraps. The correct
-		 * thing would be to extend the perf ABI with a cycle and mask
-		 * value, but because wrapping on ARM64 is very rare in
-		 * practise this 'works'.
+		 * Subtract the cycle base, such that software that
+		 * doesn't know about cap_user_time_short still 'works'
+		 * assuming no wraps.
 		 */
 		ns = mul_u64_u32_shr(rd->epoch_cyc, rd->mult, rd->shift);
 		userpg->time_zero -= ns;
@@ -1219,4 +1220,5 @@ void arch_perf_update_userpage(struct perf_event *event,
 	 */
 	userpg->cap_user_time = 1;
 	userpg->cap_user_time_zero = 1;
+	userpg->cap_user_time_short = 1;
 }
-- 
2.7.4
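
For context (not part of the patch): below is a minimal sketch of how
user-space can consume the fields exported above, following the cycle/ns
conversion documented in include/uapi/linux/perf_event.h. cyc_to_ns() is a
hypothetical helper name; it assumes headers carrying the
cap_user_time_short ABI (v5.9+), and the seqcount retry loop against
userpg->lock, as well as the raw counter read itself, are elided for
brevity.

#include <stdint.h>
#include <linux/perf_event.h>

static uint64_t cyc_to_ns(const volatile struct perf_event_mmap_page *pc,
			  uint64_t cyc)
{
	uint64_t quot, rem;

	/*
	 * With a short counter, re-extend the value around the exported
	 * cycle base so that wraps are handled correctly.
	 */
	if (pc->cap_user_time_short)
		cyc = pc->time_cycles +
		      ((cyc - pc->time_cycles) & pc->time_mask);

	/* time = time_zero + mul_u64_u32_shr(cyc, time_mult, time_shift) */
	quot = cyc >> pc->time_shift;
	rem  = cyc & (((uint64_t)1 << pc->time_shift) - 1);

	return pc->time_zero + quot * pc->time_mult +
	       ((rem * pc->time_mult) >> pc->time_shift);
}

Software unaware of cap_user_time_short skips the re-extension step and
still gets correct results as long as the counter has not wrapped, which
is exactly the compatibility the in-kernel comment above describes.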