// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *     Atish Patra <atish.patra@wdc.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/uaccess.h>
#include <clocksource/timer-riscv.h>
#include <asm/csr.h>
#include <asm/delay.h>
#include <asm/kvm_vcpu_timer.h>
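
/*
 * Return the current guest time: the host cycle counter plus the
 * per-VM guest/host time delta.
 */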
static u64 kvm_riscv_current_cycles(struct kvm_guest_timer *gt)
{
	return get_cycles64() + gt->time_delta;
}
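
/*
 * Convert a target cycle value into a nanosecond delay relative to
 * the current guest time, using the cached clocksource mult/shift
 * pair. Interrupts are disabled so the snapshot of the current time
 * and the conversion stay consistent.
 */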
static u64 kvm_riscv_delta_cycles2ns(u64 cycles,
				     struct kvm_guest_timer *gt,
				     struct kvm_vcpu_timer *t)
{
	unsigned long flags;
	u64 cycles_now, cycles_delta, delta_ns;

	local_irq_save(flags);
	cycles_now = kvm_riscv_current_cycles(gt);
	if (cycles_now < cycles)
		cycles_delta = cycles - cycles_now;
	else
		cycles_delta = 0;
	delta_ns = (cycles_delta * gt->nsec_mult) >> gt->nsec_shift;
	local_irq_restore(flags);

	return delta_ns;
}
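
/*
 * hrtimer handler for the non-Sstc backend. If the timer fired before
 * the programmed guest cycle count was reached, push the expiry
 * forward; otherwise inject the VS-level timer interrupt.
 */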
static enum hrtimer_restart kvm_riscv_vcpu_hrtimer_expired(struct hrtimer *h)
{
	u64 delta_ns;
	struct kvm_vcpu_timer *t = container_of(h, struct kvm_vcpu_timer, hrt);
	struct kvm_vcpu *vcpu = container_of(t, struct kvm_vcpu, arch.timer);
	struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;

	if (kvm_riscv_current_cycles(gt) < t->next_cycles) {
		delta_ns = kvm_riscv_delta_cycles2ns(t->next_cycles, gt, t);
		hrtimer_forward_now(&t->hrt, ktime_set(0, delta_ns));
		return HRTIMER_RESTART;
	}

	t->next_set = false;
	kvm_riscv_vcpu_set_interrupt(vcpu, IRQ_VS_TIMER);

	return HRTIMER_NORESTART;
}
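
/* Cancel a pending hrtimer-based timer event, if one was armed. */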
static int kvm_riscv_vcpu_timer_cancel(struct kvm_vcpu_timer *t)
{
	if (!t->init_done || !t->next_set)
		return -EINVAL;

	hrtimer_cancel(&t->hrt);
	t->next_set = false;

	return 0;
}
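
/*
 * Sstc backend: program the next event directly into the vstimecmp
 * CSR (split into low/high halves on 32-bit hosts).
 */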
static int kvm_riscv_vcpu_update_vstimecmp(struct kvm_vcpu *vcpu, u64 ncycles)
{
#if defined(CONFIG_32BIT)
	csr_write(CSR_VSTIMECMP, ncycles & 0xFFFFFFFF);
	csr_write(CSR_VSTIMECMPH, ncycles >> 32);
#else
	csr_write(CSR_VSTIMECMP, ncycles);
#endif
	return 0;
}
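
/*
 * Non-Sstc backend: clear any stale timer interrupt and arm a host
 * hrtimer to fire when the requested guest cycle count is reached.
 */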
static int kvm_riscv_vcpu_update_hrtimer(struct kvm_vcpu *vcpu, u64 ncycles)
{
	struct kvm_vcpu_timer *t = &vcpu->arch.timer;
	struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;
	u64 delta_ns;

	if (!t->init_done)
		return -EINVAL;

	kvm_riscv_vcpu_unset_interrupt(vcpu, IRQ_VS_TIMER);

	delta_ns = kvm_riscv_delta_cycles2ns(ncycles, gt, t);
	t->next_cycles = ncycles;
	hrtimer_start(&t->hrt, ktime_set(0, delta_ns), HRTIMER_MODE_REL);
	t->next_set = true;

	return 0;
}
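
/* Program the next timer event via the selected backend. */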
int kvm_riscv_vcpu_timer_next_event(struct kvm_vcpu *vcpu, u64 ncycles)
{
	struct kvm_vcpu_timer *t = &vcpu->arch.timer;

	return t->timer_next_event(vcpu, ncycles);
}
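
/*
 * hrtimer handler used on Sstc-capable hosts while a vCPU is blocked.
 * The interrupt itself is delivered through vstimecmp once the vCPU
 * runs again, so on expiry the handler only needs to wake the vCPU.
 */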
static enum hrtimer_restart kvm_riscv_vcpu_vstimer_expired(struct hrtimer *h)
{
	u64 delta_ns;
	struct kvm_vcpu_timer *t = container_of(h, struct kvm_vcpu_timer, hrt);
	struct kvm_vcpu *vcpu = container_of(t, struct kvm_vcpu, arch.timer);
	struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;

	if (kvm_riscv_current_cycles(gt) < t->next_cycles) {
		delta_ns = kvm_riscv_delta_cycles2ns(t->next_cycles, gt, t);
		hrtimer_forward_now(&t->hrt, ktime_set(0, delta_ns));
		return HRTIMER_RESTART;
	}

	t->next_set = false;
	kvm_vcpu_kick(vcpu);

	return HRTIMER_NORESTART;
}
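
/*
 * A timer event is pending if the programmed expiry has already
 * passed or a VS-level timer interrupt is queued.
 */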
bool kvm_riscv_vcpu_timer_pending(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_timer *t = &vcpu->arch.timer;
	struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;

	if (!kvm_riscv_delta_cycles2ns(t->next_cycles, gt, t) ||
	    kvm_riscv_vcpu_has_interrupts(vcpu, 1UL << IRQ_VS_TIMER))
		return true;
	else
		return false;
}
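
/*
 * When a vCPU blocks with Sstc enabled, vstimecmp cannot wake it, so
 * fall back to a host hrtimer that fires at the programmed expiry.
 */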
static void kvm_riscv_vcpu_timer_blocking(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_timer *t = &vcpu->arch.timer;
	struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;
	u64 delta_ns;

	if (!t->init_done)
		return;

	delta_ns = kvm_riscv_delta_cycles2ns(t->next_cycles, gt, t);
	hrtimer_start(&t->hrt, ktime_set(0, delta_ns), HRTIMER_MODE_REL);
	t->next_set = true;
}
static void kvm_riscv_vcpu_timer_unblocking(struct kvm_vcpu *vcpu)
{
	kvm_riscv_vcpu_timer_cancel(&vcpu->arch.timer);
}
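
/* Read one KVM_REG_RISCV_TIMER register on behalf of userspace. */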
int kvm_riscv_vcpu_get_reg_timer(struct kvm_vcpu *vcpu,
				 const struct kvm_one_reg *reg)
{
	struct kvm_vcpu_timer *t = &vcpu->arch.timer;
	struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;
	u64 __user *uaddr = (u64 __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_TIMER);
	u64 reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(u64))
		return -EINVAL;
	if (reg_num >= sizeof(struct kvm_riscv_timer) / sizeof(u64))
		return -ENOENT;

	switch (reg_num) {
	case KVM_REG_RISCV_TIMER_REG(frequency):
		reg_val = riscv_timebase;
		break;
	case KVM_REG_RISCV_TIMER_REG(time):
		reg_val = kvm_riscv_current_cycles(gt);
		break;
	case KVM_REG_RISCV_TIMER_REG(compare):
		reg_val = t->next_cycles;
		break;
	case KVM_REG_RISCV_TIMER_REG(state):
		reg_val = (t->next_set) ? KVM_RISCV_TIMER_STATE_ON :
					  KVM_RISCV_TIMER_STATE_OFF;
		break;
	default:
		return -ENOENT;
	}

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}
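
/* Write one KVM_REG_RISCV_TIMER register on behalf of userspace. */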
int kvm_riscv_vcpu_set_reg_timer(struct kvm_vcpu *vcpu,
				 const struct kvm_one_reg *reg)
{
	struct kvm_vcpu_timer *t = &vcpu->arch.timer;
	struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;
	u64 __user *uaddr = (u64 __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_TIMER);
	u64 reg_val;
	int ret = 0;

	if (KVM_REG_SIZE(reg->id) != sizeof(u64))
		return -EINVAL;
	if (reg_num >= sizeof(struct kvm_riscv_timer) / sizeof(u64))
		return -ENOENT;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	switch (reg_num) {
	case KVM_REG_RISCV_TIMER_REG(frequency):
		if (reg_val != riscv_timebase)
			return -EINVAL;
		break;
	case KVM_REG_RISCV_TIMER_REG(time):
		gt->time_delta = reg_val - get_cycles64();
		break;
	case KVM_REG_RISCV_TIMER_REG(compare):
		t->next_cycles = reg_val;
		break;
	case KVM_REG_RISCV_TIMER_REG(state):
		if (reg_val == KVM_RISCV_TIMER_STATE_ON)
			ret = kvm_riscv_vcpu_timer_next_event(vcpu, reg_val);
		else
			ret = kvm_riscv_vcpu_timer_cancel(t);
		break;
	default:
		ret = -ENOENT;
		break;
	}

	return ret;
}
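
/* One-time per-vCPU timer setup; selects the Sstc or hrtimer backend. */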
int kvm_riscv_vcpu_timer_init(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_timer *t = &vcpu->arch.timer;

	if (t->init_done)
		return -EINVAL;

	hrtimer_init(&t->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	t->init_done = true;
	t->next_set = false;

	/* Enable sstc for every vcpu if available in hardware */
	if (riscv_isa_extension_available(NULL, SSTC)) {
		t->sstc_enabled = true;
		t->hrt.function = kvm_riscv_vcpu_vstimer_expired;
		t->timer_next_event = kvm_riscv_vcpu_update_vstimecmp;
	} else {
		t->sstc_enabled = false;
		t->hrt.function = kvm_riscv_vcpu_hrtimer_expired;
		t->timer_next_event = kvm_riscv_vcpu_update_hrtimer;
	}

	return 0;
}
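
/* Tear down the per-vCPU timer; any pending event is cancelled. */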
int kvm_riscv_vcpu_timer_deinit(struct kvm_vcpu *vcpu)
{
	int ret;

	ret = kvm_riscv_vcpu_timer_cancel(&vcpu->arch.timer);
	vcpu->arch.timer.init_done = false;

	return ret;
}
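
/* Reset to "no event programmed": -1ULL is the latest possible expiry. */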
int kvm_riscv_vcpu_timer_reset(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_timer *t = &vcpu->arch.timer;

	t->next_cycles = -1ULL;
	return kvm_riscv_vcpu_timer_cancel(&vcpu->arch.timer);
}
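
/*
 * Expose the per-VM time delta to the guest through htimedelta so
 * guest reads of the time CSR are offset from host time.
 */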
static void kvm_riscv_vcpu_update_timedelta(struct kvm_vcpu *vcpu)
{
	struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;

#if defined(CONFIG_32BIT)
	csr_write(CSR_HTIMEDELTA, (u32)(gt->time_delta));
	csr_write(CSR_HTIMEDELTAH, (u32)(gt->time_delta >> 32));
#else
	csr_write(CSR_HTIMEDELTA, gt->time_delta);
#endif
}
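
/* Restore timer CSR state when the vCPU is loaded on a host CPU. */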
void kvm_riscv_vcpu_timer_restore(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_timer *t = &vcpu->arch.timer;

	kvm_riscv_vcpu_update_timedelta(vcpu);

	if (!t->sstc_enabled)
		return;

#if defined(CONFIG_32BIT)
	csr_write(CSR_VSTIMECMP, (u32)t->next_cycles);
	csr_write(CSR_VSTIMECMPH, (u32)(t->next_cycles >> 32));
#else
	csr_write(CSR_VSTIMECMP, t->next_cycles);
#endif

	/* timer should be enabled for the remaining operations */
	if (unlikely(!t->init_done))
		return;

	kvm_riscv_vcpu_timer_unblocking(vcpu);
}
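
/*
 * Capture the guest's vstimecmp on VM exit, since with Sstc enabled
 * the guest may have written it directly.
 */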
void kvm_riscv_vcpu_timer_sync(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_timer *t = &vcpu->arch.timer;

	if (!t->sstc_enabled)
		return;

#if defined(CONFIG_32BIT)
	t->next_cycles = csr_read(CSR_VSTIMECMP);
	t->next_cycles |= (u64)csr_read(CSR_VSTIMECMPH) << 32;
#else
	t->next_cycles = csr_read(CSR_VSTIMECMP);
#endif
}
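
/*
 * Called when the vCPU is put; if it is blocking, hand timekeeping
 * over to the host hrtimer via kvm_riscv_vcpu_timer_blocking().
 */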
void kvm_riscv_vcpu_timer_save(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_timer *t = &vcpu->arch.timer;

	if (!t->sstc_enabled)
		return;

	/*
	 * The vstimecmp CSRs are saved by kvm_riscv_vcpu_timer_sync()
	 * upon every VM exit so no need to save here.
	 */

	/* timer should be enabled for the remaining operations */
	if (unlikely(!t->init_done))
		return;

	if (kvm_vcpu_is_blocking(vcpu))
		kvm_riscv_vcpu_timer_blocking(vcpu);
}
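
/*
 * Per-VM init: cache the clocksource mult/shift pair and start guest
 * time at zero by setting time_delta to the negated host cycle count.
 */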
void kvm_riscv_guest_timer_init(struct kvm *kvm)
{
	struct kvm_guest_timer *gt = &kvm->arch.timer;

	riscv_cs_get_mult_shift(&gt->nsec_mult, &gt->nsec_shift);
	gt->time_delta = -get_cycles64();
}