// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors: Anup Patel <anup.patel@wdc.com>
 */

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/kvm_host.h>
#include <asm/csr.h>
#include <asm/hwcap.h>

const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
	KVM_GENERIC_VCPU_STATS(),
	STATS_DESC_COUNTER(VCPU, ecall_exit_stat),
	STATS_DESC_COUNTER(VCPU, wfi_exit_stat),
	STATS_DESC_COUNTER(VCPU, mmio_exit_user),
	STATS_DESC_COUNTER(VCPU, mmio_exit_kernel),
	STATS_DESC_COUNTER(VCPU, exits)
};

const struct kvm_stats_header kvm_vcpu_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vcpu_stats_desc),
};
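
/*
 * Host ISA extensions that KVM allows a guest to use. The ISA value
 * visible to the guest is always masked against both the host ISA
 * (riscv_isa_extension_base()) and this set, so userspace can never
 * enable an extension the host lacks or KVM does not support.
 */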
#define KVM_RISCV_ISA_ALLOWED	(riscv_isa_extension_mask(a) | \
				 riscv_isa_extension_mask(c) | \
				 riscv_isa_extension_mask(d) | \
				 riscv_isa_extension_mask(f) | \
				 riscv_isa_extension_mask(i) | \
				 riscv_isa_extension_mask(m) | \
				 riscv_isa_extension_mask(s) | \
				 riscv_isa_extension_mask(u))

static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
	struct kvm_vcpu_csr *reset_csr = &vcpu->arch.guest_reset_csr;
	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
	struct kvm_cpu_context *reset_cntx = &vcpu->arch.guest_reset_context;
	bool loaded;

	/*
	 * The preemption should be disabled here because it races with
	 * kvm_sched_out/kvm_sched_in (called from preempt notifiers) which
	 * also calls vcpu_load/put.
	 */
	get_cpu();
	loaded = (vcpu->cpu != -1);
	if (loaded)
		kvm_arch_vcpu_put(vcpu);

	memcpy(csr, reset_csr, sizeof(*csr));
	memcpy(cntx, reset_cntx, sizeof(*cntx));

	kvm_riscv_vcpu_fp_reset(vcpu);
	kvm_riscv_vcpu_timer_reset(vcpu);

	WRITE_ONCE(vcpu->arch.irqs_pending, 0);
	WRITE_ONCE(vcpu->arch.irqs_pending_mask, 0);

	/* Reset the guest CSRs for hotplug usecase */
	if (loaded)
		kvm_arch_vcpu_load(vcpu, smp_processor_id());
	put_cpu();
}

int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
{
	return 0;
}

int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *cntx;

	/* Mark this VCPU never ran */
	vcpu->arch.ran_atleast_once = false;
	vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO;

	/* Setup ISA features available to VCPU */
	vcpu->arch.isa = riscv_isa_extension_base(NULL) & KVM_RISCV_ISA_ALLOWED;

	/* Setup reset state of shadow SSTATUS and HSTATUS CSRs */
	cntx = &vcpu->arch.guest_reset_context;
	cntx->sstatus = SR_SPP | SR_SPIE;
	cntx->hstatus = 0;
	cntx->hstatus |= HSTATUS_VTW;
	cntx->hstatus |= HSTATUS_SPVP;
	cntx->hstatus |= HSTATUS_SPV;

	/* Setup VCPU timer */
	kvm_riscv_vcpu_timer_init(vcpu);

	/* Reset VCPU */
	kvm_riscv_reset_vcpu(vcpu);

	return 0;
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	/*
	 * vcpu with id 0 is the designated boot cpu.
	 * Keep all vcpus with non-zero id in power-off state so that
	 * they can be brought up using SBI HSM extension.
	 */
	if (vcpu->vcpu_idx != 0)
		kvm_riscv_vcpu_power_off(vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	/* Cleanup VCPU timer */
	kvm_riscv_vcpu_timer_deinit(vcpu);

	/* Free unused pages pre-allocated for Stage2 page table mappings */
	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvm_riscv_vcpu_has_interrupts(vcpu, 1UL << IRQ_VS_TIMER);
}

void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
{
}

void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
{
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return (kvm_riscv_vcpu_has_interrupts(vcpu, -1UL) &&
		!vcpu->arch.power_off && !vcpu->arch.pause);
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
}

bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
	return (vcpu->arch.guest_context.sstatus & SR_SPP) ? true : false;
}

vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

static int kvm_riscv_vcpu_get_reg_config(struct kvm_vcpu *vcpu,
					 const struct kvm_one_reg *reg)
{
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CONFIG);
	unsigned long reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	switch (reg_num) {
	case KVM_REG_RISCV_CONFIG_REG(isa):
		reg_val = vcpu->arch.isa;
		break;
	default:
		return -EINVAL;
	}

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

static int kvm_riscv_vcpu_set_reg_config(struct kvm_vcpu *vcpu,
					 const struct kvm_one_reg *reg)
{
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CONFIG);
	unsigned long reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	switch (reg_num) {
	case KVM_REG_RISCV_CONFIG_REG(isa):
		if (!vcpu->arch.ran_atleast_once) {
			vcpu->arch.isa = reg_val;
			vcpu->arch.isa &= riscv_isa_extension_base(NULL);
			vcpu->arch.isa &= KVM_RISCV_ISA_ALLOWED;
			kvm_riscv_vcpu_fp_reset(vcpu);
		} else {
			return -EOPNOTSUPP;
		}
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
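
/*
 * The CORE register space indexes into struct kvm_riscv_core, which mirrors
 * the start of struct kvm_cpu_context: index 0 is the pc (backed by sepc),
 * indices for ra through t6 map to the same offsets in kvm_cpu_context, and
 * the final index is the virtual privilege mode derived from sstatus.SPP.
 */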
static int kvm_riscv_vcpu_get_reg_core(struct kvm_vcpu *vcpu,
				       const struct kvm_one_reg *reg)
{
	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CORE);
	unsigned long reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;
	if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long))
		return -EINVAL;

	if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc))
		reg_val = cntx->sepc;
	else if (KVM_REG_RISCV_CORE_REG(regs.pc) < reg_num &&
		 reg_num <= KVM_REG_RISCV_CORE_REG(regs.t6))
		reg_val = ((unsigned long *)cntx)[reg_num];
	else if (reg_num == KVM_REG_RISCV_CORE_REG(mode))
		reg_val = (cntx->sstatus & SR_SPP) ?
				KVM_RISCV_MODE_S : KVM_RISCV_MODE_U;
	else
		return -EINVAL;

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

static int kvm_riscv_vcpu_set_reg_core(struct kvm_vcpu *vcpu,
				       const struct kvm_one_reg *reg)
{
	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CORE);
	unsigned long reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;
	if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long))
		return -EINVAL;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc))
		cntx->sepc = reg_val;
	else if (KVM_REG_RISCV_CORE_REG(regs.pc) < reg_num &&
		 reg_num <= KVM_REG_RISCV_CORE_REG(regs.t6))
		((unsigned long *)cntx)[reg_num] = reg_val;
	else if (reg_num == KVM_REG_RISCV_CORE_REG(mode)) {
		if (reg_val == KVM_RISCV_MODE_S)
			cntx->sstatus |= SR_SPP;
		else
			cntx->sstatus &= ~SR_SPP;
	} else
		return -EINVAL;

	return 0;
}

static int kvm_riscv_vcpu_get_reg_csr(struct kvm_vcpu *vcpu,
				      const struct kvm_one_reg *reg)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CSR);
	unsigned long reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;
	if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
		return -EINVAL;

	if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
		kvm_riscv_vcpu_flush_interrupts(vcpu);
		reg_val = (csr->hvip >> VSIP_TO_HVIP_SHIFT) & VSIP_VALID_MASK;
	} else
		reg_val = ((unsigned long *)csr)[reg_num];

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

static int kvm_riscv_vcpu_set_reg_csr(struct kvm_vcpu *vcpu,
				      const struct kvm_one_reg *reg)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CSR);
	unsigned long reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;
	if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
		return -EINVAL;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
		reg_val &= VSIP_VALID_MASK;
		reg_val <<= VSIP_TO_HVIP_SHIFT;
	}

	((unsigned long *)csr)[reg_num] = reg_val;

	if (reg_num == KVM_REG_RISCV_CSR_REG(sip))
		WRITE_ONCE(vcpu->arch.irqs_pending_mask, 0);

	return 0;
}

static int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu,
				  const struct kvm_one_reg *reg)
{
	if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CONFIG)
		return kvm_riscv_vcpu_set_reg_config(vcpu, reg);
	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CORE)
		return kvm_riscv_vcpu_set_reg_core(vcpu, reg);
	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CSR)
		return kvm_riscv_vcpu_set_reg_csr(vcpu, reg);
	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_TIMER)
		return kvm_riscv_vcpu_set_reg_timer(vcpu, reg);
	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_FP_F)
		return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_F);
	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_FP_D)
		return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_D);

	return -EINVAL;
}

static int kvm_riscv_vcpu_get_reg(struct kvm_vcpu *vcpu,
				  const struct kvm_one_reg *reg)
{
	if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CONFIG)
		return kvm_riscv_vcpu_get_reg_config(vcpu, reg);
	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CORE)
		return kvm_riscv_vcpu_get_reg_core(vcpu, reg);
	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CSR)
		return kvm_riscv_vcpu_get_reg_csr(vcpu, reg);
	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_TIMER)
		return kvm_riscv_vcpu_get_reg_timer(vcpu, reg);
	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_FP_F)
		return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_F);
	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_FP_D)
		return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_D);

	return -EINVAL;
}
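
/*
 * KVM_INTERRUPT is handled on the async ioctl path (without taking the
 * vcpu mutex) so userspace can assert or deassert the guest external
 * interrupt line (IRQ_VS_EXT) at any time.
 */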
long kvm_arch_vcpu_async_ioctl(struct file *filp,
			       unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;

	if (ioctl == KVM_INTERRUPT) {
		struct kvm_interrupt irq;

		if (copy_from_user(&irq, argp, sizeof(irq)))
			return -EFAULT;

		if (irq.irq == KVM_INTERRUPT_SET)
			return kvm_riscv_vcpu_set_interrupt(vcpu, IRQ_VS_EXT);
		else
			return kvm_riscv_vcpu_unset_interrupt(vcpu, IRQ_VS_EXT);
	}

	return -ENOIOCTLCMD;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r = -EINVAL;

	switch (ioctl) {
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;

		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;

		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_riscv_vcpu_set_reg(vcpu, &reg);
		else
			r = kvm_riscv_vcpu_get_reg(vcpu, &reg);
		break;
	}
	default:
		break;
	}

	return r;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	return -EINVAL;
}
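
/*
 * Fold any bits recorded in irqs_pending (and marked dirty in
 * irqs_pending_mask) into the shadow HVIP value, which is written to the
 * HVIP CSR before the next guest entry.
 */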
void kvm_riscv_vcpu_flush_interrupts(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
	unsigned long mask, val;

	if (READ_ONCE(vcpu->arch.irqs_pending_mask)) {
		mask = xchg_acquire(&vcpu->arch.irqs_pending_mask, 0);
		val = READ_ONCE(vcpu->arch.irqs_pending) & mask;

		csr->hvip &= ~mask;
		csr->hvip |= val;
	}
}

void kvm_riscv_vcpu_sync_interrupts(struct kvm_vcpu *vcpu)
{
	unsigned long hvip;
	struct kvm_vcpu_arch *v = &vcpu->arch;
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

	/* Read current HVIP and VSIE CSRs */
	csr->vsie = csr_read(CSR_VSIE);

	/* Sync up HVIP.VSSIP bit changes done by the Guest */
	hvip = csr_read(CSR_HVIP);
	if ((csr->hvip ^ hvip) & (1UL << IRQ_VS_SOFT)) {
		if (hvip & (1UL << IRQ_VS_SOFT)) {
			if (!test_and_set_bit(IRQ_VS_SOFT,
					      &v->irqs_pending_mask))
				set_bit(IRQ_VS_SOFT, &v->irqs_pending);
		} else {
			if (!test_and_set_bit(IRQ_VS_SOFT,
					      &v->irqs_pending_mask))
				clear_bit(IRQ_VS_SOFT, &v->irqs_pending);
		}
	}
}
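
/*
 * Interrupt injection protocol: irqs_pending holds the requested level of
 * each VS-level interrupt and irqs_pending_mask marks which bits have
 * changed; kvm_riscv_vcpu_flush_interrupts() later propagates the changes
 * into the shadow HVIP before guest entry.
 */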
int kvm_riscv_vcpu_set_interrupt(struct kvm_vcpu *vcpu, unsigned int irq)
{
	if (irq != IRQ_VS_SOFT &&
	    irq != IRQ_VS_TIMER &&
	    irq != IRQ_VS_EXT)
		return -EINVAL;

	set_bit(irq, &vcpu->arch.irqs_pending);
	smp_mb__before_atomic();
	set_bit(irq, &vcpu->arch.irqs_pending_mask);

	kvm_vcpu_kick(vcpu);

	return 0;
}

int kvm_riscv_vcpu_unset_interrupt(struct kvm_vcpu *vcpu, unsigned int irq)
{
	if (irq != IRQ_VS_SOFT &&
	    irq != IRQ_VS_TIMER &&
	    irq != IRQ_VS_EXT)
		return -EINVAL;

	clear_bit(irq, &vcpu->arch.irqs_pending);
	smp_mb__before_atomic();
	set_bit(irq, &vcpu->arch.irqs_pending_mask);

	return 0;
}

bool kvm_riscv_vcpu_has_interrupts(struct kvm_vcpu *vcpu, unsigned long mask)
{
	unsigned long ie = ((vcpu->arch.guest_csr.vsie & VSIP_VALID_MASK)
			    << VSIP_TO_HVIP_SHIFT) & mask;

	return (READ_ONCE(vcpu->arch.irqs_pending) & ie) ? true : false;
}

void kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu)
{
	vcpu->arch.power_off = true;
	kvm_make_request(KVM_REQ_SLEEP, vcpu);
	kvm_vcpu_kick(vcpu);
}

void kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu)
{
	vcpu->arch.power_off = false;
	kvm_vcpu_wake_up(vcpu);
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	if (vcpu->arch.power_off)
		mp_state->mp_state = KVM_MP_STATE_STOPPED;
	else
		mp_state->mp_state = KVM_MP_STATE_RUNNABLE;

	return 0;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	int ret = 0;

	switch (mp_state->mp_state) {
	case KVM_MP_STATE_RUNNABLE:
		vcpu->arch.power_off = false;
		break;
	case KVM_MP_STATE_STOPPED:
		kvm_riscv_vcpu_power_off(vcpu);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	/* TODO: To be implemented later. */
	return -EINVAL;
}
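
/*
 * Load guest VS-level CSR state and the stage2 translation onto the current
 * host CPU, then restore the guest timer and FP context. Called from the
 * generic vcpu_load() path and from vCPU reset.
 */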
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

	csr_write(CSR_VSSTATUS, csr->vsstatus);
	csr_write(CSR_VSIE, csr->vsie);
	csr_write(CSR_VSTVEC, csr->vstvec);
	csr_write(CSR_VSSCRATCH, csr->vsscratch);
	csr_write(CSR_VSEPC, csr->vsepc);
	csr_write(CSR_VSCAUSE, csr->vscause);
	csr_write(CSR_VSTVAL, csr->vstval);
	csr_write(CSR_HVIP, csr->hvip);
	csr_write(CSR_VSATP, csr->vsatp);

	kvm_riscv_stage2_update_hgatp(vcpu);

	kvm_riscv_vcpu_timer_restore(vcpu);

	kvm_riscv_vcpu_host_fp_save(&vcpu->arch.host_context);
	kvm_riscv_vcpu_guest_fp_restore(&vcpu->arch.guest_context,
					vcpu->arch.isa);

	vcpu->cpu = cpu;
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

	vcpu->cpu = -1;

	kvm_riscv_vcpu_guest_fp_save(&vcpu->arch.guest_context,
				     vcpu->arch.isa);
	kvm_riscv_vcpu_host_fp_restore(&vcpu->arch.host_context);

	csr_write(CSR_HGATP, 0);

	csr->vsstatus = csr_read(CSR_VSSTATUS);
	csr->vsie = csr_read(CSR_VSIE);
	csr->vstvec = csr_read(CSR_VSTVEC);
	csr->vsscratch = csr_read(CSR_VSSCRATCH);
	csr->vsepc = csr_read(CSR_VSEPC);
	csr->vscause = csr_read(CSR_VSCAUSE);
	csr->vstval = csr_read(CSR_VSTVAL);
	csr->hvip = csr_read(CSR_HVIP);
	csr->vsatp = csr_read(CSR_VSATP);
}
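
/*
 * Handle deferred vCPU requests (sleep, reset, stage2 HGATP update, TLB
 * flush) raised by other threads before the next guest entry.
 */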
static void kvm_riscv_check_vcpu_requests(struct kvm_vcpu *vcpu)
{
	struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu);

	if (kvm_request_pending(vcpu)) {
		if (kvm_check_request(KVM_REQ_SLEEP, vcpu)) {
			rcuwait_wait_event(wait,
				(!vcpu->arch.power_off) && (!vcpu->arch.pause),
				TASK_INTERRUPTIBLE);

			if (vcpu->arch.power_off || vcpu->arch.pause) {
				/*
				 * Awaken to handle a signal, request to
				 * sleep again later.
				 */
				kvm_make_request(KVM_REQ_SLEEP, vcpu);
			}
		}

		if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu))
			kvm_riscv_reset_vcpu(vcpu);

		if (kvm_check_request(KVM_REQ_UPDATE_HGATP, vcpu))
			kvm_riscv_stage2_update_hgatp(vcpu);

		if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
			__kvm_riscv_hfence_gvma_all();
	}
}

static void kvm_riscv_update_hvip(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

	csr_write(CSR_HVIP, csr->hvip);
}

/*
 * Actually run the vCPU, entering an RCU extended quiescent state (EQS) while
 * the vCPU is running.
 *
 * This must be noinstr as instrumentation may make use of RCU, and this is not
 * safe during the EQS.
 */
static void noinstr kvm_riscv_vcpu_enter_exit(struct kvm_vcpu *vcpu)
{
	guest_state_enter_irqoff();
	__kvm_riscv_switch_to(&vcpu->arch);
	guest_state_exit_irqoff();
}
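
/*
 * The main vCPU run loop: finish any pending MMIO/SBI exit from userspace,
 * then repeatedly enter the guest with interrupts disabled until an exit
 * that must be handled in userspace (or a pending signal) breaks the loop.
 */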
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
{
	int ret;
	struct kvm_cpu_trap trap;
	struct kvm_run *run = vcpu->run;

	/* Mark this VCPU ran at least once */
	vcpu->arch.ran_atleast_once = true;

	vcpu->arch.srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	/* Process MMIO value returned from user-space */
	if (run->exit_reason == KVM_EXIT_MMIO) {
		ret = kvm_riscv_vcpu_mmio_return(vcpu, vcpu->run);
		if (ret) {
			srcu_read_unlock(&vcpu->kvm->srcu, vcpu->arch.srcu_idx);
			return ret;
		}
	}

	/* Process SBI value returned from user-space */
	if (run->exit_reason == KVM_EXIT_RISCV_SBI) {
		ret = kvm_riscv_vcpu_sbi_return(vcpu, vcpu->run);
		if (ret) {
			srcu_read_unlock(&vcpu->kvm->srcu, vcpu->arch.srcu_idx);
			return ret;
		}
	}

	if (run->immediate_exit) {
		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->arch.srcu_idx);
		return -EINTR;
	}

	vcpu_load(vcpu);

	kvm_sigset_activate(vcpu);

	ret = 1;
	run->exit_reason = KVM_EXIT_UNKNOWN;
	while (ret > 0) {
		/* Check conditions before entering the guest */
		cond_resched();

		kvm_riscv_stage2_vmid_update(vcpu);

		kvm_riscv_check_vcpu_requests(vcpu);

		preempt_disable();

		local_irq_disable();

		/*
		 * Exit if we have a signal pending so that we can deliver
		 * the signal to user space.
		 */
		if (signal_pending(current)) {
			ret = -EINTR;
			run->exit_reason = KVM_EXIT_INTR;
		}

		/*
		 * Ensure we set mode to IN_GUEST_MODE after we disable
		 * interrupts and before the final VCPU requests check.
		 * See the comment in kvm_vcpu_exiting_guest_mode() and
		 * Documentation/virt/kvm/vcpu-requests.rst
		 */
		vcpu->mode = IN_GUEST_MODE;

		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->arch.srcu_idx);
		smp_mb__after_srcu_read_unlock();

		/*
		 * We might have got VCPU interrupts updated asynchronously
		 * so update it in HW.
		 */
		kvm_riscv_vcpu_flush_interrupts(vcpu);

		/* Update HVIP CSR for current CPU */
		kvm_riscv_update_hvip(vcpu);

		if (ret <= 0 ||
		    kvm_riscv_stage2_vmid_ver_changed(&vcpu->kvm->arch.vmid) ||
		    kvm_request_pending(vcpu)) {
			vcpu->mode = OUTSIDE_GUEST_MODE;
			local_irq_enable();
			preempt_enable();
			vcpu->arch.srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
			continue;
		}

		guest_timing_enter_irqoff();

		kvm_riscv_vcpu_enter_exit(vcpu);

		vcpu->mode = OUTSIDE_GUEST_MODE;
		vcpu->stat.exits++;

		/*
		 * Save SCAUSE, STVAL, HTVAL, and HTINST because we might
		 * get an interrupt between __kvm_riscv_switch_to() and
		 * local_irq_enable() which can potentially change CSRs.
		 */
		trap.sepc = vcpu->arch.guest_context.sepc;
		trap.scause = csr_read(CSR_SCAUSE);
		trap.stval = csr_read(CSR_STVAL);
		trap.htval = csr_read(CSR_HTVAL);
		trap.htinst = csr_read(CSR_HTINST);

		/* Sync up interrupt state with HW */
		kvm_riscv_vcpu_sync_interrupts(vcpu);

		/*
		 * We must ensure that any pending interrupts are taken before
		 * we exit guest timing so that timer ticks are accounted as
		 * guest time. Transiently unmask interrupts so that any
		 * pending interrupts are taken.
		 *
		 * There's no barrier which ensures that pending interrupts are
		 * recognised, so we just hope that the CPU takes any pending
		 * interrupts between the enable and disable.
		 */
		local_irq_enable();
		local_irq_disable();

		guest_timing_exit_irqoff();

		local_irq_enable();

		preempt_enable();

		vcpu->arch.srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

		ret = kvm_riscv_vcpu_exit(vcpu, run, &trap);
	}

	kvm_sigset_deactivate(vcpu);

	vcpu_put(vcpu);

	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->arch.srcu_idx);

	return ret;
}