// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *     Anup Patel <anup.patel@wdc.com>
 */

#include <linux/bitops.h>
#include <linux/entry-kvm.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/kvm_host.h>
#include <asm/csr.h>
#include <asm/cacheflush.h>
#include <asm/hwcap.h>
#include <asm/sbi.h>

const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
        KVM_GENERIC_VCPU_STATS(),
        STATS_DESC_COUNTER(VCPU, ecall_exit_stat),
        STATS_DESC_COUNTER(VCPU, wfi_exit_stat),
        STATS_DESC_COUNTER(VCPU, mmio_exit_user),
        STATS_DESC_COUNTER(VCPU, mmio_exit_kernel),
        STATS_DESC_COUNTER(VCPU, csr_exit_user),
        STATS_DESC_COUNTER(VCPU, csr_exit_kernel),
        STATS_DESC_COUNTER(VCPU, signal_exits),
        STATS_DESC_COUNTER(VCPU, exits)
};

const struct kvm_stats_header kvm_vcpu_stats_header = {
        .name_size = KVM_STATS_NAME_SIZE,
        .num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
        .id_offset = sizeof(struct kvm_stats_header),
        .desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
        .data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
                       sizeof(kvm_vcpu_stats_desc),
};
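
/*
 * Bits [25:0] of the ONE_REG ISA config register select the single
 * letter base ISA extensions 'a' through 'z'.
 */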
#define KVM_RISCV_BASE_ISA_MASK         GENMASK(25, 0)

#define KVM_ISA_EXT_ARR(ext)            [KVM_RISCV_ISA_EXT_##ext] = RISCV_ISA_EXT_##ext

/* Mapping between KVM ISA Extension ID & Host ISA extension ID */
static const unsigned long kvm_isa_ext_arr[] = {
        [KVM_RISCV_ISA_EXT_A] = RISCV_ISA_EXT_a,
        [KVM_RISCV_ISA_EXT_C] = RISCV_ISA_EXT_c,
        [KVM_RISCV_ISA_EXT_D] = RISCV_ISA_EXT_d,
        [KVM_RISCV_ISA_EXT_F] = RISCV_ISA_EXT_f,
        [KVM_RISCV_ISA_EXT_H] = RISCV_ISA_EXT_h,
        [KVM_RISCV_ISA_EXT_I] = RISCV_ISA_EXT_i,
        [KVM_RISCV_ISA_EXT_M] = RISCV_ISA_EXT_m,
        [KVM_RISCV_ISA_EXT_V] = RISCV_ISA_EXT_v,

        KVM_ISA_EXT_ARR(SSAIA),
        KVM_ISA_EXT_ARR(SSTC),
        KVM_ISA_EXT_ARR(SVINVAL),
        KVM_ISA_EXT_ARR(SVPBMT),
        KVM_ISA_EXT_ARR(ZBB),
        KVM_ISA_EXT_ARR(ZIHINTPAUSE),
        KVM_ISA_EXT_ARR(ZICBOM),
        KVM_ISA_EXT_ARR(ZICBOZ),
};
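
/*
 * Map a host ISA extension ID to the matching KVM ISA extension ID;
 * returns KVM_RISCV_ISA_EXT_MAX if there is no mapping.
 */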
static unsigned long kvm_riscv_vcpu_base2isa_ext(unsigned long base_ext)
{
        unsigned long i;

        for (i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++) {
                if (kvm_isa_ext_arr[i] == base_ext)
                        return i;
        }

        return KVM_RISCV_ISA_EXT_MAX;
}
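
/* Check whether userspace may enable a given extension for a VCPU */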
static bool kvm_riscv_vcpu_isa_enable_allowed(unsigned long ext)
{
        switch (ext) {
        case KVM_RISCV_ISA_EXT_H:
                return false;
        default:
                break;
        }

        return true;
}
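
/*
 * Check whether userspace may disable a given extension for a VCPU;
 * the listed extensions cannot be turned off once present.
 */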
static bool kvm_riscv_vcpu_isa_disable_allowed(unsigned long ext)
{
        switch (ext) {
        case KVM_RISCV_ISA_EXT_A:
        case KVM_RISCV_ISA_EXT_C:
        case KVM_RISCV_ISA_EXT_I:
        case KVM_RISCV_ISA_EXT_M:
        case KVM_RISCV_ISA_EXT_SSAIA:
        case KVM_RISCV_ISA_EXT_SSTC:
        case KVM_RISCV_ISA_EXT_SVINVAL:
        case KVM_RISCV_ISA_EXT_ZIHINTPAUSE:
        case KVM_RISCV_ISA_EXT_ZBB:
                return false;
        default:
                break;
        }

        return true;
}
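
/*
 * Restore the VCPU to its reset state. If the VCPU is currently loaded
 * on a host CPU, it is put first and re-loaded afterwards.
 */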
static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu)
{
        struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
        struct kvm_vcpu_csr *reset_csr = &vcpu->arch.guest_reset_csr;
        struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
        struct kvm_cpu_context *reset_cntx = &vcpu->arch.guest_reset_context;
        bool loaded;

        /*
         * Preemption must be disabled here because this races with
         * kvm_sched_out/kvm_sched_in (called from preempt notifiers),
         * which also call vcpu_load/vcpu_put.
         */
        get_cpu();
        loaded = (vcpu->cpu != -1);
        if (loaded)
                kvm_arch_vcpu_put(vcpu);

        vcpu->arch.last_exit_cpu = -1;

        memcpy(csr, reset_csr, sizeof(*csr));

        memcpy(cntx, reset_cntx, sizeof(*cntx));

        kvm_riscv_vcpu_fp_reset(vcpu);

        kvm_riscv_vcpu_timer_reset(vcpu);

        kvm_riscv_vcpu_aia_reset(vcpu);

        bitmap_zero(vcpu->arch.irqs_pending, KVM_RISCV_VCPU_NR_IRQS);
        bitmap_zero(vcpu->arch.irqs_pending_mask, KVM_RISCV_VCPU_NR_IRQS);

        kvm_riscv_vcpu_pmu_reset(vcpu);

        vcpu->arch.hfence_head = 0;
        vcpu->arch.hfence_tail = 0;
        memset(vcpu->arch.hfence_queue, 0, sizeof(vcpu->arch.hfence_queue));

        /* Reset the guest CSRs for hotplug usecase */
        if (loaded)
                kvm_arch_vcpu_load(vcpu, smp_processor_id());
        put_cpu();
}

int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
{
        return 0;
}

int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
{
        int rc;
        struct kvm_cpu_context *cntx;
        struct kvm_vcpu_csr *reset_csr = &vcpu->arch.guest_reset_csr;
        unsigned long host_isa, i;

        /* Mark this VCPU never ran */
        vcpu->arch.ran_atleast_once = false;
        vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO;
        bitmap_zero(vcpu->arch.isa, RISCV_ISA_EXT_MAX);

        /* Setup ISA features available to VCPU */
        for (i = 0; i < ARRAY_SIZE(kvm_isa_ext_arr); i++) {
                host_isa = kvm_isa_ext_arr[i];
                if (__riscv_isa_extension_available(NULL, host_isa) &&
                    kvm_riscv_vcpu_isa_enable_allowed(i))
                        set_bit(host_isa, vcpu->arch.isa);
        }

        /* Setup vendor, arch, and implementation details */
        vcpu->arch.mvendorid = sbi_get_mvendorid();
        vcpu->arch.marchid = sbi_get_marchid();
        vcpu->arch.mimpid = sbi_get_mimpid();

        /* Setup VCPU hfence queue */
        spin_lock_init(&vcpu->arch.hfence_lock);

        /* Setup reset state of shadow SSTATUS and HSTATUS CSRs */
        cntx = &vcpu->arch.guest_reset_context;
        cntx->sstatus = SR_SPP | SR_SPIE;
        cntx->hstatus = 0;
        cntx->hstatus |= HSTATUS_VTW;
        cntx->hstatus |= HSTATUS_SPVP;
        cntx->hstatus |= HSTATUS_SPV;

        /* By default, make CY, TM, and IR counters accessible in VU mode */
        reset_csr->scounteren = 0x7;

        /* Setup VCPU timer */
        kvm_riscv_vcpu_timer_init(vcpu);

        /* Setup performance monitoring */
        kvm_riscv_vcpu_pmu_init(vcpu);

        /* Setup VCPU AIA */
        rc = kvm_riscv_vcpu_aia_init(vcpu);
        if (rc)
                return rc;

        /* Reset VCPU */
        kvm_riscv_reset_vcpu(vcpu);

        return 0;
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
        /*
         * vcpu with id 0 is the designated boot cpu.
         * Keep all vcpus with non-zero id in power-off state so that
         * they can be brought up using SBI HSM extension.
         */
        if (vcpu->vcpu_idx != 0)
                kvm_riscv_vcpu_power_off(vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
        /* Cleanup VCPU AIA context */
        kvm_riscv_vcpu_aia_deinit(vcpu);

        /* Cleanup VCPU timer */
        kvm_riscv_vcpu_timer_deinit(vcpu);

        kvm_riscv_vcpu_pmu_deinit(vcpu);

        /* Free unused pages pre-allocated for G-stage page table mappings */
        kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
        return kvm_riscv_vcpu_timer_pending(vcpu);
}

void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
{
}

void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
{
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
        return (kvm_riscv_vcpu_has_interrupts(vcpu, -1UL) &&
                !vcpu->arch.power_off && !vcpu->arch.pause);
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
        return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
}

bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
        return (vcpu->arch.guest_context.sstatus & SR_SPP) ? true : false;
}

vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
        return VM_FAULT_SIGBUS;
}
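
/*
 * KVM_GET_ONE_REG handler for the CONFIG register class: the base ISA
 * bitmap, Zicbom/Zicboz block sizes, and the machine ID registers
 * (mvendorid, marchid, mimpid).
 */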
static int kvm_riscv_vcpu_get_reg_config(struct kvm_vcpu *vcpu,
                                         const struct kvm_one_reg *reg)
{
        unsigned long __user *uaddr =
                        (unsigned long __user *)(unsigned long)reg->addr;
        unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
                                            KVM_REG_SIZE_MASK |
                                            KVM_REG_RISCV_CONFIG);
        unsigned long reg_val;

        if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
                return -EINVAL;

        switch (reg_num) {
        case KVM_REG_RISCV_CONFIG_REG(isa):
                reg_val = vcpu->arch.isa[0] & KVM_RISCV_BASE_ISA_MASK;
                break;
        case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size):
                if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOM))
                        return -EINVAL;
                reg_val = riscv_cbom_block_size;
                break;
        case KVM_REG_RISCV_CONFIG_REG(zicboz_block_size):
                if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOZ))
                        return -EINVAL;
                reg_val = riscv_cboz_block_size;
                break;
        case KVM_REG_RISCV_CONFIG_REG(mvendorid):
                reg_val = vcpu->arch.mvendorid;
                break;
        case KVM_REG_RISCV_CONFIG_REG(marchid):
                reg_val = vcpu->arch.marchid;
                break;
        case KVM_REG_RISCV_CONFIG_REG(mimpid):
                reg_val = vcpu->arch.mimpid;
                break;
        default:
                return -EINVAL;
        }

        if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
                return -EFAULT;

        return 0;
}

static int kvm_riscv_vcpu_set_reg_config(struct kvm_vcpu *vcpu,
                                         const struct kvm_one_reg *reg)
{
        unsigned long __user *uaddr =
                        (unsigned long __user *)(unsigned long)reg->addr;
        unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
                                            KVM_REG_SIZE_MASK |
                                            KVM_REG_RISCV_CONFIG);
        unsigned long i, isa_ext, reg_val;

        if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
                return -EINVAL;

        if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
                return -EFAULT;

        switch (reg_num) {
        case KVM_REG_RISCV_CONFIG_REG(isa):
                /*
                 * This ONE REG interface is only defined for
                 * single letter extensions.
                 */
                if (fls(reg_val) >= RISCV_ISA_EXT_BASE)
                        return -EINVAL;

                if (!vcpu->arch.ran_atleast_once) {
                        /* Ignore the enable/disable request for certain extensions */
                        for (i = 0; i < RISCV_ISA_EXT_BASE; i++) {
                                isa_ext = kvm_riscv_vcpu_base2isa_ext(i);
                                if (isa_ext >= KVM_RISCV_ISA_EXT_MAX) {
                                        reg_val &= ~BIT(i);
                                        continue;
                                }
                                if (!kvm_riscv_vcpu_isa_enable_allowed(isa_ext))
                                        if (reg_val & BIT(i))
                                                reg_val &= ~BIT(i);
                                if (!kvm_riscv_vcpu_isa_disable_allowed(isa_ext))
                                        if (!(reg_val & BIT(i)))
                                                reg_val |= BIT(i);
                        }
                        reg_val &= riscv_isa_extension_base(NULL);
                        /* Do not modify anything beyond single letter extensions */
                        reg_val = (vcpu->arch.isa[0] & ~KVM_RISCV_BASE_ISA_MASK) |
                                  (reg_val & KVM_RISCV_BASE_ISA_MASK);
                        vcpu->arch.isa[0] = reg_val;
                        kvm_riscv_vcpu_fp_reset(vcpu);
                } else {
                        return -EOPNOTSUPP;
                }
                break;
        case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size):
                return -EOPNOTSUPP;
        case KVM_REG_RISCV_CONFIG_REG(zicboz_block_size):
                return -EOPNOTSUPP;
        case KVM_REG_RISCV_CONFIG_REG(mvendorid):
                if (!vcpu->arch.ran_atleast_once)
                        vcpu->arch.mvendorid = reg_val;
                else
                        return -EBUSY;
                break;
        case KVM_REG_RISCV_CONFIG_REG(marchid):
                if (!vcpu->arch.ran_atleast_once)
                        vcpu->arch.marchid = reg_val;
                else
                        return -EBUSY;
                break;
        case KVM_REG_RISCV_CONFIG_REG(mimpid):
                if (!vcpu->arch.ran_atleast_once)
                        vcpu->arch.mimpid = reg_val;
                else
                        return -EBUSY;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

static int kvm_riscv_vcpu_get_reg_core(struct kvm_vcpu *vcpu,
                                       const struct kvm_one_reg *reg)
{
        struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
        unsigned long __user *uaddr =
                        (unsigned long __user *)(unsigned long)reg->addr;
        unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
                                            KVM_REG_SIZE_MASK |
                                            KVM_REG_RISCV_CORE);
        unsigned long reg_val;

        if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
                return -EINVAL;
        if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long))
                return -EINVAL;

        if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc))
                reg_val = cntx->sepc;
        else if (KVM_REG_RISCV_CORE_REG(regs.pc) < reg_num &&
                 reg_num <= KVM_REG_RISCV_CORE_REG(regs.t6))
                reg_val = ((unsigned long *)cntx)[reg_num];
        else if (reg_num == KVM_REG_RISCV_CORE_REG(mode))
                reg_val = (cntx->sstatus & SR_SPP) ?
                                KVM_RISCV_MODE_S : KVM_RISCV_MODE_U;
        else
                return -EINVAL;

        if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
                return -EFAULT;

        return 0;
}

static int kvm_riscv_vcpu_set_reg_core(struct kvm_vcpu *vcpu,
                                       const struct kvm_one_reg *reg)
{
        struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
        unsigned long __user *uaddr =
                        (unsigned long __user *)(unsigned long)reg->addr;
        unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
                                            KVM_REG_SIZE_MASK |
                                            KVM_REG_RISCV_CORE);
        unsigned long reg_val;

        if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
                return -EINVAL;
        if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long))
                return -EINVAL;

        if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
                return -EFAULT;

        if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc))
                cntx->sepc = reg_val;
        else if (KVM_REG_RISCV_CORE_REG(regs.pc) < reg_num &&
                 reg_num <= KVM_REG_RISCV_CORE_REG(regs.t6))
                ((unsigned long *)cntx)[reg_num] = reg_val;
        else if (reg_num == KVM_REG_RISCV_CORE_REG(mode)) {
                if (reg_val == KVM_RISCV_MODE_S)
                        cntx->sstatus |= SR_SPP;
                else
                        cntx->sstatus &= ~SR_SPP;
        } else
                return -EINVAL;

        return 0;
}
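
/*
 * Reads of the sip CSR are derived from the shadow HVIP image, after
 * folding in any pending interrupt updates.
 */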
static int kvm_riscv_vcpu_general_get_csr(struct kvm_vcpu *vcpu,
                                          unsigned long reg_num,
                                          unsigned long *out_val)
{
        struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

        if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
                return -EINVAL;

        if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
                kvm_riscv_vcpu_flush_interrupts(vcpu);
                *out_val = (csr->hvip >> VSIP_TO_HVIP_SHIFT) & VSIP_VALID_MASK;
                *out_val |= csr->hvip & ~IRQ_LOCAL_MASK;
        } else
                *out_val = ((unsigned long *)csr)[reg_num];

        return 0;
}

static inline int kvm_riscv_vcpu_general_set_csr(struct kvm_vcpu *vcpu,
                                                 unsigned long reg_num,
                                                 unsigned long reg_val)
{
        struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

        if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
                return -EINVAL;

        if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
                reg_val &= VSIP_VALID_MASK;
                reg_val <<= VSIP_TO_HVIP_SHIFT;
        }

        ((unsigned long *)csr)[reg_num] = reg_val;

        if (reg_num == KVM_REG_RISCV_CSR_REG(sip))
                WRITE_ONCE(vcpu->arch.irqs_pending_mask[0], 0);

        return 0;
}

static int kvm_riscv_vcpu_get_reg_csr(struct kvm_vcpu *vcpu,
                                      const struct kvm_one_reg *reg)
{
        int rc;
        unsigned long __user *uaddr =
                        (unsigned long __user *)(unsigned long)reg->addr;
        unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
                                            KVM_REG_SIZE_MASK |
                                            KVM_REG_RISCV_CSR);
        unsigned long reg_val, reg_subtype;

        if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
                return -EINVAL;

        reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
        reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;
        switch (reg_subtype) {
        case KVM_REG_RISCV_CSR_GENERAL:
                rc = kvm_riscv_vcpu_general_get_csr(vcpu, reg_num, &reg_val);
                break;
        case KVM_REG_RISCV_CSR_AIA:
                rc = kvm_riscv_vcpu_aia_get_csr(vcpu, reg_num, &reg_val);
                break;
        default:
                rc = -EINVAL;
                break;
        }
        if (rc)
                return rc;

        if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
                return -EFAULT;

        return 0;
}

static int kvm_riscv_vcpu_set_reg_csr(struct kvm_vcpu *vcpu,
                                      const struct kvm_one_reg *reg)
{
        int rc;
        unsigned long __user *uaddr =
                        (unsigned long __user *)(unsigned long)reg->addr;
        unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
                                            KVM_REG_SIZE_MASK |
                                            KVM_REG_RISCV_CSR);
        unsigned long reg_val, reg_subtype;

        if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
                return -EINVAL;

        if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
                return -EFAULT;

        reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
        reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;
        switch (reg_subtype) {
        case KVM_REG_RISCV_CSR_GENERAL:
                rc = kvm_riscv_vcpu_general_set_csr(vcpu, reg_num, reg_val);
                break;
        case KVM_REG_RISCV_CSR_AIA:
                rc = kvm_riscv_vcpu_aia_set_csr(vcpu, reg_num, reg_val);
                break;
        default:
                rc = -EINVAL;
                break;
        }
        if (rc)
                return rc;

        return 0;
}

static int kvm_riscv_vcpu_get_reg_isa_ext(struct kvm_vcpu *vcpu,
                                          const struct kvm_one_reg *reg)
{
        unsigned long __user *uaddr =
                        (unsigned long __user *)(unsigned long)reg->addr;
        unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
                                            KVM_REG_SIZE_MASK |
                                            KVM_REG_RISCV_ISA_EXT);
        unsigned long reg_val = 0;
        unsigned long host_isa_ext;

        if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
                return -EINVAL;

        if (reg_num >= KVM_RISCV_ISA_EXT_MAX ||
            reg_num >= ARRAY_SIZE(kvm_isa_ext_arr))
                return -EINVAL;

        host_isa_ext = kvm_isa_ext_arr[reg_num];
        if (__riscv_isa_extension_available(vcpu->arch.isa, host_isa_ext))
                reg_val = 1; /* Mark the given extension as available */

        if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
                return -EFAULT;

        return 0;
}

static int kvm_riscv_vcpu_set_reg_isa_ext(struct kvm_vcpu *vcpu,
                                          const struct kvm_one_reg *reg)
{
        unsigned long __user *uaddr =
                        (unsigned long __user *)(unsigned long)reg->addr;
        unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
                                            KVM_REG_SIZE_MASK |
                                            KVM_REG_RISCV_ISA_EXT);
        unsigned long reg_val;
        unsigned long host_isa_ext;

        if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
                return -EINVAL;

        if (reg_num >= KVM_RISCV_ISA_EXT_MAX ||
            reg_num >= ARRAY_SIZE(kvm_isa_ext_arr))
                return -EINVAL;

        if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
                return -EFAULT;

        host_isa_ext = kvm_isa_ext_arr[reg_num];
        if (!__riscv_isa_extension_available(NULL, host_isa_ext))
                return -EOPNOTSUPP;

        if (!vcpu->arch.ran_atleast_once) {
                /*
                 * All multi-letter extensions and a few single letter
                 * extensions can be disabled
                 */
                if (reg_val == 1 &&
                    kvm_riscv_vcpu_isa_enable_allowed(reg_num))
                        set_bit(host_isa_ext, vcpu->arch.isa);
                else if (!reg_val &&
                         kvm_riscv_vcpu_isa_disable_allowed(reg_num))
                        clear_bit(host_isa_ext, vcpu->arch.isa);
                else
                        return -EINVAL;
                kvm_riscv_vcpu_fp_reset(vcpu);
        } else {
                return -EOPNOTSUPP;
        }

        return 0;
}
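
/* Dispatch KVM_SET_ONE_REG to the handler for the register class in reg->id */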
static int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu,
                                  const struct kvm_one_reg *reg)
{
        switch (reg->id & KVM_REG_RISCV_TYPE_MASK) {
        case KVM_REG_RISCV_CONFIG:
                return kvm_riscv_vcpu_set_reg_config(vcpu, reg);
        case KVM_REG_RISCV_CORE:
                return kvm_riscv_vcpu_set_reg_core(vcpu, reg);
        case KVM_REG_RISCV_CSR:
                return kvm_riscv_vcpu_set_reg_csr(vcpu, reg);
        case KVM_REG_RISCV_TIMER:
                return kvm_riscv_vcpu_set_reg_timer(vcpu, reg);
        case KVM_REG_RISCV_FP_F:
                return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
                                                 KVM_REG_RISCV_FP_F);
        case KVM_REG_RISCV_FP_D:
                return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
                                                 KVM_REG_RISCV_FP_D);
        case KVM_REG_RISCV_ISA_EXT:
                return kvm_riscv_vcpu_set_reg_isa_ext(vcpu, reg);
        case KVM_REG_RISCV_SBI_EXT:
                return kvm_riscv_vcpu_set_reg_sbi_ext(vcpu, reg);
        default:
                break;
        }

        return -EINVAL;
}

static int kvm_riscv_vcpu_get_reg(struct kvm_vcpu *vcpu,
                                  const struct kvm_one_reg *reg)
{
        switch (reg->id & KVM_REG_RISCV_TYPE_MASK) {
        case KVM_REG_RISCV_CONFIG:
                return kvm_riscv_vcpu_get_reg_config(vcpu, reg);
        case KVM_REG_RISCV_CORE:
                return kvm_riscv_vcpu_get_reg_core(vcpu, reg);
        case KVM_REG_RISCV_CSR:
                return kvm_riscv_vcpu_get_reg_csr(vcpu, reg);
        case KVM_REG_RISCV_TIMER:
                return kvm_riscv_vcpu_get_reg_timer(vcpu, reg);
        case KVM_REG_RISCV_FP_F:
                return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
                                                 KVM_REG_RISCV_FP_F);
        case KVM_REG_RISCV_FP_D:
                return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
                                                 KVM_REG_RISCV_FP_D);
        case KVM_REG_RISCV_ISA_EXT:
                return kvm_riscv_vcpu_get_reg_isa_ext(vcpu, reg);
        case KVM_REG_RISCV_SBI_EXT:
                return kvm_riscv_vcpu_get_reg_sbi_ext(vcpu, reg);
        default:
                break;
        }

        return -EINVAL;
}

long kvm_arch_vcpu_async_ioctl(struct file *filp,
                               unsigned int ioctl, unsigned long arg)
{
        struct kvm_vcpu *vcpu = filp->private_data;
        void __user *argp = (void __user *)arg;

        if (ioctl == KVM_INTERRUPT) {
                struct kvm_interrupt irq;

                if (copy_from_user(&irq, argp, sizeof(irq)))
                        return -EFAULT;

                if (irq.irq == KVM_INTERRUPT_SET)
                        return kvm_riscv_vcpu_set_interrupt(vcpu, IRQ_VS_EXT);
                else
                        return kvm_riscv_vcpu_unset_interrupt(vcpu, IRQ_VS_EXT);
        }

        return -ENOIOCTLCMD;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
        struct kvm_vcpu *vcpu = filp->private_data;
        void __user *argp = (void __user *)arg;
        long r = -EINVAL;

        switch (ioctl) {
        case KVM_SET_ONE_REG:
        case KVM_GET_ONE_REG: {
                struct kvm_one_reg reg;

                r = -EFAULT;
                if (copy_from_user(&reg, argp, sizeof(reg)))
                        break;

                if (ioctl == KVM_SET_ONE_REG)
                        r = kvm_riscv_vcpu_set_reg(vcpu, &reg);
                else
                        r = kvm_riscv_vcpu_get_reg(vcpu, &reg);
                break;
        }
        default:
                break;
        }

        return r;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        return -EINVAL;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        return -EINVAL;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr)
{
        return -EINVAL;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        return -EINVAL;
}
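
/*
 * Fold pending interrupt updates from the irqs_pending bitmap into the
 * shadow HVIP image that will be written to hardware.
 */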
void kvm_riscv_vcpu_flush_interrupts(struct kvm_vcpu *vcpu)
{
        struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
        unsigned long mask, val;

        if (READ_ONCE(vcpu->arch.irqs_pending_mask[0])) {
                mask = xchg_acquire(&vcpu->arch.irqs_pending_mask[0], 0);
                val = READ_ONCE(vcpu->arch.irqs_pending[0]) & mask;

                csr->hvip &= ~mask;
                csr->hvip |= val;
        }

        /* Flush AIA high interrupts */
        kvm_riscv_vcpu_aia_flush_interrupts(vcpu);
}

void kvm_riscv_vcpu_sync_interrupts(struct kvm_vcpu *vcpu)
{
        unsigned long hvip;
        struct kvm_vcpu_arch *v = &vcpu->arch;
        struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

        /* Read current HVIP and VSIE CSRs */
        csr->vsie = csr_read(CSR_VSIE);

        /* Sync-up HVIP.VSSIP bit changes done by the Guest */
        hvip = csr_read(CSR_HVIP);
        if ((csr->hvip ^ hvip) & (1UL << IRQ_VS_SOFT)) {
                if (hvip & (1UL << IRQ_VS_SOFT)) {
                        if (!test_and_set_bit(IRQ_VS_SOFT,
                                              v->irqs_pending_mask))
                                set_bit(IRQ_VS_SOFT, v->irqs_pending);
                } else {
                        if (!test_and_set_bit(IRQ_VS_SOFT,
                                              v->irqs_pending_mask))
                                clear_bit(IRQ_VS_SOFT, v->irqs_pending);
                }
        }

        /* Sync-up AIA high interrupts */
        kvm_riscv_vcpu_aia_sync_interrupts(vcpu);

        /* Sync-up timer CSRs */
        kvm_riscv_vcpu_timer_sync(vcpu);
}

int kvm_riscv_vcpu_set_interrupt(struct kvm_vcpu *vcpu, unsigned int irq)
{
        /*
         * We only allow VS-mode software, timer, and external
         * interrupts when irq is one of the local interrupts
         * defined by the RISC-V privilege specification.
         */
        if (irq < IRQ_LOCAL_MAX &&
            irq != IRQ_VS_SOFT &&
            irq != IRQ_VS_TIMER &&
            irq != IRQ_VS_EXT)
                return -EINVAL;

        set_bit(irq, vcpu->arch.irqs_pending);
        smp_mb__before_atomic();
        set_bit(irq, vcpu->arch.irqs_pending_mask);

        kvm_vcpu_kick(vcpu);

        return 0;
}

int kvm_riscv_vcpu_unset_interrupt(struct kvm_vcpu *vcpu, unsigned int irq)
{
        /*
         * We only allow VS-mode software, timer, and external
         * interrupts when irq is one of the local interrupts
         * defined by the RISC-V privilege specification.
         */
        if (irq < IRQ_LOCAL_MAX &&
            irq != IRQ_VS_SOFT &&
            irq != IRQ_VS_TIMER &&
            irq != IRQ_VS_EXT)
                return -EINVAL;

        clear_bit(irq, vcpu->arch.irqs_pending);
        smp_mb__before_atomic();
        set_bit(irq, vcpu->arch.irqs_pending_mask);

        return 0;
}
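
/*
 * Return true if any interrupt in @mask is both pending and enabled
 * (VSIE), including AIA high interrupts.
 */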
bool kvm_riscv_vcpu_has_interrupts(struct kvm_vcpu *vcpu, u64 mask)
{
        unsigned long ie;

        ie = ((vcpu->arch.guest_csr.vsie & VSIP_VALID_MASK)
              << VSIP_TO_HVIP_SHIFT) & (unsigned long)mask;
        ie |= vcpu->arch.guest_csr.vsie & ~IRQ_LOCAL_MASK &
              (unsigned long)mask;
        if (READ_ONCE(vcpu->arch.irqs_pending[0]) & ie)
                return true;

        /* Check AIA high interrupts */
        return kvm_riscv_vcpu_aia_has_interrupts(vcpu, mask);
}

void kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu)
{
        vcpu->arch.power_off = true;
        kvm_make_request(KVM_REQ_SLEEP, vcpu);
        kvm_vcpu_kick(vcpu);
}

void kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu)
{
        vcpu->arch.power_off = false;
        kvm_vcpu_wake_up(vcpu);
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        if (vcpu->arch.power_off)
                mp_state->mp_state = KVM_MP_STATE_STOPPED;
        else
                mp_state->mp_state = KVM_MP_STATE_RUNNABLE;

        return 0;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        int ret = 0;

        switch (mp_state->mp_state) {
        case KVM_MP_STATE_RUNNABLE:
                vcpu->arch.power_off = false;
                break;
        case KVM_MP_STATE_STOPPED:
                kvm_riscv_vcpu_power_off(vcpu);
                break;
        default:
                ret = -EINVAL;
        }

        return ret;
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg)
{
        /* TODO: To be implemented later. */
        return -EINVAL;
}
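
/*
 * Program the hypervisor environment configuration (henvcfg) CSR based
 * on the ISA extensions available to this VCPU.
 */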
static void kvm_riscv_vcpu_update_config(const unsigned long *isa)
{
        u64 henvcfg = 0;

        if (riscv_isa_extension_available(isa, SVPBMT))
                henvcfg |= ENVCFG_PBMTE;

        if (riscv_isa_extension_available(isa, SSTC))
                henvcfg |= ENVCFG_STCE;

        if (riscv_isa_extension_available(isa, ZICBOM))
                henvcfg |= (ENVCFG_CBIE | ENVCFG_CBCFE);

        if (riscv_isa_extension_available(isa, ZICBOZ))
                henvcfg |= ENVCFG_CBZE;

        csr_write(CSR_HENVCFG, henvcfg);
#ifdef CONFIG_32BIT
        csr_write(CSR_HENVCFGH, henvcfg >> 32);
#endif
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

        csr_write(CSR_VSSTATUS, csr->vsstatus);
        csr_write(CSR_VSIE, csr->vsie);
        csr_write(CSR_VSTVEC, csr->vstvec);
        csr_write(CSR_VSSCRATCH, csr->vsscratch);
        csr_write(CSR_VSEPC, csr->vsepc);
        csr_write(CSR_VSCAUSE, csr->vscause);
        csr_write(CSR_VSTVAL, csr->vstval);
        csr_write(CSR_HVIP, csr->hvip);
        csr_write(CSR_VSATP, csr->vsatp);

        kvm_riscv_vcpu_update_config(vcpu->arch.isa);

        kvm_riscv_gstage_update_hgatp(vcpu);

        kvm_riscv_vcpu_timer_restore(vcpu);

        kvm_riscv_vcpu_host_fp_save(&vcpu->arch.host_context);
        kvm_riscv_vcpu_guest_fp_restore(&vcpu->arch.guest_context,
                                        vcpu->arch.isa);

        kvm_riscv_vcpu_aia_load(vcpu, cpu);

        vcpu->cpu = cpu;
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
        struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

        vcpu->cpu = -1;

        kvm_riscv_vcpu_aia_put(vcpu);

        kvm_riscv_vcpu_guest_fp_save(&vcpu->arch.guest_context,
                                     vcpu->arch.isa);
        kvm_riscv_vcpu_host_fp_restore(&vcpu->arch.host_context);

        kvm_riscv_vcpu_timer_save(vcpu);

        csr->vsstatus = csr_read(CSR_VSSTATUS);
        csr->vsie = csr_read(CSR_VSIE);
        csr->vstvec = csr_read(CSR_VSTVEC);
        csr->vsscratch = csr_read(CSR_VSSCRATCH);
        csr->vsepc = csr_read(CSR_VSEPC);
        csr->vscause = csr_read(CSR_VSCAUSE);
        csr->vstval = csr_read(CSR_VSTVAL);
        csr->hvip = csr_read(CSR_HVIP);
        csr->vsatp = csr_read(CSR_VSATP);
}
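
/*
 * Handle pending VCPU requests (sleep, reset, HGATP update, and the
 * various fence operations) before entering the guest.
 */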
static void kvm_riscv_check_vcpu_requests(struct kvm_vcpu *vcpu)
{
        struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu);

        if (kvm_request_pending(vcpu)) {
                if (kvm_check_request(KVM_REQ_SLEEP, vcpu)) {
                        kvm_vcpu_srcu_read_unlock(vcpu);
                        rcuwait_wait_event(wait,
                                (!vcpu->arch.power_off) && (!vcpu->arch.pause),
                                TASK_INTERRUPTIBLE);
                        kvm_vcpu_srcu_read_lock(vcpu);

                        if (vcpu->arch.power_off || vcpu->arch.pause) {
                                /*
                                 * Awakened to handle a signal; request to
                                 * sleep again later.
                                 */
                                kvm_make_request(KVM_REQ_SLEEP, vcpu);
                        }
                }

                if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu))
                        kvm_riscv_reset_vcpu(vcpu);

                if (kvm_check_request(KVM_REQ_UPDATE_HGATP, vcpu))
                        kvm_riscv_gstage_update_hgatp(vcpu);

                if (kvm_check_request(KVM_REQ_FENCE_I, vcpu))
                        kvm_riscv_fence_i_process(vcpu);

                /*
                 * The generic KVM_REQ_TLB_FLUSH is the same as
                 * KVM_REQ_HFENCE_GVMA_VMID_ALL
                 */
                if (kvm_check_request(KVM_REQ_HFENCE_GVMA_VMID_ALL, vcpu))
                        kvm_riscv_hfence_gvma_vmid_all_process(vcpu);

                if (kvm_check_request(KVM_REQ_HFENCE_VVMA_ALL, vcpu))
                        kvm_riscv_hfence_vvma_all_process(vcpu);

                if (kvm_check_request(KVM_REQ_HFENCE, vcpu))
                        kvm_riscv_hfence_process(vcpu);
        }
}
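
/* Write the shadow HVIP image to hardware for the current CPU */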
static void kvm_riscv_update_hvip(struct kvm_vcpu *vcpu)
{
        struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

        csr_write(CSR_HVIP, csr->hvip);
        kvm_riscv_vcpu_aia_update_hvip(vcpu);
}

/*
 * Actually run the vCPU, entering an RCU extended quiescent state (EQS) while
 * the vCPU is running.
 *
 * This must be noinstr as instrumentation may make use of RCU, and this is not
 * safe during the EQS.
 */
static void noinstr kvm_riscv_vcpu_enter_exit(struct kvm_vcpu *vcpu)
{
        guest_state_enter_irqoff();
        __kvm_riscv_switch_to(&vcpu->arch);
        vcpu->arch.last_exit_cpu = vcpu->cpu;
        guest_state_exit_irqoff();
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
{
        int ret;
        struct kvm_cpu_trap trap;
        struct kvm_run *run = vcpu->run;

        /* Mark this VCPU ran at least once */
        vcpu->arch.ran_atleast_once = true;

        kvm_vcpu_srcu_read_lock(vcpu);

        switch (run->exit_reason) {
        case KVM_EXIT_MMIO:
                /* Process MMIO value returned from user-space */
                ret = kvm_riscv_vcpu_mmio_return(vcpu, vcpu->run);
                break;
        case KVM_EXIT_RISCV_SBI:
                /* Process SBI value returned from user-space */
                ret = kvm_riscv_vcpu_sbi_return(vcpu, vcpu->run);
                break;
        case KVM_EXIT_RISCV_CSR:
                /* Process CSR value returned from user-space */
                ret = kvm_riscv_vcpu_csr_return(vcpu, vcpu->run);
                break;
        default:
                ret = 0;
                break;
        }
        if (ret) {
                kvm_vcpu_srcu_read_unlock(vcpu);
                return ret;
        }

        if (run->immediate_exit) {
                kvm_vcpu_srcu_read_unlock(vcpu);
                return -EINTR;
        }

        vcpu_load(vcpu);

        kvm_sigset_activate(vcpu);

        ret = 1;
        run->exit_reason = KVM_EXIT_UNKNOWN;
        while (ret > 0) {
                /* Check conditions before entering the guest */
                ret = xfer_to_guest_mode_handle_work(vcpu);
                if (ret)
                        continue;
                ret = 1;

                kvm_riscv_gstage_vmid_update(vcpu);

                kvm_riscv_check_vcpu_requests(vcpu);

                preempt_disable();

                /* Update AIA HW state before entering guest */
                ret = kvm_riscv_vcpu_aia_update(vcpu);
                if (ret <= 0) {
                        preempt_enable();
                        continue;
                }

                local_irq_disable();

                /*
                 * Ensure we set mode to IN_GUEST_MODE after we disable
                 * interrupts and before the final VCPU requests check.
                 * See the comment in kvm_vcpu_exiting_guest_mode() and
                 * Documentation/virt/kvm/vcpu-requests.rst
                 */
                vcpu->mode = IN_GUEST_MODE;

                kvm_vcpu_srcu_read_unlock(vcpu);
                smp_mb__after_srcu_read_unlock();

                /*
                 * VCPU interrupts may have been updated asynchronously,
                 * so reflect them in HW.
                 */
                kvm_riscv_vcpu_flush_interrupts(vcpu);

                /* Update HVIP CSR for current CPU */
                kvm_riscv_update_hvip(vcpu);

                if (ret <= 0 ||
                    kvm_riscv_gstage_vmid_ver_changed(&vcpu->kvm->arch.vmid) ||
                    kvm_request_pending(vcpu) ||
                    xfer_to_guest_mode_work_pending()) {
                        vcpu->mode = OUTSIDE_GUEST_MODE;
                        local_irq_enable();
                        preempt_enable();
                        kvm_vcpu_srcu_read_lock(vcpu);
                        continue;
                }

                /*
                 * Cleanup stale TLB entries
                 *
                 * Note: This should be done after the G-stage VMID has been
                 * updated using kvm_riscv_gstage_vmid_ver_changed()
                 */
                kvm_riscv_local_tlb_sanitize(vcpu);

                guest_timing_enter_irqoff();

                kvm_riscv_vcpu_enter_exit(vcpu);

                vcpu->mode = OUTSIDE_GUEST_MODE;
                vcpu->stat.exits++;

                /*
                 * Save SCAUSE, STVAL, HTVAL, and HTINST because we might
                 * get an interrupt between __kvm_riscv_switch_to() and
                 * local_irq_enable() which can potentially change CSRs.
                 */
                trap.sepc = vcpu->arch.guest_context.sepc;
                trap.scause = csr_read(CSR_SCAUSE);
                trap.stval = csr_read(CSR_STVAL);
                trap.htval = csr_read(CSR_HTVAL);
                trap.htinst = csr_read(CSR_HTINST);

                /* Sync up interrupt state with HW */
                kvm_riscv_vcpu_sync_interrupts(vcpu);

                /*
                 * We must ensure that any pending interrupts are taken before
                 * we exit guest timing so that timer ticks are accounted as
                 * guest time. Transiently unmask interrupts so that any
                 * pending interrupts are taken.
                 *
                 * There's no barrier which ensures that pending interrupts are
                 * recognised, so we just hope that the CPU takes any pending
                 * interrupts between the enable and disable.
                 */
                local_irq_enable();
                local_irq_disable();

                guest_timing_exit_irqoff();

                local_irq_enable();

                preempt_enable();

                kvm_vcpu_srcu_read_lock(vcpu);

                ret = kvm_riscv_vcpu_exit(vcpu, run, &trap);
        }

        kvm_sigset_deactivate(vcpu);

        vcpu_put(vcpu);

        kvm_vcpu_srcu_read_unlock(vcpu);

        return ret;
}