// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 * Copyright (C) 2023 Ventana Micro Systems Inc.
 *
 * Authors:
 *	Anup Patel <apatel@ventanamicro.com>
 */
10 #include <linux/bitops.h>
11 #include <linux/errno.h>
12 #include <linux/err.h>
13 #include <linux/uaccess.h>
14 #include <linux/kvm_host.h>
15 #include <asm/cacheflush.h>
16 #include <asm/hwcap.h>
17 #include <asm/kvm_vcpu_vector.h>
18 #include <asm/vector.h>
20 #define KVM_RISCV_BASE_ISA_MASK GENMASK(25, 0)
22 #define KVM_ISA_EXT_ARR(ext) \
23 [KVM_RISCV_ISA_EXT_##ext] = RISCV_ISA_EXT_##ext
25 /* Mapping between KVM ISA Extension ID & Host ISA extension ID */
26 static const unsigned long kvm_isa_ext_arr[] = {
27 /* Single letter extensions (alphabetically sorted) */
28 [KVM_RISCV_ISA_EXT_A] = RISCV_ISA_EXT_a,
29 [KVM_RISCV_ISA_EXT_C] = RISCV_ISA_EXT_c,
30 [KVM_RISCV_ISA_EXT_D] = RISCV_ISA_EXT_d,
31 [KVM_RISCV_ISA_EXT_F] = RISCV_ISA_EXT_f,
32 [KVM_RISCV_ISA_EXT_H] = RISCV_ISA_EXT_h,
33 [KVM_RISCV_ISA_EXT_I] = RISCV_ISA_EXT_i,
34 [KVM_RISCV_ISA_EXT_M] = RISCV_ISA_EXT_m,
35 [KVM_RISCV_ISA_EXT_V] = RISCV_ISA_EXT_v,
36 /* Multi letter extensions (alphabetically sorted) */
37 KVM_ISA_EXT_ARR(SSAIA),
38 KVM_ISA_EXT_ARR(SSTC),
39 KVM_ISA_EXT_ARR(SVINVAL),
40 KVM_ISA_EXT_ARR(SVNAPOT),
41 KVM_ISA_EXT_ARR(SVPBMT),
45 KVM_ISA_EXT_ARR(ZICBOM),
46 KVM_ISA_EXT_ARR(ZICBOZ),
47 KVM_ISA_EXT_ARR(ZICNTR),
48 KVM_ISA_EXT_ARR(ZICSR),
49 KVM_ISA_EXT_ARR(ZIFENCEI),
50 KVM_ISA_EXT_ARR(ZIHINTPAUSE),
51 KVM_ISA_EXT_ARR(ZIHPM),
54 static unsigned long kvm_riscv_vcpu_base2isa_ext(unsigned long base_ext)
58 for (i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++) {
59 if (kvm_isa_ext_arr[i] == base_ext)
63 return KVM_RISCV_ISA_EXT_MAX;
66 static bool kvm_riscv_vcpu_isa_enable_allowed(unsigned long ext)
69 case KVM_RISCV_ISA_EXT_H:
71 case KVM_RISCV_ISA_EXT_V:
72 return riscv_v_vstate_ctrl_user_allowed();
80 static bool kvm_riscv_vcpu_isa_disable_allowed(unsigned long ext)
83 case KVM_RISCV_ISA_EXT_A:
84 case KVM_RISCV_ISA_EXT_C:
85 case KVM_RISCV_ISA_EXT_I:
86 case KVM_RISCV_ISA_EXT_M:
87 case KVM_RISCV_ISA_EXT_SSAIA:
88 case KVM_RISCV_ISA_EXT_SSTC:
89 case KVM_RISCV_ISA_EXT_SVINVAL:
90 case KVM_RISCV_ISA_EXT_SVNAPOT:
91 case KVM_RISCV_ISA_EXT_ZBA:
92 case KVM_RISCV_ISA_EXT_ZBB:
93 case KVM_RISCV_ISA_EXT_ZBS:
94 case KVM_RISCV_ISA_EXT_ZICNTR:
95 case KVM_RISCV_ISA_EXT_ZICSR:
96 case KVM_RISCV_ISA_EXT_ZIFENCEI:
97 case KVM_RISCV_ISA_EXT_ZIHINTPAUSE:
98 case KVM_RISCV_ISA_EXT_ZIHPM:
107 void kvm_riscv_vcpu_setup_isa(struct kvm_vcpu *vcpu)
109 unsigned long host_isa, i;
111 for (i = 0; i < ARRAY_SIZE(kvm_isa_ext_arr); i++) {
112 host_isa = kvm_isa_ext_arr[i];
113 if (__riscv_isa_extension_available(NULL, host_isa) &&
114 kvm_riscv_vcpu_isa_enable_allowed(i))
115 set_bit(host_isa, vcpu->arch.isa);
119 static int kvm_riscv_vcpu_get_reg_config(struct kvm_vcpu *vcpu,
120 const struct kvm_one_reg *reg)
122 unsigned long __user *uaddr =
123 (unsigned long __user *)(unsigned long)reg->addr;
124 unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
126 KVM_REG_RISCV_CONFIG);
127 unsigned long reg_val;
129 if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
133 case KVM_REG_RISCV_CONFIG_REG(isa):
134 reg_val = vcpu->arch.isa[0] & KVM_RISCV_BASE_ISA_MASK;
136 case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size):
137 if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOM))
139 reg_val = riscv_cbom_block_size;
141 case KVM_REG_RISCV_CONFIG_REG(zicboz_block_size):
142 if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOZ))
144 reg_val = riscv_cboz_block_size;
146 case KVM_REG_RISCV_CONFIG_REG(mvendorid):
147 reg_val = vcpu->arch.mvendorid;
149 case KVM_REG_RISCV_CONFIG_REG(marchid):
150 reg_val = vcpu->arch.marchid;
152 case KVM_REG_RISCV_CONFIG_REG(mimpid):
153 reg_val = vcpu->arch.mimpid;
155 case KVM_REG_RISCV_CONFIG_REG(satp_mode):
156 reg_val = satp_mode >> SATP_MODE_SHIFT;
162 if (copy_to_user(uaddr, ®_val, KVM_REG_SIZE(reg->id)))
168 static int kvm_riscv_vcpu_set_reg_config(struct kvm_vcpu *vcpu,
169 const struct kvm_one_reg *reg)
171 unsigned long __user *uaddr =
172 (unsigned long __user *)(unsigned long)reg->addr;
173 unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
175 KVM_REG_RISCV_CONFIG);
176 unsigned long i, isa_ext, reg_val;
178 if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
181 if (copy_from_user(®_val, uaddr, KVM_REG_SIZE(reg->id)))
185 case KVM_REG_RISCV_CONFIG_REG(isa):
187 * This ONE REG interface is only defined for
188 * single letter extensions.
190 if (fls(reg_val) >= RISCV_ISA_EXT_BASE)
194 * Return early (i.e. do nothing) if reg_val is the same
195 * value retrievable via kvm_riscv_vcpu_get_reg_config().
197 if (reg_val == (vcpu->arch.isa[0] & KVM_RISCV_BASE_ISA_MASK))
200 if (!vcpu->arch.ran_atleast_once) {
201 /* Ignore the enable/disable request for certain extensions */
202 for (i = 0; i < RISCV_ISA_EXT_BASE; i++) {
203 isa_ext = kvm_riscv_vcpu_base2isa_ext(i);
204 if (isa_ext >= KVM_RISCV_ISA_EXT_MAX) {
208 if (!kvm_riscv_vcpu_isa_enable_allowed(isa_ext))
209 if (reg_val & BIT(i))
211 if (!kvm_riscv_vcpu_isa_disable_allowed(isa_ext))
212 if (!(reg_val & BIT(i)))
215 reg_val &= riscv_isa_extension_base(NULL);
216 /* Do not modify anything beyond single letter extensions */
217 reg_val = (vcpu->arch.isa[0] & ~KVM_RISCV_BASE_ISA_MASK) |
218 (reg_val & KVM_RISCV_BASE_ISA_MASK);
219 vcpu->arch.isa[0] = reg_val;
220 kvm_riscv_vcpu_fp_reset(vcpu);
225 case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size):
226 if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOM))
228 if (reg_val != riscv_cbom_block_size)
231 case KVM_REG_RISCV_CONFIG_REG(zicboz_block_size):
232 if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOZ))
234 if (reg_val != riscv_cboz_block_size)
237 case KVM_REG_RISCV_CONFIG_REG(mvendorid):
238 if (reg_val == vcpu->arch.mvendorid)
240 if (!vcpu->arch.ran_atleast_once)
241 vcpu->arch.mvendorid = reg_val;
245 case KVM_REG_RISCV_CONFIG_REG(marchid):
246 if (reg_val == vcpu->arch.marchid)
248 if (!vcpu->arch.ran_atleast_once)
249 vcpu->arch.marchid = reg_val;
253 case KVM_REG_RISCV_CONFIG_REG(mimpid):
254 if (reg_val == vcpu->arch.mimpid)
256 if (!vcpu->arch.ran_atleast_once)
257 vcpu->arch.mimpid = reg_val;
261 case KVM_REG_RISCV_CONFIG_REG(satp_mode):
262 if (reg_val != (satp_mode >> SATP_MODE_SHIFT))
272 static int kvm_riscv_vcpu_get_reg_core(struct kvm_vcpu *vcpu,
273 const struct kvm_one_reg *reg)
275 struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
276 unsigned long __user *uaddr =
277 (unsigned long __user *)(unsigned long)reg->addr;
278 unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
281 unsigned long reg_val;
283 if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
285 if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long))
288 if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc))
289 reg_val = cntx->sepc;
290 else if (KVM_REG_RISCV_CORE_REG(regs.pc) < reg_num &&
291 reg_num <= KVM_REG_RISCV_CORE_REG(regs.t6))
292 reg_val = ((unsigned long *)cntx)[reg_num];
293 else if (reg_num == KVM_REG_RISCV_CORE_REG(mode))
294 reg_val = (cntx->sstatus & SR_SPP) ?
295 KVM_RISCV_MODE_S : KVM_RISCV_MODE_U;
299 if (copy_to_user(uaddr, ®_val, KVM_REG_SIZE(reg->id)))
305 static int kvm_riscv_vcpu_set_reg_core(struct kvm_vcpu *vcpu,
306 const struct kvm_one_reg *reg)
308 struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
309 unsigned long __user *uaddr =
310 (unsigned long __user *)(unsigned long)reg->addr;
311 unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
314 unsigned long reg_val;
316 if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
318 if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long))
321 if (copy_from_user(®_val, uaddr, KVM_REG_SIZE(reg->id)))
324 if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc))
325 cntx->sepc = reg_val;
326 else if (KVM_REG_RISCV_CORE_REG(regs.pc) < reg_num &&
327 reg_num <= KVM_REG_RISCV_CORE_REG(regs.t6))
328 ((unsigned long *)cntx)[reg_num] = reg_val;
329 else if (reg_num == KVM_REG_RISCV_CORE_REG(mode)) {
330 if (reg_val == KVM_RISCV_MODE_S)
331 cntx->sstatus |= SR_SPP;
333 cntx->sstatus &= ~SR_SPP;
340 static int kvm_riscv_vcpu_general_get_csr(struct kvm_vcpu *vcpu,
341 unsigned long reg_num,
342 unsigned long *out_val)
344 struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
346 if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
349 if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
350 kvm_riscv_vcpu_flush_interrupts(vcpu);
351 *out_val = (csr->hvip >> VSIP_TO_HVIP_SHIFT) & VSIP_VALID_MASK;
352 *out_val |= csr->hvip & ~IRQ_LOCAL_MASK;
354 *out_val = ((unsigned long *)csr)[reg_num];
359 static int kvm_riscv_vcpu_general_set_csr(struct kvm_vcpu *vcpu,
360 unsigned long reg_num,
361 unsigned long reg_val)
363 struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
365 if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
368 if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
369 reg_val &= VSIP_VALID_MASK;
370 reg_val <<= VSIP_TO_HVIP_SHIFT;
373 ((unsigned long *)csr)[reg_num] = reg_val;
375 if (reg_num == KVM_REG_RISCV_CSR_REG(sip))
376 WRITE_ONCE(vcpu->arch.irqs_pending_mask[0], 0);
381 static int kvm_riscv_vcpu_get_reg_csr(struct kvm_vcpu *vcpu,
382 const struct kvm_one_reg *reg)
385 unsigned long __user *uaddr =
386 (unsigned long __user *)(unsigned long)reg->addr;
387 unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
390 unsigned long reg_val, reg_subtype;
392 if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
395 reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
396 reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;
397 switch (reg_subtype) {
398 case KVM_REG_RISCV_CSR_GENERAL:
399 rc = kvm_riscv_vcpu_general_get_csr(vcpu, reg_num, ®_val);
401 case KVM_REG_RISCV_CSR_AIA:
402 rc = kvm_riscv_vcpu_aia_get_csr(vcpu, reg_num, ®_val);
411 if (copy_to_user(uaddr, ®_val, KVM_REG_SIZE(reg->id)))
417 static int kvm_riscv_vcpu_set_reg_csr(struct kvm_vcpu *vcpu,
418 const struct kvm_one_reg *reg)
421 unsigned long __user *uaddr =
422 (unsigned long __user *)(unsigned long)reg->addr;
423 unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
426 unsigned long reg_val, reg_subtype;
428 if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
431 if (copy_from_user(®_val, uaddr, KVM_REG_SIZE(reg->id)))
434 reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
435 reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;
436 switch (reg_subtype) {
437 case KVM_REG_RISCV_CSR_GENERAL:
438 rc = kvm_riscv_vcpu_general_set_csr(vcpu, reg_num, reg_val);
440 case KVM_REG_RISCV_CSR_AIA:
441 rc = kvm_riscv_vcpu_aia_set_csr(vcpu, reg_num, reg_val);
453 static int riscv_vcpu_get_isa_ext_single(struct kvm_vcpu *vcpu,
454 unsigned long reg_num,
455 unsigned long *reg_val)
457 unsigned long host_isa_ext;
459 if (reg_num >= KVM_RISCV_ISA_EXT_MAX ||
460 reg_num >= ARRAY_SIZE(kvm_isa_ext_arr))
463 host_isa_ext = kvm_isa_ext_arr[reg_num];
464 if (!__riscv_isa_extension_available(NULL, host_isa_ext))
468 if (__riscv_isa_extension_available(vcpu->arch.isa, host_isa_ext))
469 *reg_val = 1; /* Mark the given extension as available */
474 static int riscv_vcpu_set_isa_ext_single(struct kvm_vcpu *vcpu,
475 unsigned long reg_num,
476 unsigned long reg_val)
478 unsigned long host_isa_ext;
480 if (reg_num >= KVM_RISCV_ISA_EXT_MAX ||
481 reg_num >= ARRAY_SIZE(kvm_isa_ext_arr))
484 host_isa_ext = kvm_isa_ext_arr[reg_num];
485 if (!__riscv_isa_extension_available(NULL, host_isa_ext))
488 if (reg_val == test_bit(host_isa_ext, vcpu->arch.isa))
491 if (!vcpu->arch.ran_atleast_once) {
493 * All multi-letter extension and a few single letter
494 * extension can be disabled
497 kvm_riscv_vcpu_isa_enable_allowed(reg_num))
498 set_bit(host_isa_ext, vcpu->arch.isa);
500 kvm_riscv_vcpu_isa_disable_allowed(reg_num))
501 clear_bit(host_isa_ext, vcpu->arch.isa);
504 kvm_riscv_vcpu_fp_reset(vcpu);
512 static int riscv_vcpu_get_isa_ext_multi(struct kvm_vcpu *vcpu,
513 unsigned long reg_num,
514 unsigned long *reg_val)
516 unsigned long i, ext_id, ext_val;
518 if (reg_num > KVM_REG_RISCV_ISA_MULTI_REG_LAST)
521 for (i = 0; i < BITS_PER_LONG; i++) {
522 ext_id = i + reg_num * BITS_PER_LONG;
523 if (ext_id >= KVM_RISCV_ISA_EXT_MAX)
527 riscv_vcpu_get_isa_ext_single(vcpu, ext_id, &ext_val);
529 *reg_val |= KVM_REG_RISCV_ISA_MULTI_MASK(ext_id);
535 static int riscv_vcpu_set_isa_ext_multi(struct kvm_vcpu *vcpu,
536 unsigned long reg_num,
537 unsigned long reg_val, bool enable)
539 unsigned long i, ext_id;
541 if (reg_num > KVM_REG_RISCV_ISA_MULTI_REG_LAST)
544 for_each_set_bit(i, ®_val, BITS_PER_LONG) {
545 ext_id = i + reg_num * BITS_PER_LONG;
546 if (ext_id >= KVM_RISCV_ISA_EXT_MAX)
549 riscv_vcpu_set_isa_ext_single(vcpu, ext_id, enable);
555 static int kvm_riscv_vcpu_get_reg_isa_ext(struct kvm_vcpu *vcpu,
556 const struct kvm_one_reg *reg)
559 unsigned long __user *uaddr =
560 (unsigned long __user *)(unsigned long)reg->addr;
561 unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
563 KVM_REG_RISCV_ISA_EXT);
564 unsigned long reg_val, reg_subtype;
566 if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
569 reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
570 reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;
573 switch (reg_subtype) {
574 case KVM_REG_RISCV_ISA_SINGLE:
575 rc = riscv_vcpu_get_isa_ext_single(vcpu, reg_num, ®_val);
577 case KVM_REG_RISCV_ISA_MULTI_EN:
578 case KVM_REG_RISCV_ISA_MULTI_DIS:
579 rc = riscv_vcpu_get_isa_ext_multi(vcpu, reg_num, ®_val);
580 if (!rc && reg_subtype == KVM_REG_RISCV_ISA_MULTI_DIS)
589 if (copy_to_user(uaddr, ®_val, KVM_REG_SIZE(reg->id)))
595 static int kvm_riscv_vcpu_set_reg_isa_ext(struct kvm_vcpu *vcpu,
596 const struct kvm_one_reg *reg)
598 unsigned long __user *uaddr =
599 (unsigned long __user *)(unsigned long)reg->addr;
600 unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
602 KVM_REG_RISCV_ISA_EXT);
603 unsigned long reg_val, reg_subtype;
605 if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
608 reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
609 reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;
611 if (copy_from_user(®_val, uaddr, KVM_REG_SIZE(reg->id)))
614 switch (reg_subtype) {
615 case KVM_REG_RISCV_ISA_SINGLE:
616 return riscv_vcpu_set_isa_ext_single(vcpu, reg_num, reg_val);
617 case KVM_REG_RISCV_SBI_MULTI_EN:
618 return riscv_vcpu_set_isa_ext_multi(vcpu, reg_num, reg_val, true);
619 case KVM_REG_RISCV_SBI_MULTI_DIS:
620 return riscv_vcpu_set_isa_ext_multi(vcpu, reg_num, reg_val, false);
628 static int copy_config_reg_indices(const struct kvm_vcpu *vcpu,
629 u64 __user *uindices)
633 for (int i = 0; i < sizeof(struct kvm_riscv_config)/sizeof(unsigned long);
639 * Avoid reporting config reg if the corresponding extension
642 if (i == KVM_REG_RISCV_CONFIG_REG(zicbom_block_size) &&
643 !riscv_isa_extension_available(vcpu->arch.isa, ZICBOM))
645 else if (i == KVM_REG_RISCV_CONFIG_REG(zicboz_block_size) &&
646 !riscv_isa_extension_available(vcpu->arch.isa, ZICBOZ))
649 size = IS_ENABLED(CONFIG_32BIT) ? KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
650 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CONFIG | i;
653 if (put_user(reg, uindices))
664 static unsigned long num_config_regs(const struct kvm_vcpu *vcpu)
666 return copy_config_reg_indices(vcpu, NULL);
669 static inline unsigned long num_core_regs(void)
671 return sizeof(struct kvm_riscv_core) / sizeof(unsigned long);
674 static int copy_core_reg_indices(u64 __user *uindices)
676 int n = num_core_regs();
678 for (int i = 0; i < n; i++) {
679 u64 size = IS_ENABLED(CONFIG_32BIT) ?
680 KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
681 u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CORE | i;
684 if (put_user(reg, uindices))
693 static inline unsigned long num_csr_regs(const struct kvm_vcpu *vcpu)
695 unsigned long n = sizeof(struct kvm_riscv_csr) / sizeof(unsigned long);
697 if (riscv_isa_extension_available(vcpu->arch.isa, SSAIA))
698 n += sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long);
703 static int copy_csr_reg_indices(const struct kvm_vcpu *vcpu,
704 u64 __user *uindices)
706 int n1 = sizeof(struct kvm_riscv_csr) / sizeof(unsigned long);
709 /* copy general csr regs */
710 for (int i = 0; i < n1; i++) {
711 u64 size = IS_ENABLED(CONFIG_32BIT) ?
712 KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
713 u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CSR |
714 KVM_REG_RISCV_CSR_GENERAL | i;
717 if (put_user(reg, uindices))
723 /* copy AIA csr regs */
724 if (riscv_isa_extension_available(vcpu->arch.isa, SSAIA)) {
725 n2 = sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long);
727 for (int i = 0; i < n2; i++) {
728 u64 size = IS_ENABLED(CONFIG_32BIT) ?
729 KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
730 u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CSR |
731 KVM_REG_RISCV_CSR_AIA | i;
734 if (put_user(reg, uindices))
744 static inline unsigned long num_timer_regs(void)
746 return sizeof(struct kvm_riscv_timer) / sizeof(u64);
749 static int copy_timer_reg_indices(u64 __user *uindices)
751 int n = num_timer_regs();
753 for (int i = 0; i < n; i++) {
754 u64 reg = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
755 KVM_REG_RISCV_TIMER | i;
758 if (put_user(reg, uindices))
767 static inline unsigned long num_fp_f_regs(const struct kvm_vcpu *vcpu)
769 const struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
771 if (riscv_isa_extension_available(vcpu->arch.isa, f))
772 return sizeof(cntx->fp.f) / sizeof(u32);
777 static int copy_fp_f_reg_indices(const struct kvm_vcpu *vcpu,
778 u64 __user *uindices)
780 int n = num_fp_f_regs(vcpu);
782 for (int i = 0; i < n; i++) {
783 u64 reg = KVM_REG_RISCV | KVM_REG_SIZE_U32 |
784 KVM_REG_RISCV_FP_F | i;
787 if (put_user(reg, uindices))
796 static inline unsigned long num_fp_d_regs(const struct kvm_vcpu *vcpu)
798 const struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
800 if (riscv_isa_extension_available(vcpu->arch.isa, d))
801 return sizeof(cntx->fp.d.f) / sizeof(u64) + 1;
806 static int copy_fp_d_reg_indices(const struct kvm_vcpu *vcpu,
807 u64 __user *uindices)
810 int n = num_fp_d_regs(vcpu);
813 /* copy fp.d.f indices */
814 for (i = 0; i < n-1; i++) {
815 reg = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
816 KVM_REG_RISCV_FP_D | i;
819 if (put_user(reg, uindices))
825 /* copy fp.d.fcsr indices */
826 reg = KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_D | i;
828 if (put_user(reg, uindices))
836 static int copy_isa_ext_reg_indices(const struct kvm_vcpu *vcpu,
837 u64 __user *uindices)
840 unsigned long isa_ext;
842 for (int i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++) {
843 u64 size = IS_ENABLED(CONFIG_32BIT) ?
844 KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
845 u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_ISA_EXT | i;
847 isa_ext = kvm_isa_ext_arr[i];
848 if (!__riscv_isa_extension_available(NULL, isa_ext))
852 if (put_user(reg, uindices))
863 static inline unsigned long num_isa_ext_regs(const struct kvm_vcpu *vcpu)
865 return copy_isa_ext_reg_indices(vcpu, NULL);;
868 static inline unsigned long num_sbi_ext_regs(void)
871 * number of KVM_REG_RISCV_SBI_SINGLE +
872 * 2 x (number of KVM_REG_RISCV_SBI_MULTI)
874 return KVM_RISCV_SBI_EXT_MAX + 2*(KVM_REG_RISCV_SBI_MULTI_REG_LAST+1);
877 static int copy_sbi_ext_reg_indices(u64 __user *uindices)
881 /* copy KVM_REG_RISCV_SBI_SINGLE */
882 n = KVM_RISCV_SBI_EXT_MAX;
883 for (int i = 0; i < n; i++) {
884 u64 size = IS_ENABLED(CONFIG_32BIT) ?
885 KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
886 u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_SBI_EXT |
887 KVM_REG_RISCV_SBI_SINGLE | i;
890 if (put_user(reg, uindices))
896 /* copy KVM_REG_RISCV_SBI_MULTI */
897 n = KVM_REG_RISCV_SBI_MULTI_REG_LAST + 1;
898 for (int i = 0; i < n; i++) {
899 u64 size = IS_ENABLED(CONFIG_32BIT) ?
900 KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
901 u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_SBI_EXT |
902 KVM_REG_RISCV_SBI_MULTI_EN | i;
905 if (put_user(reg, uindices))
910 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_SBI_EXT |
911 KVM_REG_RISCV_SBI_MULTI_DIS | i;
914 if (put_user(reg, uindices))
920 return num_sbi_ext_regs();
/**
 * kvm_riscv_vcpu_num_regs - how many registers do we present via KVM_GET/SET_ONE_REG
 *
 * This is for all registers.
 */
unsigned long kvm_riscv_vcpu_num_regs(struct kvm_vcpu *vcpu)
{
	unsigned long res = 0;

	res += num_config_regs(vcpu);
	res += num_core_regs();
	res += num_csr_regs(vcpu);
	res += num_timer_regs();
	res += num_fp_f_regs(vcpu);
	res += num_fp_d_regs(vcpu);
	res += num_isa_ext_regs(vcpu);
	res += num_sbi_ext_regs();

	return res;
}
945 * kvm_riscv_vcpu_copy_reg_indices - get indices of all registers.
947 int kvm_riscv_vcpu_copy_reg_indices(struct kvm_vcpu *vcpu,
948 u64 __user *uindices)
952 ret = copy_config_reg_indices(vcpu, uindices);
957 ret = copy_core_reg_indices(uindices);
962 ret = copy_csr_reg_indices(vcpu, uindices);
967 ret = copy_timer_reg_indices(uindices);
972 ret = copy_fp_f_reg_indices(vcpu, uindices);
977 ret = copy_fp_d_reg_indices(vcpu, uindices);
982 ret = copy_isa_ext_reg_indices(vcpu, uindices);
987 ret = copy_sbi_ext_reg_indices(uindices);
994 int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu,
995 const struct kvm_one_reg *reg)
997 switch (reg->id & KVM_REG_RISCV_TYPE_MASK) {
998 case KVM_REG_RISCV_CONFIG:
999 return kvm_riscv_vcpu_set_reg_config(vcpu, reg);
1000 case KVM_REG_RISCV_CORE:
1001 return kvm_riscv_vcpu_set_reg_core(vcpu, reg);
1002 case KVM_REG_RISCV_CSR:
1003 return kvm_riscv_vcpu_set_reg_csr(vcpu, reg);
1004 case KVM_REG_RISCV_TIMER:
1005 return kvm_riscv_vcpu_set_reg_timer(vcpu, reg);
1006 case KVM_REG_RISCV_FP_F:
1007 return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
1008 KVM_REG_RISCV_FP_F);
1009 case KVM_REG_RISCV_FP_D:
1010 return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
1011 KVM_REG_RISCV_FP_D);
1012 case KVM_REG_RISCV_ISA_EXT:
1013 return kvm_riscv_vcpu_set_reg_isa_ext(vcpu, reg);
1014 case KVM_REG_RISCV_SBI_EXT:
1015 return kvm_riscv_vcpu_set_reg_sbi_ext(vcpu, reg);
1016 case KVM_REG_RISCV_VECTOR:
1017 return kvm_riscv_vcpu_set_reg_vector(vcpu, reg);
1025 int kvm_riscv_vcpu_get_reg(struct kvm_vcpu *vcpu,
1026 const struct kvm_one_reg *reg)
1028 switch (reg->id & KVM_REG_RISCV_TYPE_MASK) {
1029 case KVM_REG_RISCV_CONFIG:
1030 return kvm_riscv_vcpu_get_reg_config(vcpu, reg);
1031 case KVM_REG_RISCV_CORE:
1032 return kvm_riscv_vcpu_get_reg_core(vcpu, reg);
1033 case KVM_REG_RISCV_CSR:
1034 return kvm_riscv_vcpu_get_reg_csr(vcpu, reg);
1035 case KVM_REG_RISCV_TIMER:
1036 return kvm_riscv_vcpu_get_reg_timer(vcpu, reg);
1037 case KVM_REG_RISCV_FP_F:
1038 return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
1039 KVM_REG_RISCV_FP_F);
1040 case KVM_REG_RISCV_FP_D:
1041 return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
1042 KVM_REG_RISCV_FP_D);
1043 case KVM_REG_RISCV_ISA_EXT:
1044 return kvm_riscv_vcpu_get_reg_isa_ext(vcpu, reg);
1045 case KVM_REG_RISCV_SBI_EXT:
1046 return kvm_riscv_vcpu_get_reg_sbi_ext(vcpu, reg);
1047 case KVM_REG_RISCV_VECTOR:
1048 return kvm_riscv_vcpu_get_reg_vector(vcpu, reg);