Merge tag 'kvm-riscv-6.6-1' of https://github.com/kvm-riscv/linux into HEAD
author: Paolo Bonzini <pbonzini@redhat.com>
Thu, 31 Aug 2023 17:25:55 +0000 (13:25 -0400)
committer: Paolo Bonzini <pbonzini@redhat.com>
Thu, 31 Aug 2023 17:25:55 +0000 (13:25 -0400)
KVM/riscv changes for 6.6

- Zba, Zbs, Zicntr, Zicsr, Zifencei, and Zihpm support for Guest/VM
- Added ONE_REG interface for SATP mode
- Added ONE_REG interface to enable/disable multiple ISA extensions
- Improved error codes returned by ONE_REG interfaces
- Added KVM_GET_REG_LIST ioctl() implementation for KVM RISC-V
- Added get-reg-list selftest for KVM RISC-V

21 files changed:
Documentation/virt/kvm/api.rst
arch/riscv/include/asm/csr.h
arch/riscv/include/asm/kvm_host.h
arch/riscv/include/asm/kvm_vcpu_vector.h
arch/riscv/include/uapi/asm/kvm.h
arch/riscv/kvm/Makefile
arch/riscv/kvm/aia.c
arch/riscv/kvm/vcpu.c
arch/riscv/kvm/vcpu_fp.c
arch/riscv/kvm/vcpu_onereg.c [new file with mode: 0644]
arch/riscv/kvm/vcpu_sbi.c
arch/riscv/kvm/vcpu_timer.c
arch/riscv/kvm/vcpu_vector.c
tools/testing/selftests/kvm/Makefile
tools/testing/selftests/kvm/aarch64/get-reg-list.c
tools/testing/selftests/kvm/get-reg-list.c [new file with mode: 0644]
tools/testing/selftests/kvm/include/kvm_util_base.h
tools/testing/selftests/kvm/include/riscv/processor.h
tools/testing/selftests/kvm/include/test_util.h
tools/testing/selftests/kvm/lib/test_util.c
tools/testing/selftests/kvm/riscv/get-reg-list.c [new file with mode: 0644]

index c0ddd30..660d9ca 100644 (file)
@@ -2259,6 +2259,8 @@ Errors:
   EINVAL   invalid register ID, or no such register or used with VMs in
            protected virtualization mode on s390
   EPERM    (arm64) register access not allowed before vcpu finalization
+  EBUSY    (riscv) changing register value not allowed after the vcpu
+           has run at least once
   ======   ============================================================
 
 (These error codes are indicative only: do not rely on a specific error
@@ -3499,7 +3501,7 @@ VCPU matching underlying host.
 ---------------------
 
 :Capability: basic
-:Architectures: arm64, mips
+:Architectures: arm64, mips, riscv
 :Type: vcpu ioctl
 :Parameters: struct kvm_reg_list (in/out)
 :Returns: 0 on success; -1 on error
index 7bac43a..777cb82 100644 (file)
@@ -54,6 +54,7 @@
 #ifndef CONFIG_64BIT
 #define SATP_PPN       _AC(0x003FFFFF, UL)
 #define SATP_MODE_32   _AC(0x80000000, UL)
+#define SATP_MODE_SHIFT        31
 #define SATP_ASID_BITS 9
 #define SATP_ASID_SHIFT        22
 #define SATP_ASID_MASK _AC(0x1FF, UL)
@@ -62,6 +63,7 @@
 #define SATP_MODE_39   _AC(0x8000000000000000, UL)
 #define SATP_MODE_48   _AC(0x9000000000000000, UL)
 #define SATP_MODE_57   _AC(0xa000000000000000, UL)
+#define SATP_MODE_SHIFT        60
 #define SATP_ASID_BITS 16
 #define SATP_ASID_SHIFT        44
 #define SATP_ASID_MASK _AC(0xFFFF, UL)
index 2d8ee53..1ebf20d 100644 (file)
@@ -337,6 +337,15 @@ int kvm_riscv_vcpu_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
 
 void __kvm_riscv_switch_to(struct kvm_vcpu_arch *vcpu_arch);
 
+void kvm_riscv_vcpu_setup_isa(struct kvm_vcpu *vcpu);
+unsigned long kvm_riscv_vcpu_num_regs(struct kvm_vcpu *vcpu);
+int kvm_riscv_vcpu_copy_reg_indices(struct kvm_vcpu *vcpu,
+                                   u64 __user *uindices);
+int kvm_riscv_vcpu_get_reg(struct kvm_vcpu *vcpu,
+                          const struct kvm_one_reg *reg);
+int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu,
+                          const struct kvm_one_reg *reg);
+
 int kvm_riscv_vcpu_set_interrupt(struct kvm_vcpu *vcpu, unsigned int irq);
 int kvm_riscv_vcpu_unset_interrupt(struct kvm_vcpu *vcpu, unsigned int irq);
 void kvm_riscv_vcpu_flush_interrupts(struct kvm_vcpu *vcpu);
index ff994fd..27f5bcc 100644 (file)
@@ -74,9 +74,7 @@ static inline void kvm_riscv_vcpu_free_vector_context(struct kvm_vcpu *vcpu)
 #endif
 
 int kvm_riscv_vcpu_get_reg_vector(struct kvm_vcpu *vcpu,
-                                 const struct kvm_one_reg *reg,
-                                 unsigned long rtype);
+                                 const struct kvm_one_reg *reg);
 int kvm_riscv_vcpu_set_reg_vector(struct kvm_vcpu *vcpu,
-                                 const struct kvm_one_reg *reg,
-                                 unsigned long rtype);
+                                 const struct kvm_one_reg *reg);
 #endif
index 930fdc4..992c5e4 100644 (file)
@@ -55,6 +55,7 @@ struct kvm_riscv_config {
        unsigned long marchid;
        unsigned long mimpid;
        unsigned long zicboz_block_size;
+       unsigned long satp_mode;
 };
 
 /* CORE registers for KVM_GET_ONE_REG and KVM_SET_ONE_REG */
@@ -124,6 +125,12 @@ enum KVM_RISCV_ISA_EXT_ID {
        KVM_RISCV_ISA_EXT_SSAIA,
        KVM_RISCV_ISA_EXT_V,
        KVM_RISCV_ISA_EXT_SVNAPOT,
+       KVM_RISCV_ISA_EXT_ZBA,
+       KVM_RISCV_ISA_EXT_ZBS,
+       KVM_RISCV_ISA_EXT_ZICNTR,
+       KVM_RISCV_ISA_EXT_ZICSR,
+       KVM_RISCV_ISA_EXT_ZIFENCEI,
+       KVM_RISCV_ISA_EXT_ZIHPM,
        KVM_RISCV_ISA_EXT_MAX,
 };
 
@@ -193,6 +200,15 @@ enum KVM_RISCV_SBI_EXT_ID {
 
 /* ISA Extension registers are mapped as type 7 */
 #define KVM_REG_RISCV_ISA_EXT          (0x07 << KVM_REG_RISCV_TYPE_SHIFT)
+#define KVM_REG_RISCV_ISA_SINGLE       (0x0 << KVM_REG_RISCV_SUBTYPE_SHIFT)
+#define KVM_REG_RISCV_ISA_MULTI_EN     (0x1 << KVM_REG_RISCV_SUBTYPE_SHIFT)
+#define KVM_REG_RISCV_ISA_MULTI_DIS    (0x2 << KVM_REG_RISCV_SUBTYPE_SHIFT)
+#define KVM_REG_RISCV_ISA_MULTI_REG(__ext_id)  \
+               ((__ext_id) / __BITS_PER_LONG)
+#define KVM_REG_RISCV_ISA_MULTI_MASK(__ext_id) \
+               (1UL << ((__ext_id) % __BITS_PER_LONG))
+#define KVM_REG_RISCV_ISA_MULTI_REG_LAST       \
+               KVM_REG_RISCV_ISA_MULTI_REG(KVM_RISCV_ISA_EXT_MAX - 1)
 
 /* SBI extension registers are mapped as type 8 */
 #define KVM_REG_RISCV_SBI_EXT          (0x08 << KVM_REG_RISCV_TYPE_SHIFT)
index fee0671..4c2067f 100644 (file)
@@ -19,6 +19,7 @@ kvm-y += vcpu_exit.o
 kvm-y += vcpu_fp.o
 kvm-y += vcpu_vector.o
 kvm-y += vcpu_insn.o
+kvm-y += vcpu_onereg.o
 kvm-y += vcpu_switch.o
 kvm-y += vcpu_sbi.o
 kvm-$(CONFIG_RISCV_SBI_V01) += vcpu_sbi_v01.o
index 585a3b4..74bb274 100644 (file)
@@ -176,7 +176,7 @@ int kvm_riscv_vcpu_aia_get_csr(struct kvm_vcpu *vcpu,
        struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr;
 
        if (reg_num >= sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long))
-               return -EINVAL;
+               return -ENOENT;
 
        *out_val = 0;
        if (kvm_riscv_aia_available())
@@ -192,7 +192,7 @@ int kvm_riscv_vcpu_aia_set_csr(struct kvm_vcpu *vcpu,
        struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr;
 
        if (reg_num >= sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long))
-               return -EINVAL;
+               return -ENOENT;
 
        if (kvm_riscv_aia_available()) {
                ((unsigned long *)csr)[reg_num] = val;
index d12ef99..82229db 100644 (file)
 #include <linux/kdebug.h>
 #include <linux/module.h>
 #include <linux/percpu.h>
-#include <linux/uaccess.h>
 #include <linux/vmalloc.h>
 #include <linux/sched/signal.h>
 #include <linux/fs.h>
 #include <linux/kvm_host.h>
 #include <asm/csr.h>
 #include <asm/cacheflush.h>
-#include <asm/hwcap.h>
-#include <asm/sbi.h>
-#include <asm/vector.h>
 #include <asm/kvm_vcpu_vector.h>
 
 const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
@@ -46,79 +42,6 @@ const struct kvm_stats_header kvm_vcpu_stats_header = {
                       sizeof(kvm_vcpu_stats_desc),
 };
 
-#define KVM_RISCV_BASE_ISA_MASK                GENMASK(25, 0)
-
-#define KVM_ISA_EXT_ARR(ext)           [KVM_RISCV_ISA_EXT_##ext] = RISCV_ISA_EXT_##ext
-
-/* Mapping between KVM ISA Extension ID & Host ISA extension ID */
-static const unsigned long kvm_isa_ext_arr[] = {
-       [KVM_RISCV_ISA_EXT_A] = RISCV_ISA_EXT_a,
-       [KVM_RISCV_ISA_EXT_C] = RISCV_ISA_EXT_c,
-       [KVM_RISCV_ISA_EXT_D] = RISCV_ISA_EXT_d,
-       [KVM_RISCV_ISA_EXT_F] = RISCV_ISA_EXT_f,
-       [KVM_RISCV_ISA_EXT_H] = RISCV_ISA_EXT_h,
-       [KVM_RISCV_ISA_EXT_I] = RISCV_ISA_EXT_i,
-       [KVM_RISCV_ISA_EXT_M] = RISCV_ISA_EXT_m,
-       [KVM_RISCV_ISA_EXT_V] = RISCV_ISA_EXT_v,
-
-       KVM_ISA_EXT_ARR(SSAIA),
-       KVM_ISA_EXT_ARR(SSTC),
-       KVM_ISA_EXT_ARR(SVINVAL),
-       KVM_ISA_EXT_ARR(SVNAPOT),
-       KVM_ISA_EXT_ARR(SVPBMT),
-       KVM_ISA_EXT_ARR(ZBB),
-       KVM_ISA_EXT_ARR(ZIHINTPAUSE),
-       KVM_ISA_EXT_ARR(ZICBOM),
-       KVM_ISA_EXT_ARR(ZICBOZ),
-};
-
-static unsigned long kvm_riscv_vcpu_base2isa_ext(unsigned long base_ext)
-{
-       unsigned long i;
-
-       for (i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++) {
-               if (kvm_isa_ext_arr[i] == base_ext)
-                       return i;
-       }
-
-       return KVM_RISCV_ISA_EXT_MAX;
-}
-
-static bool kvm_riscv_vcpu_isa_enable_allowed(unsigned long ext)
-{
-       switch (ext) {
-       case KVM_RISCV_ISA_EXT_H:
-               return false;
-       case KVM_RISCV_ISA_EXT_V:
-               return riscv_v_vstate_ctrl_user_allowed();
-       default:
-               break;
-       }
-
-       return true;
-}
-
-static bool kvm_riscv_vcpu_isa_disable_allowed(unsigned long ext)
-{
-       switch (ext) {
-       case KVM_RISCV_ISA_EXT_A:
-       case KVM_RISCV_ISA_EXT_C:
-       case KVM_RISCV_ISA_EXT_I:
-       case KVM_RISCV_ISA_EXT_M:
-       case KVM_RISCV_ISA_EXT_SSAIA:
-       case KVM_RISCV_ISA_EXT_SSTC:
-       case KVM_RISCV_ISA_EXT_SVINVAL:
-       case KVM_RISCV_ISA_EXT_SVNAPOT:
-       case KVM_RISCV_ISA_EXT_ZIHINTPAUSE:
-       case KVM_RISCV_ISA_EXT_ZBB:
-               return false;
-       default:
-               break;
-       }
-
-       return true;
-}
-
 static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu)
 {
        struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
@@ -176,7 +99,6 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
        int rc;
        struct kvm_cpu_context *cntx;
        struct kvm_vcpu_csr *reset_csr = &vcpu->arch.guest_reset_csr;
-       unsigned long host_isa, i;
 
        /* Mark this VCPU never ran */
        vcpu->arch.ran_atleast_once = false;
@@ -184,12 +106,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
        bitmap_zero(vcpu->arch.isa, RISCV_ISA_EXT_MAX);
 
        /* Setup ISA features available to VCPU */
-       for (i = 0; i < ARRAY_SIZE(kvm_isa_ext_arr); i++) {
-               host_isa = kvm_isa_ext_arr[i];
-               if (__riscv_isa_extension_available(NULL, host_isa) &&
-                   kvm_riscv_vcpu_isa_enable_allowed(i))
-                       set_bit(host_isa, vcpu->arch.isa);
-       }
+       kvm_riscv_vcpu_setup_isa(vcpu);
 
        /* Setup vendor, arch, and implementation details */
        vcpu->arch.mvendorid = sbi_get_mvendorid();
@@ -294,450 +211,6 @@ vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
        return VM_FAULT_SIGBUS;
 }
 
-static int kvm_riscv_vcpu_get_reg_config(struct kvm_vcpu *vcpu,
-                                        const struct kvm_one_reg *reg)
-{
-       unsigned long __user *uaddr =
-                       (unsigned long __user *)(unsigned long)reg->addr;
-       unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
-                                           KVM_REG_SIZE_MASK |
-                                           KVM_REG_RISCV_CONFIG);
-       unsigned long reg_val;
-
-       if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
-               return -EINVAL;
-
-       switch (reg_num) {
-       case KVM_REG_RISCV_CONFIG_REG(isa):
-               reg_val = vcpu->arch.isa[0] & KVM_RISCV_BASE_ISA_MASK;
-               break;
-       case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size):
-               if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOM))
-                       return -EINVAL;
-               reg_val = riscv_cbom_block_size;
-               break;
-       case KVM_REG_RISCV_CONFIG_REG(zicboz_block_size):
-               if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOZ))
-                       return -EINVAL;
-               reg_val = riscv_cboz_block_size;
-               break;
-       case KVM_REG_RISCV_CONFIG_REG(mvendorid):
-               reg_val = vcpu->arch.mvendorid;
-               break;
-       case KVM_REG_RISCV_CONFIG_REG(marchid):
-               reg_val = vcpu->arch.marchid;
-               break;
-       case KVM_REG_RISCV_CONFIG_REG(mimpid):
-               reg_val = vcpu->arch.mimpid;
-               break;
-       default:
-               return -EINVAL;
-       }
-
-       if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
-               return -EFAULT;
-
-       return 0;
-}
-
-static int kvm_riscv_vcpu_set_reg_config(struct kvm_vcpu *vcpu,
-                                        const struct kvm_one_reg *reg)
-{
-       unsigned long __user *uaddr =
-                       (unsigned long __user *)(unsigned long)reg->addr;
-       unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
-                                           KVM_REG_SIZE_MASK |
-                                           KVM_REG_RISCV_CONFIG);
-       unsigned long i, isa_ext, reg_val;
-
-       if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
-               return -EINVAL;
-
-       if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
-               return -EFAULT;
-
-       switch (reg_num) {
-       case KVM_REG_RISCV_CONFIG_REG(isa):
-               /*
-                * This ONE REG interface is only defined for
-                * single letter extensions.
-                */
-               if (fls(reg_val) >= RISCV_ISA_EXT_BASE)
-                       return -EINVAL;
-
-               if (!vcpu->arch.ran_atleast_once) {
-                       /* Ignore the enable/disable request for certain extensions */
-                       for (i = 0; i < RISCV_ISA_EXT_BASE; i++) {
-                               isa_ext = kvm_riscv_vcpu_base2isa_ext(i);
-                               if (isa_ext >= KVM_RISCV_ISA_EXT_MAX) {
-                                       reg_val &= ~BIT(i);
-                                       continue;
-                               }
-                               if (!kvm_riscv_vcpu_isa_enable_allowed(isa_ext))
-                                       if (reg_val & BIT(i))
-                                               reg_val &= ~BIT(i);
-                               if (!kvm_riscv_vcpu_isa_disable_allowed(isa_ext))
-                                       if (!(reg_val & BIT(i)))
-                                               reg_val |= BIT(i);
-                       }
-                       reg_val &= riscv_isa_extension_base(NULL);
-                       /* Do not modify anything beyond single letter extensions */
-                       reg_val = (vcpu->arch.isa[0] & ~KVM_RISCV_BASE_ISA_MASK) |
-                                 (reg_val & KVM_RISCV_BASE_ISA_MASK);
-                       vcpu->arch.isa[0] = reg_val;
-                       kvm_riscv_vcpu_fp_reset(vcpu);
-               } else {
-                       return -EOPNOTSUPP;
-               }
-               break;
-       case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size):
-               return -EOPNOTSUPP;
-       case KVM_REG_RISCV_CONFIG_REG(zicboz_block_size):
-               return -EOPNOTSUPP;
-       case KVM_REG_RISCV_CONFIG_REG(mvendorid):
-               if (!vcpu->arch.ran_atleast_once)
-                       vcpu->arch.mvendorid = reg_val;
-               else
-                       return -EBUSY;
-               break;
-       case KVM_REG_RISCV_CONFIG_REG(marchid):
-               if (!vcpu->arch.ran_atleast_once)
-                       vcpu->arch.marchid = reg_val;
-               else
-                       return -EBUSY;
-               break;
-       case KVM_REG_RISCV_CONFIG_REG(mimpid):
-               if (!vcpu->arch.ran_atleast_once)
-                       vcpu->arch.mimpid = reg_val;
-               else
-                       return -EBUSY;
-               break;
-       default:
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-static int kvm_riscv_vcpu_get_reg_core(struct kvm_vcpu *vcpu,
-                                      const struct kvm_one_reg *reg)
-{
-       struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
-       unsigned long __user *uaddr =
-                       (unsigned long __user *)(unsigned long)reg->addr;
-       unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
-                                           KVM_REG_SIZE_MASK |
-                                           KVM_REG_RISCV_CORE);
-       unsigned long reg_val;
-
-       if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
-               return -EINVAL;
-       if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long))
-               return -EINVAL;
-
-       if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc))
-               reg_val = cntx->sepc;
-       else if (KVM_REG_RISCV_CORE_REG(regs.pc) < reg_num &&
-                reg_num <= KVM_REG_RISCV_CORE_REG(regs.t6))
-               reg_val = ((unsigned long *)cntx)[reg_num];
-       else if (reg_num == KVM_REG_RISCV_CORE_REG(mode))
-               reg_val = (cntx->sstatus & SR_SPP) ?
-                               KVM_RISCV_MODE_S : KVM_RISCV_MODE_U;
-       else
-               return -EINVAL;
-
-       if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
-               return -EFAULT;
-
-       return 0;
-}
-
-static int kvm_riscv_vcpu_set_reg_core(struct kvm_vcpu *vcpu,
-                                      const struct kvm_one_reg *reg)
-{
-       struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
-       unsigned long __user *uaddr =
-                       (unsigned long __user *)(unsigned long)reg->addr;
-       unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
-                                           KVM_REG_SIZE_MASK |
-                                           KVM_REG_RISCV_CORE);
-       unsigned long reg_val;
-
-       if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
-               return -EINVAL;
-       if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long))
-               return -EINVAL;
-
-       if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
-               return -EFAULT;
-
-       if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc))
-               cntx->sepc = reg_val;
-       else if (KVM_REG_RISCV_CORE_REG(regs.pc) < reg_num &&
-                reg_num <= KVM_REG_RISCV_CORE_REG(regs.t6))
-               ((unsigned long *)cntx)[reg_num] = reg_val;
-       else if (reg_num == KVM_REG_RISCV_CORE_REG(mode)) {
-               if (reg_val == KVM_RISCV_MODE_S)
-                       cntx->sstatus |= SR_SPP;
-               else
-                       cntx->sstatus &= ~SR_SPP;
-       } else
-               return -EINVAL;
-
-       return 0;
-}
-
-static int kvm_riscv_vcpu_general_get_csr(struct kvm_vcpu *vcpu,
-                                         unsigned long reg_num,
-                                         unsigned long *out_val)
-{
-       struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
-
-       if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
-               return -EINVAL;
-
-       if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
-               kvm_riscv_vcpu_flush_interrupts(vcpu);
-               *out_val = (csr->hvip >> VSIP_TO_HVIP_SHIFT) & VSIP_VALID_MASK;
-               *out_val |= csr->hvip & ~IRQ_LOCAL_MASK;
-       } else
-               *out_val = ((unsigned long *)csr)[reg_num];
-
-       return 0;
-}
-
-static inline int kvm_riscv_vcpu_general_set_csr(struct kvm_vcpu *vcpu,
-                                                unsigned long reg_num,
-                                                unsigned long reg_val)
-{
-       struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
-
-       if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
-               return -EINVAL;
-
-       if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
-               reg_val &= VSIP_VALID_MASK;
-               reg_val <<= VSIP_TO_HVIP_SHIFT;
-       }
-
-       ((unsigned long *)csr)[reg_num] = reg_val;
-
-       if (reg_num == KVM_REG_RISCV_CSR_REG(sip))
-               WRITE_ONCE(vcpu->arch.irqs_pending_mask[0], 0);
-
-       return 0;
-}
-
-static int kvm_riscv_vcpu_get_reg_csr(struct kvm_vcpu *vcpu,
-                                     const struct kvm_one_reg *reg)
-{
-       int rc;
-       unsigned long __user *uaddr =
-                       (unsigned long __user *)(unsigned long)reg->addr;
-       unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
-                                           KVM_REG_SIZE_MASK |
-                                           KVM_REG_RISCV_CSR);
-       unsigned long reg_val, reg_subtype;
-
-       if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
-               return -EINVAL;
-
-       reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
-       reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;
-       switch (reg_subtype) {
-       case KVM_REG_RISCV_CSR_GENERAL:
-               rc = kvm_riscv_vcpu_general_get_csr(vcpu, reg_num, &reg_val);
-               break;
-       case KVM_REG_RISCV_CSR_AIA:
-               rc = kvm_riscv_vcpu_aia_get_csr(vcpu, reg_num, &reg_val);
-               break;
-       default:
-               rc = -EINVAL;
-               break;
-       }
-       if (rc)
-               return rc;
-
-       if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
-               return -EFAULT;
-
-       return 0;
-}
-
-static int kvm_riscv_vcpu_set_reg_csr(struct kvm_vcpu *vcpu,
-                                     const struct kvm_one_reg *reg)
-{
-       int rc;
-       unsigned long __user *uaddr =
-                       (unsigned long __user *)(unsigned long)reg->addr;
-       unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
-                                           KVM_REG_SIZE_MASK |
-                                           KVM_REG_RISCV_CSR);
-       unsigned long reg_val, reg_subtype;
-
-       if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
-               return -EINVAL;
-
-       if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
-               return -EFAULT;
-
-       reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
-       reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;
-       switch (reg_subtype) {
-       case KVM_REG_RISCV_CSR_GENERAL:
-               rc = kvm_riscv_vcpu_general_set_csr(vcpu, reg_num, reg_val);
-               break;
-       case KVM_REG_RISCV_CSR_AIA:
-               rc = kvm_riscv_vcpu_aia_set_csr(vcpu, reg_num, reg_val);
-               break;
-       default:
-               rc = -EINVAL;
-               break;
-       }
-       if (rc)
-               return rc;
-
-       return 0;
-}
-
-static int kvm_riscv_vcpu_get_reg_isa_ext(struct kvm_vcpu *vcpu,
-                                         const struct kvm_one_reg *reg)
-{
-       unsigned long __user *uaddr =
-                       (unsigned long __user *)(unsigned long)reg->addr;
-       unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
-                                           KVM_REG_SIZE_MASK |
-                                           KVM_REG_RISCV_ISA_EXT);
-       unsigned long reg_val = 0;
-       unsigned long host_isa_ext;
-
-       if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
-               return -EINVAL;
-
-       if (reg_num >= KVM_RISCV_ISA_EXT_MAX ||
-           reg_num >= ARRAY_SIZE(kvm_isa_ext_arr))
-               return -EINVAL;
-
-       host_isa_ext = kvm_isa_ext_arr[reg_num];
-       if (__riscv_isa_extension_available(vcpu->arch.isa, host_isa_ext))
-               reg_val = 1; /* Mark the given extension as available */
-
-       if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
-               return -EFAULT;
-
-       return 0;
-}
-
-static int kvm_riscv_vcpu_set_reg_isa_ext(struct kvm_vcpu *vcpu,
-                                         const struct kvm_one_reg *reg)
-{
-       unsigned long __user *uaddr =
-                       (unsigned long __user *)(unsigned long)reg->addr;
-       unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
-                                           KVM_REG_SIZE_MASK |
-                                           KVM_REG_RISCV_ISA_EXT);
-       unsigned long reg_val;
-       unsigned long host_isa_ext;
-
-       if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
-               return -EINVAL;
-
-       if (reg_num >= KVM_RISCV_ISA_EXT_MAX ||
-           reg_num >= ARRAY_SIZE(kvm_isa_ext_arr))
-               return -EINVAL;
-
-       if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
-               return -EFAULT;
-
-       host_isa_ext = kvm_isa_ext_arr[reg_num];
-       if (!__riscv_isa_extension_available(NULL, host_isa_ext))
-               return  -EOPNOTSUPP;
-
-       if (!vcpu->arch.ran_atleast_once) {
-               /*
-                * All multi-letter extension and a few single letter
-                * extension can be disabled
-                */
-               if (reg_val == 1 &&
-                   kvm_riscv_vcpu_isa_enable_allowed(reg_num))
-                       set_bit(host_isa_ext, vcpu->arch.isa);
-               else if (!reg_val &&
-                        kvm_riscv_vcpu_isa_disable_allowed(reg_num))
-                       clear_bit(host_isa_ext, vcpu->arch.isa);
-               else
-                       return -EINVAL;
-               kvm_riscv_vcpu_fp_reset(vcpu);
-       } else {
-               return -EOPNOTSUPP;
-       }
-
-       return 0;
-}
-
-static int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu,
-                                 const struct kvm_one_reg *reg)
-{
-       switch (reg->id & KVM_REG_RISCV_TYPE_MASK) {
-       case KVM_REG_RISCV_CONFIG:
-               return kvm_riscv_vcpu_set_reg_config(vcpu, reg);
-       case KVM_REG_RISCV_CORE:
-               return kvm_riscv_vcpu_set_reg_core(vcpu, reg);
-       case KVM_REG_RISCV_CSR:
-               return kvm_riscv_vcpu_set_reg_csr(vcpu, reg);
-       case KVM_REG_RISCV_TIMER:
-               return kvm_riscv_vcpu_set_reg_timer(vcpu, reg);
-       case KVM_REG_RISCV_FP_F:
-               return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
-                                                KVM_REG_RISCV_FP_F);
-       case KVM_REG_RISCV_FP_D:
-               return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
-                                                KVM_REG_RISCV_FP_D);
-       case KVM_REG_RISCV_ISA_EXT:
-               return kvm_riscv_vcpu_set_reg_isa_ext(vcpu, reg);
-       case KVM_REG_RISCV_SBI_EXT:
-               return kvm_riscv_vcpu_set_reg_sbi_ext(vcpu, reg);
-       case KVM_REG_RISCV_VECTOR:
-               return kvm_riscv_vcpu_set_reg_vector(vcpu, reg,
-                                                KVM_REG_RISCV_VECTOR);
-       default:
-               break;
-       }
-
-       return -EINVAL;
-}
-
-static int kvm_riscv_vcpu_get_reg(struct kvm_vcpu *vcpu,
-                                 const struct kvm_one_reg *reg)
-{
-       switch (reg->id & KVM_REG_RISCV_TYPE_MASK) {
-       case KVM_REG_RISCV_CONFIG:
-               return kvm_riscv_vcpu_get_reg_config(vcpu, reg);
-       case KVM_REG_RISCV_CORE:
-               return kvm_riscv_vcpu_get_reg_core(vcpu, reg);
-       case KVM_REG_RISCV_CSR:
-               return kvm_riscv_vcpu_get_reg_csr(vcpu, reg);
-       case KVM_REG_RISCV_TIMER:
-               return kvm_riscv_vcpu_get_reg_timer(vcpu, reg);
-       case KVM_REG_RISCV_FP_F:
-               return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
-                                                KVM_REG_RISCV_FP_F);
-       case KVM_REG_RISCV_FP_D:
-               return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
-                                                KVM_REG_RISCV_FP_D);
-       case KVM_REG_RISCV_ISA_EXT:
-               return kvm_riscv_vcpu_get_reg_isa_ext(vcpu, reg);
-       case KVM_REG_RISCV_SBI_EXT:
-               return kvm_riscv_vcpu_get_reg_sbi_ext(vcpu, reg);
-       case KVM_REG_RISCV_VECTOR:
-               return kvm_riscv_vcpu_get_reg_vector(vcpu, reg,
-                                                KVM_REG_RISCV_VECTOR);
-       default:
-               break;
-       }
-
-       return -EINVAL;
-}
-
 long kvm_arch_vcpu_async_ioctl(struct file *filp,
                               unsigned int ioctl, unsigned long arg)
 {
@@ -781,6 +254,24 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
                        r = kvm_riscv_vcpu_get_reg(vcpu, &reg);
                break;
        }
+       case KVM_GET_REG_LIST: {
+               struct kvm_reg_list __user *user_list = argp;
+               struct kvm_reg_list reg_list;
+               unsigned int n;
+
+               r = -EFAULT;
+               if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
+                       break;
+               n = reg_list.n;
+               reg_list.n = kvm_riscv_vcpu_num_regs(vcpu);
+               if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
+                       break;
+               r = -E2BIG;
+               if (n < reg_list.n)
+                       break;
+               r = kvm_riscv_vcpu_copy_reg_indices(vcpu, user_list->reg);
+               break;
+       }
        default:
                break;
        }
index 9d8cbc4..08ba48a 100644 (file)
@@ -96,7 +96,7 @@ int kvm_riscv_vcpu_get_reg_fp(struct kvm_vcpu *vcpu,
                          reg_num <= KVM_REG_RISCV_FP_F_REG(f[31]))
                        reg_val = &cntx->fp.f.f[reg_num];
                else
-                       return -EINVAL;
+                       return -ENOENT;
        } else if ((rtype == KVM_REG_RISCV_FP_D) &&
                   riscv_isa_extension_available(vcpu->arch.isa, d)) {
                if (reg_num == KVM_REG_RISCV_FP_D_REG(fcsr)) {
@@ -109,9 +109,9 @@ int kvm_riscv_vcpu_get_reg_fp(struct kvm_vcpu *vcpu,
                                return -EINVAL;
                        reg_val = &cntx->fp.d.f[reg_num];
                } else
-                       return -EINVAL;
+                       return -ENOENT;
        } else
-               return -EINVAL;
+               return -ENOENT;
 
        if (copy_to_user(uaddr, reg_val, KVM_REG_SIZE(reg->id)))
                return -EFAULT;
@@ -141,7 +141,7 @@ int kvm_riscv_vcpu_set_reg_fp(struct kvm_vcpu *vcpu,
                          reg_num <= KVM_REG_RISCV_FP_F_REG(f[31]))
                        reg_val = &cntx->fp.f.f[reg_num];
                else
-                       return -EINVAL;
+                       return -ENOENT;
        } else if ((rtype == KVM_REG_RISCV_FP_D) &&
                   riscv_isa_extension_available(vcpu->arch.isa, d)) {
                if (reg_num == KVM_REG_RISCV_FP_D_REG(fcsr)) {
@@ -154,9 +154,9 @@ int kvm_riscv_vcpu_set_reg_fp(struct kvm_vcpu *vcpu,
                                return -EINVAL;
                        reg_val = &cntx->fp.d.f[reg_num];
                } else
-                       return -EINVAL;
+                       return -ENOENT;
        } else
-               return -EINVAL;
+               return -ENOENT;
 
        if (copy_from_user(reg_val, uaddr, KVM_REG_SIZE(reg->id)))
                return -EFAULT;
diff --git a/arch/riscv/kvm/vcpu_onereg.c b/arch/riscv/kvm/vcpu_onereg.c
new file mode 100644 (file)
index 0000000..1b7e9fa
--- /dev/null
@@ -0,0 +1,1051 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 Western Digital Corporation or its affiliates.
+ * Copyright (C) 2023 Ventana Micro Systems Inc.
+ *
+ * Authors:
+ *     Anup Patel <apatel@ventanamicro.com>
+ */
+
+#include <linux/bitops.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/uaccess.h>
+#include <linux/kvm_host.h>
+#include <asm/cacheflush.h>
+#include <asm/hwcap.h>
+#include <asm/kvm_vcpu_vector.h>
+#include <asm/vector.h>
+
+/* Bits 0-25 of vcpu->arch.isa[0] hold the single-letter (base) extensions */
+#define KVM_RISCV_BASE_ISA_MASK                GENMASK(25, 0)
+
+/* Designated-initializer helper: KVM ISA ext ID -> host ISA ext ID */
+#define KVM_ISA_EXT_ARR(ext)           \
+[KVM_RISCV_ISA_EXT_##ext] = RISCV_ISA_EXT_##ext
+
+/*
+ * Mapping between KVM ISA Extension ID & Host ISA extension ID.
+ * Indexed by KVM_RISCV_ISA_EXT_*; the value is the host RISCV_ISA_EXT_* ID.
+ */
+static const unsigned long kvm_isa_ext_arr[] = {
+       /* Single letter extensions (alphabetically sorted) */
+       [KVM_RISCV_ISA_EXT_A] = RISCV_ISA_EXT_a,
+       [KVM_RISCV_ISA_EXT_C] = RISCV_ISA_EXT_c,
+       [KVM_RISCV_ISA_EXT_D] = RISCV_ISA_EXT_d,
+       [KVM_RISCV_ISA_EXT_F] = RISCV_ISA_EXT_f,
+       [KVM_RISCV_ISA_EXT_H] = RISCV_ISA_EXT_h,
+       [KVM_RISCV_ISA_EXT_I] = RISCV_ISA_EXT_i,
+       [KVM_RISCV_ISA_EXT_M] = RISCV_ISA_EXT_m,
+       [KVM_RISCV_ISA_EXT_V] = RISCV_ISA_EXT_v,
+       /* Multi letter extensions (alphabetically sorted) */
+       KVM_ISA_EXT_ARR(SSAIA),
+       KVM_ISA_EXT_ARR(SSTC),
+       KVM_ISA_EXT_ARR(SVINVAL),
+       KVM_ISA_EXT_ARR(SVNAPOT),
+       KVM_ISA_EXT_ARR(SVPBMT),
+       KVM_ISA_EXT_ARR(ZBA),
+       KVM_ISA_EXT_ARR(ZBB),
+       KVM_ISA_EXT_ARR(ZBS),
+       KVM_ISA_EXT_ARR(ZICBOM),
+       KVM_ISA_EXT_ARR(ZICBOZ),
+       KVM_ISA_EXT_ARR(ZICNTR),
+       KVM_ISA_EXT_ARR(ZICSR),
+       KVM_ISA_EXT_ARR(ZIFENCEI),
+       KVM_ISA_EXT_ARR(ZIHINTPAUSE),
+       KVM_ISA_EXT_ARR(ZIHPM),
+};
+
+/*
+ * Reverse-map a host (base) ISA extension ID to its KVM ISA extension ID.
+ * Returns KVM_RISCV_ISA_EXT_MAX when no mapping exists.
+ */
+static unsigned long kvm_riscv_vcpu_base2isa_ext(unsigned long base_ext)
+{
+       unsigned long idx = 0;
+
+       while (idx < KVM_RISCV_ISA_EXT_MAX && kvm_isa_ext_arr[idx] != base_ext)
+               idx++;
+
+       return idx;
+}
+
+/*
+ * Can the given KVM ISA extension be enabled for a guest?
+ * H is never exposed; V depends on host vector-state user policy.
+ */
+static bool kvm_riscv_vcpu_isa_enable_allowed(unsigned long ext)
+{
+       switch (ext) {
+       case KVM_RISCV_ISA_EXT_H:
+               return false;
+       case KVM_RISCV_ISA_EXT_V:
+               return riscv_v_vstate_ctrl_user_allowed();
+       default:
+               break;
+       }
+
+       return true;
+}
+
+/*
+ * Can the given KVM ISA extension be disabled for a guest?
+ * Extensions listed below cannot be turned off once the host has them
+ * (they have no independent enable/disable control for the guest).
+ */
+static bool kvm_riscv_vcpu_isa_disable_allowed(unsigned long ext)
+{
+       switch (ext) {
+       case KVM_RISCV_ISA_EXT_A:
+       case KVM_RISCV_ISA_EXT_C:
+       case KVM_RISCV_ISA_EXT_I:
+       case KVM_RISCV_ISA_EXT_M:
+       case KVM_RISCV_ISA_EXT_SSAIA:
+       case KVM_RISCV_ISA_EXT_SSTC:
+       case KVM_RISCV_ISA_EXT_SVINVAL:
+       case KVM_RISCV_ISA_EXT_SVNAPOT:
+       case KVM_RISCV_ISA_EXT_ZBA:
+       case KVM_RISCV_ISA_EXT_ZBB:
+       case KVM_RISCV_ISA_EXT_ZBS:
+       case KVM_RISCV_ISA_EXT_ZICNTR:
+       case KVM_RISCV_ISA_EXT_ZICSR:
+       case KVM_RISCV_ISA_EXT_ZIFENCEI:
+       case KVM_RISCV_ISA_EXT_ZIHINTPAUSE:
+       case KVM_RISCV_ISA_EXT_ZIHPM:
+               return false;
+       default:
+               break;
+       }
+
+       return true;
+}
+
+/*
+ * Initialize vcpu->arch.isa with every extension that is both present
+ * on the host and allowed to be enabled for guests.
+ */
+void kvm_riscv_vcpu_setup_isa(struct kvm_vcpu *vcpu)
+{
+       unsigned long host_isa, i;
+
+       for (i = 0; i < ARRAY_SIZE(kvm_isa_ext_arr); i++) {
+               host_isa = kvm_isa_ext_arr[i];
+               if (__riscv_isa_extension_available(NULL, host_isa) &&
+                   kvm_riscv_vcpu_isa_enable_allowed(i))
+                       set_bit(host_isa, vcpu->arch.isa);
+       }
+}
+
+/*
+ * Read one CONFIG register into user memory.
+ * Returns -EINVAL for a wrong register size, -ENOENT for an unknown
+ * register or one whose backing extension is unavailable, -EFAULT on
+ * a failed user copy.
+ */
+static int kvm_riscv_vcpu_get_reg_config(struct kvm_vcpu *vcpu,
+                                        const struct kvm_one_reg *reg)
+{
+       unsigned long __user *uaddr =
+                       (unsigned long __user *)(unsigned long)reg->addr;
+       unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
+                                           KVM_REG_SIZE_MASK |
+                                           KVM_REG_RISCV_CONFIG);
+       unsigned long reg_val;
+
+       if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
+               return -EINVAL;
+
+       switch (reg_num) {
+       case KVM_REG_RISCV_CONFIG_REG(isa):
+               /* Only the single-letter (base) ISA bits are exposed here */
+               reg_val = vcpu->arch.isa[0] & KVM_RISCV_BASE_ISA_MASK;
+               break;
+       case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size):
+               if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOM))
+                       return -ENOENT;
+               reg_val = riscv_cbom_block_size;
+               break;
+       case KVM_REG_RISCV_CONFIG_REG(zicboz_block_size):
+               if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOZ))
+                       return -ENOENT;
+               reg_val = riscv_cboz_block_size;
+               break;
+       case KVM_REG_RISCV_CONFIG_REG(mvendorid):
+               reg_val = vcpu->arch.mvendorid;
+               break;
+       case KVM_REG_RISCV_CONFIG_REG(marchid):
+               reg_val = vcpu->arch.marchid;
+               break;
+       case KVM_REG_RISCV_CONFIG_REG(mimpid):
+               reg_val = vcpu->arch.mimpid;
+               break;
+       case KVM_REG_RISCV_CONFIG_REG(satp_mode):
+               /* Derived from the host's satp mode; guests cannot change it */
+               reg_val = satp_mode >> SATP_MODE_SHIFT;
+               break;
+       default:
+               return -ENOENT;
+       }
+
+       if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
+               return -EFAULT;
+
+       return 0;
+}
+
+/*
+ * Write one CONFIG register from user memory.
+ *
+ * Mutable state (isa, mvendorid, marchid, mimpid) may only change before
+ * the vCPU has run at least once; afterwards a change attempt returns
+ * -EBUSY. Block-size and satp_mode registers are read-only: a write is
+ * accepted only if it matches the current value.
+ */
+static int kvm_riscv_vcpu_set_reg_config(struct kvm_vcpu *vcpu,
+                                        const struct kvm_one_reg *reg)
+{
+       unsigned long __user *uaddr =
+                       (unsigned long __user *)(unsigned long)reg->addr;
+       unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
+                                           KVM_REG_SIZE_MASK |
+                                           KVM_REG_RISCV_CONFIG);
+       unsigned long i, isa_ext, reg_val;
+
+       if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
+               return -EINVAL;
+
+       if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
+               return -EFAULT;
+
+       switch (reg_num) {
+       case KVM_REG_RISCV_CONFIG_REG(isa):
+               /*
+                * This ONE REG interface is only defined for
+                * single letter extensions.
+                */
+               if (fls(reg_val) >= RISCV_ISA_EXT_BASE)
+                       return -EINVAL;
+
+               /*
+                * Return early (i.e. do nothing) if reg_val is the same
+                * value retrievable via kvm_riscv_vcpu_get_reg_config().
+                */
+               if (reg_val == (vcpu->arch.isa[0] & KVM_RISCV_BASE_ISA_MASK))
+                       break;
+
+               if (!vcpu->arch.ran_atleast_once) {
+                       /* Ignore the enable/disable request for certain extensions */
+                       for (i = 0; i < RISCV_ISA_EXT_BASE; i++) {
+                               isa_ext = kvm_riscv_vcpu_base2isa_ext(i);
+                               if (isa_ext >= KVM_RISCV_ISA_EXT_MAX) {
+                                       reg_val &= ~BIT(i);
+                                       continue;
+                               }
+                               if (!kvm_riscv_vcpu_isa_enable_allowed(isa_ext))
+                                       if (reg_val & BIT(i))
+                                               reg_val &= ~BIT(i);
+                               if (!kvm_riscv_vcpu_isa_disable_allowed(isa_ext))
+                                       if (!(reg_val & BIT(i)))
+                                               reg_val |= BIT(i);
+                       }
+                       /* Never advertise bits the host itself lacks */
+                       reg_val &= riscv_isa_extension_base(NULL);
+                       /* Do not modify anything beyond single letter extensions */
+                       reg_val = (vcpu->arch.isa[0] & ~KVM_RISCV_BASE_ISA_MASK) |
+                                 (reg_val & KVM_RISCV_BASE_ISA_MASK);
+                       vcpu->arch.isa[0] = reg_val;
+                       /* FP state must be reset after the ISA set changes */
+                       kvm_riscv_vcpu_fp_reset(vcpu);
+               } else {
+                       return -EBUSY;
+               }
+               break;
+       case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size):
+               if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOM))
+                       return -ENOENT;
+               if (reg_val != riscv_cbom_block_size)
+                       return -EINVAL;
+               break;
+       case KVM_REG_RISCV_CONFIG_REG(zicboz_block_size):
+               if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOZ))
+                       return -ENOENT;
+               if (reg_val != riscv_cboz_block_size)
+                       return -EINVAL;
+               break;
+       case KVM_REG_RISCV_CONFIG_REG(mvendorid):
+               if (reg_val == vcpu->arch.mvendorid)
+                       break;
+               if (!vcpu->arch.ran_atleast_once)
+                       vcpu->arch.mvendorid = reg_val;
+               else
+                       return -EBUSY;
+               break;
+       case KVM_REG_RISCV_CONFIG_REG(marchid):
+               if (reg_val == vcpu->arch.marchid)
+                       break;
+               if (!vcpu->arch.ran_atleast_once)
+                       vcpu->arch.marchid = reg_val;
+               else
+                       return -EBUSY;
+               break;
+       case KVM_REG_RISCV_CONFIG_REG(mimpid):
+               if (reg_val == vcpu->arch.mimpid)
+                       break;
+               if (!vcpu->arch.ran_atleast_once)
+                       vcpu->arch.mimpid = reg_val;
+               else
+                       return -EBUSY;
+               break;
+       case KVM_REG_RISCV_CONFIG_REG(satp_mode):
+               if (reg_val != (satp_mode >> SATP_MODE_SHIFT))
+                       return -EINVAL;
+               break;
+       default:
+               return -ENOENT;
+       }
+
+       return 0;
+}
+
+/*
+ * Read one CORE register (pc, x1-x31, privilege mode) into user memory.
+ */
+static int kvm_riscv_vcpu_get_reg_core(struct kvm_vcpu *vcpu,
+                                      const struct kvm_one_reg *reg)
+{
+       struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
+       unsigned long __user *uaddr =
+                       (unsigned long __user *)(unsigned long)reg->addr;
+       unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
+                                           KVM_REG_SIZE_MASK |
+                                           KVM_REG_RISCV_CORE);
+       unsigned long reg_val;
+
+       if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
+               return -EINVAL;
+       if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long))
+               return -ENOENT;
+
+       if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc))
+               reg_val = cntx->sepc;
+       else if (KVM_REG_RISCV_CORE_REG(regs.pc) < reg_num &&
+                reg_num <= KVM_REG_RISCV_CORE_REG(regs.t6))
+               /*
+                * NOTE(review): indexes guest_context as a flat word array —
+                * assumes kvm_cpu_context's GPR layout matches kvm_riscv_core.
+                */
+               reg_val = ((unsigned long *)cntx)[reg_num];
+       else if (reg_num == KVM_REG_RISCV_CORE_REG(mode))
+               /* Privilege mode is encoded via the SPP bit of sstatus */
+               reg_val = (cntx->sstatus & SR_SPP) ?
+                               KVM_RISCV_MODE_S : KVM_RISCV_MODE_U;
+       else
+               return -ENOENT;
+
+       if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
+               return -EFAULT;
+
+       return 0;
+}
+
+/*
+ * Write one CORE register (pc, x1-x31, privilege mode) from user memory.
+ */
+static int kvm_riscv_vcpu_set_reg_core(struct kvm_vcpu *vcpu,
+                                      const struct kvm_one_reg *reg)
+{
+       struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
+       unsigned long __user *uaddr =
+                       (unsigned long __user *)(unsigned long)reg->addr;
+       unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
+                                           KVM_REG_SIZE_MASK |
+                                           KVM_REG_RISCV_CORE);
+       unsigned long reg_val;
+
+       if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
+               return -EINVAL;
+       if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long))
+               return -ENOENT;
+
+       if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
+               return -EFAULT;
+
+       if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc))
+               cntx->sepc = reg_val;
+       else if (KVM_REG_RISCV_CORE_REG(regs.pc) < reg_num &&
+                reg_num <= KVM_REG_RISCV_CORE_REG(regs.t6))
+               ((unsigned long *)cntx)[reg_num] = reg_val;
+       else if (reg_num == KVM_REG_RISCV_CORE_REG(mode)) {
+               /* Any non-S value selects U mode */
+               if (reg_val == KVM_RISCV_MODE_S)
+                       cntx->sstatus |= SR_SPP;
+               else
+                       cntx->sstatus &= ~SR_SPP;
+       } else
+               return -ENOENT;
+
+       return 0;
+}
+
+/*
+ * Read one general (non-AIA) guest CSR by index into *out_val.
+ * The sip register is stored internally in hvip form, so it is flushed
+ * and translated before being reported.
+ */
+static int kvm_riscv_vcpu_general_get_csr(struct kvm_vcpu *vcpu,
+                                         unsigned long reg_num,
+                                         unsigned long *out_val)
+{
+       struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
+
+       if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
+               return -ENOENT;
+
+       if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
+               /* Make pending-interrupt state visible before reading */
+               kvm_riscv_vcpu_flush_interrupts(vcpu);
+               *out_val = (csr->hvip >> VSIP_TO_HVIP_SHIFT) & VSIP_VALID_MASK;
+               *out_val |= csr->hvip & ~IRQ_LOCAL_MASK;
+       } else
+               *out_val = ((unsigned long *)csr)[reg_num];
+
+       return 0;
+}
+
+/*
+ * Write one general (non-AIA) guest CSR by index.
+ * A write to sip is converted to hvip form and clears the pending
+ * irq mask so stale software-injected interrupts are not re-applied.
+ */
+static int kvm_riscv_vcpu_general_set_csr(struct kvm_vcpu *vcpu,
+                                         unsigned long reg_num,
+                                         unsigned long reg_val)
+{
+       struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
+
+       if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
+               return -ENOENT;
+
+       if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
+               reg_val &= VSIP_VALID_MASK;
+               reg_val <<= VSIP_TO_HVIP_SHIFT;
+       }
+
+       ((unsigned long *)csr)[reg_num] = reg_val;
+
+       if (reg_num == KVM_REG_RISCV_CSR_REG(sip))
+               WRITE_ONCE(vcpu->arch.irqs_pending_mask[0], 0);
+
+       return 0;
+}
+
+/*
+ * Read one CSR register, dispatching on the subtype encoded in the
+ * register ID (general CSRs vs AIA CSRs).
+ */
+static int kvm_riscv_vcpu_get_reg_csr(struct kvm_vcpu *vcpu,
+                                     const struct kvm_one_reg *reg)
+{
+       int rc;
+       unsigned long __user *uaddr =
+                       (unsigned long __user *)(unsigned long)reg->addr;
+       unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
+                                           KVM_REG_SIZE_MASK |
+                                           KVM_REG_RISCV_CSR);
+       unsigned long reg_val, reg_subtype;
+
+       if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
+               return -EINVAL;
+
+       /* Split the subtype bits out of the register number */
+       reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
+       reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;
+       switch (reg_subtype) {
+       case KVM_REG_RISCV_CSR_GENERAL:
+               rc = kvm_riscv_vcpu_general_get_csr(vcpu, reg_num, &reg_val);
+               break;
+       case KVM_REG_RISCV_CSR_AIA:
+               rc = kvm_riscv_vcpu_aia_get_csr(vcpu, reg_num, &reg_val);
+               break;
+       default:
+               rc = -ENOENT;
+               break;
+       }
+       if (rc)
+               return rc;
+
+       if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
+               return -EFAULT;
+
+       return 0;
+}
+
+/*
+ * Write one CSR register, dispatching on the subtype encoded in the
+ * register ID (general CSRs vs AIA CSRs).
+ */
+static int kvm_riscv_vcpu_set_reg_csr(struct kvm_vcpu *vcpu,
+                                     const struct kvm_one_reg *reg)
+{
+       unsigned long __user *uaddr =
+                       (unsigned long __user *)(unsigned long)reg->addr;
+       unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
+                                           KVM_REG_SIZE_MASK |
+                                           KVM_REG_RISCV_CSR);
+       unsigned long reg_val, reg_subtype;
+
+       if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
+               return -EINVAL;
+
+       if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
+               return -EFAULT;
+
+       /* Split the subtype bits out of the register number */
+       reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
+       reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;
+
+       switch (reg_subtype) {
+       case KVM_REG_RISCV_CSR_GENERAL:
+               return kvm_riscv_vcpu_general_set_csr(vcpu, reg_num, reg_val);
+       case KVM_REG_RISCV_CSR_AIA:
+               return kvm_riscv_vcpu_aia_set_csr(vcpu, reg_num, reg_val);
+       default:
+               return -ENOENT;
+       }
+}
+
+/*
+ * Report whether one KVM ISA extension is enabled for this vCPU:
+ * *reg_val becomes 1 when available, 0 otherwise.
+ */
+static int riscv_vcpu_get_isa_ext_single(struct kvm_vcpu *vcpu,
+                                        unsigned long reg_num,
+                                        unsigned long *reg_val)
+{
+       unsigned long host_ext;
+
+       if (reg_num >= KVM_RISCV_ISA_EXT_MAX ||
+           reg_num >= ARRAY_SIZE(kvm_isa_ext_arr))
+               return -ENOENT;
+
+       host_ext = kvm_isa_ext_arr[reg_num];
+       *reg_val = __riscv_isa_extension_available(vcpu->arch.isa, host_ext) ?
+                  1 : 0;
+
+       return 0;
+}
+
+/*
+ * Enable or disable one KVM ISA extension for this vCPU.
+ * Toggling is only permitted before the vCPU has run (-EBUSY after),
+ * and only where the enable/disable policy helpers allow it (-EINVAL
+ * otherwise). A write of the current value is always accepted.
+ */
+static int riscv_vcpu_set_isa_ext_single(struct kvm_vcpu *vcpu,
+                                        unsigned long reg_num,
+                                        unsigned long reg_val)
+{
+       unsigned long host_isa_ext;
+
+       if (reg_num >= KVM_RISCV_ISA_EXT_MAX ||
+           reg_num >= ARRAY_SIZE(kvm_isa_ext_arr))
+               return -ENOENT;
+
+       host_isa_ext = kvm_isa_ext_arr[reg_num];
+       if (!__riscv_isa_extension_available(NULL, host_isa_ext))
+               return -ENOENT;
+
+       /* No-op write: requested state already matches */
+       if (reg_val == test_bit(host_isa_ext, vcpu->arch.isa))
+               return 0;
+
+       if (!vcpu->arch.ran_atleast_once) {
+               /*
+                * All multi-letter extensions and a few single-letter
+                * extensions can be disabled.
+                */
+               if (reg_val == 1 &&
+                   kvm_riscv_vcpu_isa_enable_allowed(reg_num))
+                       set_bit(host_isa_ext, vcpu->arch.isa);
+               else if (!reg_val &&
+                        kvm_riscv_vcpu_isa_disable_allowed(reg_num))
+                       clear_bit(host_isa_ext, vcpu->arch.isa);
+               else
+                       return -EINVAL;
+               /* FP state must be reset after the ISA set changes */
+               kvm_riscv_vcpu_fp_reset(vcpu);
+       } else {
+               return -EBUSY;
+       }
+
+       return 0;
+}
+
+/*
+ * Build the bitmap of enabled KVM ISA extensions for one MULTI register
+ * (BITS_PER_LONG extension IDs per register, starting at
+ * reg_num * BITS_PER_LONG).
+ *
+ * Robustness fix: the original only OR-ed bits into *reg_val and relied
+ * on the caller pre-zeroing it; zero it here so the result is correct
+ * regardless of caller state.
+ */
+static int riscv_vcpu_get_isa_ext_multi(struct kvm_vcpu *vcpu,
+                                       unsigned long reg_num,
+                                       unsigned long *reg_val)
+{
+       unsigned long i, ext_id, ext_val;
+
+       if (reg_num > KVM_REG_RISCV_ISA_MULTI_REG_LAST)
+               return -ENOENT;
+
+       *reg_val = 0;
+
+       for (i = 0; i < BITS_PER_LONG; i++) {
+               ext_id = i + reg_num * BITS_PER_LONG;
+               if (ext_id >= KVM_RISCV_ISA_EXT_MAX)
+                       break;
+
+               ext_val = 0;
+               riscv_vcpu_get_isa_ext_single(vcpu, ext_id, &ext_val);
+               if (ext_val)
+                       *reg_val |= KVM_REG_RISCV_ISA_MULTI_MASK(ext_id);
+       }
+
+       return 0;
+}
+
+/*
+ * Enable (or disable, per @enable) every extension whose bit is set in
+ * @reg_val for one MULTI register. Per-extension errors from
+ * riscv_vcpu_set_isa_ext_single() are ignored — presumably so one
+ * unsupported extension does not abort the whole batch (NOTE(review):
+ * confirm this best-effort semantic is intended).
+ */
+static int riscv_vcpu_set_isa_ext_multi(struct kvm_vcpu *vcpu,
+                                       unsigned long reg_num,
+                                       unsigned long reg_val, bool enable)
+{
+       unsigned long i, ext_id;
+
+       if (reg_num > KVM_REG_RISCV_ISA_MULTI_REG_LAST)
+               return -ENOENT;
+
+       for_each_set_bit(i, &reg_val, BITS_PER_LONG) {
+               ext_id = i + reg_num * BITS_PER_LONG;
+               if (ext_id >= KVM_RISCV_ISA_EXT_MAX)
+                       break;
+
+               riscv_vcpu_set_isa_ext_single(vcpu, ext_id, enable);
+       }
+
+       return 0;
+}
+
+/*
+ * Read one ISA_EXT register (SINGLE, MULTI_EN, or MULTI_DIS subtype).
+ * MULTI_DIS is simply the complement of MULTI_EN.
+ */
+static int kvm_riscv_vcpu_get_reg_isa_ext(struct kvm_vcpu *vcpu,
+                                         const struct kvm_one_reg *reg)
+{
+       int rc;
+       unsigned long __user *uaddr =
+                       (unsigned long __user *)(unsigned long)reg->addr;
+       unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
+                                           KVM_REG_SIZE_MASK |
+                                           KVM_REG_RISCV_ISA_EXT);
+       unsigned long reg_val, reg_subtype;
+
+       if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
+               return -EINVAL;
+
+       reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
+       reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;
+
+       reg_val = 0;
+       switch (reg_subtype) {
+       case KVM_REG_RISCV_ISA_SINGLE:
+               rc = riscv_vcpu_get_isa_ext_single(vcpu, reg_num, &reg_val);
+               break;
+       case KVM_REG_RISCV_ISA_MULTI_EN:
+       case KVM_REG_RISCV_ISA_MULTI_DIS:
+               rc = riscv_vcpu_get_isa_ext_multi(vcpu, reg_num, &reg_val);
+               if (!rc && reg_subtype == KVM_REG_RISCV_ISA_MULTI_DIS)
+                       reg_val = ~reg_val;
+               break;
+       default:
+               rc = -ENOENT;
+       }
+       if (rc)
+               return rc;
+
+       if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
+               return -EFAULT;
+
+       return 0;
+}
+
+/*
+ * Write one ISA_EXT register (SINGLE, MULTI_EN, or MULTI_DIS subtype).
+ *
+ * Consistency fix: dispatch on KVM_REG_RISCV_ISA_MULTI_EN/DIS — the ISA
+ * subtype macros that the matching getter uses — instead of the
+ * numerically-identical KVM_REG_RISCV_SBI_MULTI_EN/DIS, which belong to
+ * the SBI_EXT register space and only worked here by coincidence of
+ * encoding. The unreachable trailing return after the exhaustive switch
+ * is also dropped.
+ */
+static int kvm_riscv_vcpu_set_reg_isa_ext(struct kvm_vcpu *vcpu,
+                                         const struct kvm_one_reg *reg)
+{
+       unsigned long __user *uaddr =
+                       (unsigned long __user *)(unsigned long)reg->addr;
+       unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
+                                           KVM_REG_SIZE_MASK |
+                                           KVM_REG_RISCV_ISA_EXT);
+       unsigned long reg_val, reg_subtype;
+
+       if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
+               return -EINVAL;
+
+       reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
+       reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;
+
+       if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
+               return -EFAULT;
+
+       switch (reg_subtype) {
+       case KVM_REG_RISCV_ISA_SINGLE:
+               return riscv_vcpu_set_isa_ext_single(vcpu, reg_num, reg_val);
+       case KVM_REG_RISCV_ISA_MULTI_EN:
+               return riscv_vcpu_set_isa_ext_multi(vcpu, reg_num, reg_val, true);
+       case KVM_REG_RISCV_ISA_MULTI_DIS:
+               return riscv_vcpu_set_isa_ext_multi(vcpu, reg_num, reg_val, false);
+       default:
+               return -ENOENT;
+       }
+}
+
+/*
+ * Emit the register IDs of all visible CONFIG registers to @uindices;
+ * with a NULL @uindices just count them. Returns the count or -EFAULT.
+ */
+static int copy_config_reg_indices(const struct kvm_vcpu *vcpu,
+                               u64 __user *uindices)
+{
+       int n = 0;
+
+       for (int i = 0; i < sizeof(struct kvm_riscv_config)/sizeof(unsigned long);
+                i++) {
+               u64 size;
+               u64 reg;
+
+               /*
+                * Avoid reporting config reg if the corresponding extension
+                * was not available.
+                */
+               if (i == KVM_REG_RISCV_CONFIG_REG(zicbom_block_size) &&
+                       !riscv_isa_extension_available(vcpu->arch.isa, ZICBOM))
+                       continue;
+               else if (i == KVM_REG_RISCV_CONFIG_REG(zicboz_block_size) &&
+                       !riscv_isa_extension_available(vcpu->arch.isa, ZICBOZ))
+                       continue;
+
+               size = IS_ENABLED(CONFIG_32BIT) ? KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
+               reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CONFIG | i;
+
+               if (uindices) {
+                       if (put_user(reg, uindices))
+                               return -EFAULT;
+                       uindices++;
+               }
+
+               n++;
+       }
+
+       return n;
+}
+
+/* Count of visible CONFIG registers (counting pass of the copy helper) */
+static unsigned long num_config_regs(const struct kvm_vcpu *vcpu)
+{
+       return copy_config_reg_indices(vcpu, NULL);
+}
+
+/* Number of CORE registers: every word of struct kvm_riscv_core */
+static inline unsigned long num_core_regs(void)
+{
+       return sizeof(struct kvm_riscv_core) / sizeof(unsigned long);
+}
+
+/*
+ * Emit all CORE register IDs to @uindices; with a NULL @uindices just
+ * count them. Returns the count or -EFAULT.
+ */
+static int copy_core_reg_indices(u64 __user *uindices)
+{
+       int n = num_core_regs();
+       u64 size = IS_ENABLED(CONFIG_32BIT) ?
+                  KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
+
+       for (int i = 0; i < n; i++) {
+               u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CORE | i;
+
+               if (uindices) {
+                       if (put_user(reg, uindices))
+                               return -EFAULT;
+                       uindices++;
+               }
+       }
+
+       return n;
+}
+
+/* Number of CSR registers: general CSRs plus AIA CSRs when SSAIA is on */
+static inline unsigned long num_csr_regs(const struct kvm_vcpu *vcpu)
+{
+       unsigned long n = sizeof(struct kvm_riscv_csr) / sizeof(unsigned long);
+
+       if (riscv_isa_extension_available(vcpu->arch.isa, SSAIA))
+               n += sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long);
+
+       return n;
+}
+
+/*
+ * Emit all CSR register IDs (general, then AIA when SSAIA is available)
+ * to @uindices; with a NULL @uindices just count them.
+ */
+static int copy_csr_reg_indices(const struct kvm_vcpu *vcpu,
+                               u64 __user *uindices)
+{
+       int n1 = sizeof(struct kvm_riscv_csr) / sizeof(unsigned long);
+       int n2 = 0;
+
+       /* copy general csr regs */
+       for (int i = 0; i < n1; i++) {
+               u64 size = IS_ENABLED(CONFIG_32BIT) ?
+                          KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
+               u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CSR |
+                                 KVM_REG_RISCV_CSR_GENERAL | i;
+
+               if (uindices) {
+                       if (put_user(reg, uindices))
+                               return -EFAULT;
+                       uindices++;
+               }
+       }
+
+       /* copy AIA csr regs */
+       if (riscv_isa_extension_available(vcpu->arch.isa, SSAIA)) {
+               n2 = sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long);
+
+               for (int i = 0; i < n2; i++) {
+                       u64 size = IS_ENABLED(CONFIG_32BIT) ?
+                                  KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
+                       u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CSR |
+                                         KVM_REG_RISCV_CSR_AIA | i;
+
+                       if (uindices) {
+                               if (put_user(reg, uindices))
+                                       return -EFAULT;
+                               uindices++;
+                       }
+               }
+       }
+
+       return n1 + n2;
+}
+
+/* Number of TIMER registers: every u64 of struct kvm_riscv_timer */
+static inline unsigned long num_timer_regs(void)
+{
+       return sizeof(struct kvm_riscv_timer) / sizeof(u64);
+}
+
+/*
+ * Emit all TIMER register IDs (always u64-sized) to @uindices; with a
+ * NULL @uindices just count them. Returns the count or -EFAULT.
+ */
+static int copy_timer_reg_indices(u64 __user *uindices)
+{
+       int n = num_timer_regs();
+       int i;
+
+       for (i = 0; i < n; i++) {
+               u64 reg = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
+                         KVM_REG_RISCV_TIMER | i;
+
+               if (!uindices)
+                       continue;
+               if (put_user(reg, uindices++))
+                       return -EFAULT;
+       }
+
+       return n;
+}
+
+/* Number of FP_F registers: the whole f-context when F is available */
+static inline unsigned long num_fp_f_regs(const struct kvm_vcpu *vcpu)
+{
+       const struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
+
+       if (riscv_isa_extension_available(vcpu->arch.isa, f))
+               return sizeof(cntx->fp.f) / sizeof(u32);
+       else
+               return 0;
+}
+
+/*
+ * Emit all FP_F register IDs (u32-sized) to @uindices; with a NULL
+ * @uindices just count them. Returns the count or -EFAULT.
+ */
+static int copy_fp_f_reg_indices(const struct kvm_vcpu *vcpu,
+                               u64 __user *uindices)
+{
+       int n = num_fp_f_regs(vcpu);
+
+       for (int i = 0; i < n; i++) {
+               u64 reg = KVM_REG_RISCV | KVM_REG_SIZE_U32 |
+                         KVM_REG_RISCV_FP_F | i;
+
+               if (uindices) {
+                       if (put_user(reg, uindices))
+                               return -EFAULT;
+                       uindices++;
+               }
+       }
+
+       return n;
+}
+
+/*
+ * Number of FP_D registers when D is available: the 32 f-registers
+ * plus 1 for fcsr.
+ */
+static inline unsigned long num_fp_d_regs(const struct kvm_vcpu *vcpu)
+{
+       const struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
+
+       if (riscv_isa_extension_available(vcpu->arch.isa, d))
+               return sizeof(cntx->fp.d.f) / sizeof(u64) + 1;
+       else
+               return 0;
+}
+
+/*
+ * Emit all FP_D register IDs to @uindices; with a NULL @uindices just
+ * count them. The f-registers are u64-sized; the trailing fcsr entry
+ * is u32-sized. Returns the count or -EFAULT.
+ */
+static int copy_fp_d_reg_indices(const struct kvm_vcpu *vcpu,
+                               u64 __user *uindices)
+{
+       int i;
+       int n = num_fp_d_regs(vcpu);
+       u64 reg;
+
+       /* copy fp.d.f indices */
+       for (i = 0; i < n-1; i++) {
+               reg = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
+                     KVM_REG_RISCV_FP_D | i;
+
+               if (uindices) {
+                       if (put_user(reg, uindices))
+                               return -EFAULT;
+                       uindices++;
+               }
+       }
+
+       /* copy fp.d.fcsr indices */
+       reg = KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_D | i;
+       if (uindices) {
+               if (put_user(reg, uindices))
+                       return -EFAULT;
+               uindices++;
+       }
+
+       return n;
+}
+
+/*
+ * Emit one ISA_EXT single-register ID for every extension currently
+ * available to this vCPU; with a NULL @uindices just count them.
+ * Returns the count or -EFAULT.
+ */
+static int copy_isa_ext_reg_indices(const struct kvm_vcpu *vcpu,
+                               u64 __user *uindices)
+{
+       unsigned int n = 0;
+       unsigned long isa_ext;
+
+       for (int i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++) {
+               u64 size = IS_ENABLED(CONFIG_32BIT) ?
+                          KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
+               u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_ISA_EXT | i;
+
+               isa_ext = kvm_isa_ext_arr[i];
+               if (!__riscv_isa_extension_available(vcpu->arch.isa, isa_ext))
+                       continue;
+
+               if (uindices) {
+                       if (put_user(reg, uindices))
+                               return -EFAULT;
+                       uindices++;
+               }
+
+               n++;
+       }
+
+       return n;
+}
+
+/*
+ * Count of visible ISA_EXT registers (counting pass of the copy helper).
+ * Fix: dropped a stray double semicolon from the return statement.
+ */
+static inline unsigned long num_isa_ext_regs(const struct kvm_vcpu *vcpu)
+{
+       return copy_isa_ext_reg_indices(vcpu, NULL);
+}
+
+static inline unsigned long num_sbi_ext_regs(void)
+{
+       /*
+        * number of KVM_REG_RISCV_SBI_SINGLE +
+        * 2 x (number of KVM_REG_RISCV_SBI_MULTI)
+        */
+       return KVM_RISCV_SBI_EXT_MAX + 2*(KVM_REG_RISCV_SBI_MULTI_REG_LAST+1);
+}
+
+/*
+ * Emit all SBI_EXT register IDs — every SINGLE register followed by
+ * interleaved MULTI_EN/MULTI_DIS pairs — to @uindices; with a NULL
+ * @uindices just count them. Returns the count or -EFAULT.
+ */
+static int copy_sbi_ext_reg_indices(u64 __user *uindices)
+{
+       int n;
+
+       /* copy KVM_REG_RISCV_SBI_SINGLE */
+       n = KVM_RISCV_SBI_EXT_MAX;
+       for (int i = 0; i < n; i++) {
+               u64 size = IS_ENABLED(CONFIG_32BIT) ?
+                          KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
+               u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_SBI_EXT |
+                         KVM_REG_RISCV_SBI_SINGLE | i;
+
+               if (uindices) {
+                       if (put_user(reg, uindices))
+                               return -EFAULT;
+                       uindices++;
+               }
+       }
+
+       /* copy KVM_REG_RISCV_SBI_MULTI */
+       n = KVM_REG_RISCV_SBI_MULTI_REG_LAST + 1;
+       for (int i = 0; i < n; i++) {
+               u64 size = IS_ENABLED(CONFIG_32BIT) ?
+                          KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
+               u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_SBI_EXT |
+                         KVM_REG_RISCV_SBI_MULTI_EN | i;
+
+               if (uindices) {
+                       if (put_user(reg, uindices))
+                               return -EFAULT;
+                       uindices++;
+               }
+
+               reg = KVM_REG_RISCV | size | KVM_REG_RISCV_SBI_EXT |
+                         KVM_REG_RISCV_SBI_MULTI_DIS | i;
+
+               if (uindices) {
+                       if (put_user(reg, uindices))
+                               return -EFAULT;
+                       uindices++;
+               }
+       }
+
+       return num_sbi_ext_regs();
+}
+
+/*
+ * kvm_riscv_vcpu_num_regs - how many registers do we present via KVM_GET/SET_ONE_REG
+ *
+ * This is for all registers.
+ */
+unsigned long kvm_riscv_vcpu_num_regs(struct kvm_vcpu *vcpu)
+{
+       unsigned long res = 0;
+
+       res += num_config_regs(vcpu);
+       res += num_core_regs();
+       res += num_csr_regs(vcpu);
+       res += num_timer_regs();
+       res += num_fp_f_regs(vcpu);
+       res += num_fp_d_regs(vcpu);
+       res += num_isa_ext_regs(vcpu);
+       res += num_sbi_ext_regs();
+
+       return res;
+}
+
+/*
+ * kvm_riscv_vcpu_copy_reg_indices - get indices of all registers.
+ */
+int kvm_riscv_vcpu_copy_reg_indices(struct kvm_vcpu *vcpu,
+                                   u64 __user *uindices)
+{
+       int ret;
+
+       ret = copy_config_reg_indices(vcpu, uindices);
+       if (ret < 0)
+               return ret;
+       uindices += ret;
+
+       ret = copy_core_reg_indices(uindices);
+       if (ret < 0)
+               return ret;
+       uindices += ret;
+
+       ret = copy_csr_reg_indices(vcpu, uindices);
+       if (ret < 0)
+               return ret;
+       uindices += ret;
+
+       ret = copy_timer_reg_indices(uindices);
+       if (ret < 0)
+               return ret;
+       uindices += ret;
+
+       ret = copy_fp_f_reg_indices(vcpu, uindices);
+       if (ret < 0)
+               return ret;
+       uindices += ret;
+
+       ret = copy_fp_d_reg_indices(vcpu, uindices);
+       if (ret < 0)
+               return ret;
+       uindices += ret;
+
+       ret = copy_isa_ext_reg_indices(vcpu, uindices);
+       if (ret < 0)
+               return ret;
+       uindices += ret;
+
+       ret = copy_sbi_ext_reg_indices(uindices);
+       if (ret < 0)
+               return ret;
+
+       return 0;
+}
+
+int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu,
+                          const struct kvm_one_reg *reg)
+{
+       switch (reg->id & KVM_REG_RISCV_TYPE_MASK) {
+       case KVM_REG_RISCV_CONFIG:
+               return kvm_riscv_vcpu_set_reg_config(vcpu, reg);
+       case KVM_REG_RISCV_CORE:
+               return kvm_riscv_vcpu_set_reg_core(vcpu, reg);
+       case KVM_REG_RISCV_CSR:
+               return kvm_riscv_vcpu_set_reg_csr(vcpu, reg);
+       case KVM_REG_RISCV_TIMER:
+               return kvm_riscv_vcpu_set_reg_timer(vcpu, reg);
+       case KVM_REG_RISCV_FP_F:
+               return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
+                                                KVM_REG_RISCV_FP_F);
+       case KVM_REG_RISCV_FP_D:
+               return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
+                                                KVM_REG_RISCV_FP_D);
+       case KVM_REG_RISCV_ISA_EXT:
+               return kvm_riscv_vcpu_set_reg_isa_ext(vcpu, reg);
+       case KVM_REG_RISCV_SBI_EXT:
+               return kvm_riscv_vcpu_set_reg_sbi_ext(vcpu, reg);
+       case KVM_REG_RISCV_VECTOR:
+               return kvm_riscv_vcpu_set_reg_vector(vcpu, reg);
+       default:
+               break;
+       }
+
+       return -ENOENT;
+}
+
+int kvm_riscv_vcpu_get_reg(struct kvm_vcpu *vcpu,
+                          const struct kvm_one_reg *reg)
+{
+       switch (reg->id & KVM_REG_RISCV_TYPE_MASK) {
+       case KVM_REG_RISCV_CONFIG:
+               return kvm_riscv_vcpu_get_reg_config(vcpu, reg);
+       case KVM_REG_RISCV_CORE:
+               return kvm_riscv_vcpu_get_reg_core(vcpu, reg);
+       case KVM_REG_RISCV_CSR:
+               return kvm_riscv_vcpu_get_reg_csr(vcpu, reg);
+       case KVM_REG_RISCV_TIMER:
+               return kvm_riscv_vcpu_get_reg_timer(vcpu, reg);
+       case KVM_REG_RISCV_FP_F:
+               return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
+                                                KVM_REG_RISCV_FP_F);
+       case KVM_REG_RISCV_FP_D:
+               return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
+                                                KVM_REG_RISCV_FP_D);
+       case KVM_REG_RISCV_ISA_EXT:
+               return kvm_riscv_vcpu_get_reg_isa_ext(vcpu, reg);
+       case KVM_REG_RISCV_SBI_EXT:
+               return kvm_riscv_vcpu_get_reg_sbi_ext(vcpu, reg);
+       case KVM_REG_RISCV_VECTOR:
+               return kvm_riscv_vcpu_get_reg_vector(vcpu, reg);
+       default:
+               break;
+       }
+
+       return -ENOENT;
+}
index 7b46e04..9cd9709 100644 (file)
@@ -140,8 +140,10 @@ static int riscv_vcpu_set_sbi_ext_single(struct kvm_vcpu *vcpu,
        const struct kvm_riscv_sbi_extension_entry *sext = NULL;
        struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
 
-       if (reg_num >= KVM_RISCV_SBI_EXT_MAX ||
-           (reg_val != 1 && reg_val != 0))
+       if (reg_num >= KVM_RISCV_SBI_EXT_MAX)
+               return -ENOENT;
+
+       if (reg_val != 1 && reg_val != 0)
                return -EINVAL;
 
        for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
@@ -175,7 +177,7 @@ static int riscv_vcpu_get_sbi_ext_single(struct kvm_vcpu *vcpu,
        struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
 
        if (reg_num >= KVM_RISCV_SBI_EXT_MAX)
-               return -EINVAL;
+               return -ENOENT;
 
        for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
                if (sbi_ext[i].ext_idx == reg_num) {
@@ -206,7 +208,7 @@ static int riscv_vcpu_set_sbi_ext_multi(struct kvm_vcpu *vcpu,
        unsigned long i, ext_id;
 
        if (reg_num > KVM_REG_RISCV_SBI_MULTI_REG_LAST)
-               return -EINVAL;
+               return -ENOENT;
 
        for_each_set_bit(i, &reg_val, BITS_PER_LONG) {
                ext_id = i + reg_num * BITS_PER_LONG;
@@ -226,7 +228,7 @@ static int riscv_vcpu_get_sbi_ext_multi(struct kvm_vcpu *vcpu,
        unsigned long i, ext_id, ext_val;
 
        if (reg_num > KVM_REG_RISCV_SBI_MULTI_REG_LAST)
-               return -EINVAL;
+               return -ENOENT;
 
        for (i = 0; i < BITS_PER_LONG; i++) {
                ext_id = i + reg_num * BITS_PER_LONG;
@@ -272,7 +274,7 @@ int kvm_riscv_vcpu_set_reg_sbi_ext(struct kvm_vcpu *vcpu,
        case KVM_REG_RISCV_SBI_MULTI_DIS:
                return riscv_vcpu_set_sbi_ext_multi(vcpu, reg_num, reg_val, false);
        default:
-               return -EINVAL;
+               return -ENOENT;
        }
 
        return 0;
@@ -307,7 +309,7 @@ int kvm_riscv_vcpu_get_reg_sbi_ext(struct kvm_vcpu *vcpu,
                        reg_val = ~reg_val;
                break;
        default:
-               rc = -EINVAL;
+               rc = -ENOENT;
        }
        if (rc)
                return rc;
index 3ac2ff6..75486b2 100644 (file)
@@ -170,7 +170,7 @@ int kvm_riscv_vcpu_get_reg_timer(struct kvm_vcpu *vcpu,
        if (KVM_REG_SIZE(reg->id) != sizeof(u64))
                return -EINVAL;
        if (reg_num >= sizeof(struct kvm_riscv_timer) / sizeof(u64))
-               return -EINVAL;
+               return -ENOENT;
 
        switch (reg_num) {
        case KVM_REG_RISCV_TIMER_REG(frequency):
@@ -187,7 +187,7 @@ int kvm_riscv_vcpu_get_reg_timer(struct kvm_vcpu *vcpu,
                                          KVM_RISCV_TIMER_STATE_OFF;
                break;
        default:
-               return -EINVAL;
+               return -ENOENT;
        }
 
        if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
@@ -211,14 +211,15 @@ int kvm_riscv_vcpu_set_reg_timer(struct kvm_vcpu *vcpu,
        if (KVM_REG_SIZE(reg->id) != sizeof(u64))
                return -EINVAL;
        if (reg_num >= sizeof(struct kvm_riscv_timer) / sizeof(u64))
-               return -EINVAL;
+               return -ENOENT;
 
        if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
                return -EFAULT;
 
        switch (reg_num) {
        case KVM_REG_RISCV_TIMER_REG(frequency):
-               ret = -EOPNOTSUPP;
+               if (reg_val != riscv_timebase)
+                       return -EINVAL;
                break;
        case KVM_REG_RISCV_TIMER_REG(time):
                gt->time_delta = reg_val - get_cycles64();
@@ -233,7 +234,7 @@ int kvm_riscv_vcpu_set_reg_timer(struct kvm_vcpu *vcpu,
                        ret = kvm_riscv_vcpu_timer_cancel(t);
                break;
        default:
-               ret = -EINVAL;
+               ret = -ENOENT;
                break;
        }
 
index edd2eec..b430cbb 100644 (file)
@@ -91,95 +91,93 @@ void kvm_riscv_vcpu_free_vector_context(struct kvm_vcpu *vcpu)
 }
 #endif
 
-static void *kvm_riscv_vcpu_vreg_addr(struct kvm_vcpu *vcpu,
-                                     unsigned long reg_num,
-                                     size_t reg_size)
+static int kvm_riscv_vcpu_vreg_addr(struct kvm_vcpu *vcpu,
+                                   unsigned long reg_num,
+                                   size_t reg_size,
+                                   void **reg_addr)
 {
        struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
-       void *reg_val;
        size_t vlenb = riscv_v_vsize / 32;
 
        if (reg_num < KVM_REG_RISCV_VECTOR_REG(0)) {
                if (reg_size != sizeof(unsigned long))
-                       return NULL;
+                       return -EINVAL;
                switch (reg_num) {
                case KVM_REG_RISCV_VECTOR_CSR_REG(vstart):
-                       reg_val = &cntx->vector.vstart;
+                       *reg_addr = &cntx->vector.vstart;
                        break;
                case KVM_REG_RISCV_VECTOR_CSR_REG(vl):
-                       reg_val = &cntx->vector.vl;
+                       *reg_addr = &cntx->vector.vl;
                        break;
                case KVM_REG_RISCV_VECTOR_CSR_REG(vtype):
-                       reg_val = &cntx->vector.vtype;
+                       *reg_addr = &cntx->vector.vtype;
                        break;
                case KVM_REG_RISCV_VECTOR_CSR_REG(vcsr):
-                       reg_val = &cntx->vector.vcsr;
+                       *reg_addr = &cntx->vector.vcsr;
                        break;
                case KVM_REG_RISCV_VECTOR_CSR_REG(datap):
                default:
-                       return NULL;
+                       return -ENOENT;
                }
        } else if (reg_num <= KVM_REG_RISCV_VECTOR_REG(31)) {
                if (reg_size != vlenb)
-                       return NULL;
-               reg_val = cntx->vector.datap
-                         + (reg_num - KVM_REG_RISCV_VECTOR_REG(0)) * vlenb;
+                       return -EINVAL;
+               *reg_addr = cntx->vector.datap +
+                           (reg_num - KVM_REG_RISCV_VECTOR_REG(0)) * vlenb;
        } else {
-               return NULL;
+               return -ENOENT;
        }
 
-       return reg_val;
+       return 0;
 }
 
 int kvm_riscv_vcpu_get_reg_vector(struct kvm_vcpu *vcpu,
-                                 const struct kvm_one_reg *reg,
-                                 unsigned long rtype)
+                                 const struct kvm_one_reg *reg)
 {
        unsigned long *isa = vcpu->arch.isa;
        unsigned long __user *uaddr =
                        (unsigned long __user *)(unsigned long)reg->addr;
        unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
                                            KVM_REG_SIZE_MASK |
-                                           rtype);
-       void *reg_val = NULL;
+                                           KVM_REG_RISCV_VECTOR);
        size_t reg_size = KVM_REG_SIZE(reg->id);
+       void *reg_addr;
+       int rc;
 
-       if (rtype == KVM_REG_RISCV_VECTOR &&
-           riscv_isa_extension_available(isa, v)) {
-               reg_val = kvm_riscv_vcpu_vreg_addr(vcpu, reg_num, reg_size);
-       }
+       if (!riscv_isa_extension_available(isa, v))
+               return -ENOENT;
 
-       if (!reg_val)
-               return -EINVAL;
+       rc = kvm_riscv_vcpu_vreg_addr(vcpu, reg_num, reg_size, &reg_addr);
+       if (rc)
+               return rc;
 
-       if (copy_to_user(uaddr, reg_val, reg_size))
+       if (copy_to_user(uaddr, reg_addr, reg_size))
                return -EFAULT;
 
        return 0;
 }
 
 int kvm_riscv_vcpu_set_reg_vector(struct kvm_vcpu *vcpu,
-                                 const struct kvm_one_reg *reg,
-                                 unsigned long rtype)
+                                 const struct kvm_one_reg *reg)
 {
        unsigned long *isa = vcpu->arch.isa;
        unsigned long __user *uaddr =
                        (unsigned long __user *)(unsigned long)reg->addr;
        unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
                                            KVM_REG_SIZE_MASK |
-                                           rtype);
-       void *reg_val = NULL;
+                                           KVM_REG_RISCV_VECTOR);
        size_t reg_size = KVM_REG_SIZE(reg->id);
+       void *reg_addr;
+       int rc;
 
-       if (rtype == KVM_REG_RISCV_VECTOR &&
-           riscv_isa_extension_available(isa, v)) {
-               reg_val = kvm_riscv_vcpu_vreg_addr(vcpu, reg_num, reg_size);
-       }
+       if (!riscv_isa_extension_available(isa, v))
+               return -ENOENT;
 
-       if (!reg_val)
-               return -EINVAL;
+       rc = kvm_riscv_vcpu_vreg_addr(vcpu, reg_num, reg_size, &reg_addr);
+       if (rc)
+               return rc;
 
-       if (copy_from_user(reg_val, uaddr, reg_size))
+       if (copy_from_user(reg_addr, uaddr, reg_size))
                return -EFAULT;
 
        return 0;
index 6092ccf..a3bb36f 100644 (file)
@@ -142,7 +142,6 @@ TEST_GEN_PROGS_EXTENDED_x86_64 += x86_64/nx_huge_pages_test
 TEST_GEN_PROGS_aarch64 += aarch64/aarch32_id_regs
 TEST_GEN_PROGS_aarch64 += aarch64/arch_timer
 TEST_GEN_PROGS_aarch64 += aarch64/debug-exceptions
-TEST_GEN_PROGS_aarch64 += aarch64/get-reg-list
 TEST_GEN_PROGS_aarch64 += aarch64/hypercalls
 TEST_GEN_PROGS_aarch64 += aarch64/page_fault_test
 TEST_GEN_PROGS_aarch64 += aarch64/psci_test
@@ -155,6 +154,7 @@ TEST_GEN_PROGS_aarch64 += demand_paging_test
 TEST_GEN_PROGS_aarch64 += dirty_log_test
 TEST_GEN_PROGS_aarch64 += dirty_log_perf_test
 TEST_GEN_PROGS_aarch64 += guest_print_test
+TEST_GEN_PROGS_aarch64 += get-reg-list
 TEST_GEN_PROGS_aarch64 += kvm_create_max_vcpus
 TEST_GEN_PROGS_aarch64 += kvm_page_table_test
 TEST_GEN_PROGS_aarch64 += memslot_modification_stress_test
@@ -182,11 +182,14 @@ TEST_GEN_PROGS_s390x += kvm_binary_stats_test
 TEST_GEN_PROGS_riscv += demand_paging_test
 TEST_GEN_PROGS_riscv += dirty_log_test
 TEST_GEN_PROGS_riscv += guest_print_test
+TEST_GEN_PROGS_riscv += get-reg-list
 TEST_GEN_PROGS_riscv += kvm_create_max_vcpus
 TEST_GEN_PROGS_riscv += kvm_page_table_test
 TEST_GEN_PROGS_riscv += set_memory_region_test
 TEST_GEN_PROGS_riscv += kvm_binary_stats_test
 
+SPLIT_TESTS += get-reg-list
+
 TEST_PROGS += $(TEST_PROGS_$(ARCH_DIR))
 TEST_GEN_PROGS += $(TEST_GEN_PROGS_$(ARCH_DIR))
 TEST_GEN_PROGS_EXTENDED += $(TEST_GEN_PROGS_EXTENDED_$(ARCH_DIR))
@@ -235,11 +238,14 @@ LIBKVM_C_OBJ := $(patsubst %.c, $(OUTPUT)/%.o, $(LIBKVM_C))
 LIBKVM_S_OBJ := $(patsubst %.S, $(OUTPUT)/%.o, $(LIBKVM_S))
 LIBKVM_STRING_OBJ := $(patsubst %.c, $(OUTPUT)/%.o, $(LIBKVM_STRING))
 LIBKVM_OBJS = $(LIBKVM_C_OBJ) $(LIBKVM_S_OBJ) $(LIBKVM_STRING_OBJ)
+SPLIT_TESTS_TARGETS := $(patsubst %, $(OUTPUT)/%, $(SPLIT_TESTS))
+SPLIT_TESTS_OBJS := $(patsubst %, $(ARCH_DIR)/%.o, $(SPLIT_TESTS))
 
 TEST_GEN_OBJ = $(patsubst %, %.o, $(TEST_GEN_PROGS))
 TEST_GEN_OBJ += $(patsubst %, %.o, $(TEST_GEN_PROGS_EXTENDED))
 TEST_DEP_FILES = $(patsubst %.o, %.d, $(TEST_GEN_OBJ))
 TEST_DEP_FILES += $(patsubst %.o, %.d, $(LIBKVM_OBJS))
+TEST_DEP_FILES += $(patsubst %.o, %.d, $(SPLIT_TESTS_OBJS))
 -include $(TEST_DEP_FILES)
 
 $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED): %: %.o
@@ -247,7 +253,10 @@ $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED): %: %.o
 $(TEST_GEN_OBJ): $(OUTPUT)/%.o: %.c
        $(CC) $(CFLAGS) $(CPPFLAGS) $(TARGET_ARCH) -c $< -o $@
 
-EXTRA_CLEAN += $(LIBKVM_OBJS) $(TEST_DEP_FILES) $(TEST_GEN_OBJ) cscope.*
+$(SPLIT_TESTS_TARGETS): %: %.o $(SPLIT_TESTS_OBJS)
+       $(CC) $(CFLAGS) $(CPPFLAGS) $(LDFLAGS) $(TARGET_ARCH) $^ $(LDLIBS) -o $@
+
+EXTRA_CLEAN += $(LIBKVM_OBJS) $(TEST_DEP_FILES) $(TEST_GEN_OBJ) $(SPLIT_TESTS_OBJS) cscope.*
 
 x := $(shell mkdir -p $(sort $(dir $(LIBKVM_C_OBJ) $(LIBKVM_S_OBJ))))
 $(LIBKVM_C_OBJ): $(OUTPUT)/%.o: %.c
index 4f10055..709d7d7 100644 (file)
@@ -4,50 +4,17 @@
  *
  * Copyright (C) 2020, Red Hat, Inc.
  *
- * When attempting to migrate from a host with an older kernel to a host
- * with a newer kernel we allow the newer kernel on the destination to
- * list new registers with get-reg-list. We assume they'll be unused, at
- * least until the guest reboots, and so they're relatively harmless.
- * However, if the destination host with the newer kernel is missing
- * registers which the source host with the older kernel has, then that's
- * a regression in get-reg-list. This test checks for that regression by
- * checking the current list against a blessed list. We should never have
- * missing registers, but if new ones appear then they can probably be
- * added to the blessed list. A completely new blessed list can be created
- * by running the test with the --list command line argument.
- *
- * Note, the blessed list should be created from the oldest possible
- * kernel. We can't go older than v4.15, though, because that's the first
- * release to expose the ID system registers in KVM_GET_REG_LIST, see
- * commit 93390c0a1b20 ("arm64: KVM: Hide unsupported AArch64 CPU features
- * from guests"). Also, one must use the --core-reg-fixup command line
- * option when running on an older kernel that doesn't include df205b5c6328
- * ("KVM: arm64: Filter out invalid core register IDs in KVM_GET_REG_LIST")
+ * While the blessed list should be created from the oldest possible
+ * kernel, we can't go older than v5.2, though, because that's the first
+ * release which includes df205b5c6328 ("KVM: arm64: Filter out invalid
+ * core register IDs in KVM_GET_REG_LIST"). Without that commit the core
+ * registers won't match expectations.
  */
 #include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <unistd.h>
-#include <sys/types.h>
-#include <sys/wait.h>
 #include "kvm_util.h"
 #include "test_util.h"
 #include "processor.h"
 
-static struct kvm_reg_list *reg_list;
-static __u64 *blessed_reg, blessed_n;
-
-struct reg_sublist {
-       const char *name;
-       long capability;
-       int feature;
-       bool finalize;
-       __u64 *regs;
-       __u64 regs_n;
-       __u64 *rejects_set;
-       __u64 rejects_set_n;
-};
-
 struct feature_id_reg {
        __u64 reg;
        __u64 id_reg;
@@ -76,70 +43,7 @@ static struct feature_id_reg feat_id_regs[] = {
        }
 };
 
-struct vcpu_config {
-       char *name;
-       struct reg_sublist sublists[];
-};
-
-static struct vcpu_config *vcpu_configs[];
-static int vcpu_configs_n;
-
-#define for_each_sublist(c, s)                                                 \
-       for ((s) = &(c)->sublists[0]; (s)->regs; ++(s))
-
-#define for_each_reg(i)                                                                \
-       for ((i) = 0; (i) < reg_list->n; ++(i))
-
-#define for_each_reg_filtered(i)                                               \
-       for_each_reg(i)                                                         \
-               if (!filter_reg(reg_list->reg[i]))
-
-#define for_each_missing_reg(i)                                                        \
-       for ((i) = 0; (i) < blessed_n; ++(i))                                   \
-               if (!find_reg(reg_list->reg, reg_list->n, blessed_reg[i]))      \
-                       if (check_supported_feat_reg(vcpu, blessed_reg[i]))
-
-#define for_each_new_reg(i)                                                    \
-       for_each_reg_filtered(i)                                                \
-               if (!find_reg(blessed_reg, blessed_n, reg_list->reg[i]))
-
-static const char *config_name(struct vcpu_config *c)
-{
-       struct reg_sublist *s;
-       int len = 0;
-
-       if (c->name)
-               return c->name;
-
-       for_each_sublist(c, s)
-               len += strlen(s->name) + 1;
-
-       c->name = malloc(len);
-
-       len = 0;
-       for_each_sublist(c, s) {
-               if (!strcmp(s->name, "base"))
-                       continue;
-               strcat(c->name + len, s->name);
-               len += strlen(s->name) + 1;
-               c->name[len - 1] = '+';
-       }
-       c->name[len - 1] = '\0';
-
-       return c->name;
-}
-
-static bool has_cap(struct vcpu_config *c, long capability)
-{
-       struct reg_sublist *s;
-
-       for_each_sublist(c, s)
-               if (s->capability == capability)
-                       return true;
-       return false;
-}
-
-static bool filter_reg(__u64 reg)
+bool filter_reg(__u64 reg)
 {
        /*
         * DEMUX register presence depends on the host's CLIDR_EL1.
@@ -151,16 +55,6 @@ static bool filter_reg(__u64 reg)
        return false;
 }
 
-static bool find_reg(__u64 regs[], __u64 nr_regs, __u64 reg)
-{
-       int i;
-
-       for (i = 0; i < nr_regs; ++i)
-               if (reg == regs[i])
-                       return true;
-       return false;
-}
-
 static bool check_supported_feat_reg(struct kvm_vcpu *vcpu, __u64 reg)
 {
        int i, ret;
@@ -180,17 +74,27 @@ static bool check_supported_feat_reg(struct kvm_vcpu *vcpu, __u64 reg)
        return true;
 }
 
-static const char *str_with_index(const char *template, __u64 index)
+bool check_supported_reg(struct kvm_vcpu *vcpu, __u64 reg)
 {
-       char *str, *p;
-       int n;
+       return check_supported_feat_reg(vcpu, reg);
+}
 
-       str = strdup(template);
-       p = strstr(str, "##");
-       n = sprintf(p, "%lld", index);
-       strcat(p + n, strstr(template, "##") + 2);
+bool check_reject_set(int err)
+{
+       return err == EPERM;
+}
 
-       return (const char *)str;
+void finalize_vcpu(struct kvm_vcpu *vcpu, struct vcpu_reg_list *c)
+{
+       struct vcpu_reg_sublist *s;
+       int feature;
+
+       for_each_sublist(c, s) {
+               if (s->finalize) {
+                       feature = s->feature;
+                       vcpu_ioctl(vcpu, KVM_ARM_VCPU_FINALIZE, &feature);
+               }
+       }
 }
 
 #define REG_MASK (KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_COPROC_MASK)
@@ -199,7 +103,7 @@ static const char *str_with_index(const char *template, __u64 index)
 #define CORE_SPSR_XX_NR_WORDS  2
 #define CORE_FPREGS_XX_NR_WORDS        4
 
-static const char *core_id_to_str(struct vcpu_config *c, __u64 id)
+static const char *core_id_to_str(const char *prefix, __u64 id)
 {
        __u64 core_off = id & ~REG_MASK, idx;
 
@@ -210,8 +114,8 @@ static const char *core_id_to_str(struct vcpu_config *c, __u64 id)
        case KVM_REG_ARM_CORE_REG(regs.regs[0]) ...
             KVM_REG_ARM_CORE_REG(regs.regs[30]):
                idx = (core_off - KVM_REG_ARM_CORE_REG(regs.regs[0])) / CORE_REGS_XX_NR_WORDS;
-               TEST_ASSERT(idx < 31, "%s: Unexpected regs.regs index: %lld", config_name(c), idx);
-               return str_with_index("KVM_REG_ARM_CORE_REG(regs.regs[##])", idx);
+               TEST_ASSERT(idx < 31, "%s: Unexpected regs.regs index: %lld", prefix, idx);
+               return strdup_printf("KVM_REG_ARM_CORE_REG(regs.regs[%lld])", idx);
        case KVM_REG_ARM_CORE_REG(regs.sp):
                return "KVM_REG_ARM_CORE_REG(regs.sp)";
        case KVM_REG_ARM_CORE_REG(regs.pc):
@@ -225,24 +129,24 @@ static const char *core_id_to_str(struct vcpu_config *c, __u64 id)
        case KVM_REG_ARM_CORE_REG(spsr[0]) ...
             KVM_REG_ARM_CORE_REG(spsr[KVM_NR_SPSR - 1]):
                idx = (core_off - KVM_REG_ARM_CORE_REG(spsr[0])) / CORE_SPSR_XX_NR_WORDS;
-               TEST_ASSERT(idx < KVM_NR_SPSR, "%s: Unexpected spsr index: %lld", config_name(c), idx);
-               return str_with_index("KVM_REG_ARM_CORE_REG(spsr[##])", idx);
+               TEST_ASSERT(idx < KVM_NR_SPSR, "%s: Unexpected spsr index: %lld", prefix, idx);
+               return strdup_printf("KVM_REG_ARM_CORE_REG(spsr[%lld])", idx);
        case KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]) ...
             KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]):
                idx = (core_off - KVM_REG_ARM_CORE_REG(fp_regs.vregs[0])) / CORE_FPREGS_XX_NR_WORDS;
-               TEST_ASSERT(idx < 32, "%s: Unexpected fp_regs.vregs index: %lld", config_name(c), idx);
-               return str_with_index("KVM_REG_ARM_CORE_REG(fp_regs.vregs[##])", idx);
+               TEST_ASSERT(idx < 32, "%s: Unexpected fp_regs.vregs index: %lld", prefix, idx);
+               return strdup_printf("KVM_REG_ARM_CORE_REG(fp_regs.vregs[%lld])", idx);
        case KVM_REG_ARM_CORE_REG(fp_regs.fpsr):
                return "KVM_REG_ARM_CORE_REG(fp_regs.fpsr)";
        case KVM_REG_ARM_CORE_REG(fp_regs.fpcr):
                return "KVM_REG_ARM_CORE_REG(fp_regs.fpcr)";
        }
 
-       TEST_FAIL("%s: Unknown core reg id: 0x%llx", config_name(c), id);
+       TEST_FAIL("%s: Unknown core reg id: 0x%llx", prefix, id);
        return NULL;
 }
 
-static const char *sve_id_to_str(struct vcpu_config *c, __u64 id)
+static const char *sve_id_to_str(const char *prefix, __u64 id)
 {
        __u64 sve_off, n, i;
 
@@ -252,37 +156,37 @@ static const char *sve_id_to_str(struct vcpu_config *c, __u64 id)
        sve_off = id & ~(REG_MASK | ((1ULL << 5) - 1));
        i = id & (KVM_ARM64_SVE_MAX_SLICES - 1);
 
-       TEST_ASSERT(i == 0, "%s: Currently we don't expect slice > 0, reg id 0x%llx", config_name(c), id);
+       TEST_ASSERT(i == 0, "%s: Currently we don't expect slice > 0, reg id 0x%llx", prefix, id);
 
        switch (sve_off) {
        case KVM_REG_ARM64_SVE_ZREG_BASE ...
             KVM_REG_ARM64_SVE_ZREG_BASE + (1ULL << 5) * KVM_ARM64_SVE_NUM_ZREGS - 1:
                n = (id >> 5) & (KVM_ARM64_SVE_NUM_ZREGS - 1);
                TEST_ASSERT(id == KVM_REG_ARM64_SVE_ZREG(n, 0),
-                           "%s: Unexpected bits set in SVE ZREG id: 0x%llx", config_name(c), id);
-               return str_with_index("KVM_REG_ARM64_SVE_ZREG(##, 0)", n);
+                           "%s: Unexpected bits set in SVE ZREG id: 0x%llx", prefix, id);
+               return strdup_printf("KVM_REG_ARM64_SVE_ZREG(%lld, 0)", n);
        case KVM_REG_ARM64_SVE_PREG_BASE ...
             KVM_REG_ARM64_SVE_PREG_BASE + (1ULL << 5) * KVM_ARM64_SVE_NUM_PREGS - 1:
                n = (id >> 5) & (KVM_ARM64_SVE_NUM_PREGS - 1);
                TEST_ASSERT(id == KVM_REG_ARM64_SVE_PREG(n, 0),
-                           "%s: Unexpected bits set in SVE PREG id: 0x%llx", config_name(c), id);
-               return str_with_index("KVM_REG_ARM64_SVE_PREG(##, 0)", n);
+                           "%s: Unexpected bits set in SVE PREG id: 0x%llx", prefix, id);
+               return strdup_printf("KVM_REG_ARM64_SVE_PREG(%lld, 0)", n);
        case KVM_REG_ARM64_SVE_FFR_BASE:
                TEST_ASSERT(id == KVM_REG_ARM64_SVE_FFR(0),
-                           "%s: Unexpected bits set in SVE FFR id: 0x%llx", config_name(c), id);
+                           "%s: Unexpected bits set in SVE FFR id: 0x%llx", prefix, id);
                return "KVM_REG_ARM64_SVE_FFR(0)";
        }
 
        return NULL;
 }
 
-static void print_reg(struct vcpu_config *c, __u64 id)
+void print_reg(const char *prefix, __u64 id)
 {
        unsigned op0, op1, crn, crm, op2;
        const char *reg_size = NULL;
 
        TEST_ASSERT((id & KVM_REG_ARCH_MASK) == KVM_REG_ARM64,
-                   "%s: KVM_REG_ARM64 missing in reg id: 0x%llx", config_name(c), id);
+                   "%s: KVM_REG_ARM64 missing in reg id: 0x%llx", prefix, id);
 
        switch (id & KVM_REG_SIZE_MASK) {
        case KVM_REG_SIZE_U8:
@@ -314,16 +218,16 @@ static void print_reg(struct vcpu_config *c, __u64 id)
                break;
        default:
                TEST_FAIL("%s: Unexpected reg size: 0x%llx in reg id: 0x%llx",
-                         config_name(c), (id & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT, id);
+                         prefix, (id & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT, id);
        }
 
        switch (id & KVM_REG_ARM_COPROC_MASK) {
        case KVM_REG_ARM_CORE:
-               printf("\tKVM_REG_ARM64 | %s | KVM_REG_ARM_CORE | %s,\n", reg_size, core_id_to_str(c, id));
+               printf("\tKVM_REG_ARM64 | %s | KVM_REG_ARM_CORE | %s,\n", reg_size, core_id_to_str(prefix, id));
                break;
        case KVM_REG_ARM_DEMUX:
                TEST_ASSERT(!(id & ~(REG_MASK | KVM_REG_ARM_DEMUX_ID_MASK | KVM_REG_ARM_DEMUX_VAL_MASK)),
-                           "%s: Unexpected bits set in DEMUX reg id: 0x%llx", config_name(c), id);
+                           "%s: Unexpected bits set in DEMUX reg id: 0x%llx", prefix, id);
                printf("\tKVM_REG_ARM64 | %s | KVM_REG_ARM_DEMUX | KVM_REG_ARM_DEMUX_ID_CCSIDR | %lld,\n",
                       reg_size, id & KVM_REG_ARM_DEMUX_VAL_MASK);
                break;
@@ -334,370 +238,34 @@ static void print_reg(struct vcpu_config *c, __u64 id)
                crm = (id & KVM_REG_ARM64_SYSREG_CRM_MASK) >> KVM_REG_ARM64_SYSREG_CRM_SHIFT;
                op2 = (id & KVM_REG_ARM64_SYSREG_OP2_MASK) >> KVM_REG_ARM64_SYSREG_OP2_SHIFT;
                TEST_ASSERT(id == ARM64_SYS_REG(op0, op1, crn, crm, op2),
-                           "%s: Unexpected bits set in SYSREG reg id: 0x%llx", config_name(c), id);
+                           "%s: Unexpected bits set in SYSREG reg id: 0x%llx", prefix, id);
                printf("\tARM64_SYS_REG(%d, %d, %d, %d, %d),\n", op0, op1, crn, crm, op2);
                break;
        case KVM_REG_ARM_FW:
                TEST_ASSERT(id == KVM_REG_ARM_FW_REG(id & 0xffff),
-                           "%s: Unexpected bits set in FW reg id: 0x%llx", config_name(c), id);
+                           "%s: Unexpected bits set in FW reg id: 0x%llx", prefix, id);
                printf("\tKVM_REG_ARM_FW_REG(%lld),\n", id & 0xffff);
                break;
        case KVM_REG_ARM_FW_FEAT_BMAP:
                TEST_ASSERT(id == KVM_REG_ARM_FW_FEAT_BMAP_REG(id & 0xffff),
-                           "%s: Unexpected bits set in the bitmap feature FW reg id: 0x%llx", config_name(c), id);
+                           "%s: Unexpected bits set in the bitmap feature FW reg id: 0x%llx", prefix, id);
                printf("\tKVM_REG_ARM_FW_FEAT_BMAP_REG(%lld),\n", id & 0xffff);
                break;
        case KVM_REG_ARM64_SVE:
-               if (has_cap(c, KVM_CAP_ARM_SVE))
-                       printf("\t%s,\n", sve_id_to_str(c, id));
-               else
-                       TEST_FAIL("%s: KVM_REG_ARM64_SVE is an unexpected coproc type in reg id: 0x%llx", config_name(c), id);
+               printf("\t%s,\n", sve_id_to_str(prefix, id));
                break;
        default:
                TEST_FAIL("%s: Unexpected coproc type: 0x%llx in reg id: 0x%llx",
-                         config_name(c), (id & KVM_REG_ARM_COPROC_MASK) >> KVM_REG_ARM_COPROC_SHIFT, id);
-       }
-}
-
-/*
- * Older kernels listed each 32-bit word of CORE registers separately.
- * For 64 and 128-bit registers we need to ignore the extra words. We
- * also need to fixup the sizes, because the older kernels stated all
- * registers were 64-bit, even when they weren't.
- */
-static void core_reg_fixup(void)
-{
-       struct kvm_reg_list *tmp;
-       __u64 id, core_off;
-       int i;
-
-       tmp = calloc(1, sizeof(*tmp) + reg_list->n * sizeof(__u64));
-
-       for (i = 0; i < reg_list->n; ++i) {
-               id = reg_list->reg[i];
-
-               if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM_CORE) {
-                       tmp->reg[tmp->n++] = id;
-                       continue;
-               }
-
-               core_off = id & ~REG_MASK;
-
-               switch (core_off) {
-               case 0x52: case 0xd2: case 0xd6:
-                       /*
-                        * These offsets are pointing at padding.
-                        * We need to ignore them too.
-                        */
-                       continue;
-               case KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]) ...
-                    KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]):
-                       if (core_off & 3)
-                               continue;
-                       id &= ~KVM_REG_SIZE_MASK;
-                       id |= KVM_REG_SIZE_U128;
-                       tmp->reg[tmp->n++] = id;
-                       continue;
-               case KVM_REG_ARM_CORE_REG(fp_regs.fpsr):
-               case KVM_REG_ARM_CORE_REG(fp_regs.fpcr):
-                       id &= ~KVM_REG_SIZE_MASK;
-                       id |= KVM_REG_SIZE_U32;
-                       tmp->reg[tmp->n++] = id;
-                       continue;
-               default:
-                       if (core_off & 1)
-                               continue;
-                       tmp->reg[tmp->n++] = id;
-                       break;
-               }
+                         prefix, (id & KVM_REG_ARM_COPROC_MASK) >> KVM_REG_ARM_COPROC_SHIFT, id);
        }
-
-       free(reg_list);
-       reg_list = tmp;
-}
-
-static void prepare_vcpu_init(struct vcpu_config *c, struct kvm_vcpu_init *init)
-{
-       struct reg_sublist *s;
-
-       for_each_sublist(c, s)
-               if (s->capability)
-                       init->features[s->feature / 32] |= 1 << (s->feature % 32);
-}
-
-static void finalize_vcpu(struct kvm_vcpu *vcpu, struct vcpu_config *c)
-{
-       struct reg_sublist *s;
-       int feature;
-
-       for_each_sublist(c, s) {
-               if (s->finalize) {
-                       feature = s->feature;
-                       vcpu_ioctl(vcpu, KVM_ARM_VCPU_FINALIZE, &feature);
-               }
-       }
-}
-
-static void check_supported(struct vcpu_config *c)
-{
-       struct reg_sublist *s;
-
-       for_each_sublist(c, s) {
-               if (!s->capability)
-                       continue;
-
-               __TEST_REQUIRE(kvm_has_cap(s->capability),
-                              "%s: %s not available, skipping tests\n",
-                              config_name(c), s->name);
-       }
-}
-
-static bool print_list;
-static bool print_filtered;
-static bool fixup_core_regs;
-
-static void run_test(struct vcpu_config *c)
-{
-       struct kvm_vcpu_init init = { .target = -1, };
-       int new_regs = 0, missing_regs = 0, i, n;
-       int failed_get = 0, failed_set = 0, failed_reject = 0;
-       struct kvm_vcpu *vcpu;
-       struct kvm_vm *vm;
-       struct reg_sublist *s;
-
-       check_supported(c);
-
-       vm = vm_create_barebones();
-       prepare_vcpu_init(c, &init);
-       vcpu = __vm_vcpu_add(vm, 0);
-       aarch64_vcpu_setup(vcpu, &init);
-       finalize_vcpu(vcpu, c);
-
-       reg_list = vcpu_get_reg_list(vcpu);
-
-       if (fixup_core_regs)
-               core_reg_fixup();
-
-       if (print_list || print_filtered) {
-               putchar('\n');
-               for_each_reg(i) {
-                       __u64 id = reg_list->reg[i];
-                       if ((print_list && !filter_reg(id)) ||
-                           (print_filtered && filter_reg(id)))
-                               print_reg(c, id);
-               }
-               putchar('\n');
-               return;
-       }
-
-       /*
-        * We only test that we can get the register and then write back the
-        * same value. Some registers may allow other values to be written
-        * back, but others only allow some bits to be changed, and at least
-        * for ID registers set will fail if the value does not exactly match
-        * what was returned by get. If registers that allow other values to
-        * be written need to have the other values tested, then we should
-        * create a new set of tests for those in a new independent test
-        * executable.
-        */
-       for_each_reg(i) {
-               uint8_t addr[2048 / 8];
-               struct kvm_one_reg reg = {
-                       .id = reg_list->reg[i],
-                       .addr = (__u64)&addr,
-               };
-               bool reject_reg = false;
-               int ret;
-
-               ret = __vcpu_get_reg(vcpu, reg_list->reg[i], &addr);
-               if (ret) {
-                       printf("%s: Failed to get ", config_name(c));
-                       print_reg(c, reg.id);
-                       putchar('\n');
-                       ++failed_get;
-               }
-
-               /* rejects_set registers are rejected after KVM_ARM_VCPU_FINALIZE */
-               for_each_sublist(c, s) {
-                       if (s->rejects_set && find_reg(s->rejects_set, s->rejects_set_n, reg.id)) {
-                               reject_reg = true;
-                               ret = __vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg);
-                               if (ret != -1 || errno != EPERM) {
-                                       printf("%s: Failed to reject (ret=%d, errno=%d) ", config_name(c), ret, errno);
-                                       print_reg(c, reg.id);
-                                       putchar('\n');
-                                       ++failed_reject;
-                               }
-                               break;
-                       }
-               }
-
-               if (!reject_reg) {
-                       ret = __vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg);
-                       if (ret) {
-                               printf("%s: Failed to set ", config_name(c));
-                               print_reg(c, reg.id);
-                               putchar('\n');
-                               ++failed_set;
-                       }
-               }
-       }
-
-       for_each_sublist(c, s)
-               blessed_n += s->regs_n;
-       blessed_reg = calloc(blessed_n, sizeof(__u64));
-
-       n = 0;
-       for_each_sublist(c, s) {
-               for (i = 0; i < s->regs_n; ++i)
-                       blessed_reg[n++] = s->regs[i];
-       }
-
-       for_each_new_reg(i)
-               ++new_regs;
-
-       for_each_missing_reg(i)
-               ++missing_regs;
-
-       if (new_regs || missing_regs) {
-               n = 0;
-               for_each_reg_filtered(i)
-                       ++n;
-
-               printf("%s: Number blessed registers: %5lld\n", config_name(c), blessed_n);
-               printf("%s: Number registers:         %5lld (includes %lld filtered registers)\n",
-                      config_name(c), reg_list->n, reg_list->n - n);
-       }
-
-       if (new_regs) {
-               printf("\n%s: There are %d new registers.\n"
-                      "Consider adding them to the blessed reg "
-                      "list with the following lines:\n\n", config_name(c), new_regs);
-               for_each_new_reg(i)
-                       print_reg(c, reg_list->reg[i]);
-               putchar('\n');
-       }
-
-       if (missing_regs) {
-               printf("\n%s: There are %d missing registers.\n"
-                      "The following lines are missing registers:\n\n", config_name(c), missing_regs);
-               for_each_missing_reg(i)
-                       print_reg(c, blessed_reg[i]);
-               putchar('\n');
-       }
-
-       TEST_ASSERT(!missing_regs && !failed_get && !failed_set && !failed_reject,
-                   "%s: There are %d missing registers; "
-                   "%d registers failed get; %d registers failed set; %d registers failed reject",
-                   config_name(c), missing_regs, failed_get, failed_set, failed_reject);
-
-       pr_info("%s: PASS\n", config_name(c));
-       blessed_n = 0;
-       free(blessed_reg);
-       free(reg_list);
-       kvm_vm_free(vm);
-}
-
-static void help(void)
-{
-       struct vcpu_config *c;
-       int i;
-
-       printf(
-       "\n"
-       "usage: get-reg-list [--config=<selection>] [--list] [--list-filtered] [--core-reg-fixup]\n\n"
-       " --config=<selection>        Used to select a specific vcpu configuration for the test/listing\n"
-       "                             '<selection>' may be\n");
-
-       for (i = 0; i < vcpu_configs_n; ++i) {
-               c = vcpu_configs[i];
-               printf(
-       "                               '%s'\n", config_name(c));
-       }
-
-       printf(
-       "\n"
-       " --list                      Print the register list rather than test it (requires --config)\n"
-       " --list-filtered             Print registers that would normally be filtered out (requires --config)\n"
-       " --core-reg-fixup            Needed when running on old kernels with broken core reg listings\n"
-       "\n"
-       );
-}
-
-static struct vcpu_config *parse_config(const char *config)
-{
-       struct vcpu_config *c;
-       int i;
-
-       if (config[8] != '=')
-               help(), exit(1);
-
-       for (i = 0; i < vcpu_configs_n; ++i) {
-               c = vcpu_configs[i];
-               if (strcmp(config_name(c), &config[9]) == 0)
-                       break;
-       }
-
-       if (i == vcpu_configs_n)
-               help(), exit(1);
-
-       return c;
-}
-
-int main(int ac, char **av)
-{
-       struct vcpu_config *c, *sel = NULL;
-       int i, ret = 0;
-       pid_t pid;
-
-       for (i = 1; i < ac; ++i) {
-               if (strcmp(av[i], "--core-reg-fixup") == 0)
-                       fixup_core_regs = true;
-               else if (strncmp(av[i], "--config", 8) == 0)
-                       sel = parse_config(av[i]);
-               else if (strcmp(av[i], "--list") == 0)
-                       print_list = true;
-               else if (strcmp(av[i], "--list-filtered") == 0)
-                       print_filtered = true;
-               else if (strcmp(av[i], "--help") == 0 || strcmp(av[1], "-h") == 0)
-                       help(), exit(0);
-               else
-                       help(), exit(1);
-       }
-
-       if (print_list || print_filtered) {
-               /*
-                * We only want to print the register list of a single config.
-                */
-               if (!sel)
-                       help(), exit(1);
-       }
-
-       for (i = 0; i < vcpu_configs_n; ++i) {
-               c = vcpu_configs[i];
-               if (sel && c != sel)
-                       continue;
-
-               pid = fork();
-
-               if (!pid) {
-                       run_test(c);
-                       exit(0);
-               } else {
-                       int wstatus;
-                       pid_t wpid = wait(&wstatus);
-                       TEST_ASSERT(wpid == pid && WIFEXITED(wstatus), "wait: Unexpected return");
-                       if (WEXITSTATUS(wstatus) && WEXITSTATUS(wstatus) != KSFT_SKIP)
-                               ret = KSFT_FAIL;
-               }
-       }
-
-       return ret;
 }
 
 /*
- * The current blessed list was primed with the output of kernel version
+ * The original blessed list was primed with the output of kernel version
  * v4.15 with --core-reg-fixup and then later updated with new registers.
+ * (The --core-reg-fixup option and its fixup function have been removed
+ * from the test, as it's unlikely anyone will use this type of test on a
+ * kernel older than v5.2.)
  *
  * The blessed list is up to date with kernel version v6.4 (or so we hope)
  */
@@ -1130,14 +698,14 @@ static __u64 pauth_generic_regs[] = {
                .regs_n         = ARRAY_SIZE(pauth_generic_regs),       \
        }
 
-static struct vcpu_config vregs_config = {
+static struct vcpu_reg_list vregs_config = {
        .sublists = {
        BASE_SUBLIST,
        VREGS_SUBLIST,
        {0},
        },
 };
-static struct vcpu_config vregs_pmu_config = {
+static struct vcpu_reg_list vregs_pmu_config = {
        .sublists = {
        BASE_SUBLIST,
        VREGS_SUBLIST,
@@ -1145,14 +713,14 @@ static struct vcpu_config vregs_pmu_config = {
        {0},
        },
 };
-static struct vcpu_config sve_config = {
+static struct vcpu_reg_list sve_config = {
        .sublists = {
        BASE_SUBLIST,
        SVE_SUBLIST,
        {0},
        },
 };
-static struct vcpu_config sve_pmu_config = {
+static struct vcpu_reg_list sve_pmu_config = {
        .sublists = {
        BASE_SUBLIST,
        SVE_SUBLIST,
@@ -1160,7 +728,7 @@ static struct vcpu_config sve_pmu_config = {
        {0},
        },
 };
-static struct vcpu_config pauth_config = {
+static struct vcpu_reg_list pauth_config = {
        .sublists = {
        BASE_SUBLIST,
        VREGS_SUBLIST,
@@ -1168,7 +736,7 @@ static struct vcpu_config pauth_config = {
        {0},
        },
 };
-static struct vcpu_config pauth_pmu_config = {
+static struct vcpu_reg_list pauth_pmu_config = {
        .sublists = {
        BASE_SUBLIST,
        VREGS_SUBLIST,
@@ -1178,7 +746,7 @@ static struct vcpu_config pauth_pmu_config = {
        },
 };
 
-static struct vcpu_config *vcpu_configs[] = {
+struct vcpu_reg_list *vcpu_configs[] = {
        &vregs_config,
        &vregs_pmu_config,
        &sve_config,
@@ -1186,4 +754,4 @@ static struct vcpu_config *vcpu_configs[] = {
        &pauth_config,
        &pauth_pmu_config,
 };
-static int vcpu_configs_n = ARRAY_SIZE(vcpu_configs);
+int vcpu_configs_n = ARRAY_SIZE(vcpu_configs);
diff --git a/tools/testing/selftests/kvm/get-reg-list.c b/tools/testing/selftests/kvm/get-reg-list.c
new file mode 100644 (file)
index 0000000..be7bf52
--- /dev/null
@@ -0,0 +1,401 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Check for KVM_GET_REG_LIST regressions.
+ *
+ * Copyright (C) 2020, Red Hat, Inc.
+ *
+ * When attempting to migrate from a host with an older kernel to a host
+ * with a newer kernel we allow the newer kernel on the destination to
+ * list new registers with get-reg-list. We assume they'll be unused, at
+ * least until the guest reboots, and so they're relatively harmless.
+ * However, if the destination host with the newer kernel is missing
+ * registers which the source host with the older kernel has, then that's
+ * a regression in get-reg-list. This test checks for that regression by
+ * checking the current list against a blessed list. We should never have
+ * missing registers, but if new ones appear then they can probably be
+ * added to the blessed list. A completely new blessed list can be created
+ * by running the test with the --list command line argument.
+ *
+ * The blessed list should be created from the oldest possible kernel.
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include "kvm_util.h"
+#include "test_util.h"
+#include "processor.h"
+
+static struct kvm_reg_list *reg_list;
+static __u64 *blessed_reg, blessed_n;
+
+extern struct vcpu_reg_list *vcpu_configs[];
+extern int vcpu_configs_n;
+
+#define for_each_reg(i)                                                                \
+       for ((i) = 0; (i) < reg_list->n; ++(i))
+
+#define for_each_reg_filtered(i)                                               \
+       for_each_reg(i)                                                         \
+               if (!filter_reg(reg_list->reg[i]))
+
+#define for_each_missing_reg(i)                                                        \
+       for ((i) = 0; (i) < blessed_n; ++(i))                                   \
+               if (!find_reg(reg_list->reg, reg_list->n, blessed_reg[i]))      \
+                       if (check_supported_reg(vcpu, blessed_reg[i]))
+
+#define for_each_new_reg(i)                                                    \
+       for_each_reg_filtered(i)                                                \
+               if (!find_reg(blessed_reg, blessed_n, reg_list->reg[i]))
+
+#define for_each_present_blessed_reg(i)                                                \
+       for_each_reg(i)                                                         \
+               if (find_reg(blessed_reg, blessed_n, reg_list->reg[i]))
+
+static const char *config_name(struct vcpu_reg_list *c)
+{
+       struct vcpu_reg_sublist *s;
+       int len = 0;
+
+       if (c->name)
+               return c->name;
+
+       for_each_sublist(c, s)
+               len += strlen(s->name) + 1;
+
+       c->name = malloc(len);
+
+       len = 0;
+       for_each_sublist(c, s) {
+               if (!strcmp(s->name, "base"))
+                       continue;
+               strcat(c->name + len, s->name);
+               len += strlen(s->name) + 1;
+               c->name[len - 1] = '+';
+       }
+       c->name[len - 1] = '\0';
+
+       return c->name;
+}
+
+bool __weak check_supported_reg(struct kvm_vcpu *vcpu, __u64 reg)
+{
+       return true;
+}
+
+bool __weak filter_reg(__u64 reg)
+{
+       return false;
+}
+
+static bool find_reg(__u64 regs[], __u64 nr_regs, __u64 reg)
+{
+       int i;
+
+       for (i = 0; i < nr_regs; ++i)
+               if (reg == regs[i])
+                       return true;
+       return false;
+}
+
+void __weak print_reg(const char *prefix, __u64 id)
+{
+       printf("\t0x%llx,\n", id);
+}
+
+bool __weak check_reject_set(int err)
+{
+       return true;
+}
+
+void __weak finalize_vcpu(struct kvm_vcpu *vcpu, struct vcpu_reg_list *c)
+{
+}
+
+#ifdef __aarch64__
+static void prepare_vcpu_init(struct vcpu_reg_list *c, struct kvm_vcpu_init *init)
+{
+       struct vcpu_reg_sublist *s;
+
+       for_each_sublist(c, s)
+               if (s->capability)
+                       init->features[s->feature / 32] |= 1 << (s->feature % 32);
+}
+
+static struct kvm_vcpu *vcpu_config_get_vcpu(struct vcpu_reg_list *c, struct kvm_vm *vm)
+{
+       struct kvm_vcpu_init init = { .target = -1, };
+       struct kvm_vcpu *vcpu;
+
+       prepare_vcpu_init(c, &init);
+       vcpu = __vm_vcpu_add(vm, 0);
+       aarch64_vcpu_setup(vcpu, &init);
+
+       return vcpu;
+}
+#else
+static struct kvm_vcpu *vcpu_config_get_vcpu(struct vcpu_reg_list *c, struct kvm_vm *vm)
+{
+       return __vm_vcpu_add(vm, 0);
+}
+#endif
+
+static void check_supported(struct vcpu_reg_list *c)
+{
+       struct vcpu_reg_sublist *s;
+
+       for_each_sublist(c, s) {
+               if (!s->capability)
+                       continue;
+
+               __TEST_REQUIRE(kvm_has_cap(s->capability),
+                              "%s: %s not available, skipping tests\n",
+                              config_name(c), s->name);
+       }
+}
+
+static bool print_list;
+static bool print_filtered;
+
+static void run_test(struct vcpu_reg_list *c)
+{
+       int new_regs = 0, missing_regs = 0, i, n;
+       int failed_get = 0, failed_set = 0, failed_reject = 0;
+       int skipped_set = 0;
+       struct kvm_vcpu *vcpu;
+       struct kvm_vm *vm;
+       struct vcpu_reg_sublist *s;
+
+       check_supported(c);
+
+       vm = vm_create_barebones();
+       vcpu = vcpu_config_get_vcpu(c, vm);
+       finalize_vcpu(vcpu, c);
+
+       reg_list = vcpu_get_reg_list(vcpu);
+
+       if (print_list || print_filtered) {
+               putchar('\n');
+               for_each_reg(i) {
+                       __u64 id = reg_list->reg[i];
+                       if ((print_list && !filter_reg(id)) ||
+                           (print_filtered && filter_reg(id)))
+                               print_reg(config_name(c), id);
+               }
+               putchar('\n');
+               return;
+       }
+
+       for_each_sublist(c, s)
+               blessed_n += s->regs_n;
+       blessed_reg = calloc(blessed_n, sizeof(__u64));
+
+       n = 0;
+       for_each_sublist(c, s) {
+               for (i = 0; i < s->regs_n; ++i)
+                       blessed_reg[n++] = s->regs[i];
+       }
+
+       /*
+        * We only test that we can get the register and then write back the
+        * same value. Some registers may allow other values to be written
+        * back, but others only allow some bits to be changed, and at least
+        * for ID registers set will fail if the value does not exactly match
+        * what was returned by get. If registers that allow other values to
+        * be written need to have the other values tested, then we should
+        * create a new set of tests for those in a new independent test
+        * executable.
+        *
+        * Only do the get/set tests on present, blessed list registers,
+        * since we don't know the capabilities of any new registers.
+        */
+       for_each_present_blessed_reg(i) {
+               uint8_t addr[2048 / 8];
+               struct kvm_one_reg reg = {
+                       .id = reg_list->reg[i],
+                       .addr = (__u64)&addr,
+               };
+               bool reject_reg = false, skip_reg = false;
+               int ret;
+
+               ret = __vcpu_get_reg(vcpu, reg_list->reg[i], &addr);
+               if (ret) {
+                       printf("%s: Failed to get ", config_name(c));
+                       print_reg(config_name(c), reg.id);
+                       putchar('\n');
+                       ++failed_get;
+               }
+
+               for_each_sublist(c, s) {
+                       /* rejects_set registers are rejected for set operation */
+                       if (s->rejects_set && find_reg(s->rejects_set, s->rejects_set_n, reg.id)) {
+                               reject_reg = true;
+                               ret = __vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg);
+                               if (ret != -1 || !check_reject_set(errno)) {
+                                       printf("%s: Failed to reject (ret=%d, errno=%d) ", config_name(c), ret, errno);
+                                       print_reg(config_name(c), reg.id);
+                                       putchar('\n');
+                                       ++failed_reject;
+                               }
+                               break;
+                       }
+
+                       /* skips_set registers are skipped for set operation */
+                       if (s->skips_set && find_reg(s->skips_set, s->skips_set_n, reg.id)) {
+                               skip_reg = true;
+                               ++skipped_set;
+                               break;
+                       }
+               }
+
+               if (!reject_reg && !skip_reg) {
+                       ret = __vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg);
+                       if (ret) {
+                               printf("%s: Failed to set ", config_name(c));
+                               print_reg(config_name(c), reg.id);
+                               putchar('\n');
+                               ++failed_set;
+                       }
+               }
+       }
+
+       for_each_new_reg(i)
+               ++new_regs;
+
+       for_each_missing_reg(i)
+               ++missing_regs;
+
+       if (new_regs || missing_regs) {
+               n = 0;
+               for_each_reg_filtered(i)
+                       ++n;
+
+               printf("%s: Number blessed registers: %5lld\n", config_name(c), blessed_n);
+               printf("%s: Number registers:         %5lld (includes %lld filtered registers)\n",
+                      config_name(c), reg_list->n, reg_list->n - n);
+       }
+
+       if (new_regs) {
+               printf("\n%s: There are %d new registers.\n"
+                      "Consider adding them to the blessed reg "
+                      "list with the following lines:\n\n", config_name(c), new_regs);
+               for_each_new_reg(i)
+                       print_reg(config_name(c), reg_list->reg[i]);
+               putchar('\n');
+       }
+
+       if (missing_regs) {
+               printf("\n%s: There are %d missing registers.\n"
+                      "The following lines are missing registers:\n\n", config_name(c), missing_regs);
+               for_each_missing_reg(i)
+                       print_reg(config_name(c), blessed_reg[i]);
+               putchar('\n');
+       }
+
+       TEST_ASSERT(!missing_regs && !failed_get && !failed_set && !failed_reject,
+                   "%s: There are %d missing registers; %d registers failed get; "
+                   "%d registers failed set; %d registers failed reject; %d registers skipped set",
+                   config_name(c), missing_regs, failed_get, failed_set, failed_reject, skipped_set);
+
+       pr_info("%s: PASS\n", config_name(c));
+       blessed_n = 0;
+       free(blessed_reg);
+       free(reg_list);
+       kvm_vm_free(vm);
+}
+
+static void help(void)
+{
+       struct vcpu_reg_list *c;
+       int i;
+
+       printf(
+       "\n"
+       "usage: get-reg-list [--config=<selection>] [--list] [--list-filtered]\n\n"
+       " --config=<selection>        Used to select a specific vcpu configuration for the test/listing\n"
+       "                             '<selection>' may be\n");
+
+       for (i = 0; i < vcpu_configs_n; ++i) {
+               c = vcpu_configs[i];
+               printf(
+       "                               '%s'\n", config_name(c));
+       }
+
+       printf(
+       "\n"
+       " --list                      Print the register list rather than test it (requires --config)\n"
+       " --list-filtered             Print registers that would normally be filtered out (requires --config)\n"
+       "\n"
+       );
+}
+
+static struct vcpu_reg_list *parse_config(const char *config)
+{
+       struct vcpu_reg_list *c = NULL;
+       int i;
+
+       if (config[8] != '=')
+               help(), exit(1);
+
+       for (i = 0; i < vcpu_configs_n; ++i) {
+               c = vcpu_configs[i];
+               if (strcmp(config_name(c), &config[9]) == 0)
+                       break;
+       }
+
+       if (i == vcpu_configs_n)
+               help(), exit(1);
+
+       return c;
+}
+
+int main(int ac, char **av)
+{
+       struct vcpu_reg_list *c, *sel = NULL;
+       int i, ret = 0;
+       pid_t pid;
+
+       for (i = 1; i < ac; ++i) {
+               if (strncmp(av[i], "--config", 8) == 0)
+                       sel = parse_config(av[i]);
+               else if (strcmp(av[i], "--list") == 0)
+                       print_list = true;
+               else if (strcmp(av[i], "--list-filtered") == 0)
+                       print_filtered = true;
+               else if (strcmp(av[i], "--help") == 0 || strcmp(av[i], "-h") == 0)
+                       help(), exit(0);
+               else
+                       help(), exit(1);
+       }
+
+       if (print_list || print_filtered) {
+               /*
+                * We only want to print the register list of a single config.
+                */
+               if (!sel)
+                       help(), exit(1);
+       }
+
+       for (i = 0; i < vcpu_configs_n; ++i) {
+               c = vcpu_configs[i];
+               if (sel && c != sel)
+                       continue;
+
+               pid = fork();
+
+               if (!pid) {
+                       run_test(c);
+                       exit(0);
+               } else {
+                       int wstatus;
+                       pid_t wpid = wait(&wstatus);
+                       TEST_ASSERT(wpid == pid && WIFEXITED(wstatus), "wait: Unexpected return");
+                       if (WEXITSTATUS(wstatus) && WEXITSTATUS(wstatus) != KSFT_SKIP)
+                               ret = KSFT_FAIL;
+               }
+       }
+
+       return ret;
+}
index eb1ff59..a18db6a 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/kernel.h>
 #include <linux/kvm.h>
 #include "linux/rbtree.h"
+#include <linux/types.h>
 
 #include <asm/atomic.h>
 
@@ -124,6 +125,26 @@ struct kvm_vm {
        uint32_t memslots[NR_MEM_REGIONS];
 };
 
+struct vcpu_reg_sublist {
+       const char *name;
+       long capability;
+       int feature;
+       bool finalize;
+       __u64 *regs;
+       __u64 regs_n;
+       __u64 *rejects_set;
+       __u64 rejects_set_n;
+       __u64 *skips_set;
+       __u64 skips_set_n;
+};
+
+struct vcpu_reg_list {
+       char *name;
+       struct vcpu_reg_sublist sublists[];
+};
+
+#define for_each_sublist(c, s)         \
+       for ((s) = &(c)->sublists[0]; (s)->regs; ++(s))
 
 #define kvm_for_each_vcpu(vm, i, vcpu)                 \
        for ((i) = 0; (i) <= (vm)->last_vcpu_id; (i)++) \
index d00d213..5b62a3d 100644 (file)
@@ -38,6 +38,9 @@ static inline uint64_t __kvm_reg_id(uint64_t type, uint64_t idx,
                                             KVM_REG_RISCV_TIMER_REG(name), \
                                             KVM_REG_SIZE_U64)
 
+#define RISCV_ISA_EXT_REG(idx) __kvm_reg_id(KVM_REG_RISCV_ISA_EXT, \
+                                            idx, KVM_REG_SIZE_ULONG)
+
 /* L3 index Bit[47:39] */
 #define PGTBL_L3_INDEX_MASK                    0x0000FF8000000000ULL
 #define PGTBL_L3_INDEX_SHIFT                   39
index 7a5907d..7e614ad 100644 (file)
@@ -188,4 +188,6 @@ static inline uint32_t atoi_non_negative(const char *name, const char *num_str)
 int guest_vsnprintf(char *buf, int n, const char *fmt, va_list args);
 int guest_snprintf(char *buf, int n, const char *fmt, ...);
 
+char *strdup_printf(const char *fmt, ...) __attribute__((format(printf, 1, 2), nonnull(1)));
+
 #endif /* SELFTEST_KVM_TEST_UTIL_H */
index b772193..3e36019 100644 (file)
@@ -5,6 +5,9 @@
  * Copyright (C) 2020, Google LLC.
  */
 
+#define _GNU_SOURCE
+#include <stdio.h>
+#include <stdarg.h>
 #include <assert.h>
 #include <ctype.h>
 #include <limits.h>
@@ -377,3 +380,15 @@ int atoi_paranoid(const char *num_str)
 
        return num;
 }
+
/*
 * Like asprintf(): format into a freshly malloc()ed string.
 * The caller owns the returned buffer and must free() it.
 * Returns NULL if formatting/allocation fails.
 */
char *strdup_printf(const char *fmt, ...)
{
	va_list ap;
	char *str;
	int rc;

	va_start(ap, fmt);
	rc = vasprintf(&str, fmt, ap);
	va_end(ap);

	/*
	 * On failure vasprintf() leaves 'str' indeterminate; returning it
	 * unchecked would hand the caller an uninitialized pointer (UB).
	 * Return NULL instead so failure is at least well-defined.
	 */
	if (rc < 0)
		return NULL;

	return str;
}
diff --git a/tools/testing/selftests/kvm/riscv/get-reg-list.c b/tools/testing/selftests/kvm/riscv/get-reg-list.c
new file mode 100644 (file)
index 0000000..d8ecacd
--- /dev/null
@@ -0,0 +1,872 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Check for KVM_GET_REG_LIST regressions.
+ *
+ * Copyright (c) 2023 Intel Corporation
+ *
+ */
+#include <stdio.h>
+#include "kvm_util.h"
+#include "test_util.h"
+#include "processor.h"
+
+#define REG_MASK (KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK)
+
+bool filter_reg(__u64 reg)
+{
+       /*
+        * Some ISA extensions are optional and not present on all host,
+        * but they can't be disabled through ISA_EXT registers when present.
+        * So, to make life easy, just filtering out these kind of registers.
+        */
+       switch (reg & ~REG_MASK) {
+       case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SSTC:
+       case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SVINVAL:
+       case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZIHINTPAUSE:
+       case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZBB:
+       case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SSAIA:
+       case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZBA:
+       case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZBS:
+       case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZICNTR:
+       case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZICSR:
+       case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZIFENCEI:
+       case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZIHPM:
+               return true;
+       default:
+               break;
+       }
+
+       return false;
+}
+
/*
 * On RISC-V, attempts to set a register in a rejects_set list are
 * expected to fail with exactly EINVAL.
 */
bool check_reject_set(int err)
{
	if (err != EINVAL)
		return false;

	return true;
}
+
/*
 * Return true iff ISA extension 'ext' is currently enabled for 'vcpu',
 * determined by reading the corresponding ONE_REG ISA_EXT register.
 * Returns false (with a diagnostic) if the register read itself fails.
 */
static inline bool vcpu_has_ext(struct kvm_vcpu *vcpu, int ext)
{
	int ret;
	unsigned long value;

	ret = __vcpu_get_reg(vcpu, RISCV_ISA_EXT_REG(ext), &value);
	if (ret) {
		/* Fixed: the message was missing its newline terminator. */
		printf("Failed to get ext %d\n", ext);
		return false;
	}

	return !!value;
}
+
+void finalize_vcpu(struct kvm_vcpu *vcpu, struct vcpu_reg_list *c)
+{
+       struct vcpu_reg_sublist *s;
+
+       /*
+        * Disable all extensions which were enabled by default
+        * if they were available in the risc-v host.
+        */
+       for (int i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++)
+               __vcpu_set_reg(vcpu, RISCV_ISA_EXT_REG(i), 0);
+
+       for_each_sublist(c, s) {
+               if (!s->feature)
+                       continue;
+
+               /* Try to enable the desired extension */
+               __vcpu_set_reg(vcpu, RISCV_ISA_EXT_REG(s->feature), 1);
+
+               /* Double check whether the desired extension was enabled */
+               __TEST_REQUIRE(vcpu_has_ext(vcpu, s->feature),
+                              "%s not available, skipping tests\n", s->name);
+       }
+}
+
+static const char *config_id_to_str(__u64 id)
+{
+       /* reg_off is the offset into struct kvm_riscv_config */
+       __u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_CONFIG);
+
+       switch (reg_off) {
+       case KVM_REG_RISCV_CONFIG_REG(isa):
+               return "KVM_REG_RISCV_CONFIG_REG(isa)";
+       case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size):
+               return "KVM_REG_RISCV_CONFIG_REG(zicbom_block_size)";
+       case KVM_REG_RISCV_CONFIG_REG(zicboz_block_size):
+               return "KVM_REG_RISCV_CONFIG_REG(zicboz_block_size)";
+       case KVM_REG_RISCV_CONFIG_REG(mvendorid):
+               return "KVM_REG_RISCV_CONFIG_REG(mvendorid)";
+       case KVM_REG_RISCV_CONFIG_REG(marchid):
+               return "KVM_REG_RISCV_CONFIG_REG(marchid)";
+       case KVM_REG_RISCV_CONFIG_REG(mimpid):
+               return "KVM_REG_RISCV_CONFIG_REG(mimpid)";
+       case KVM_REG_RISCV_CONFIG_REG(satp_mode):
+               return "KVM_REG_RISCV_CONFIG_REG(satp_mode)";
+       }
+
+       /*
+        * Config regs would grow regularly with new pseudo reg added, so
+        * just show raw id to indicate a new pseudo config reg.
+        */
+       return strdup_printf("KVM_REG_RISCV_CONFIG_REG(%lld) /* UNKNOWN */", reg_off);
+}
+
+static const char *core_id_to_str(const char *prefix, __u64 id)
+{
+       /* reg_off is the offset into struct kvm_riscv_core */
+       __u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_CORE);
+
+       switch (reg_off) {
+       case KVM_REG_RISCV_CORE_REG(regs.pc):
+               return "KVM_REG_RISCV_CORE_REG(regs.pc)";
+       case KVM_REG_RISCV_CORE_REG(regs.ra):
+               return "KVM_REG_RISCV_CORE_REG(regs.ra)";
+       case KVM_REG_RISCV_CORE_REG(regs.sp):
+               return "KVM_REG_RISCV_CORE_REG(regs.sp)";
+       case KVM_REG_RISCV_CORE_REG(regs.gp):
+               return "KVM_REG_RISCV_CORE_REG(regs.gp)";
+       case KVM_REG_RISCV_CORE_REG(regs.tp):
+               return "KVM_REG_RISCV_CORE_REG(regs.tp)";
+       case KVM_REG_RISCV_CORE_REG(regs.t0) ... KVM_REG_RISCV_CORE_REG(regs.t2):
+               return strdup_printf("KVM_REG_RISCV_CORE_REG(regs.t%lld)",
+                          reg_off - KVM_REG_RISCV_CORE_REG(regs.t0));
+       case KVM_REG_RISCV_CORE_REG(regs.s0) ... KVM_REG_RISCV_CORE_REG(regs.s1):
+               return strdup_printf("KVM_REG_RISCV_CORE_REG(regs.s%lld)",
+                          reg_off - KVM_REG_RISCV_CORE_REG(regs.s0));
+       case KVM_REG_RISCV_CORE_REG(regs.a0) ... KVM_REG_RISCV_CORE_REG(regs.a7):
+               return strdup_printf("KVM_REG_RISCV_CORE_REG(regs.a%lld)",
+                          reg_off - KVM_REG_RISCV_CORE_REG(regs.a0));
+       case KVM_REG_RISCV_CORE_REG(regs.s2) ... KVM_REG_RISCV_CORE_REG(regs.s11):
+               return strdup_printf("KVM_REG_RISCV_CORE_REG(regs.s%lld)",
+                          reg_off - KVM_REG_RISCV_CORE_REG(regs.s2) + 2);
+       case KVM_REG_RISCV_CORE_REG(regs.t3) ... KVM_REG_RISCV_CORE_REG(regs.t6):
+               return strdup_printf("KVM_REG_RISCV_CORE_REG(regs.t%lld)",
+                          reg_off - KVM_REG_RISCV_CORE_REG(regs.t3) + 3);
+       case KVM_REG_RISCV_CORE_REG(mode):
+               return "KVM_REG_RISCV_CORE_REG(mode)";
+       }
+
+       TEST_FAIL("%s: Unknown core reg id: 0x%llx", prefix, id);
+       return NULL;
+}
+
+#define RISCV_CSR_GENERAL(csr) \
+       "KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(" #csr ")"
+#define RISCV_CSR_AIA(csr) \
+       "KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_REG(" #csr ")"
+
+static const char *general_csr_id_to_str(__u64 reg_off)
+{
+       /* reg_off is the offset into struct kvm_riscv_csr */
+       switch (reg_off) {
+       case KVM_REG_RISCV_CSR_REG(sstatus):
+               return RISCV_CSR_GENERAL(sstatus);
+       case KVM_REG_RISCV_CSR_REG(sie):
+               return RISCV_CSR_GENERAL(sie);
+       case KVM_REG_RISCV_CSR_REG(stvec):
+               return RISCV_CSR_GENERAL(stvec);
+       case KVM_REG_RISCV_CSR_REG(sscratch):
+               return RISCV_CSR_GENERAL(sscratch);
+       case KVM_REG_RISCV_CSR_REG(sepc):
+               return RISCV_CSR_GENERAL(sepc);
+       case KVM_REG_RISCV_CSR_REG(scause):
+               return RISCV_CSR_GENERAL(scause);
+       case KVM_REG_RISCV_CSR_REG(stval):
+               return RISCV_CSR_GENERAL(stval);
+       case KVM_REG_RISCV_CSR_REG(sip):
+               return RISCV_CSR_GENERAL(sip);
+       case KVM_REG_RISCV_CSR_REG(satp):
+               return RISCV_CSR_GENERAL(satp);
+       case KVM_REG_RISCV_CSR_REG(scounteren):
+               return RISCV_CSR_GENERAL(scounteren);
+       }
+
+       TEST_FAIL("Unknown general csr reg: 0x%llx", reg_off);
+       return NULL;
+}
+
+static const char *aia_csr_id_to_str(__u64 reg_off)
+{
+       /* reg_off is the offset into struct kvm_riscv_aia_csr */
+       switch (reg_off) {
+       case KVM_REG_RISCV_CSR_AIA_REG(siselect):
+               return RISCV_CSR_AIA(siselect);
+       case KVM_REG_RISCV_CSR_AIA_REG(iprio1):
+               return RISCV_CSR_AIA(iprio1);
+       case KVM_REG_RISCV_CSR_AIA_REG(iprio2):
+               return RISCV_CSR_AIA(iprio2);
+       case KVM_REG_RISCV_CSR_AIA_REG(sieh):
+               return RISCV_CSR_AIA(sieh);
+       case KVM_REG_RISCV_CSR_AIA_REG(siph):
+               return RISCV_CSR_AIA(siph);
+       case KVM_REG_RISCV_CSR_AIA_REG(iprio1h):
+               return RISCV_CSR_AIA(iprio1h);
+       case KVM_REG_RISCV_CSR_AIA_REG(iprio2h):
+               return RISCV_CSR_AIA(iprio2h);
+       }
+
+       TEST_FAIL("Unknown aia csr reg: 0x%llx", reg_off);
+       return NULL;
+}
+
+static const char *csr_id_to_str(const char *prefix, __u64 id)
+{
+       __u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_CSR);
+       __u64 reg_subtype = reg_off & KVM_REG_RISCV_SUBTYPE_MASK;
+
+       reg_off &= ~KVM_REG_RISCV_SUBTYPE_MASK;
+
+       switch (reg_subtype) {
+       case KVM_REG_RISCV_CSR_GENERAL:
+               return general_csr_id_to_str(reg_off);
+       case KVM_REG_RISCV_CSR_AIA:
+               return aia_csr_id_to_str(reg_off);
+       }
+
+       TEST_FAIL("%s: Unknown csr subtype: 0x%llx", prefix, reg_subtype);
+       return NULL;
+}
+
+static const char *timer_id_to_str(const char *prefix, __u64 id)
+{
+       /* reg_off is the offset into struct kvm_riscv_timer */
+       __u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_TIMER);
+
+       switch (reg_off) {
+       case KVM_REG_RISCV_TIMER_REG(frequency):
+               return "KVM_REG_RISCV_TIMER_REG(frequency)";
+       case KVM_REG_RISCV_TIMER_REG(time):
+               return "KVM_REG_RISCV_TIMER_REG(time)";
+       case KVM_REG_RISCV_TIMER_REG(compare):
+               return "KVM_REG_RISCV_TIMER_REG(compare)";
+       case KVM_REG_RISCV_TIMER_REG(state):
+               return "KVM_REG_RISCV_TIMER_REG(state)";
+       }
+
+       TEST_FAIL("%s: Unknown timer reg id: 0x%llx", prefix, id);
+       return NULL;
+}
+
+static const char *fp_f_id_to_str(const char *prefix, __u64 id)
+{
+       /* reg_off is the offset into struct __riscv_f_ext_state */
+       __u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_FP_F);
+
+       switch (reg_off) {
+       case KVM_REG_RISCV_FP_F_REG(f[0]) ...
+            KVM_REG_RISCV_FP_F_REG(f[31]):
+               return strdup_printf("KVM_REG_RISCV_FP_F_REG(f[%lld])", reg_off);
+       case KVM_REG_RISCV_FP_F_REG(fcsr):
+               return "KVM_REG_RISCV_FP_F_REG(fcsr)";
+       }
+
+       TEST_FAIL("%s: Unknown fp_f reg id: 0x%llx", prefix, id);
+       return NULL;
+}
+
+static const char *fp_d_id_to_str(const char *prefix, __u64 id)
+{
+       /* reg_off is the offset into struct __riscv_d_ext_state */
+       __u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_FP_D);
+
+       switch (reg_off) {
+       case KVM_REG_RISCV_FP_D_REG(f[0]) ...
+            KVM_REG_RISCV_FP_D_REG(f[31]):
+               return strdup_printf("KVM_REG_RISCV_FP_D_REG(f[%lld])", reg_off);
+       case KVM_REG_RISCV_FP_D_REG(fcsr):
+               return "KVM_REG_RISCV_FP_D_REG(fcsr)";
+       }
+
+       TEST_FAIL("%s: Unknown fp_d reg id: 0x%llx", prefix, id);
+       return NULL;
+}
+
+static const char *isa_ext_id_to_str(__u64 id)
+{
+       /* reg_off is the offset into unsigned long kvm_isa_ext_arr[] */
+       __u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_ISA_EXT);
+
+       static const char * const kvm_isa_ext_reg_name[] = {
+               "KVM_RISCV_ISA_EXT_A",
+               "KVM_RISCV_ISA_EXT_C",
+               "KVM_RISCV_ISA_EXT_D",
+               "KVM_RISCV_ISA_EXT_F",
+               "KVM_RISCV_ISA_EXT_H",
+               "KVM_RISCV_ISA_EXT_I",
+               "KVM_RISCV_ISA_EXT_M",
+               "KVM_RISCV_ISA_EXT_SVPBMT",
+               "KVM_RISCV_ISA_EXT_SSTC",
+               "KVM_RISCV_ISA_EXT_SVINVAL",
+               "KVM_RISCV_ISA_EXT_ZIHINTPAUSE",
+               "KVM_RISCV_ISA_EXT_ZICBOM",
+               "KVM_RISCV_ISA_EXT_ZICBOZ",
+               "KVM_RISCV_ISA_EXT_ZBB",
+               "KVM_RISCV_ISA_EXT_SSAIA",
+               "KVM_RISCV_ISA_EXT_V",
+               "KVM_RISCV_ISA_EXT_SVNAPOT",
+               "KVM_RISCV_ISA_EXT_ZBA",
+               "KVM_RISCV_ISA_EXT_ZBS",
+               "KVM_RISCV_ISA_EXT_ZICNTR",
+               "KVM_RISCV_ISA_EXT_ZICSR",
+               "KVM_RISCV_ISA_EXT_ZIFENCEI",
+               "KVM_RISCV_ISA_EXT_ZIHPM",
+       };
+
+       if (reg_off >= ARRAY_SIZE(kvm_isa_ext_reg_name)) {
+               /*
+                * isa_ext regs would grow regularly with new isa extension added, so
+                * just show "reg" to indicate a new extension.
+                */
+               return strdup_printf("%lld /* UNKNOWN */", reg_off);
+       }
+
+       return kvm_isa_ext_reg_name[reg_off];
+}
+
+static const char *sbi_ext_single_id_to_str(__u64 reg_off)
+{
+       /* reg_off is KVM_RISCV_SBI_EXT_ID */
+       static const char * const kvm_sbi_ext_reg_name[] = {
+               "KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_V01",
+               "KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_TIME",
+               "KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_IPI",
+               "KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_RFENCE",
+               "KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_SRST",
+               "KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_HSM",
+               "KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_PMU",
+               "KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_EXPERIMENTAL",
+               "KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_VENDOR",
+       };
+
+       if (reg_off >= ARRAY_SIZE(kvm_sbi_ext_reg_name)) {
+               /*
+                * sbi_ext regs would grow regularly with new sbi extension added, so
+                * just show "reg" to indicate a new extension.
+                */
+               return strdup_printf("KVM_REG_RISCV_SBI_SINGLE | %lld /* UNKNOWN */", reg_off);
+       }
+
+       return kvm_sbi_ext_reg_name[reg_off];
+}
+
+static const char *sbi_ext_multi_id_to_str(__u64 reg_subtype, __u64 reg_off)
+{
+       if (reg_off > KVM_REG_RISCV_SBI_MULTI_REG_LAST) {
+               /*
+                * sbi_ext regs would grow regularly with new sbi extension added, so
+                * just show "reg" to indicate a new extension.
+                */
+               return strdup_printf("%lld /* UNKNOWN */", reg_off);
+       }
+
+       switch (reg_subtype) {
+       case KVM_REG_RISCV_SBI_MULTI_EN:
+               return strdup_printf("KVM_REG_RISCV_SBI_MULTI_EN | %lld", reg_off);
+       case KVM_REG_RISCV_SBI_MULTI_DIS:
+               return strdup_printf("KVM_REG_RISCV_SBI_MULTI_DIS | %lld", reg_off);
+       }
+
+       return NULL;
+}
+
+static const char *sbi_ext_id_to_str(const char *prefix, __u64 id)
+{
+       __u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_SBI_EXT);
+       __u64 reg_subtype = reg_off & KVM_REG_RISCV_SUBTYPE_MASK;
+
+       reg_off &= ~KVM_REG_RISCV_SUBTYPE_MASK;
+
+       switch (reg_subtype) {
+       case KVM_REG_RISCV_SBI_SINGLE:
+               return sbi_ext_single_id_to_str(reg_off);
+       case KVM_REG_RISCV_SBI_MULTI_EN:
+       case KVM_REG_RISCV_SBI_MULTI_DIS:
+               return sbi_ext_multi_id_to_str(reg_subtype, reg_off);
+       }
+
+       TEST_FAIL("%s: Unknown sbi ext subtype: 0x%llx", prefix, reg_subtype);
+       return NULL;
+}
+
+void print_reg(const char *prefix, __u64 id)
+{
+       const char *reg_size = NULL;
+
+       TEST_ASSERT((id & KVM_REG_ARCH_MASK) == KVM_REG_RISCV,
+                   "%s: KVM_REG_RISCV missing in reg id: 0x%llx", prefix, id);
+
+       switch (id & KVM_REG_SIZE_MASK) {
+       case KVM_REG_SIZE_U32:
+               reg_size = "KVM_REG_SIZE_U32";
+               break;
+       case KVM_REG_SIZE_U64:
+               reg_size = "KVM_REG_SIZE_U64";
+               break;
+       case KVM_REG_SIZE_U128:
+               reg_size = "KVM_REG_SIZE_U128";
+               break;
+       default:
+               TEST_FAIL("%s: Unexpected reg size: 0x%llx in reg id: 0x%llx",
+                         prefix, (id & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT, id);
+       }
+
+       switch (id & KVM_REG_RISCV_TYPE_MASK) {
+       case KVM_REG_RISCV_CONFIG:
+               printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_CONFIG | %s,\n",
+                               reg_size, config_id_to_str(id));
+               break;
+       case KVM_REG_RISCV_CORE:
+               printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_CORE | %s,\n",
+                               reg_size, core_id_to_str(prefix, id));
+               break;
+       case KVM_REG_RISCV_CSR:
+               printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_CSR | %s,\n",
+                               reg_size, csr_id_to_str(prefix, id));
+               break;
+       case KVM_REG_RISCV_TIMER:
+               printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_TIMER | %s,\n",
+                               reg_size, timer_id_to_str(prefix, id));
+               break;
+       case KVM_REG_RISCV_FP_F:
+               printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_FP_F | %s,\n",
+                               reg_size, fp_f_id_to_str(prefix, id));
+               break;
+       case KVM_REG_RISCV_FP_D:
+               printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_FP_D | %s,\n",
+                               reg_size, fp_d_id_to_str(prefix, id));
+               break;
+       case KVM_REG_RISCV_ISA_EXT:
+               printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_ISA_EXT | %s,\n",
+                               reg_size, isa_ext_id_to_str(id));
+               break;
+       case KVM_REG_RISCV_SBI_EXT:
+               printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_SBI_EXT | %s,\n",
+                               reg_size, sbi_ext_id_to_str(prefix, id));
+               break;
+       default:
+               TEST_FAIL("%s: Unexpected reg type: 0x%llx in reg id: 0x%llx", prefix,
+                               (id & KVM_REG_RISCV_TYPE_MASK) >> KVM_REG_RISCV_TYPE_SHIFT, id);
+       }
+}
+
+/*
+ * The current blessed list was primed with the output of kernel version
+ * v6.5-rc3 and then later updated with new registers.
+ */
+static __u64 base_regs[] = {
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(isa),
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(mvendorid),
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(marchid),
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(mimpid),
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(satp_mode),
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.pc),
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.ra),
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.sp),
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.gp),
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.tp),
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.t0),
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.t1),
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.t2),
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s0),
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s1),
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a0),
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a1),
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a2),
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a3),
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a4),
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a5),
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a6),
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a7),
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s2),
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s3),
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s4),
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s5),
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s6),
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s7),
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s8),
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s9),
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s10),
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s11),
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.t3),
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.t4),
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.t5),
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.t6),
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(mode),
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(sstatus),
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(sie),
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(stvec),
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(sscratch),
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(sepc),
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(scause),
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(stval),
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(sip),
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(satp),
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(scounteren),
+       KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER | KVM_REG_RISCV_TIMER_REG(frequency),
+       KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER | KVM_REG_RISCV_TIMER_REG(time),
+       KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER | KVM_REG_RISCV_TIMER_REG(compare),
+       KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER | KVM_REG_RISCV_TIMER_REG(state),
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_A,
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_C,
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_I,
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_M,
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_V01,
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_TIME,
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_IPI,
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_RFENCE,
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_SRST,
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_HSM,
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_PMU,
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_EXPERIMENTAL,
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_VENDOR,
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_MULTI_EN | 0,
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_MULTI_DIS | 0,
+};
+
+/*
+ * The skips_set list registers that should skip set test.
+ *  - KVM_REG_RISCV_TIMER_REG(state): set would fail if it was not initialized properly.
+ */
+static __u64 base_skips_set[] = {
+       KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER | KVM_REG_RISCV_TIMER_REG(state),
+};
+
+static __u64 h_regs[] = {
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_H,
+};
+
+static __u64 zicbom_regs[] = {
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(zicbom_block_size),
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZICBOM,
+};
+
+static __u64 zicboz_regs[] = {
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(zicboz_block_size),
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZICBOZ,
+};
+
+static __u64 svpbmt_regs[] = {
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SVPBMT,
+};
+
+static __u64 sstc_regs[] = {
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SSTC,
+};
+
+static __u64 svinval_regs[] = {
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SVINVAL,
+};
+
+static __u64 zihintpause_regs[] = {
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZIHINTPAUSE,
+};
+
+static __u64 zba_regs[] = {
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZBA,
+};
+
+static __u64 zbb_regs[] = {
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZBB,
+};
+
+static __u64 zbs_regs[] = {
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZBS,
+};
+
+static __u64 zicntr_regs[] = {
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZICNTR,
+};
+
+static __u64 zicsr_regs[] = {
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZICSR,
+};
+
+static __u64 zifencei_regs[] = {
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZIFENCEI,
+};
+
+static __u64 zihpm_regs[] = {
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZIHPM,
+};
+
+static __u64 aia_regs[] = {
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(siselect),
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio1),
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio2),
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(sieh),
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(siph),
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio1h),
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio2h),
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SSAIA,
+};
+
+static __u64 fp_f_regs[] = {
+       KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[0]),
+       KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[1]),
+       KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[2]),
+       KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[3]),
+       KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[4]),
+       KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[5]),
+       KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[6]),
+       KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[7]),
+       KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[8]),
+       KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[9]),
+       KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[10]),
+       KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[11]),
+       KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[12]),
+       KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[13]),
+       KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[14]),
+       KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[15]),
+       KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[16]),
+       KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[17]),
+       KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[18]),
+       KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[19]),
+       KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[20]),
+       KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[21]),
+       KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[22]),
+       KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[23]),
+       KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[24]),
+       KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[25]),
+       KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[26]),
+       KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[27]),
+       KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[28]),
+       KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[29]),
+       KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[30]),
+       KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[31]),
+       KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(fcsr),
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_F,
+};
+
+static __u64 fp_d_regs[] = {
+       KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[0]),
+       KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[1]),
+       KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[2]),
+       KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[3]),
+       KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[4]),
+       KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[5]),
+       KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[6]),
+       KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[7]),
+       KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[8]),
+       KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[9]),
+       KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[10]),
+       KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[11]),
+       KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[12]),
+       KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[13]),
+       KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[14]),
+       KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[15]),
+       KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[16]),
+       KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[17]),
+       KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[18]),
+       KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[19]),
+       KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[20]),
+       KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[21]),
+       KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[22]),
+       KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[23]),
+       KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[24]),
+       KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[25]),
+       KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[26]),
+       KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[27]),
+       KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[28]),
+       KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[29]),
+       KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[30]),
+       KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[31]),
+       KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(fcsr),
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_D,
+};
+
+/*
+ * Sublist initializer macros. Each one names a group of expected
+ * registers and (except for the base sublist) the ISA extension that
+ * must be present for the group to appear; the base sublist also lists
+ * registers whose "set" operation is expected to be skipped.
+ */
+#define BASE_SUBLIST \
+       {"base", .regs = base_regs, .regs_n = ARRAY_SIZE(base_regs), \
+        .skips_set = base_skips_set, .skips_set_n = ARRAY_SIZE(base_skips_set),}
+#define H_REGS_SUBLIST \
+       {"h", .feature = KVM_RISCV_ISA_EXT_H, .regs = h_regs, .regs_n = ARRAY_SIZE(h_regs),}
+#define ZICBOM_REGS_SUBLIST \
+       {"zicbom", .feature = KVM_RISCV_ISA_EXT_ZICBOM, .regs = zicbom_regs, .regs_n = ARRAY_SIZE(zicbom_regs),}
+#define ZICBOZ_REGS_SUBLIST \
+       {"zicboz", .feature = KVM_RISCV_ISA_EXT_ZICBOZ, .regs = zicboz_regs, .regs_n = ARRAY_SIZE(zicboz_regs),}
+#define SVPBMT_REGS_SUBLIST \
+       {"svpbmt", .feature = KVM_RISCV_ISA_EXT_SVPBMT, .regs = svpbmt_regs, .regs_n = ARRAY_SIZE(svpbmt_regs),}
+#define SSTC_REGS_SUBLIST \
+       {"sstc", .feature = KVM_RISCV_ISA_EXT_SSTC, .regs = sstc_regs, .regs_n = ARRAY_SIZE(sstc_regs),}
+#define SVINVAL_REGS_SUBLIST \
+       {"svinval", .feature = KVM_RISCV_ISA_EXT_SVINVAL, .regs = svinval_regs, .regs_n = ARRAY_SIZE(svinval_regs),}
+#define ZIHINTPAUSE_REGS_SUBLIST \
+       {"zihintpause", .feature = KVM_RISCV_ISA_EXT_ZIHINTPAUSE, .regs = zihintpause_regs, .regs_n = ARRAY_SIZE(zihintpause_regs),}
+#define ZBA_REGS_SUBLIST \
+       {"zba", .feature = KVM_RISCV_ISA_EXT_ZBA, .regs = zba_regs, .regs_n = ARRAY_SIZE(zba_regs),}
+#define ZBB_REGS_SUBLIST \
+       {"zbb", .feature = KVM_RISCV_ISA_EXT_ZBB, .regs = zbb_regs, .regs_n = ARRAY_SIZE(zbb_regs),}
+#define ZBS_REGS_SUBLIST \
+       {"zbs", .feature = KVM_RISCV_ISA_EXT_ZBS, .regs = zbs_regs, .regs_n = ARRAY_SIZE(zbs_regs),}
+#define ZICNTR_REGS_SUBLIST \
+       {"zicntr", .feature = KVM_RISCV_ISA_EXT_ZICNTR, .regs = zicntr_regs, .regs_n = ARRAY_SIZE(zicntr_regs),}
+#define ZICSR_REGS_SUBLIST \
+       {"zicsr", .feature = KVM_RISCV_ISA_EXT_ZICSR, .regs = zicsr_regs, .regs_n = ARRAY_SIZE(zicsr_regs),}
+#define ZIFENCEI_REGS_SUBLIST \
+       {"zifencei", .feature = KVM_RISCV_ISA_EXT_ZIFENCEI, .regs = zifencei_regs, .regs_n = ARRAY_SIZE(zifencei_regs),}
+#define ZIHPM_REGS_SUBLIST \
+       {"zihpm", .feature = KVM_RISCV_ISA_EXT_ZIHPM, .regs = zihpm_regs, .regs_n = ARRAY_SIZE(zihpm_regs),}
+#define AIA_REGS_SUBLIST \
+       {"aia", .feature = KVM_RISCV_ISA_EXT_SSAIA, .regs = aia_regs, .regs_n = ARRAY_SIZE(aia_regs),}
+#define FP_F_REGS_SUBLIST \
+       {"fp_f", .feature = KVM_RISCV_ISA_EXT_F, .regs = fp_f_regs, \
+               .regs_n = ARRAY_SIZE(fp_f_regs),}
+#define FP_D_REGS_SUBLIST \
+       {"fp_d", .feature = KVM_RISCV_ISA_EXT_D, .regs = fp_d_regs, \
+               .regs_n = ARRAY_SIZE(fp_d_regs),}
+
+/*
+ * One vcpu_reg_list per tested extension: each config pairs the
+ * always-present base sublist with a single extension-specific sublist,
+ * terminated by an empty {0} entry.
+ */
+static struct vcpu_reg_list h_config = {
+       .sublists = {
+       BASE_SUBLIST,
+       H_REGS_SUBLIST,
+       {0},
+       },
+};
+
+static struct vcpu_reg_list zicbom_config = {
+       .sublists = {
+       BASE_SUBLIST,
+       ZICBOM_REGS_SUBLIST,
+       {0},
+       },
+};
+
+static struct vcpu_reg_list zicboz_config = {
+       .sublists = {
+       BASE_SUBLIST,
+       ZICBOZ_REGS_SUBLIST,
+       {0},
+       },
+};
+
+static struct vcpu_reg_list svpbmt_config = {
+       .sublists = {
+       BASE_SUBLIST,
+       SVPBMT_REGS_SUBLIST,
+       {0},
+       },
+};
+
+static struct vcpu_reg_list sstc_config = {
+       .sublists = {
+       BASE_SUBLIST,
+       SSTC_REGS_SUBLIST,
+       {0},
+       },
+};
+
+static struct vcpu_reg_list svinval_config = {
+       .sublists = {
+       BASE_SUBLIST,
+       SVINVAL_REGS_SUBLIST,
+       {0},
+       },
+};
+
+static struct vcpu_reg_list zihintpause_config = {
+       .sublists = {
+       BASE_SUBLIST,
+       ZIHINTPAUSE_REGS_SUBLIST,
+       {0},
+       },
+};
+
+static struct vcpu_reg_list zba_config = {
+       .sublists = {
+       BASE_SUBLIST,
+       ZBA_REGS_SUBLIST,
+       {0},
+       },
+};
+
+static struct vcpu_reg_list zbb_config = {
+       .sublists = {
+       BASE_SUBLIST,
+       ZBB_REGS_SUBLIST,
+       {0},
+       },
+};
+
+static struct vcpu_reg_list zbs_config = {
+       .sublists = {
+       BASE_SUBLIST,
+       ZBS_REGS_SUBLIST,
+       {0},
+       },
+};
+
+static struct vcpu_reg_list zicntr_config = {
+       .sublists = {
+       BASE_SUBLIST,
+       ZICNTR_REGS_SUBLIST,
+       {0},
+       },
+};
+
+static struct vcpu_reg_list zicsr_config = {
+       .sublists = {
+       BASE_SUBLIST,
+       ZICSR_REGS_SUBLIST,
+       {0},
+       },
+};
+
+static struct vcpu_reg_list zifencei_config = {
+       .sublists = {
+       BASE_SUBLIST,
+       ZIFENCEI_REGS_SUBLIST,
+       {0},
+       },
+};
+
+static struct vcpu_reg_list zihpm_config = {
+       .sublists = {
+       BASE_SUBLIST,
+       ZIHPM_REGS_SUBLIST,
+       {0},
+       },
+};
+
+static struct vcpu_reg_list aia_config = {
+       .sublists = {
+       BASE_SUBLIST,
+       AIA_REGS_SUBLIST,
+       {0},
+       },
+};
+
+static struct vcpu_reg_list fp_f_config = {
+       .sublists = {
+       BASE_SUBLIST,
+       FP_F_REGS_SUBLIST,
+       {0},
+       },
+};
+
+static struct vcpu_reg_list fp_d_config = {
+       .sublists = {
+       BASE_SUBLIST,
+       FP_D_REGS_SUBLIST,
+       {0},
+       },
+};
+
+/*
+ * Table of every config the common get-reg-list harness iterates over,
+ * and its element count; both names are referenced by the shared
+ * get-reg-list test code (non-static on purpose).
+ */
+struct vcpu_reg_list *vcpu_configs[] = {
+       &h_config,
+       &zicbom_config,
+       &zicboz_config,
+       &svpbmt_config,
+       &sstc_config,
+       &svinval_config,
+       &zihintpause_config,
+       &zba_config,
+       &zbb_config,
+       &zbs_config,
+       &zicntr_config,
+       &zicsr_config,
+       &zifencei_config,
+       &zihpm_config,
+       &aia_config,
+       &fp_f_config,
+       &fp_d_config,
+};
+int vcpu_configs_n = ARRAY_SIZE(vcpu_configs);