RISC-V: KVM: Add SBI HSM extension in KVM
authorAtish Patra <atish.patra@wdc.com>
Thu, 18 Nov 2021 08:39:12 +0000 (00:39 -0800)
committerAnup Patel <anup@brainfault.org>
Thu, 6 Jan 2022 09:42:47 +0000 (15:12 +0530)
SBI HSM extension allows OS to start/stop harts any time. It also allows
ordered booting of harts instead of random booting.

Implement the SBI HSM extension and designate vcpu 0 as the boot vcpu.
All other non-zero non-booting vcpus should be brought up by the OS
implementing HSM extension. If the guest OS doesn't implement HSM
extension, only single vcpu will be available to OS.

Signed-off-by: Atish Patra <atish.patra@wdc.com>
Signed-off-by: Atish Patra <atishp@rivosinc.com>
Signed-off-by: Anup Patel <anup.patel@wdc.com>
arch/riscv/include/asm/sbi.h
arch/riscv/kvm/Makefile
arch/riscv/kvm/vcpu.c
arch/riscv/kvm/vcpu_sbi.c
arch/riscv/kvm/vcpu_sbi_hsm.c [new file with mode: 0644]

index 9e4c79fd49d74ea2e6411798f93e75ef2a4cde6e..9c46dd3ff4a23523563e017ad9d938749487e6e3 100644 (file)
@@ -90,6 +90,7 @@ enum sbi_hsm_hart_status {
 #define SBI_ERR_INVALID_PARAM  -3
 #define SBI_ERR_DENIED         -4
 #define SBI_ERR_INVALID_ADDRESS        -5
+#define SBI_ERR_ALREADY_AVAILABLE -6
 
 extern unsigned long sbi_spec_version;
 struct sbiret {
index 7fd9fc184ae96c6083283b2deeb22a2259412107..e5c56182f48f5e355d39aec61fadd46108bef45f 100644 (file)
@@ -22,4 +22,5 @@ kvm-y += vcpu_sbi.o
 kvm-$(CONFIG_RISCV_SBI_V01) += vcpu_sbi_v01.o
 kvm-y += vcpu_sbi_base.o
 kvm-y += vcpu_sbi_replace.o
+kvm-y += vcpu_sbi_hsm.o
 kvm-y += vcpu_timer.o
index 2ae11b4e93d114e5daf0f1ce49b18696e20b2b97..0c5239e05721541785ba7b52de3c02b4fe80a99e 100644 (file)
@@ -53,6 +53,17 @@ static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu)
        struct kvm_vcpu_csr *reset_csr = &vcpu->arch.guest_reset_csr;
        struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
        struct kvm_cpu_context *reset_cntx = &vcpu->arch.guest_reset_context;
+       bool loaded;
+
+       /**
+        * The preemption should be disabled here because it races with
+        * kvm_sched_out/kvm_sched_in(called from preempt notifiers) which
+        * also calls vcpu_load/put.
+        */
+       get_cpu();
+       loaded = (vcpu->cpu != -1);
+       if (loaded)
+               kvm_arch_vcpu_put(vcpu);
 
        memcpy(csr, reset_csr, sizeof(*csr));
 
@@ -64,6 +75,11 @@ static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu)
 
        WRITE_ONCE(vcpu->arch.irqs_pending, 0);
        WRITE_ONCE(vcpu->arch.irqs_pending_mask, 0);
+
+       /* Reset the guest CSRs for hotplug usecase */
+       if (loaded)
+               kvm_arch_vcpu_load(vcpu, smp_processor_id());
+       put_cpu();
 }
 
 int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
@@ -101,6 +117,13 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
 
 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
 {
+       /**
+        * vcpu with id 0 is the designated boot cpu.
+        * Keep all vcpus with non-zero id in power-off state so that
+        * they can be brought up using SBI HSM extension.
+        */
+       if (vcpu->vcpu_idx != 0)
+               kvm_riscv_vcpu_power_off(vcpu);
 }
 
 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
index cf284e080f3e2c1f61c582c8a311be2d6ee3cfa8..f62d25bc97339a9df8d0aedc9a23d520ea1d90a7 100644 (file)
@@ -25,6 +25,8 @@ static int kvm_linux_err_map_sbi(int err)
                return SBI_ERR_INVALID_ADDRESS;
        case -EOPNOTSUPP:
                return SBI_ERR_NOT_SUPPORTED;
+       case -EALREADY:
+               return SBI_ERR_ALREADY_AVAILABLE;
        default:
                return SBI_ERR_FAILURE;
        };
@@ -43,6 +45,7 @@ extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_base;
 extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_time;
 extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_ipi;
 extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_rfence;
+extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_hsm;
 
 static const struct kvm_vcpu_sbi_extension *sbi_ext[] = {
        &vcpu_sbi_ext_v01,
@@ -50,6 +53,7 @@ static const struct kvm_vcpu_sbi_extension *sbi_ext[] = {
        &vcpu_sbi_ext_time,
        &vcpu_sbi_ext_ipi,
        &vcpu_sbi_ext_rfence,
+       &vcpu_sbi_ext_hsm,
 };
 
 void kvm_riscv_vcpu_sbi_forward(struct kvm_vcpu *vcpu, struct kvm_run *run)
diff --git a/arch/riscv/kvm/vcpu_sbi_hsm.c b/arch/riscv/kvm/vcpu_sbi_hsm.c
new file mode 100644 (file)
index 0000000..2e38368
--- /dev/null
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2021 Western Digital Corporation or its affiliates.
+ *
+ * Authors:
+ *     Atish Patra <atish.patra@wdc.com>
+ */
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/kvm_host.h>
+#include <asm/csr.h>
+#include <asm/sbi.h>
+#include <asm/kvm_vcpu_sbi.h>
+
+/*
+ * Handle SBI_EXT_HSM_HART_START: bring a powered-off target vcpu online.
+ *
+ * Per the SBI calling convention (registers read from the caller's
+ * guest context): a0 = target vcpu id (guest hartid), a1 = guest start
+ * address, a2 = opaque private value forwarded to the target hart.
+ *
+ * Returns 0 on success, -EINVAL if a0 names no vcpu, or -EALREADY if
+ * the target is not powered off (maps to SBI_ERR_ALREADY_AVAILABLE).
+ * NOTE(review): caller serializes via kvm->lock; the power_off check
+ * relies on that — confirm no lock-free path reaches here.
+ */
+static int kvm_sbi_hsm_vcpu_start(struct kvm_vcpu *vcpu)
+{
+       struct kvm_cpu_context *reset_cntx;
+       struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
+       struct kvm_vcpu *target_vcpu;
+       unsigned long target_vcpuid = cp->a0;
+
+       target_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, target_vcpuid);
+       if (!target_vcpu)
+               return -EINVAL;
+       if (!target_vcpu->arch.power_off)
+               return -EALREADY;
+
+       /*
+        * Stage the target's entry state in its reset context; the
+        * pending KVM_REQ_VCPU_RESET request makes the target adopt it
+        * before it next runs.
+        */
+       reset_cntx = &target_vcpu->arch.guest_reset_context;
+       /* start address */
+       reset_cntx->sepc = cp->a1;
+       /* target vcpu id to start */
+       reset_cntx->a0 = target_vcpuid;
+       /* private data passed from kernel */
+       reset_cntx->a1 = cp->a2;
+       kvm_make_request(KVM_REQ_VCPU_RESET, target_vcpu);
+
+       kvm_riscv_vcpu_power_on(target_vcpu);
+
+       return 0;
+}
+
+/*
+ * Handle SBI_EXT_HSM_HART_STOP: power off the *calling* vcpu (a hart
+ * may only stop itself, per the SBI HSM contract).
+ *
+ * Returns 0 on success, or -EINVAL if the vcpu is already powered off.
+ */
+static int kvm_sbi_hsm_vcpu_stop(struct kvm_vcpu *vcpu)
+{
+       if (vcpu->arch.power_off)
+               return -EINVAL;
+
+       kvm_riscv_vcpu_power_off(vcpu);
+
+       return 0;
+}
+
+/*
+ * Handle SBI_EXT_HSM_HART_STATUS: report the power state of the vcpu
+ * whose id is in the caller's a0.
+ *
+ * Returns SBI_HSM_HART_STATUS_STARTED or SBI_HSM_HART_STATUS_STOPPED
+ * (non-negative, forwarded to the guest as the SBI return value), or
+ * -EINVAL if a0 names no vcpu. The read of power_off is unlocked, so
+ * the result is inherently a snapshot that may be stale by the time
+ * the guest sees it.
+ */
+static int kvm_sbi_hsm_vcpu_get_status(struct kvm_vcpu *vcpu)
+{
+       struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
+       unsigned long target_vcpuid = cp->a0;
+       struct kvm_vcpu *target_vcpu;
+
+       target_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, target_vcpuid);
+       if (!target_vcpu)
+               return -EINVAL;
+       if (!target_vcpu->arch.power_off)
+               return SBI_HSM_HART_STATUS_STARTED;
+       else
+               return SBI_HSM_HART_STATUS_STOPPED;
+}
+
+/*
+ * Top-level dispatcher for SBI HSM extension calls. The SBI function
+ * id is taken from the guest's a6 register, per the SBI calling
+ * convention. Linux-style negative error codes returned from here are
+ * translated to SBI error codes by the common SBI ecall path
+ * (kvm_linux_err_map_sbi); -EOPNOTSUPP becomes SBI_ERR_NOT_SUPPORTED.
+ */
+static int kvm_sbi_ext_hsm_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
+                                  unsigned long *out_val,
+                                  struct kvm_cpu_trap *utrap,
+                                  bool *exit)
+{
+       int ret = 0;
+       struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
+       struct kvm *kvm = vcpu->kvm;
+       unsigned long funcid = cp->a6;
+
+       switch (funcid) {
+       case SBI_EXT_HSM_HART_START:
+               /*
+                * kvm->lock serializes concurrent HART_START requests so
+                * two vcpus cannot both start the same target.
+                */
+               mutex_lock(&kvm->lock);
+               ret = kvm_sbi_hsm_vcpu_start(vcpu);
+               mutex_unlock(&kvm->lock);
+               break;
+       case SBI_EXT_HSM_HART_STOP:
+               ret = kvm_sbi_hsm_vcpu_stop(vcpu);
+               break;
+       case SBI_EXT_HSM_HART_STATUS:
+               /* Non-negative return is the status value for the guest. */
+               ret = kvm_sbi_hsm_vcpu_get_status(vcpu);
+               if (ret >= 0) {
+                       *out_val = ret;
+                       ret = 0;
+               }
+               break;
+       default:
+               ret = -EOPNOTSUPP;
+       }
+
+       return ret;
+}
+
+/*
+ * HSM extension descriptor registered in the sbi_ext[] table in
+ * vcpu_sbi.c; the extension id range covers exactly SBI_EXT_HSM.
+ */
+const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_hsm = {
+       .extid_start = SBI_EXT_HSM,
+       .extid_end = SBI_EXT_HSM,
+       .handler = kvm_sbi_ext_hsm_handler,
+};