KVM: arm64: Add SMC handler in nVHE EL2
author David Brazdil <dbrazdil@google.com>
Wed, 2 Dec 2020 18:41:11 +0000 (18:41 +0000)
committer Marc Zyngier <maz@kernel.org>
Fri, 4 Dec 2020 10:08:34 +0000 (10:08 +0000)
Add a handler for host SMCs to the KVM nVHE trap handler. Forward all
SMCs to EL3 and propagate the result back to EL1. This is done in
preparation for validating host SMCs in KVM protected mode.

The implementation assumes that the firmware implements SMCCC v1.2 or
older. That means x0-x17 can be used both for arguments and results,
while all other GPRs are preserved.
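
For illustration only, this is the kind of host-side call that ends up
on this path once host SMCs are trapped to EL2 (the trap itself, via
HCR_EL2.TSC, is arranged separately for protected mode). The sketch
uses the kernel's existing arm_smccc_smc() helper:

	#include <linux/arm-smccc.h>

	static unsigned long query_smccc_version(void)
	{
		struct arm_smccc_res res;

		/*
		 * With host SMCs trapped, this lands in the nVHE EL2
		 * vector, is forwarded to EL3 by the new handler, and
		 * the result is propagated back here in res.
		 */
		arm_smccc_smc(ARM_SMCCC_VERSION_FUNC_ID,
			      0, 0, 0, 0, 0, 0, 0, &res);
		return res.a0;
	}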

Signed-off-by: David Brazdil <dbrazdil@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20201202184122.26046-16-dbrazdil@google.com
arch/arm64/kvm/hyp/nvhe/host.S
arch/arm64/kvm/hyp/nvhe/hyp-main.c

diff --git a/arch/arm64/kvm/hyp/nvhe/host.S b/arch/arm64/kvm/hyp/nvhe/host.S
index fe2740b..2b56f0b 100644
@@ -180,3 +180,41 @@ SYM_CODE_START(__kvm_hyp_host_vector)
        invalid_host_el1_vect                   // FIQ 32-bit EL1
        invalid_host_el1_vect                   // Error 32-bit EL1
 SYM_CODE_END(__kvm_hyp_host_vector)
+
+/*
+ * Forward SMC with arguments in struct kvm_cpu_context, and
+ * store the result into the same struct. Assumes SMCCC 1.2 or older.
+ *
+ * x0: struct kvm_cpu_context*
+ */
+SYM_CODE_START(__kvm_hyp_host_forward_smc)
+       /*
+        * Use x18 to keep the pointer to the host context because
+        * x18 is callee-saved in SMCCC but not in AAPCS64.
+        */
+       mov     x18, x0
+
+       ldp     x0, x1,   [x18, #CPU_XREG_OFFSET(0)]
+       ldp     x2, x3,   [x18, #CPU_XREG_OFFSET(2)]
+       ldp     x4, x5,   [x18, #CPU_XREG_OFFSET(4)]
+       ldp     x6, x7,   [x18, #CPU_XREG_OFFSET(6)]
+       ldp     x8, x9,   [x18, #CPU_XREG_OFFSET(8)]
+       ldp     x10, x11, [x18, #CPU_XREG_OFFSET(10)]
+       ldp     x12, x13, [x18, #CPU_XREG_OFFSET(12)]
+       ldp     x14, x15, [x18, #CPU_XREG_OFFSET(14)]
+       ldp     x16, x17, [x18, #CPU_XREG_OFFSET(16)]
+
+       smc     #0
+
+       stp     x0, x1,   [x18, #CPU_XREG_OFFSET(0)]
+       stp     x2, x3,   [x18, #CPU_XREG_OFFSET(2)]
+       stp     x4, x5,   [x18, #CPU_XREG_OFFSET(4)]
+       stp     x6, x7,   [x18, #CPU_XREG_OFFSET(6)]
+       stp     x8, x9,   [x18, #CPU_XREG_OFFSET(8)]
+       stp     x10, x11, [x18, #CPU_XREG_OFFSET(10)]
+       stp     x12, x13, [x18, #CPU_XREG_OFFSET(12)]
+       stp     x14, x15, [x18, #CPU_XREG_OFFSET(14)]
+       stp     x16, x17, [x18, #CPU_XREG_OFFSET(16)]
+
+       ret
+SYM_CODE_END(__kvm_hyp_host_forward_smc)
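
For reference, the CPU_XREG_OFFSET() macro used above computes the byte
offset of a GPR within the host's struct kvm_cpu_context. A minimal
sketch of the definitions this relies on, assuming they match the
asm-offsets plumbing already in the tree:

	/* arch/arm64/kernel/asm-offsets.c: offset of ctxt->regs */
	DEFINE(CPU_USER_PT_REGS, offsetof(struct kvm_cpu_context, regs));

	/* host.S: xN lives 8*N bytes into the saved pt_regs */
	#define CPU_XREG_OFFSET(x)	(CPU_USER_PT_REGS + 8*x)
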
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
index a4f1cac..f25680e 100644
@@ -18,6 +18,8 @@
 
 DEFINE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);
 
+void __kvm_hyp_host_forward_smc(struct kvm_cpu_context *host_ctxt);
+
 static void handle___kvm_vcpu_run(struct kvm_cpu_context *host_ctxt)
 {
        DECLARE_REG(struct kvm_vcpu *, vcpu, host_ctxt, 1);
@@ -152,12 +154,39 @@ inval:
        cpu_reg(host_ctxt, 0) = SMCCC_RET_NOT_SUPPORTED;
 }
 
+static void default_host_smc_handler(struct kvm_cpu_context *host_ctxt)
+{
+       __kvm_hyp_host_forward_smc(host_ctxt);
+}
+
+static void skip_host_instruction(void)
+{
+       write_sysreg_el2(read_sysreg_el2(SYS_ELR) + 4, SYS_ELR);
+}
+
+static void handle_host_smc(struct kvm_cpu_context *host_ctxt)
+{
+       default_host_smc_handler(host_ctxt);
+
+       /*
+        * Unlike HVC, the return address of an SMC is the instruction's PC.
+        * Move the return address past the instruction.
+        */
+       skip_host_instruction();
+}
+
 void handle_trap(struct kvm_cpu_context *host_ctxt)
 {
        u64 esr = read_sysreg_el2(SYS_ESR);
 
-       if (unlikely(ESR_ELx_EC(esr) != ESR_ELx_EC_HVC64))
+       switch (ESR_ELx_EC(esr)) {
+       case ESR_ELx_EC_HVC64:
+               handle_host_hcall(host_ctxt);
+               break;
+       case ESR_ELx_EC_SMC64:
+               handle_host_smc(host_ctxt);
+               break;
+       default:
                hyp_panic();
-
-       handle_host_hcall(host_ctxt);
+       }
 }
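
For reference, the exception-class dispatch above uses the standard ESR
decoding from arch/arm64/include/asm/esr.h (quoted here for
convenience, not part of this patch):

	#define ESR_ELx_EC_SHIFT	(26)
	#define ESR_ELx_EC_MASK		(UL(0x3F) << ESR_ELx_EC_SHIFT)
	#define ESR_ELx_EC(esr)		(((esr) & ESR_ELx_EC_MASK) >> ESR_ELx_EC_SHIFT)

	#define ESR_ELx_EC_HVC64	(0x16)	/* HVC from AArch64 */
	#define ESR_ELx_EC_SMC64	(0x17)	/* SMC from AArch64 */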