KVM: arm64: nVHE: Switch to hyp context for EL2
author: Andrew Scull <ascull@google.com>
Tue, 15 Sep 2020 10:46:36 +0000 (11:46 +0100)
committer: Marc Zyngier <maz@kernel.org>
Tue, 15 Sep 2020 17:39:03 +0000 (18:39 +0100)
Save and restore the host context when switching to and from hyp. This
gives hyp its own context that the host will not see, as a step towards a
full trust boundary between the two.

SP_EL0 and pointer authentication keys are currently shared between the
host and hyp so don't need to be switched yet.

Signed-off-by: Andrew Scull <ascull@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20200915104643.2543892-13-ascull@google.com
arch/arm64/kvm/hyp/include/hyp/switch.h
arch/arm64/kvm/hyp/nvhe/Makefile
arch/arm64/kvm/hyp/nvhe/host.S
arch/arm64/kvm/hyp/nvhe/hyp-main.c [new file with mode: 0644]

index 821721b78ad9b518d5d5d3d5263e0274c3ec359e..4536b50ddc06c97439c59d8d208c905cc05ff863 100644 (file)
@@ -372,6 +372,8 @@ static inline bool esr_is_ptrauth_trap(u32 esr)
        ctxt_sys_reg(ctxt, key ## KEYHI_EL1) = __val;                   \
 } while(0)
 
+DECLARE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
+
 static inline bool __hyp_handle_ptrauth(struct kvm_vcpu *vcpu)
 {
        struct kvm_cpu_context *ctxt;
index ddf98eb07b9d8ffc3bad1588349f31380651953b..46c89e8c30bcacc75335162b702578e1c626b78e 100644 (file)
@@ -6,7 +6,7 @@
 asflags-y := -D__KVM_NVHE_HYPERVISOR__
 ccflags-y := -D__KVM_NVHE_HYPERVISOR__
 
-obj-y := timer-sr.o sysreg-sr.o debug-sr.o switch.o tlb.o hyp-init.o host.o
+obj-y := timer-sr.o sysreg-sr.o debug-sr.o switch.o tlb.o hyp-init.o host.o hyp-main.o
 obj-y += ../vgic-v3-sr.o ../aarch32.o ../vgic-v2-cpuif-proxy.o ../entry.o \
         ../fpsimd.o ../hyp-entry.o
 
index 9ab7814e611486b776af5c1bc60885f42345ede2..d26e41773dc4403ec5e4de6e0db457ba662a57cd 100644 (file)
 
        .text
 
+/*
+ * __host_exit - Save the host's GPRs into its context and call handle_trap().
+ *
+ * Branched to from the host's EL2 synchronous exception vector (see the
+ * HVC stub check above) with x0/x1 still live, so they are pushed to the
+ * stack first to free a pair of scratch registers. All host general
+ * purpose registers are stored into the host's kvm_cpu_context, the trap
+ * is handled in C by handle_trap(), and the host state is then restored
+ * before returning to the host with eret.
+ */
+SYM_FUNC_START(__host_exit)
+       stp     x0, x1, [sp, #-16]!
+
+       get_host_ctxt   x0, x1
+
+       /* Set PSTATE.PAN on CPUs that have it (ARM64_HAS_PAN) */
+       ALTERNATIVE(nop, SET_PSTATE_PAN(1), ARM64_HAS_PAN, CONFIG_ARM64_PAN)
+
+       /* Store the host regs x2 and x3 */
+       stp     x2, x3,   [x0, #CPU_XREG_OFFSET(2)]
+
+       /* Retrieve the host regs x0-x1 from the stack */
+       ldp     x2, x3, [sp], #16       // x0, x1
+
+       /* Store the host regs x0-x1 and x4-x17 */
+       stp     x2, x3,   [x0, #CPU_XREG_OFFSET(0)]
+       stp     x4, x5,   [x0, #CPU_XREG_OFFSET(4)]
+       stp     x6, x7,   [x0, #CPU_XREG_OFFSET(6)]
+       stp     x8, x9,   [x0, #CPU_XREG_OFFSET(8)]
+       stp     x10, x11, [x0, #CPU_XREG_OFFSET(10)]
+       stp     x12, x13, [x0, #CPU_XREG_OFFSET(12)]
+       stp     x14, x15, [x0, #CPU_XREG_OFFSET(14)]
+       stp     x16, x17, [x0, #CPU_XREG_OFFSET(16)]
+
+       /* Store the host regs x18-x29, lr */
+       save_callee_saved_regs x0
+
+       /* Save the host context pointer in x29 across the function call */
+       mov     x29, x0
+       bl      handle_trap
+
+       /* Restore host regs x0-x17 */
+       ldp     x0, x1,   [x29, #CPU_XREG_OFFSET(0)]
+       ldp     x2, x3,   [x29, #CPU_XREG_OFFSET(2)]
+       ldp     x4, x5,   [x29, #CPU_XREG_OFFSET(4)]
+       ldp     x6, x7,   [x29, #CPU_XREG_OFFSET(6)]
+       ldp     x8, x9,   [x29, #CPU_XREG_OFFSET(8)]
+       ldp     x10, x11, [x29, #CPU_XREG_OFFSET(10)]
+       ldp     x12, x13, [x29, #CPU_XREG_OFFSET(12)]
+       ldp     x14, x15, [x29, #CPU_XREG_OFFSET(14)]
+       ldp     x16, x17, [x29, #CPU_XREG_OFFSET(16)]
+
+       /* Restore host regs x18-x29, lr */
+       restore_callee_saved_regs x29
+
+       /* Do not touch any register after this! */
+       eret
+       /* Speculation barrier: prevent straight-line speculation past eret */
+       sb
+SYM_FUNC_END(__host_exit)
+
 SYM_FUNC_START(__hyp_do_panic)
        mov     lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
                      PSR_MODE_EL1h)
@@ -34,7 +83,7 @@ SYM_FUNC_END(__hyp_do_panic)
 
        /* Check for a stub HVC call */
        cmp     x0, #HVC_STUB_HCALL_NR
-       b.hs    1f
+       b.hs    __host_exit
 
        /*
         * Compute the idmap address of __kvm_handle_stub_hvc and
@@ -50,23 +99,6 @@ SYM_FUNC_END(__hyp_do_panic)
        /* x5 = __pa(x5) */
        sub     x5, x5, x6
        br      x5
-
-1:
-       /*
-        * Shuffle the parameters before calling the function
-        * pointed to in x0. Assumes parameters in x[1,2,3].
-        */
-       kern_hyp_va     x0
-       str     lr, [sp, #-16]!
-       mov     lr, x0
-       mov     x0, x1
-       mov     x1, x2
-       mov     x2, x3
-       blr     lr
-       ldr     lr, [sp], #16
-
-       eret
-       sb
 .L__vect_end\@:
 .if ((.L__vect_end\@ - .L__vect_start\@) > 0x80)
        .error "host_el1_sync_vect larger than vector entry"
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
new file mode 100644 (file)
index 0000000..570c389
--- /dev/null
@@ -0,0 +1,36 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 - Google Inc
+ * Author: Andrew Scull <ascull@google.com>
+ */
+
+#include <hyp/switch.h>
+
+#include <asm/kvm_asm.h>
+#include <asm/kvm_emulate.h>
+#include <asm/kvm_host.h>
+#include <asm/kvm_hyp.h>
+#include <asm/kvm_mmu.h>
+
+typedef unsigned long (*hypcall_fn_t)
+       (unsigned long, unsigned long, unsigned long);
+
+/*
+ * handle_trap - C entry point for traps taken from the host into hyp.
+ * @host_ctxt: the host's saved register context, filled in by the
+ *             assembly trap path before calling here.
+ *
+ * Only HVC64 exceptions are expected; any other exception class is
+ * fatal and panics hyp. The HVC arguments arrive in the host's x0-x3:
+ * x0 holds a host (kernel) virtual address of the function to run, and
+ * x1-x3 hold up to three arguments. The function's return value is
+ * written back into the host's x0 for the return to the host.
+ */
+void handle_trap(struct kvm_cpu_context *host_ctxt)
+{
+       u64 esr = read_sysreg_el2(SYS_ESR);
+       hypcall_fn_t func;
+       unsigned long ret;
+
+       /* Anything other than a 64-bit HVC from the host is unexpected */
+       if (ESR_ELx_EC(esr) != ESR_ELx_EC_HVC64)
+               hyp_panic();
+
+       /*
+        * __kvm_call_hyp takes a pointer in the host address space and
+        * up to three arguments.
+        */
+       func = (hypcall_fn_t)kern_hyp_va(host_ctxt->regs.regs[0]);
+       ret = func(host_ctxt->regs.regs[1],
+                  host_ctxt->regs.regs[2],
+                  host_ctxt->regs.regs[3]);
+       host_ctxt->regs.regs[0] = ret;
+}