KVM: SVM: Use asm goto to handle unexpected #UD on SVM instructions
author    Sean Christopherson <seanjc@google.com>
          Thu, 31 Dec 2020 00:27:00 +0000 (16:27 -0800)
committer Paolo Bonzini <pbonzini@redhat.com>
          Thu, 4 Feb 2021 10:27:33 +0000 (05:27 -0500)
Add svm_asm*() macros, a la the existing vmx_asm*() macros, to handle
faults on SVM instructions instead of using the generic __ex(), a.k.a.
__kvm_handle_fault_on_reboot().  Using asm goto generates slightly
better code as it eliminates the in-line JMP+CALL sequences that are
needed by __kvm_handle_fault_on_reboot() to avoid triggering BUG()
from fixup (which generates bad stack traces).

Using SVM specific macros also drops the last user of __ex() and the
last asm linkage to kvm_spurious_fault(), and adds a helper for
VMSAVE, which may gain an additional call site in the future (as part
of optimizing the SVM context switching).
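
For readers unfamiliar with asm goto, the new vmsave() helper roughly
expands to the body below. This is only a sketch of the macro expansion
(the vmsave_expanded() name is illustrative, and the snippet assumes a
kernel build providing asm_volatile_goto, _ASM_EXTABLE and
kvm_spurious_fault()):

	/*
	 * Rough expansion of svm_asm1(vmsave, "a" (pa), "memory").  The
	 * exception table entry points the fault fixup directly at the local
	 * 'fault' label, so the common (non-faulting) path carries no extra
	 * in-line JMP+CALL thunk.
	 */
	static inline void vmsave_expanded(unsigned long pa)
	{
		asm_volatile_goto("1: vmsave %0\n\t"
				  _ASM_EXTABLE(1b, %l[fault])
				  :: "a" (pa) : "memory" : fault);
		return;
	fault:
		kvm_spurious_fault();
	}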

Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20201231002702.2223707-8-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/svm/sev.c
arch/x86/kvm/svm/svm.c
arch/x86/kvm/svm/svm_ops.h [new file with mode: 0644]

index 8dfe8988be8d7f765c4bd0d2562a8d7f7aece809..a3e2b29f484d50515d8a85db921493d0c1b4f8c3 100644 (file)
@@ -22,6 +22,7 @@
 
 #include "x86.h"
 #include "svm.h"
+#include "svm_ops.h"
 #include "cpuid.h"
 #include "trace.h"
 
@@ -2076,7 +2077,7 @@ void sev_es_vcpu_load(struct vcpu_svm *svm, int cpu)
         * of which one step is to perform a VMLOAD. Since hardware does not
         * perform a VMSAVE on VMRUN, the host savearea must be updated.
         */
-       asm volatile(__ex("vmsave %0") : : "a" (__sme_page_pa(sd->save_area)) : "memory");
+       vmsave(__sme_page_pa(sd->save_area));
 
        /*
         * Certain MSRs are restored on VMEXIT, only save ones that aren't
index 504e9474547b2be27dc062102d6e257c60aa637d..58f771b48c2eb9c629e4de598f3e803c67ea1769 100644 (file)
@@ -41,6 +41,7 @@
 #include "trace.h"
 
 #include "svm.h"
+#include "svm_ops.h"
 
 #define __ex(x) __kvm_handle_fault_on_reboot(x)
 
@@ -248,21 +249,6 @@ u32 svm_msrpm_offset(u32 msr)
 
 #define MAX_INST_SIZE 15
 
-static inline void clgi(void)
-{
-       asm volatile (__ex("clgi"));
-}
-
-static inline void stgi(void)
-{
-       asm volatile (__ex("stgi"));
-}
-
-static inline void invlpga(unsigned long addr, u32 asid)
-{
-       asm volatile (__ex("invlpga %1, %0") : : "c"(asid), "a"(addr));
-}
-
 static int get_max_npt_level(void)
 {
 #ifdef CONFIG_X86_64
diff --git a/arch/x86/kvm/svm/svm_ops.h b/arch/x86/kvm/svm/svm_ops.h
new file mode 100644 (file)
index 0000000..9f007bc
--- /dev/null
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __KVM_X86_SVM_OPS_H
+#define __KVM_X86_SVM_OPS_H
+
+#include <linux/compiler_types.h>
+
+#include <asm/kvm_host.h>
+
+#define svm_asm(insn, clobber...)                              \
+do {                                                           \
+       asm_volatile_goto("1: " __stringify(insn) "\n\t"        \
+                         _ASM_EXTABLE(1b, %l[fault])           \
+                         ::: clobber : fault);                 \
+       return;                                                 \
+fault:                                                         \
+       kvm_spurious_fault();                                   \
+} while (0)
+
+#define svm_asm1(insn, op1, clobber...)                                \
+do {                                                           \
+       asm_volatile_goto("1: "  __stringify(insn) " %0\n\t"    \
+                         _ASM_EXTABLE(1b, %l[fault])           \
+                         :: op1 : clobber : fault);            \
+       return;                                                 \
+fault:                                                         \
+       kvm_spurious_fault();                                   \
+} while (0)
+
+#define svm_asm2(insn, op1, op2, clobber...)                           \
+do {                                                                   \
+       asm_volatile_goto("1: "  __stringify(insn) " %1, %0\n\t"        \
+                         _ASM_EXTABLE(1b, %l[fault])                   \
+                         :: op1, op2 : clobber : fault);               \
+       return;                                                         \
+fault:                                                                 \
+       kvm_spurious_fault();                                           \
+} while (0)
+
+static inline void clgi(void)
+{
+       svm_asm(clgi);
+}
+
+static inline void stgi(void)
+{
+       svm_asm(stgi);
+}
+
+static inline void invlpga(unsigned long addr, u32 asid)
+{
+       svm_asm2(invlpga, "c"(asid), "a"(addr));
+}
+
+/*
+ * Despite being a physical address, the portion of rAX that is consumed by
+ * VMSAVE, VMLOAD, etc... is still controlled by the effective address size,
+ * hence 'unsigned long' instead of 'hpa_t'.
+ */
+static inline void vmsave(unsigned long pa)
+{
+       svm_asm1(vmsave, "a" (pa), "memory");
+}
+
+#endif /* __KVM_X86_SVM_OPS_H */