x86/cpu/kvm: Provide UNTRAIN_RET_VM
author Peter Zijlstra <peterz@infradead.org>
Mon, 14 Aug 2023 11:44:35 +0000 (13:44 +0200)
committer Borislav Petkov (AMD) <bp@alien8.de>
Wed, 16 Aug 2023 19:58:59 +0000 (21:58 +0200)
Just as it doesn't make sense for UNTRAIN_RET to issue two untrain
calls, it doesn't make sense for VMEXIT to issue an extra IBPB call.

This cures the VMEXIT path potentially doing unret+IBPB or a double IBPB.
Also, the (SEV) VMEXIT case seems to have been overlooked.

Redefine the meaning of the synthetic IBPB flags to:

 - ENTRY_IBPB     -- issue IBPB on entry  (was: entry + VMEXIT)
 - IBPB_ON_VMEXIT -- issue IBPB on VMEXIT

And have 'retbleed=ibpb' set *BOTH* feature flags to ensure it retains
the previous behaviour and issues IBPB on entry+VMEXIT.

The new 'srso=ibpb_vmexit' option only sets IBPB_ON_VMEXIT.
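
For illustration, the mapping from command line options to feature
flags in arch/x86/kernel/cpu/bugs.c then looks roughly like this (a
sketch only; the two cases live in different mitigation-selection
switches, only the retbleed hunk below is part of this patch, and the
SRSO_MITIGATION_IBPB_ON_VMEXIT label is an assumed name):

	case RETBLEED_MITIGATION_IBPB:
		/* retbleed=ibpb: IBPB on entry *and* on VMEXIT */
		setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
		setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
		break;

	case SRSO_MITIGATION_IBPB_ON_VMEXIT:
		/* srso=ibpb_vmexit: IBPB on VMEXIT only */
		setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
		break;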

Create UNTRAIN_RET_VM specifically for the VMEXIT case, and have that
check IBPB_ON_VMEXIT.

All this avoids the VMEXIT case having to check both ENTRY_IBPB and
IBPB_ON_VMEXIT, and simplifies the alternatives.
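
For example, assuming the usual ALTERNATIVE_3 patching rules (the
last enabled feature's replacement wins), the new VMEXIT site resolves
to exactly one of:

 - nothing (NOPs)      -- no relevant flag set
 - CALL_UNTRAIN_RET    -- X86_FEATURE_UNRET
 - "call entry_ibpb"   -- X86_FEATURE_IBPB_ON_VMEXIT
 - RESET_CALL_DEPTH    -- X86_FEATURE_CALL_DEPTH

whereas the old UNTRAIN_RET + separate SRSO ALTERNATIVE pair could
emit an IBPB from ENTRY_IBPB *and* a second one from IBPB_ON_VMEXIT.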

Fixes: fb3bd914b3ec ("x86/srso: Add a Speculative RAS Overflow mitigation")
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Link: https://lore.kernel.org/r/20230814121149.109557833@infradead.org
arch/x86/include/asm/nospec-branch.h
arch/x86/kernel/cpu/bugs.c
arch/x86/kvm/svm/vmenter.S

diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index 5285c8e..c55cc24 100644
 #endif
 .endm
 
+.macro UNTRAIN_RET_VM
+#if defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_IBPB_ENTRY) || \
+       defined(CONFIG_CALL_DEPTH_TRACKING) || defined(CONFIG_CPU_SRSO)
+       VALIDATE_UNRET_END
+       ALTERNATIVE_3 "",                                               \
+                     CALL_UNTRAIN_RET, X86_FEATURE_UNRET,              \
+                     "call entry_ibpb", X86_FEATURE_IBPB_ON_VMEXIT,    \
+                     __stringify(RESET_CALL_DEPTH), X86_FEATURE_CALL_DEPTH
+#endif
+.endm
+
 .macro UNTRAIN_RET_FROM_CALL
 #if defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_IBPB_ENTRY) || \
        defined(CONFIG_CALL_DEPTH_TRACKING)
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 6f3e195..9026e3f 100644
@@ -1054,6 +1054,7 @@ do_cmd_auto:
 
        case RETBLEED_MITIGATION_IBPB:
                setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
+               setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
                mitigate_smt = true;
                break;
 
diff --git a/arch/x86/kvm/svm/vmenter.S b/arch/x86/kvm/svm/vmenter.S
index 265452f..ef2ebab 100644
@@ -222,10 +222,7 @@ SYM_FUNC_START(__svm_vcpu_run)
         * because interrupt handlers won't sanitize 'ret' if the return is
         * from the kernel.
         */
-       UNTRAIN_RET
-
-       /* SRSO */
-       ALTERNATIVE "", "call entry_ibpb", X86_FEATURE_IBPB_ON_VMEXIT
+       UNTRAIN_RET_VM
 
        /*
         * Clear all general purpose registers except RSP and RAX to prevent
@@ -362,7 +359,7 @@ SYM_FUNC_START(__svm_sev_es_vcpu_run)
         * because interrupt handlers won't sanitize RET if the return is
         * from the kernel.
         */
-       UNTRAIN_RET
+       UNTRAIN_RET_VM
 
        /* "Pop" @spec_ctrl_intercepted.  */
        pop %_ASM_BX