x86/retbleed: Add __x86_return_thunk alignment checks
Author:     Borislav Petkov (AMD) <bp@alien8.de>
AuthorDate: Mon, 15 May 2023 14:07:26 +0000 (16:07 +0200)
Commit:     Borislav Petkov (AMD) <bp@alien8.de>
CommitDate: Wed, 17 May 2023 10:14:21 +0000 (12:14 +0200)
Add a linker assertion and compute the 0xcc padding dynamically so that
__x86_return_thunk is always cacheline-aligned. Leave the SYM_START()
macro in, as the untraining code doesn't need ENDBR annotations anyway.
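
For illustration, a sketch of the arithmetic the new .skip expression
encodes. "N" below is shorthand for the label difference, not a real
symbol, and the "..." stands in for the untraining instructions:

	.align 64	/* "." now sits at a 64-byte boundary B */
	/*
	 * Pad with 64 - N bytes of INT3 (0xcc), where N is
	 * __x86_return_thunk - zen_untrain_ret, a build-time
	 * constant because both labels are in the same section.
	 */
	.skip 64 - (__x86_return_thunk - zen_untrain_ret), 0xcc
zen_untrain_ret:	/* starts at B + 64 - N */
	...		/* N bytes of untraining code */
__x86_return_thunk:	/* B + (64 - N) + N == B + 64: cacheline-aligned */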

Suggested-by: Andrew Cooper <andrew.cooper3@citrix.com>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
Link: https://lore.kernel.org/r/20230515140726.28689-1-bp@alien8.de
arch/x86/kernel/vmlinux.lds.S
arch/x86/lib/retpoline.S

diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index 25f1552..03c885d 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -508,4 +508,8 @@ INIT_PER_CPU(irq_stack_backing_store);
            "fixed_percpu_data is not at start of per-cpu area");
 #endif
 
+#ifdef CONFIG_RETHUNK
+. = ASSERT((__x86_return_thunk & 0x3f) == 0, "__x86_return_thunk not cacheline-aligned");
+#endif
+
 #endif /* CONFIG_X86_64 */
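
The mask arithmetic in the assertion: 0x3f selects the six low address
bits, so (__x86_return_thunk & 0x3f) == 0 holds exactly when the address
is a multiple of 64 bytes, i.e. cacheline-aligned. An address ending in
0x40 passes, while one ending in 0x41 would fail the build at link time.
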
diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S
index b3b1e37..3fd066d 100644
--- a/arch/x86/lib/retpoline.S
+++ b/arch/x86/lib/retpoline.S
@@ -143,7 +143,7 @@ SYM_CODE_END(__x86_indirect_jump_thunk_array)
  *    from re-poisoning the BTB prediction.
  */
        .align 64
-       .skip 63, 0xcc
+       .skip 64 - (__x86_return_thunk - zen_untrain_ret), 0xcc
 SYM_START(zen_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
        ANNOTATE_NOENDBR
        /*