x86: Use return-thunk in asm code
Author:     Peter Zijlstra <peterz@infradead.org>
AuthorDate: Tue, 14 Jun 2022 21:15:45 +0000 (23:15 +0200)
Committer:  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
CommitDate: Sat, 23 Jul 2022 10:54:00 +0000 (12:54 +0200)
commit aa3d480315ba6c3025a60958e1981072ea37c3df upstream.

Use the return thunk in asm code. If the thunk isn't needed, it will
get patched into a RET instruction during boot by apply_returns().
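
As an illustration only (hypothetical function, not part of this patch),
an .S file using the RET macro from the linkage.h hunk below would end a
function like this:

	SYM_FUNC_START(example_func)
		xorl	%eax, %eax
		/* With the new RET definition (CONFIG_RETPOLINE, outside
		 * the vDSO) this assembles to 'jmp __x86_return_thunk';
		 * apply_returns() turns it back into a plain return at
		 * boot when the thunk is not required.
		 */
		RET
	SYM_FUNC_END(example_func)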

Since alternatives can't handle relocations outside of the first
instruction, putting a 'jmp __x86_return_thunk' in one is not valid;
therefore, carve out the memmove ERMS path into a separate label and
jump to it.
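
In sketch form, using the names from the memmove_64.S hunk below, the
change replaces an alternative that would have contained the thunk jump
with one that only jumps to a local label:

	/* Before: RET now expands to 'jmp __x86_return_thunk', whose
	 * relocation is not in the first instruction of the replacement,
	 * so this is no longer valid inside an alternative:
	 */
	ALTERNATIVE "", __stringify(movq %rdx, %rcx; rep movsb; RET), X86_FEATURE_ERMS

	/* After: only the jump to the local label is patched in; the
	 * RET lives outside the alternative, where apply_returns() can
	 * deal with it as usual.
	 */
	ALTERNATIVE "", "jmp .Lmemmove_erms", X86_FEATURE_ERMS
	...
.Lmemmove_erms:
	movq %rdx, %rcx
	rep movsb
	RET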

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Josh Poimboeuf <jpoimboe@kernel.org>
Signed-off-by: Borislav Petkov <bp@suse.de>
[cascardo: no RANDSTRUCT_CFLAGS]
Signed-off-by: Thadeu Lima de Souza Cascardo <cascardo@canonical.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
arch/x86/entry/vdso/Makefile
arch/x86/include/asm/linkage.h
arch/x86/lib/memmove_64.S

diff --git a/arch/x86/entry/vdso/Makefile b/arch/x86/entry/vdso/Makefile
index a2dddcc..c8891d3 100644
--- a/arch/x86/entry/vdso/Makefile
+++ b/arch/x86/entry/vdso/Makefile
@@ -92,6 +92,7 @@ endif
 endif
 
 $(vobjs): KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_LTO) $(GCC_PLUGINS_CFLAGS) $(RETPOLINE_CFLAGS),$(KBUILD_CFLAGS)) $(CFL)
+$(vobjs): KBUILD_AFLAGS += -DBUILD_VDSO
 
 #
 # vDSO code runs in userspace and -pg doesn't help with profiling anyway.
diff --git a/arch/x86/include/asm/linkage.h b/arch/x86/include/asm/linkage.h
index 0309079..d04e61c 100644
--- a/arch/x86/include/asm/linkage.h
+++ b/arch/x86/include/asm/linkage.h
 #define __ALIGN_STR    __stringify(__ALIGN)
 #endif
 
+#if defined(CONFIG_RETPOLINE) && !defined(__DISABLE_EXPORTS) && !defined(BUILD_VDSO)
+#define RET    jmp __x86_return_thunk
+#else /* CONFIG_RETPOLINE */
 #ifdef CONFIG_SLS
 #define RET    ret; int3
 #else
 #define RET    ret
 #endif
+#endif /* CONFIG_RETPOLINE */
 
 #else /* __ASSEMBLY__ */
 
+#if defined(CONFIG_RETPOLINE) && !defined(__DISABLE_EXPORTS) && !defined(BUILD_VDSO)
+#define ASM_RET        "jmp __x86_return_thunk\n\t"
+#else /* CONFIG_RETPOLINE */
 #ifdef CONFIG_SLS
 #define ASM_RET        "ret; int3\n\t"
 #else
 #define ASM_RET        "ret\n\t"
 #endif
+#endif /* CONFIG_RETPOLINE */
 
 #endif /* __ASSEMBLY__ */
 
diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
index 50ea390..4b8ee3a 100644
--- a/arch/x86/lib/memmove_64.S
+++ b/arch/x86/lib/memmove_64.S
@@ -40,7 +40,7 @@ SYM_FUNC_START(__memmove)
        /* FSRM implies ERMS => no length checks, do the copy directly */
 .Lmemmove_begin_forward:
        ALTERNATIVE "cmp $0x20, %rdx; jb 1f", "", X86_FEATURE_FSRM
-       ALTERNATIVE "", __stringify(movq %rdx, %rcx; rep movsb; RET), X86_FEATURE_ERMS
+       ALTERNATIVE "", "jmp .Lmemmove_erms", X86_FEATURE_ERMS
 
        /*
         * movsq instruction have many startup latency
@@ -206,6 +206,11 @@ SYM_FUNC_START(__memmove)
        movb %r11b, (%rdi)
 13:
        RET
+
+.Lmemmove_erms:
+       movq %rdx, %rcx
+       rep movsb
+       RET
 SYM_FUNC_END(__memmove)
 SYM_FUNC_END_ALIAS(memmove)
 EXPORT_SYMBOL(__memmove)