x86/lib/memmove: Decouple ERMS from FSRM
author     Borislav Petkov (AMD) <bp@alien8.de>
           Sun, 26 Feb 2023 20:04:26 +0000 (21:04 +0100)
committer  Borislav Petkov (AMD) <bp@alien8.de>
           Wed, 10 May 2023 12:51:56 +0000 (14:51 +0200)
Up until now, FSRM was perceived as an improvement over ERMS and was thus
made dependent on the latter.

However, there are AMD BIOSes out there which allow either of the two
features to be disabled, and that can prevent kernels from booting because
the CMP disappears and the logic in the memmove() function breaks.

A similar situation can be observed in some VM migration scenarios.
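
With the old pair of ALTERNATIVEs (removed in the hunk below), an
FSRM-enabled but ERMS-disabled CPU ends up with roughly the following at
.Lmemmove_begin_forward (a sketch of the patched result, not verbatim
disassembly):

  .Lmemmove_begin_forward:
          # FSRM set:   "cmp $0x20, %rdx; jb 1f" is patched out
          # ERMS clear: "jmp .Lmemmove_erms" is never patched in
          ...           # execution falls straight into the movsq forward
                        # copy, which assumes a length of at least 0x20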

Use a single ALTERNATIVE_2 to patch in the proper instruction sequence
depending on which feature is enabled.
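
As a rough sketch (derived from the new ALTERNATIVE_2 below, not verbatim
disassembly), the runtime-patched sequence at .Lmemmove_begin_forward then
becomes one of:

  # neither ERMS nor FSRM:
          cmp $0x20, %rdx
          jb 1f                   # small copies take the byte/word path
                                  # otherwise fall through to the movsq copy

  # ERMS only:
          cmp $0x20, %rdx
          jb 1f                   # small copies still avoid REP MOVSB
          movq %rdx, %rcx
          rep movsb               # big copies use REP MOVSB
          RET

  # FSRM (wins when both features are set):
          movq %rdx, %rcx
          rep movsb               # fast short REP MOVSB, no length check needed
          RET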

Reported-by: Daniel Verkamp <dverkamp@chromium.org>
Reported-by: Jiri Slaby <jirislaby@kernel.org>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Link: https://lore.kernel.org/r/Y/yK0dyzI0MMdTie@zn.tnic
arch/x86/lib/memmove_64.S

index 0266186..0559b20 100644
@@ -38,10 +38,12 @@ SYM_FUNC_START(__memmove)
        cmp %rdi, %r8
        jg 2f
 
-       /* FSRM implies ERMS => no length checks, do the copy directly */
+#define CHECK_LEN      cmp $0x20, %rdx; jb 1f
+#define MEMMOVE_BYTES  movq %rdx, %rcx; rep movsb; RET
 .Lmemmove_begin_forward:
-       ALTERNATIVE "cmp $0x20, %rdx; jb 1f", "", X86_FEATURE_FSRM
-       ALTERNATIVE "", "jmp .Lmemmove_erms", X86_FEATURE_ERMS
+       ALTERNATIVE_2 __stringify(CHECK_LEN), \
+                     __stringify(CHECK_LEN; MEMMOVE_BYTES), X86_FEATURE_ERMS, \
+                     __stringify(MEMMOVE_BYTES), X86_FEATURE_FSRM
 
        /*
         * movsq instruction have many startup latency
@@ -207,11 +209,6 @@ SYM_FUNC_START(__memmove)
        movb (%rsi), %r11b
        movb %r11b, (%rdi)
 13:
        RET
-
-.Lmemmove_erms:
-       movq %rdx, %rcx
-       rep movsb
-       RET
 SYM_FUNC_END(__memmove)
 EXPORT_SYMBOL(__memmove)