x86/sev: Avoid using __x86_return_thunk
author Kim Phillips <kim.phillips@amd.com>
Tue, 14 Jun 2022 21:15:44 +0000 (23:15 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sat, 23 Jul 2022 10:54:00 +0000 (12:54 +0200)
commit 0ee9073000e8791f8b134a8ded31bcc767f7f232 upstream.

Use a plain 'ret' instruction in sme_encrypt_execute() and __enc_copy()
instead of the RET macro, which would expand to a jump to
__x86_return_thunk.  Specifically, __enc_copy() encrypts the kernel
after being relocated outside the kernel by sme_encrypt_execute(), and
the RET macro's jmp offset isn't amended prior to execution, so the
relative jump would no longer reach the return thunk from the
relocated code.
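
For context, with return thunks enabled the RET assembler macro does
not emit a bare return; it expands to a relative jump to the return
thunk.  A rough sketch, paraphrased from
arch/x86/include/asm/linkage.h (the exact Kconfig guards may differ
between trees):

	/* Sketch of RET with return thunks enabled */
	#ifdef CONFIG_RETHUNK
	#define RET	jmp __x86_return_thunk		/* rel32 jump into the kernel image */
	#else
	# ifdef CONFIG_SLS
	#define RET	ret; int3
	# else
	#define RET	ret
	# endif
	#endif

The rel32 displacement is computed against the instruction's link-time
address inside the kernel image, so it is meaningless once the code
runs from a relocated copy, whereas a plain near 'ret' is
position-independent and the following 'int3' keeps the
straight-line-speculation guard.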

Signed-off-by: Kim Phillips <kim.phillips@amd.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Josh Poimboeuf <jpoimboe@kernel.org>
Signed-off-by: Borislav Petkov <bp@suse.de>
Signed-off-by: Thadeu Lima de Souza Cascardo <cascardo@canonical.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
arch/x86/mm/mem_encrypt_boot.S

index 3d1dba0..d94dea4 100644
@@ -65,7 +65,9 @@ SYM_FUNC_START(sme_encrypt_execute)
        movq    %rbp, %rsp              /* Restore original stack pointer */
        pop     %rbp
 
-       RET
+       /* Offset to __x86_return_thunk would be wrong here */
+       ret
+       int3
 SYM_FUNC_END(sme_encrypt_execute)
 
 SYM_FUNC_START(__enc_copy)
@@ -151,6 +153,8 @@ SYM_FUNC_START(__enc_copy)
        pop     %r12
        pop     %r15
 
-       RET
+       /* Offset to __x86_return_thunk would be wrong here */
+       ret
+       int3
 .L__enc_copy_end:
 SYM_FUNC_END(__enc_copy)
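
For reference, a rough sketch of how sme_encrypt_execute() relocates
__enc_copy() before calling it (paraphrased from
arch/x86/mm/mem_encrypt_boot.S; the register choices shown here are
illustrative, not the exact upstream listing):

	/* Copy the encryption routine into the workarea */
	movq	%rax, %rdi				/* destination: workarea */
	leaq	__enc_copy(%rip), %rsi			/* source: encryption routine */
	movq	$(.L__enc_copy_end - __enc_copy), %rcx	/* routine length */
	rep	movsb

	/* ... set up the mapping arguments ... */

	callq	*%rax			/* run __enc_copy() from the workarea */

Because __enc_copy() executes from the workarea rather than from its
link-time address, a RET that expands to a rel32 jump to
__x86_return_thunk would land at an unrelated address; the plain
'ret'/'int3' pair keeps the relocated routine self-contained.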