x86/sev: Avoid using __x86_return_thunk
Author:     Kim Phillips <kim.phillips@amd.com>
AuthorDate: Tue, 14 Jun 2022 21:15:44 +0000 (23:15 +0200)
Commit:     Borislav Petkov <bp@suse.de>
CommitDate: Mon, 27 Jun 2022 08:33:58 +0000 (10:33 +0200)
__enc_copy() encrypts the kernel after being relocated outside the
kernel image by sme_encrypt_execute(), and the RET macro's jmp offset
to __x86_return_thunk isn't amended prior to execution, so the
relocated code would jump to the wrong address. Use a bare 'ret'
(followed by an 'int3' straight-line-speculation trap) in this file
instead of the RET macro.
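
For illustration, a rough sketch of the RET expansion (not the literal
arch/x86/include/asm/linkage.h source): with return thunks enabled it
becomes a PC-relative jump to the thunk, otherwise a plain return,
optionally followed by an int3 straight-line-speculation trap:

	/* with return thunks enabled */
	RET  ->  jmp	__x86_return_thunk	/* rel32, PC-relative */

	/* without return thunks (SLS mitigation built in) */
	RET  ->  ret
		 int3				/* speculation trap */

The rel32 displacement in the first form is only correct at the address
the code was linked at; once __enc_copy() is copied outside the kernel
image and run there, that jump no longer lands on __x86_return_thunk.
A bare 'ret' encodes no target at all and works at any address.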

Signed-off-by: Kim Phillips <kim.phillips@amd.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Josh Poimboeuf <jpoimboe@kernel.org>
Signed-off-by: Borislav Petkov <bp@suse.de>
 arch/x86/mm/mem_encrypt_boot.S | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/arch/x86/mm/mem_encrypt_boot.S b/arch/x86/mm/mem_encrypt_boot.S
index 3d1dba0..d94dea4 100644
--- a/arch/x86/mm/mem_encrypt_boot.S
+++ b/arch/x86/mm/mem_encrypt_boot.S
@@ -65,7 +65,9 @@ SYM_FUNC_START(sme_encrypt_execute)
        movq    %rbp, %rsp              /* Restore original stack pointer */
        pop     %rbp
 
-       RET
+       /* Offset to __x86_return_thunk would be wrong here */
+       ret
+       int3
 SYM_FUNC_END(sme_encrypt_execute)
 
 SYM_FUNC_START(__enc_copy)
@@ -151,6 +153,8 @@ SYM_FUNC_START(__enc_copy)
        pop     %r12
        pop     %r15
 
-       RET
+       /* Offset to __x86_return_thunk would be wrong here */
+       ret
+       int3
 .L__enc_copy_end:
 SYM_FUNC_END(__enc_copy)
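
For context, an approximate sketch of the existing copy-and-call
pattern in sme_encrypt_execute() (a paraphrase, not a verbatim quote
of the file): the bytes between __enc_copy and .L__enc_copy_end are
copied into a workarea outside the kernel image and executed there:

	/* approximate paraphrase; actual register usage may differ */
	leaq	__enc_copy(%rip), %rsi			/* source: routine in the kernel image */
	movq	%rax, %rdi				/* destination: workarea */
	movq	$(.L__enc_copy_end - __enc_copy), %rcx	/* routine length */
	rep	movsb
	...
	call	*%rax					/* run the copy at its new address */

A 'jmp __x86_return_thunk' inside that copy would still carry the
build-time rel32 displacement, valid only at __enc_copy()'s original
address, hence the bare 'ret'/'int3' sequence above.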