x86: vdso: Expose sigreturn address on vdso to the kernel
Author: Gabriel Krisman Bertazi <krisman@collabora.com>
Fri, 27 Nov 2020 19:32:32 +0000 (14:32 -0500)
Committer: Thomas Gleixner <tglx@linutronix.de>
Wed, 2 Dec 2020 09:32:16 +0000 (10:32 +0100)
Syscall user redirection requires the signal trampoline code to not be
captured, in order to support returning with a locked selector while
avoiding recursion back into the signal handler.  For IA-32, which has
the trampoline in the vDSO, expose the entry points to the kernel, such
that it can avoid dispatching syscalls from that region to userspace.

Suggested-by: Andy Lutomirski <luto@kernel.org>
Signed-off-by: Gabriel Krisman Bertazi <krisman@collabora.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Kees Cook <keescook@chromium.org>
Reviewed-by: Andy Lutomirski <luto@kernel.org>
Acked-by: Andy Lutomirski <luto@kernel.org>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20201127193238.821364-2-krisman@collabora.com
arch/x86/entry/vdso/vdso2c.c
arch/x86/entry/vdso/vdso32/sigreturn.S
arch/x86/entry/vdso/vma.c
arch/x86/include/asm/elf.h
arch/x86/include/asm/vdso.h

index 7380908..2d0f3d8 100644 (file)
@@ -101,6 +101,8 @@ struct vdso_sym required_syms[] = {
        {"__kernel_sigreturn", true},
        {"__kernel_rt_sigreturn", true},
        {"int80_landing_pad", true},
+       {"vdso32_rt_sigreturn_landing_pad", true},
+       {"vdso32_sigreturn_landing_pad", true},
 };
 
 __attribute__((format(printf, 1, 2))) __attribute__((noreturn))
index c3233ee..1bd068f 100644 (file)
@@ -18,6 +18,7 @@ __kernel_sigreturn:
        movl $__NR_sigreturn, %eax
        SYSCALL_ENTER_KERNEL
 .LEND_sigreturn:
+SYM_INNER_LABEL(vdso32_sigreturn_landing_pad, SYM_L_GLOBAL)
        nop
        .size __kernel_sigreturn,.-.LSTART_sigreturn
 
@@ -29,6 +30,7 @@ __kernel_rt_sigreturn:
        movl $__NR_rt_sigreturn, %eax
        SYSCALL_ENTER_KERNEL
 .LEND_rt_sigreturn:
+SYM_INNER_LABEL(vdso32_rt_sigreturn_landing_pad, SYM_L_GLOBAL)
        nop
        .size __kernel_rt_sigreturn,.-.LSTART_rt_sigreturn
        .previous
index 50e5d3a..de60cd3 100644 (file)
@@ -436,6 +436,21 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 }
 #endif
 
+bool arch_syscall_is_vdso_sigreturn(struct pt_regs *regs)
+{
+#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
+       const struct vdso_image *image = current->mm->context.vdso_image;
+       unsigned long vdso = (unsigned long) current->mm->context.vdso;
+
+       if (in_ia32_syscall() && image == &vdso_image_32) {
+               if (regs->ip == vdso + image->sym_vdso32_sigreturn_landing_pad ||
+                   regs->ip == vdso + image->sym_vdso32_rt_sigreturn_landing_pad)
+                       return true;
+       }
+#endif
+       return false;
+}
+
 #ifdef CONFIG_X86_64
 static __init int vdso_setup(char *s)
 {
index 44a9b99..66bdfe8 100644 (file)
@@ -388,6 +388,8 @@ extern int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
        compat_arch_setup_additional_pages(bprm, interpreter,           \
                                           (ex->e_machine == EM_X86_64))
 
+extern bool arch_syscall_is_vdso_sigreturn(struct pt_regs *regs);
+
 /* Do not change the values. See get_align_mask() */
 enum align_flags {
        ALIGN_VA_32     = BIT(0),
index bbcdc7b..589f489 100644 (file)
@@ -27,6 +27,8 @@ struct vdso_image {
        long sym___kernel_rt_sigreturn;
        long sym___kernel_vsyscall;
        long sym_int80_landing_pad;
+       long sym_vdso32_sigreturn_landing_pad;
+       long sym_vdso32_rt_sigreturn_landing_pad;
 };
 
 #ifdef CONFIG_X86_64