arm64: compat: Allow 32-bit vdso and sigpage to co-exist
author		Will Deacon <will@kernel.org>
		Mon, 22 Jun 2020 11:35:41 +0000 (12:35 +0100)
committer	Will Deacon <will@kernel.org>
		Tue, 23 Jun 2020 13:47:03 +0000 (14:47 +0100)
In preparation for removing the signal trampoline from the compat vDSO,
allow the sigpage and the compat vDSO to co-exist.

For the moment the signal trampoline in the vDSO will still be used when
the compat vDSO is built. Subsequent patches will switch to using the
sigpage consistently.
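
As a reference for the signal32.c hunk below, here is a minimal sketch of
the trampoline-selection arithmetic once the return address is taken from
the sigpage. sigpage_retcode() and its parameters are illustrative names
only; the real logic lives in compat_setup_return():

	#include <stdbool.h>

	/*
	 * Illustrative only: "sigpage" stands in for
	 * current->mm->context.sigpage.
	 */
	static unsigned long sigpage_retcode(unsigned long sigpage, bool rt,
					     bool thumb)
	{
		unsigned int idx = thumb << 1;	/* Thumb trampolines follow the ARM ones */

		if (rt)
			idx += 3;		/* rt_sigreturn variants */

		/* Each slot is a 32-bit word; bit 0 requests Thumb state on return. */
		return sigpage + (idx << 2) + thumb;
	}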

Acked-by: Dave Martin <Dave.Martin@arm.com>
Reviewed-by: Vincenzo Frascino <vincenzo.frascino@arm.com>
Reviewed-by: Ard Biesheuvel <ardb@kernel.org>
Reviewed-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Will Deacon <will@kernel.org>
arch/arm64/include/asm/mmu.h
arch/arm64/kernel/Makefile
arch/arm64/kernel/signal32.c
arch/arm64/kernel/vdso.c

index 68140fdd89d6b9387c05e6d5548746670bce0c4b..8444df0001813ccb681074c3f107019ffb391215 100644 (file)
@@ -19,6 +19,9 @@
 
 typedef struct {
        atomic64_t      id;
+#ifdef CONFIG_COMPAT
+       void            *sigpage;
+#endif
        void            *vdso;
        unsigned long   flags;
 } mm_context_t;
index 151f28521f1ece843ca9ac74bf92811354da090f..a561cbb91d4dc5f5f91a367e96d5d89b663454e6 100644 (file)
@@ -29,9 +29,7 @@ $(obj)/%.stub.o: $(obj)/%.o FORCE
 
 obj-$(CONFIG_COMPAT)                   += sys32.o signal32.o                   \
                                           sys_compat.o
-ifneq ($(CONFIG_COMPAT_VDSO), y)
 obj-$(CONFIG_COMPAT)                   += sigreturn32.o
-endif
 obj-$(CONFIG_KUSER_HELPERS)            += kuser32.o
 obj-$(CONFIG_FUNCTION_TRACER)          += ftrace.o entry-ftrace.o
 obj-$(CONFIG_MODULES)                  += module.o
index 82feca6f70521c4f699a75badc92f535201d7671..0aa0b33744decbec27467a969e18b457df433c7c 100644 (file)
@@ -371,7 +371,7 @@ static void compat_setup_return(struct pt_regs *regs, struct k_sigaction *ka,
                if (ka->sa.sa_flags & SA_SIGINFO)
                        idx += 3;
 
-               retcode = (unsigned long)current->mm->context.vdso +
+               retcode = (unsigned long)current->mm->context.sigpage +
                          (idx << 2) + thumb;
 #endif
        }
index 4e016574bd914e2d5fd8dd99b70862845ea38e33..e546df0efefb496695408af64abe2894118d85e9 100644 (file)
@@ -191,15 +191,12 @@ enum aarch32_map {
 #ifdef CONFIG_COMPAT_VDSO
        AA32_MAP_VVAR,
        AA32_MAP_VDSO,
-#else
-       AA32_MAP_SIGPAGE
 #endif
+       AA32_MAP_SIGPAGE
 };
 
 static struct page *aarch32_vectors_page __ro_after_init;
-#ifndef CONFIG_COMPAT_VDSO
 static struct page *aarch32_sig_page __ro_after_init;
-#endif
 
 static struct vm_special_mapping aarch32_vdso_maps[] = {
        [AA32_MAP_VECTORS] = {
@@ -214,12 +211,11 @@ static struct vm_special_mapping aarch32_vdso_maps[] = {
                .name = "[vdso]",
                .mremap = aarch32_vdso_mremap,
        },
-#else
+#endif /* CONFIG_COMPAT_VDSO */
        [AA32_MAP_SIGPAGE] = {
                .name   = "[sigpage]", /* ABI */
                .pages  = &aarch32_sig_page,
        },
-#endif /* CONFIG_COMPAT_VDSO */
 };
 
 static int aarch32_alloc_kuser_vdso_page(void)
@@ -242,27 +238,11 @@ static int aarch32_alloc_kuser_vdso_page(void)
        return 0;
 }
 
-#ifdef CONFIG_COMPAT_VDSO
-static int __aarch32_alloc_vdso_pages(void)
-{
-       int ret;
-
-       vdso_info[VDSO_ABI_AA32].dm = &aarch32_vdso_maps[AA32_MAP_VVAR];
-       vdso_info[VDSO_ABI_AA32].cm = &aarch32_vdso_maps[AA32_MAP_VDSO];
-
-       ret = __vdso_init(VDSO_ABI_AA32);
-       if (ret)
-               return ret;
-
-       return aarch32_alloc_kuser_vdso_page();
-}
-#else
-static int __aarch32_alloc_vdso_pages(void)
+static int aarch32_alloc_sigpage(void)
 {
        extern char __aarch32_sigret_code_start[], __aarch32_sigret_code_end[];
        int sigret_sz = __aarch32_sigret_code_end - __aarch32_sigret_code_start;
        unsigned long sigpage;
-       int ret;
 
        sigpage = get_zeroed_page(GFP_ATOMIC);
        if (!sigpage)
@@ -271,18 +251,34 @@ static int __aarch32_alloc_vdso_pages(void)
        memcpy((void *)sigpage, __aarch32_sigret_code_start, sigret_sz);
        aarch32_sig_page = virt_to_page(sigpage);
        flush_dcache_page(aarch32_sig_page);
+       return 0;
+}
 
-       ret = aarch32_alloc_kuser_vdso_page();
-       if (ret)
-               free_page(sigpage);
+#ifdef CONFIG_COMPAT_VDSO
+static int __aarch32_alloc_vdso_pages(void)
+{
+       vdso_info[VDSO_ABI_AA32].dm = &aarch32_vdso_maps[AA32_MAP_VVAR];
+       vdso_info[VDSO_ABI_AA32].cm = &aarch32_vdso_maps[AA32_MAP_VDSO];
 
-       return ret;
+       return __vdso_init(VDSO_ABI_AA32);
 }
 #endif /* CONFIG_COMPAT_VDSO */
 
 static int __init aarch32_alloc_vdso_pages(void)
 {
-       return __aarch32_alloc_vdso_pages();
+       int ret;
+
+#ifdef CONFIG_COMPAT_VDSO
+       ret = __aarch32_alloc_vdso_pages();
+       if (ret)
+               return ret;
+#endif
+
+       ret = aarch32_alloc_sigpage();
+       if (ret)
+               return ret;
+
+       return aarch32_alloc_kuser_vdso_page();
 }
 arch_initcall(aarch32_alloc_vdso_pages);
 
@@ -305,7 +301,6 @@ static int aarch32_kuser_helpers_setup(struct mm_struct *mm)
        return PTR_ERR_OR_ZERO(ret);
 }
 
-#ifndef CONFIG_COMPAT_VDSO
 static int aarch32_sigreturn_setup(struct mm_struct *mm)
 {
        unsigned long addr;
@@ -328,12 +323,11 @@ static int aarch32_sigreturn_setup(struct mm_struct *mm)
        if (IS_ERR(ret))
                goto out;
 
-       mm->context.vdso = (void *)addr;
+       mm->context.sigpage = (void *)addr;
 
 out:
        return PTR_ERR_OR_ZERO(ret);
 }
-#endif /* !CONFIG_COMPAT_VDSO */
 
 int aarch32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 {
@@ -352,10 +346,11 @@ int aarch32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
                                       mm,
                                       bprm,
                                       uses_interp);
-#else
-       ret = aarch32_sigreturn_setup(mm);
+       if (ret)
+               goto out;
 #endif /* CONFIG_COMPAT_VDSO */
 
+       ret = aarch32_sigreturn_setup(mm);
 out:
        mmap_write_unlock(mm);
        return ret;
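
For completeness, a condensed sketch of the resulting setup path once this
patch is applied (mmap locking and the error-path label elided; this is a
paraphrase of the kernel code above, not a literal copy): the kuser helpers
page is mapped first, the compat vDSO is mapped only when CONFIG_COMPAT_VDSO
is enabled, and the sigpage is now mapped in every configuration.

	static int aarch32_setup_additional_pages_sketch(struct linux_binprm *bprm,
							 int uses_interp)
	{
		struct mm_struct *mm = current->mm;
		int ret;

		ret = aarch32_kuser_helpers_setup(mm);	/* kuser helpers "[vectors]" page */
		if (ret)
			return ret;

	#ifdef CONFIG_COMPAT_VDSO
		ret = __setup_additional_pages(VDSO_ABI_AA32, mm, bprm, uses_interp);
		if (ret)
			return ret;
	#endif

		return aarch32_sigreturn_setup(mm);	/* "[sigpage]" mapping */
	}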