From 1255a7341bee6cd96df056ed0c13e44b59023687 Mon Sep 17 00:00:00 2001
From: Vincenzo Frascino
Date: Mon, 15 Apr 2019 10:49:36 +0100
Subject: [PATCH] arm64: compat: Refactor aarch32_alloc_vdso_pages()

aarch32_alloc_vdso_pages() needs to be refactored to make it easier
to disable kuser helpers.

Divide the function into aarch32_alloc_kuser_vdso_page() and
aarch32_alloc_sigreturn_vdso_page().

Cc: Catalin Marinas
Cc: Will Deacon
Signed-off-by: Vincenzo Frascino
Reviewed-by: Catalin Marinas
[will: Inlined sigpage allocation to simplify error paths]
Signed-off-by: Will Deacon
---
 arch/arm64/kernel/vdso.c | 52 ++++++++++++++++++++++++------------------------
 1 file changed, 26 insertions(+), 26 deletions(-)

diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index 6bb7038..41f4d75 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -68,43 +68,43 @@ static const struct vm_special_mapping aarch32_vdso_spec[C_PAGES] = {
 	},
 };
 
-static int __init aarch32_alloc_vdso_pages(void)
+static int aarch32_alloc_kuser_vdso_page(void)
 {
 	extern char __kuser_helper_start[], __kuser_helper_end[];
-	extern char __aarch32_sigret_code_start[], __aarch32_sigret_code_end[];
-
 	int kuser_sz = __kuser_helper_end - __kuser_helper_start;
-	int sigret_sz = __aarch32_sigret_code_end - __aarch32_sigret_code_start;
-	unsigned long vdso_pages[C_PAGES];
-
-	vdso_pages[C_VECTORS] = get_zeroed_page(GFP_ATOMIC);
-	if (!vdso_pages[C_VECTORS])
-		return -ENOMEM;
+	unsigned long vdso_page;
 
-	vdso_pages[C_SIGPAGE] = get_zeroed_page(GFP_ATOMIC);
-	if (!vdso_pages[C_SIGPAGE]) {
-		free_page(vdso_pages[C_VECTORS]);
+	vdso_page = get_zeroed_page(GFP_ATOMIC);
+	if (!vdso_page)
 		return -ENOMEM;
-	}
 
-	/* kuser helpers */
-	memcpy((void *)(vdso_pages[C_VECTORS] + 0x1000 - kuser_sz),
-	       __kuser_helper_start,
+	memcpy((void *)(vdso_page + 0x1000 - kuser_sz), __kuser_helper_start,
 	       kuser_sz);
+	aarch32_vdso_pages[C_VECTORS] = virt_to_page(vdso_page);
+	flush_dcache_page(aarch32_vdso_pages[C_VECTORS]);
+	return 0;
+}
 
-	/* sigreturn code */
-	memcpy((void *)vdso_pages[C_SIGPAGE], __aarch32_sigret_code_start,
-	       sigret_sz);
+static int __init aarch32_alloc_vdso_pages(void)
+{
+	extern char __aarch32_sigret_code_start[], __aarch32_sigret_code_end[];
+	int sigret_sz = __aarch32_sigret_code_end - __aarch32_sigret_code_start;
+	unsigned long sigpage;
+	int ret;
 
-	flush_icache_range(vdso_pages[C_VECTORS],
-			   vdso_pages[C_VECTORS] + PAGE_SIZE);
-	flush_icache_range(vdso_pages[C_SIGPAGE],
-			   vdso_pages[C_SIGPAGE] + PAGE_SIZE);
+	sigpage = get_zeroed_page(GFP_ATOMIC);
+	if (!sigpage)
+		return -ENOMEM;
 
-	aarch32_vdso_pages[C_VECTORS] = virt_to_page(vdso_pages[C_VECTORS]);
-	aarch32_vdso_pages[C_SIGPAGE] = virt_to_page(vdso_pages[C_SIGPAGE]);
+	memcpy((void *)sigpage, __aarch32_sigret_code_start, sigret_sz);
+	aarch32_vdso_pages[C_SIGPAGE] = virt_to_page(sigpage);
+	flush_dcache_page(aarch32_vdso_pages[C_SIGPAGE]);
 
-	return 0;
+	ret = aarch32_alloc_kuser_vdso_page();
+	if (ret)
+		free_page(sigpage);
+
+	return ret;
 }
 arch_initcall(aarch32_alloc_vdso_pages);
 
-- 
2.7.4
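
Not part of the patch above, but it shows where the split is headed: once the
kuser helper page has its own allocation function, a later change can compile
it out when the helpers are disabled. A minimal sketch, assuming a
CONFIG_KUSER_HELPERS option along the lines of the follow-up series (the
option name and placement here are illustrative only):

	#ifndef CONFIG_KUSER_HELPERS
	/* Kuser helpers disabled: no vectors page to allocate or map. */
	static int aarch32_alloc_kuser_vdso_page(void)
	{
		return 0;
	}
	#endif

With such a stub, aarch32_alloc_vdso_pages() keeps allocating only the
sigreturn page; its error path (free_page(sigpage) when the kuser allocation
fails) is unaffected because the stub always returns 0.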