From 3a608cfee97e99b3fff9ffe62246a098042e725d Mon Sep 17 00:00:00 2001
From: "Fabio M. De Francesco"
Date: Wed, 3 Aug 2022 20:28:56 +0200
Subject: [PATCH] exec: Replace kmap{,_atomic}() with kmap_local_page()
MIME-Version: 1.0
Content-Type: text/plain; charset=utf8
Content-Transfer-Encoding: 8bit

The use of kmap() and kmap_atomic() is being deprecated in favor of
kmap_local_page().

There are two main problems with kmap(): (1) it comes with an overhead,
as the mapping space is restricted and protected by a global lock for
synchronization, and (2) it requires a global TLB invalidation when the
kmap’s pool wraps, and it might block until a slot becomes available
when the mapping space is fully utilized.

With kmap_local_page() the mappings are per thread, CPU local, can take
page faults, and can be called from any context (including interrupts).
It is faster than kmap() in kernels with HIGHMEM enabled. Furthermore,
tasks can be preempted and, when they are scheduled to run again, the
kernel virtual addresses are restored and still valid.

Since the use of kmap_local_page() in exec.c is safe, it should be
preferred everywhere in exec.c. As noted, since kmap_local_page() can
also be called from atomic context, and since remove_arg_zero() doesn't
(and shouldn't ever) rely on an implicit preempt_disable(), this
function can also safely replace kmap_atomic().

Therefore, replace kmap() and kmap_atomic() with kmap_local_page() in
fs/exec.c.

Tested with xfstests on a QEMU/KVM x86_32 VM, 6GB RAM, booting a kernel
with HIGHMEM64GB enabled.

Cc: Eric W. Biederman
Suggested-by: Ira Weiny
Reviewed-by: Ira Weiny
Signed-off-by: Fabio M. De Francesco
Signed-off-by: Kees Cook
Link: https://lore.kernel.org/r/20220803182856.28246-1-fmdefrancesco@gmail.com
---
 fs/exec.c | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/fs/exec.c b/fs/exec.c
index 7e842d1..ba9eca9 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -584,11 +584,11 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
 
 			if (kmapped_page) {
 				flush_dcache_page(kmapped_page);
-				kunmap(kmapped_page);
+				kunmap_local(kaddr);
 				put_arg_page(kmapped_page);
 			}
 			kmapped_page = page;
-			kaddr = kmap(kmapped_page);
+			kaddr = kmap_local_page(kmapped_page);
 			kpos = pos & PAGE_MASK;
 			flush_arg_page(bprm, kpos, kmapped_page);
 		}
@@ -602,7 +602,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
 out:
 	if (kmapped_page) {
 		flush_dcache_page(kmapped_page);
-		kunmap(kmapped_page);
+		kunmap_local(kaddr);
 		put_arg_page(kmapped_page);
 	}
 	return ret;
@@ -880,11 +880,11 @@ int transfer_args_to_stack(struct linux_binprm *bprm,
 
 	for (index = MAX_ARG_PAGES - 1; index >= stop; index--) {
 		unsigned int offset = index == stop ? bprm->p & ~PAGE_MASK : 0;
-		char *src = kmap(bprm->page[index]) + offset;
+		char *src = kmap_local_page(bprm->page[index]) + offset;
 		sp -= PAGE_SIZE - offset;
 		if (copy_to_user((void *) sp, src, PAGE_SIZE - offset) != 0)
 			ret = -EFAULT;
-		kunmap(bprm->page[index]);
+		kunmap_local(src);
 		if (ret)
 			goto out;
 	}
@@ -1683,13 +1683,13 @@ int remove_arg_zero(struct linux_binprm *bprm)
 			ret = -EFAULT;
 			goto out;
 		}
-		kaddr = kmap_atomic(page);
+		kaddr = kmap_local_page(page);
 
 		for (; offset < PAGE_SIZE && kaddr[offset];
 				offset++, bprm->p++)
 			;
 
-		kunmap_atomic(kaddr);
+		kunmap_local(kaddr);
 		put_arg_page(page);
 	} while (offset == PAGE_SIZE);
 
-- 
2.7.4
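
For reference, a minimal sketch (not part of the patch) of the mapping
pattern the commit message describes; the helper and its name are
hypothetical, only kmap_local_page() and kunmap_local() are the real APIs:

	#include <linux/highmem.h>
	#include <linux/string.h>

	/* Hypothetical helper: copy 'len' bytes out of a (possibly HIGHMEM) page. */
	static void copy_from_page_local(struct page *page, size_t offset,
					 void *dst, size_t len)
	{
		/* Per-thread, CPU-local mapping; may take page faults. */
		char *kaddr = kmap_local_page(page);

		memcpy(dst, kaddr + offset, len);

		/* Unmap using the address returned by kmap_local_page(). */
		kunmap_local(kaddr);
	}

Because local mappings are per-thread and stack-like, nested mappings must
be released in reverse order, which is why kunmap_local() takes the address
returned by kmap_local_page() rather than the struct page.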