mm/fault: convert remaining simple cases to lock_mm_and_find_vma()
author Linus Torvalds <torvalds@linux-foundation.org>
Sat, 24 Jun 2023 17:55:38 +0000 (10:55 -0700)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sat, 1 Jul 2023 11:16:25 +0000 (13:16 +0200)
commit a050ba1e7422f2cc60ff8bfde3f96d34d00cb585 upstream.

This does the simple pattern conversion of alpha, arc, csky, hexagon,
loongarch, nios2, sh, sparc32, and xtensa to the lock_mm_and_find_vma()
helper.  They all have the regular fault handling pattern without odd
special cases.

The remaining architectures all have something that keeps us from a
straightforward conversion: ia64 and parisc have stacks that can grow
both up as well as down (and ia64 has special address region checks).

And m68k, microblaze, openrisc, sparc64, and um end up having extra
rules about only expanding the stack down a limited amount below the
user space stack pointer.  That is something that x86 used to do too
(long long ago), and it probably could just be skipped, but it still
makes the conversion less than trivial.

Note that this conversion was done manually and with the exception of
alpha without any build testing, because I have a fairly limited cross-
building environment.  The cases are all simple, and I went through the
changes several times, but...

Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Samuel Mendoza-Jonas <samjonas@amazon.com>
Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
18 files changed:
arch/alpha/Kconfig
arch/alpha/mm/fault.c
arch/arc/Kconfig
arch/arc/mm/fault.c
arch/csky/Kconfig
arch/csky/mm/fault.c
arch/hexagon/Kconfig
arch/hexagon/mm/vm_fault.c
arch/loongarch/Kconfig
arch/loongarch/mm/fault.c
arch/nios2/Kconfig
arch/nios2/mm/fault.c
arch/sh/Kconfig
arch/sh/mm/fault.c
arch/sparc/Kconfig
arch/sparc/mm/fault_32.c
arch/xtensa/Kconfig
arch/xtensa/mm/fault.c

index 97fce7386b0028d8d5b86287c41e9d0760362e65..d95d82abdf295710babbf4f4092f2523af232214 100644 (file)
@@ -28,6 +28,7 @@ config ALPHA
        select GENERIC_SMP_IDLE_THREAD
        select HAVE_ARCH_AUDITSYSCALL
        select HAVE_MOD_ARCH_SPECIFIC
+       select LOCK_MM_AND_FIND_VMA
        select MODULES_USE_ELF_RELA
        select ODD_RT_SIGACTION
        select OLD_SIGSUSPEND
index ef427a6bdd1ab91be0445b0261d38bb49bb2be93..2b49aa94e4de3ab3f34f40d58295ed9db715eb4f 100644 (file)
@@ -119,20 +119,12 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
                flags |= FAULT_FLAG_USER;
        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 retry:
-       mmap_read_lock(mm);
-       vma = find_vma(mm, address);
+       vma = lock_mm_and_find_vma(mm, address, regs);
        if (!vma)
-               goto bad_area;
-       if (vma->vm_start <= address)
-               goto good_area;
-       if (!(vma->vm_flags & VM_GROWSDOWN))
-               goto bad_area;
-       if (expand_stack(vma, address))
-               goto bad_area;
+               goto bad_area_nosemaphore;
 
        /* Ok, we have a good vm_area for this memory access, so
           we can handle it.  */
- good_area:
        si_code = SEGV_ACCERR;
        if (cause < 0) {
                if (!(vma->vm_flags & VM_EXEC))
@@ -189,6 +181,7 @@ retry:
  bad_area:
        mmap_read_unlock(mm);
 
+ bad_area_nosemaphore:
        if (user_mode(regs))
                goto do_sigsegv;
 
index d9a13ccf89a3aa5d6a4fb5dc8158ba2e4dba4671..cb1074f74c3f1e6417aafcf84aa6300c70126b8a 100644 (file)
@@ -41,6 +41,7 @@ config ARC
        select HAVE_PERF_EVENTS
        select HAVE_SYSCALL_TRACEPOINTS
        select IRQ_DOMAIN
+       select LOCK_MM_AND_FIND_VMA
        select MODULES_USE_ELF_RELA
        select OF
        select OF_EARLY_FLATTREE
index 5ca59a482632a880f0a40d31c44d9b1159581b85..f59e722d147f91972ac8cf42b1f77d1a12cca80b 100644 (file)
@@ -113,15 +113,9 @@ void do_page_fault(unsigned long address, struct pt_regs *regs)
 
        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 retry:
-       mmap_read_lock(mm);
-
-       vma = find_vma(mm, address);
+       vma = lock_mm_and_find_vma(mm, address, regs);
        if (!vma)
-               goto bad_area;
-       if (unlikely(address < vma->vm_start)) {
-               if (!(vma->vm_flags & VM_GROWSDOWN) || expand_stack(vma, address))
-                       goto bad_area;
-       }
+               goto bad_area_nosemaphore;
 
        /*
         * vm_area is good, now check permissions for this memory access
@@ -161,6 +155,7 @@ retry:
 bad_area:
        mmap_read_unlock(mm);
 
+bad_area_nosemaphore:
        /*
         * Major/minor page fault accounting
         * (in case of retry we only land here once)
index adee6ab36862e6a1a2a92ff68bfd93f57d0fd14c..742009123fd5a1b81c9e4990c1dc93c312438761 100644 (file)
@@ -96,6 +96,7 @@ config CSKY
        select HAVE_RSEQ
        select HAVE_STACKPROTECTOR
        select HAVE_SYSCALL_TRACEPOINTS
+       select LOCK_MM_AND_FIND_VMA
        select MAY_HAVE_SPARSE_IRQ
        select MODULES_USE_ELF_RELA if MODULES
        select OF
index e15f736cca4b4a43fb14989e099051fbdbf0a082..ae9781b7d92ea5e65554d42d6d9d6ba2ac5838d6 100644 (file)
@@ -97,13 +97,12 @@ static inline void mm_fault_error(struct pt_regs *regs, unsigned long addr, vm_f
        BUG();
 }
 
-static inline void bad_area(struct pt_regs *regs, struct mm_struct *mm, int code, unsigned long addr)
+static inline void bad_area_nosemaphore(struct pt_regs *regs, struct mm_struct *mm, int code, unsigned long addr)
 {
        /*
         * Something tried to access memory that isn't in our memory map.
         * Fix it, but check if it's kernel or user first.
         */
-       mmap_read_unlock(mm);
        /* User mode accesses just cause a SIGSEGV */
        if (user_mode(regs)) {
                do_trap(regs, SIGSEGV, code, addr);
@@ -238,20 +237,9 @@ asmlinkage void do_page_fault(struct pt_regs *regs)
        if (is_write(regs))
                flags |= FAULT_FLAG_WRITE;
 retry:
-       mmap_read_lock(mm);
-       vma = find_vma(mm, addr);
+       vma = lock_mm_and_find_vma(mm, addr, regs);
        if (unlikely(!vma)) {
-               bad_area(regs, mm, code, addr);
-               return;
-       }
-       if (likely(vma->vm_start <= addr))
-               goto good_area;
-       if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
-               bad_area(regs, mm, code, addr);
-               return;
-       }
-       if (unlikely(expand_stack(vma, addr))) {
-               bad_area(regs, mm, code, addr);
+               bad_area_nosemaphore(regs, mm, code, addr);
                return;
        }
 
@@ -259,11 +247,11 @@ retry:
         * Ok, we have a good vm_area for this memory access, so
         * we can handle it.
         */
-good_area:
        code = SEGV_ACCERR;
 
        if (unlikely(access_error(regs, vma))) {
-               bad_area(regs, mm, code, addr);
+               mmap_read_unlock(mm);
+               bad_area_nosemaphore(regs, mm, code, addr);
                return;
        }
 
index 54eadf26517868f8cd575a1d5b777be700e35354..6726f4941015f353624a81838f6368c85508a0a7 100644 (file)
@@ -28,6 +28,7 @@ config HEXAGON
        select GENERIC_SMP_IDLE_THREAD
        select STACKTRACE_SUPPORT
        select GENERIC_CLOCKEVENTS_BROADCAST
+       select LOCK_MM_AND_FIND_VMA
        select MODULES_USE_ELF_RELA
        select GENERIC_CPU_DEVICES
        select ARCH_WANT_LD_ORPHAN_WARN
index f73c7cbfe32603c425269f80af6e767bd212ad68..583b087271667d49d0beecd75f8cf3e94be53062 100644 (file)
@@ -57,21 +57,10 @@ void do_page_fault(unsigned long address, long cause, struct pt_regs *regs)
 
        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 retry:
-       mmap_read_lock(mm);
-       vma = find_vma(mm, address);
-       if (!vma)
-               goto bad_area;
+       vma = lock_mm_and_find_vma(mm, address, regs);
+       if (unlikely(!vma))
+               goto bad_area_nosemaphore;
 
-       if (vma->vm_start <= address)
-               goto good_area;
-
-       if (!(vma->vm_flags & VM_GROWSDOWN))
-               goto bad_area;
-
-       if (expand_stack(vma, address))
-               goto bad_area;
-
-good_area:
        /* Address space is OK.  Now check access rights. */
        si_code = SEGV_ACCERR;
 
@@ -140,6 +129,7 @@ good_area:
 bad_area:
        mmap_read_unlock(mm);
 
+bad_area_nosemaphore:
        if (user_mode(regs)) {
                force_sig_fault(SIGSEGV, si_code, (void __user *)address);
                return;
index 903096bd87f8829273a0f7bba4134d3f4c468825..51d738ac12e55c8a8dfa16276b3ef6314bc42fbe 100644 (file)
@@ -107,6 +107,7 @@ config LOONGARCH
        select HAVE_VIRT_CPU_ACCOUNTING_GEN if !SMP
        select IRQ_FORCED_THREADING
        select IRQ_LOONGARCH_CPU
+       select LOCK_MM_AND_FIND_VMA
        select MMU_GATHER_MERGE_VMAS if MMU
        select MODULES_USE_ELF_RELA if MODULES
        select NEED_PER_CPU_EMBED_FIRST_CHUNK
index 1ccd53655cab097f02ed09a5c1bd566de38d01ec..b829ab911a17b137d01f2754fdff1a35262b5e92 100644 (file)
@@ -166,22 +166,18 @@ static void __kprobes __do_page_fault(struct pt_regs *regs,
 
        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 retry:
-       mmap_read_lock(mm);
-       vma = find_vma(mm, address);
-       if (!vma)
-               goto bad_area;
-       if (vma->vm_start <= address)
-               goto good_area;
-       if (!(vma->vm_flags & VM_GROWSDOWN))
-               goto bad_area;
-       if (!expand_stack(vma, address))
-               goto good_area;
+       vma = lock_mm_and_find_vma(mm, address, regs);
+       if (unlikely(!vma))
+               goto bad_area_nosemaphore;
+       goto good_area;
+
 /*
  * Something tried to access memory that isn't in our memory map..
  * Fix it, but check if it's kernel or user first..
  */
 bad_area:
        mmap_read_unlock(mm);
+bad_area_nosemaphore:
        do_sigsegv(regs, write, address, si_code);
        return;
 
index a582f72104f39229ca524cc2aa5f645f3bc1d5f3..1fb78865a459337b3e62443c06f5f9b1229ff21b 100644 (file)
@@ -16,6 +16,7 @@ config NIOS2
        select HAVE_ARCH_TRACEHOOK
        select HAVE_ARCH_KGDB
        select IRQ_DOMAIN
+       select LOCK_MM_AND_FIND_VMA
        select MODULES_USE_ELF_RELA
        select OF
        select OF_EARLY_FLATTREE
index edaca0a6c1c1ca5bc467237e4235bdcd9dba1748..71939fb28c2e71336802d667d3bbec731fa71da3 100644 (file)
@@ -86,27 +86,14 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long cause,
 
        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 
-       if (!mmap_read_trylock(mm)) {
-               if (!user_mode(regs) && !search_exception_tables(regs->ea))
-                       goto bad_area_nosemaphore;
 retry:
-               mmap_read_lock(mm);
-       }
-
-       vma = find_vma(mm, address);
+       vma = lock_mm_and_find_vma(mm, address, regs);
        if (!vma)
-               goto bad_area;
-       if (vma->vm_start <= address)
-               goto good_area;
-       if (!(vma->vm_flags & VM_GROWSDOWN))
-               goto bad_area;
-       if (expand_stack(vma, address))
-               goto bad_area;
+               goto bad_area_nosemaphore;
 /*
  * Ok, we have a good vm_area for this memory access, so
  * we can handle it..
  */
-good_area:
        code = SEGV_ACCERR;
 
        switch (cause) {
index 5f220e903e5abad8342e31125fbfd2a2f5c79811..8e4d1f757bcc937182527f33bc065fcad7607d2e 100644 (file)
@@ -56,6 +56,7 @@ config SUPERH
        select HAVE_STACKPROTECTOR
        select HAVE_SYSCALL_TRACEPOINTS
        select IRQ_FORCED_THREADING
+       select LOCK_MM_AND_FIND_VMA
        select MODULES_USE_ELF_RELA
        select NEED_SG_DMA_LENGTH
        select NO_DMA if !MMU && !DMA_COHERENT
index acd2f5e50bfcd08ccb71fc71a8f33a1c9cb84c06..06e6b49529245a58bce87aef5caaeb0e5051357d 100644 (file)
@@ -439,21 +439,9 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
        }
 
 retry:
-       mmap_read_lock(mm);
-
-       vma = find_vma(mm, address);
+       vma = lock_mm_and_find_vma(mm, address, regs);
        if (unlikely(!vma)) {
-               bad_area(regs, error_code, address);
-               return;
-       }
-       if (likely(vma->vm_start <= address))
-               goto good_area;
-       if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
-               bad_area(regs, error_code, address);
-               return;
-       }
-       if (unlikely(expand_stack(vma, address))) {
-               bad_area(regs, error_code, address);
+               bad_area_nosemaphore(regs, error_code, address);
                return;
        }
 
@@ -461,7 +449,6 @@ retry:
         * Ok, we have a good vm_area for this memory access, so
         * we can handle it..
         */
-good_area:
        if (unlikely(access_error(error_code, vma))) {
                bad_area_access_error(regs, error_code, address);
                return;
index 84437a4c65454ca0677c6d4271b55fa4a7ffbc3f..dbb1760cbe8c977e3647e7ab8aadd46c6689ab14 100644 (file)
@@ -56,6 +56,7 @@ config SPARC32
        select DMA_DIRECT_REMAP
        select GENERIC_ATOMIC64
        select HAVE_UID16
+       select LOCK_MM_AND_FIND_VMA
        select OLD_SIGACTION
        select ZONE_DMA
 
index 91259f291c54078541d59e39e89de7c7933c99ad..aef2aebe23799b9d5dc2866b5e045a7b7ba9532c 100644 (file)
@@ -143,28 +143,19 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
        if (pagefault_disabled() || !mm)
                goto no_context;
 
+       if (!from_user && address >= PAGE_OFFSET)
+               goto no_context;
+
        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 
 retry:
-       mmap_read_lock(mm);
-
-       if (!from_user && address >= PAGE_OFFSET)
-               goto bad_area;
-
-       vma = find_vma(mm, address);
+       vma = lock_mm_and_find_vma(mm, address, regs);
        if (!vma)
-               goto bad_area;
-       if (vma->vm_start <= address)
-               goto good_area;
-       if (!(vma->vm_flags & VM_GROWSDOWN))
-               goto bad_area;
-       if (expand_stack(vma, address))
-               goto bad_area;
+               goto bad_area_nosemaphore;
        /*
         * Ok, we have a good vm_area for this memory access, so
         * we can handle it..
         */
-good_area:
        code = SEGV_ACCERR;
        if (write) {
                if (!(vma->vm_flags & VM_WRITE))
@@ -318,17 +309,9 @@ static void force_user_fault(unsigned long address, int write)
 
        code = SEGV_MAPERR;
 
-       mmap_read_lock(mm);
-       vma = find_vma(mm, address);
+       vma = lock_mm_and_find_vma(mm, address, NULL);
        if (!vma)
-               goto bad_area;
-       if (vma->vm_start <= address)
-               goto good_area;
-       if (!(vma->vm_flags & VM_GROWSDOWN))
-               goto bad_area;
-       if (expand_stack(vma, address))
-               goto bad_area;
-good_area:
+               goto bad_area_nosemaphore;
        code = SEGV_ACCERR;
        if (write) {
                if (!(vma->vm_flags & VM_WRITE))
@@ -347,6 +330,7 @@ good_area:
        return;
 bad_area:
        mmap_read_unlock(mm);
+bad_area_nosemaphore:
        __do_fault_siginfo(code, SIGSEGV, tsk->thread.kregs, address);
        return;
 
index bcb0c5d2abc2fe78337eec71fab09d11d212898a..6d3c9257aa133fc0b44da472dd66d45c0d0025b6 100644 (file)
@@ -49,6 +49,7 @@ config XTENSA
        select HAVE_SYSCALL_TRACEPOINTS
        select HAVE_VIRT_CPU_ACCOUNTING_GEN
        select IRQ_DOMAIN
+       select LOCK_MM_AND_FIND_VMA
        select MODULES_USE_ELF_RELA
        select PERF_USE_VMALLOC
        select TRACE_IRQFLAGS_SUPPORT
index 8c781b05c0bdd9d55d62ff456c8150b94cf9d493..d89b193c779f12d34ce5e5a9f5e48e84116d6ba9 100644 (file)
@@ -130,23 +130,14 @@ void do_page_fault(struct pt_regs *regs)
        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 
 retry:
-       mmap_read_lock(mm);
-       vma = find_vma(mm, address);
-
+       vma = lock_mm_and_find_vma(mm, address, regs);
        if (!vma)
-               goto bad_area;
-       if (vma->vm_start <= address)
-               goto good_area;
-       if (!(vma->vm_flags & VM_GROWSDOWN))
-               goto bad_area;
-       if (expand_stack(vma, address))
-               goto bad_area;
+               goto bad_area_nosemaphore;
 
        /* Ok, we have a good vm_area for this memory access, so
         * we can handle it..
         */
 
-good_area:
        code = SEGV_ACCERR;
 
        if (is_write) {
@@ -205,6 +196,7 @@ good_area:
         */
 bad_area:
        mmap_read_unlock(mm);
+bad_area_nosemaphore:
        if (user_mode(regs)) {
                current->thread.bad_vaddr = address;
                current->thread.error_code = is_write;