mm/fault: convert remaining simple cases to lock_mm_and_find_vma()
author Linus Torvalds <torvalds@linux-foundation.org>
Sat, 24 Jun 2023 17:55:38 +0000 (10:55 -0700)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sat, 1 Jul 2023 11:16:25 +0000 (13:16 +0200)
commit a050ba1e7422f2cc60ff8bfde3f96d34d00cb585 upstream.

This does the simple pattern conversion of alpha, arc, csky, hexagon,
loongarch, nios2, sh, sparc32, and xtensa to the lock_mm_and_find_vma()
helper.  They all have the regular fault handling pattern without odd
special cases.

The remaining architectures all have something that keeps us from a
straightforward conversion: ia64 and parisc have stacks that can grow
both up as well as down (and ia64 has special address region checks).

And m68k, microblaze, openrisc, sparc64, and um end up having extra
rules about only expanding the stack down a limited amount below the
user space stack pointer.  That is something that x86 used to do too
(long long ago), and it probably could just be skipped, but it still
makes the conversion less than trivial.

Note that this conversion was done manually and with the exception of
alpha without any build testing, because I have a fairly limited cross-
building environment.  The cases are all simple, and I went through the
changes several times, but...

Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Samuel Mendoza-Jonas <samjonas@amazon.com>
Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
18 files changed:
arch/alpha/Kconfig
arch/alpha/mm/fault.c
arch/arc/Kconfig
arch/arc/mm/fault.c
arch/csky/Kconfig
arch/csky/mm/fault.c
arch/hexagon/Kconfig
arch/hexagon/mm/vm_fault.c
arch/loongarch/Kconfig
arch/loongarch/mm/fault.c
arch/nios2/Kconfig
arch/nios2/mm/fault.c
arch/sh/Kconfig
arch/sh/mm/fault.c
arch/sparc/Kconfig
arch/sparc/mm/fault_32.c
arch/xtensa/Kconfig
arch/xtensa/mm/fault.c

index 97fce73..d95d82a 100644 (file)
@@ -28,6 +28,7 @@ config ALPHA
        select GENERIC_SMP_IDLE_THREAD
        select HAVE_ARCH_AUDITSYSCALL
        select HAVE_MOD_ARCH_SPECIFIC
+       select LOCK_MM_AND_FIND_VMA
        select MODULES_USE_ELF_RELA
        select ODD_RT_SIGACTION
        select OLD_SIGSUSPEND
index ef427a6..2b49aa9 100644 (file)
@@ -119,20 +119,12 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
                flags |= FAULT_FLAG_USER;
        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 retry:
-       mmap_read_lock(mm);
-       vma = find_vma(mm, address);
+       vma = lock_mm_and_find_vma(mm, address, regs);
        if (!vma)
-               goto bad_area;
-       if (vma->vm_start <= address)
-               goto good_area;
-       if (!(vma->vm_flags & VM_GROWSDOWN))
-               goto bad_area;
-       if (expand_stack(vma, address))
-               goto bad_area;
+               goto bad_area_nosemaphore;
 
        /* Ok, we have a good vm_area for this memory access, so
           we can handle it.  */
- good_area:
        si_code = SEGV_ACCERR;
        if (cause < 0) {
                if (!(vma->vm_flags & VM_EXEC))
@@ -189,6 +181,7 @@ retry:
  bad_area:
        mmap_read_unlock(mm);
 
+ bad_area_nosemaphore:
        if (user_mode(regs))
                goto do_sigsegv;
 
index d9a13cc..cb1074f 100644 (file)
@@ -41,6 +41,7 @@ config ARC
        select HAVE_PERF_EVENTS
        select HAVE_SYSCALL_TRACEPOINTS
        select IRQ_DOMAIN
+       select LOCK_MM_AND_FIND_VMA
        select MODULES_USE_ELF_RELA
        select OF
        select OF_EARLY_FLATTREE
index 5ca59a4..f59e722 100644 (file)
@@ -113,15 +113,9 @@ void do_page_fault(unsigned long address, struct pt_regs *regs)
 
        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 retry:
-       mmap_read_lock(mm);
-
-       vma = find_vma(mm, address);
+       vma = lock_mm_and_find_vma(mm, address, regs);
        if (!vma)
-               goto bad_area;
-       if (unlikely(address < vma->vm_start)) {
-               if (!(vma->vm_flags & VM_GROWSDOWN) || expand_stack(vma, address))
-                       goto bad_area;
-       }
+               goto bad_area_nosemaphore;
 
        /*
         * vm_area is good, now check permissions for this memory access
@@ -161,6 +155,7 @@ retry:
 bad_area:
        mmap_read_unlock(mm);
 
+bad_area_nosemaphore:
        /*
         * Major/minor page fault accounting
         * (in case of retry we only land here once)
index adee6ab..7420091 100644 (file)
@@ -96,6 +96,7 @@ config CSKY
        select HAVE_RSEQ
        select HAVE_STACKPROTECTOR
        select HAVE_SYSCALL_TRACEPOINTS
+       select LOCK_MM_AND_FIND_VMA
        select MAY_HAVE_SPARSE_IRQ
        select MODULES_USE_ELF_RELA if MODULES
        select OF
index e15f736..ae9781b 100644 (file)
@@ -97,13 +97,12 @@ static inline void mm_fault_error(struct pt_regs *regs, unsigned long addr, vm_f
        BUG();
 }
 
-static inline void bad_area(struct pt_regs *regs, struct mm_struct *mm, int code, unsigned long addr)
+static inline void bad_area_nosemaphore(struct pt_regs *regs, struct mm_struct *mm, int code, unsigned long addr)
 {
        /*
         * Something tried to access memory that isn't in our memory map.
         * Fix it, but check if it's kernel or user first.
         */
-       mmap_read_unlock(mm);
        /* User mode accesses just cause a SIGSEGV */
        if (user_mode(regs)) {
                do_trap(regs, SIGSEGV, code, addr);
@@ -238,20 +237,9 @@ asmlinkage void do_page_fault(struct pt_regs *regs)
        if (is_write(regs))
                flags |= FAULT_FLAG_WRITE;
 retry:
-       mmap_read_lock(mm);
-       vma = find_vma(mm, addr);
+       vma = lock_mm_and_find_vma(mm, addr, regs);
        if (unlikely(!vma)) {
-               bad_area(regs, mm, code, addr);
-               return;
-       }
-       if (likely(vma->vm_start <= addr))
-               goto good_area;
-       if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
-               bad_area(regs, mm, code, addr);
-               return;
-       }
-       if (unlikely(expand_stack(vma, addr))) {
-               bad_area(regs, mm, code, addr);
+               bad_area_nosemaphore(regs, mm, code, addr);
                return;
        }
 
@@ -259,11 +247,11 @@ retry:
         * Ok, we have a good vm_area for this memory access, so
         * we can handle it.
         */
-good_area:
        code = SEGV_ACCERR;
 
        if (unlikely(access_error(regs, vma))) {
-               bad_area(regs, mm, code, addr);
+               mmap_read_unlock(mm);
+               bad_area_nosemaphore(regs, mm, code, addr);
                return;
        }
 
index 54eadf2..6726f49 100644 (file)
@@ -28,6 +28,7 @@ config HEXAGON
        select GENERIC_SMP_IDLE_THREAD
        select STACKTRACE_SUPPORT
        select GENERIC_CLOCKEVENTS_BROADCAST
+       select LOCK_MM_AND_FIND_VMA
        select MODULES_USE_ELF_RELA
        select GENERIC_CPU_DEVICES
        select ARCH_WANT_LD_ORPHAN_WARN
index f73c7cb..583b087 100644 (file)
@@ -57,21 +57,10 @@ void do_page_fault(unsigned long address, long cause, struct pt_regs *regs)
 
        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 retry:
-       mmap_read_lock(mm);
-       vma = find_vma(mm, address);
-       if (!vma)
-               goto bad_area;
+       vma = lock_mm_and_find_vma(mm, address, regs);
+       if (unlikely(!vma))
+               goto bad_area_nosemaphore;
 
-       if (vma->vm_start <= address)
-               goto good_area;
-
-       if (!(vma->vm_flags & VM_GROWSDOWN))
-               goto bad_area;
-
-       if (expand_stack(vma, address))
-               goto bad_area;
-
-good_area:
        /* Address space is OK.  Now check access rights. */
        si_code = SEGV_ACCERR;
 
@@ -140,6 +129,7 @@ good_area:
 bad_area:
        mmap_read_unlock(mm);
 
+bad_area_nosemaphore:
        if (user_mode(regs)) {
                force_sig_fault(SIGSEGV, si_code, (void __user *)address);
                return;
index 903096b..51d738a 100644 (file)
@@ -107,6 +107,7 @@ config LOONGARCH
        select HAVE_VIRT_CPU_ACCOUNTING_GEN if !SMP
        select IRQ_FORCED_THREADING
        select IRQ_LOONGARCH_CPU
+       select LOCK_MM_AND_FIND_VMA
        select MMU_GATHER_MERGE_VMAS if MMU
        select MODULES_USE_ELF_RELA if MODULES
        select NEED_PER_CPU_EMBED_FIRST_CHUNK
index 1ccd536..b829ab9 100644 (file)
@@ -166,22 +166,18 @@ static void __kprobes __do_page_fault(struct pt_regs *regs,
 
        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 retry:
-       mmap_read_lock(mm);
-       vma = find_vma(mm, address);
-       if (!vma)
-               goto bad_area;
-       if (vma->vm_start <= address)
-               goto good_area;
-       if (!(vma->vm_flags & VM_GROWSDOWN))
-               goto bad_area;
-       if (!expand_stack(vma, address))
-               goto good_area;
+       vma = lock_mm_and_find_vma(mm, address, regs);
+       if (unlikely(!vma))
+               goto bad_area_nosemaphore;
+       goto good_area;
+
 /*
  * Something tried to access memory that isn't in our memory map..
  * Fix it, but check if it's kernel or user first..
  */
 bad_area:
        mmap_read_unlock(mm);
+bad_area_nosemaphore:
        do_sigsegv(regs, write, address, si_code);
        return;
 
index a582f72..1fb7886 100644 (file)
@@ -16,6 +16,7 @@ config NIOS2
        select HAVE_ARCH_TRACEHOOK
        select HAVE_ARCH_KGDB
        select IRQ_DOMAIN
+       select LOCK_MM_AND_FIND_VMA
        select MODULES_USE_ELF_RELA
        select OF
        select OF_EARLY_FLATTREE
index edaca0a..71939fb 100644 (file)
@@ -86,27 +86,14 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long cause,
 
        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 
-       if (!mmap_read_trylock(mm)) {
-               if (!user_mode(regs) && !search_exception_tables(regs->ea))
-                       goto bad_area_nosemaphore;
 retry:
-               mmap_read_lock(mm);
-       }
-
-       vma = find_vma(mm, address);
+       vma = lock_mm_and_find_vma(mm, address, regs);
        if (!vma)
-               goto bad_area;
-       if (vma->vm_start <= address)
-               goto good_area;
-       if (!(vma->vm_flags & VM_GROWSDOWN))
-               goto bad_area;
-       if (expand_stack(vma, address))
-               goto bad_area;
+               goto bad_area_nosemaphore;
 /*
  * Ok, we have a good vm_area for this memory access, so
  * we can handle it..
  */
-good_area:
        code = SEGV_ACCERR;
 
        switch (cause) {
index 5f220e9..8e4d1f7 100644 (file)
@@ -56,6 +56,7 @@ config SUPERH
        select HAVE_STACKPROTECTOR
        select HAVE_SYSCALL_TRACEPOINTS
        select IRQ_FORCED_THREADING
+       select LOCK_MM_AND_FIND_VMA
        select MODULES_USE_ELF_RELA
        select NEED_SG_DMA_LENGTH
        select NO_DMA if !MMU && !DMA_COHERENT
index acd2f5e..06e6b49 100644 (file)
@@ -439,21 +439,9 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
        }
 
 retry:
-       mmap_read_lock(mm);
-
-       vma = find_vma(mm, address);
+       vma = lock_mm_and_find_vma(mm, address, regs);
        if (unlikely(!vma)) {
-               bad_area(regs, error_code, address);
-               return;
-       }
-       if (likely(vma->vm_start <= address))
-               goto good_area;
-       if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
-               bad_area(regs, error_code, address);
-               return;
-       }
-       if (unlikely(expand_stack(vma, address))) {
-               bad_area(regs, error_code, address);
+               bad_area_nosemaphore(regs, error_code, address);
                return;
        }
 
@@ -461,7 +449,6 @@ retry:
         * Ok, we have a good vm_area for this memory access, so
         * we can handle it..
         */
-good_area:
        if (unlikely(access_error(error_code, vma))) {
                bad_area_access_error(regs, error_code, address);
                return;
index 84437a4..dbb1760 100644 (file)
@@ -56,6 +56,7 @@ config SPARC32
        select DMA_DIRECT_REMAP
        select GENERIC_ATOMIC64
        select HAVE_UID16
+       select LOCK_MM_AND_FIND_VMA
        select OLD_SIGACTION
        select ZONE_DMA
 
index 91259f2..aef2aeb 100644 (file)
@@ -143,28 +143,19 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
        if (pagefault_disabled() || !mm)
                goto no_context;
 
+       if (!from_user && address >= PAGE_OFFSET)
+               goto no_context;
+
        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 
 retry:
-       mmap_read_lock(mm);
-
-       if (!from_user && address >= PAGE_OFFSET)
-               goto bad_area;
-
-       vma = find_vma(mm, address);
+       vma = lock_mm_and_find_vma(mm, address, regs);
        if (!vma)
-               goto bad_area;
-       if (vma->vm_start <= address)
-               goto good_area;
-       if (!(vma->vm_flags & VM_GROWSDOWN))
-               goto bad_area;
-       if (expand_stack(vma, address))
-               goto bad_area;
+               goto bad_area_nosemaphore;
        /*
         * Ok, we have a good vm_area for this memory access, so
         * we can handle it..
         */
-good_area:
        code = SEGV_ACCERR;
        if (write) {
                if (!(vma->vm_flags & VM_WRITE))
@@ -318,17 +309,9 @@ static void force_user_fault(unsigned long address, int write)
 
        code = SEGV_MAPERR;
 
-       mmap_read_lock(mm);
-       vma = find_vma(mm, address);
+       vma = lock_mm_and_find_vma(mm, address, NULL);
        if (!vma)
-               goto bad_area;
-       if (vma->vm_start <= address)
-               goto good_area;
-       if (!(vma->vm_flags & VM_GROWSDOWN))
-               goto bad_area;
-       if (expand_stack(vma, address))
-               goto bad_area;
-good_area:
+               goto bad_area_nosemaphore;
        code = SEGV_ACCERR;
        if (write) {
                if (!(vma->vm_flags & VM_WRITE))
@@ -347,6 +330,7 @@ good_area:
        return;
 bad_area:
        mmap_read_unlock(mm);
+bad_area_nosemaphore:
        __do_fault_siginfo(code, SIGSEGV, tsk->thread.kregs, address);
        return;
 
index bcb0c5d..6d3c925 100644 (file)
@@ -49,6 +49,7 @@ config XTENSA
        select HAVE_SYSCALL_TRACEPOINTS
        select HAVE_VIRT_CPU_ACCOUNTING_GEN
        select IRQ_DOMAIN
+       select LOCK_MM_AND_FIND_VMA
        select MODULES_USE_ELF_RELA
        select PERF_USE_VMALLOC
        select TRACE_IRQFLAGS_SUPPORT
index 8c781b0..d89b193 100644 (file)
@@ -130,23 +130,14 @@ void do_page_fault(struct pt_regs *regs)
        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 
 retry:
-       mmap_read_lock(mm);
-       vma = find_vma(mm, address);
-
+       vma = lock_mm_and_find_vma(mm, address, regs);
        if (!vma)
-               goto bad_area;
-       if (vma->vm_start <= address)
-               goto good_area;
-       if (!(vma->vm_flags & VM_GROWSDOWN))
-               goto bad_area;
-       if (expand_stack(vma, address))
-               goto bad_area;
+               goto bad_area_nosemaphore;
 
        /* Ok, we have a good vm_area for this memory access, so
         * we can handle it..
         */
 
-good_area:
        code = SEGV_ACCERR;
 
        if (is_write) {
@@ -205,6 +196,7 @@ good_area:
         */
 bad_area:
        mmap_read_unlock(mm);
+bad_area_nosemaphore:
        if (user_mode(regs)) {
                current->thread.bad_vaddr = address;
                current->thread.error_code = is_write;