powerpc/mm: Convert to using lock_mm_and_find_vma()
author Michael Ellerman <mpe@ellerman.id.au>
Fri, 16 Jun 2023 05:51:29 +0000 (15:51 +1000)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sat, 1 Jul 2023 11:16:24 +0000 (13:16 +0200)
commit e6fe228c4ffafdfc970cf6d46883a1f481baf7ea upstream.

Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Samuel Mendoza-Jonas <samjonas@amazon.com>
Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
arch/powerpc/Kconfig
arch/powerpc/mm/fault.c

index 2b11416..6050e6e 100644 (file)
@@ -257,6 +257,7 @@ config PPC
        select IRQ_DOMAIN
        select IRQ_FORCED_THREADING
        select KASAN_VMALLOC                    if KASAN && MODULES
+       select LOCK_MM_AND_FIND_VMA
        select MMU_GATHER_PAGE_SIZE
        select MMU_GATHER_RCU_TABLE_FREE
        select MMU_GATHER_MERGE_VMAS
index af46aa8..644e4ec 100644 (file)
@@ -84,11 +84,6 @@ static int __bad_area(struct pt_regs *regs, unsigned long address, int si_code)
        return __bad_area_nosemaphore(regs, address, si_code);
 }
 
-static noinline int bad_area(struct pt_regs *regs, unsigned long address)
-{
-       return __bad_area(regs, address, SEGV_MAPERR);
-}
-
 static noinline int bad_access_pkey(struct pt_regs *regs, unsigned long address,
                                    struct vm_area_struct *vma)
 {
@@ -481,40 +476,12 @@ static int ___do_page_fault(struct pt_regs *regs, unsigned long address,
         * we will deadlock attempting to validate the fault against the
         * address space.  Luckily the kernel only validly references user
         * space from well defined areas of code, which are listed in the
-        * exceptions table.
-        *
-        * As the vast majority of faults will be valid we will only perform
-        * the source reference check when there is a possibility of a deadlock.
-        * Attempt to lock the address space, if we cannot we then validate the
-        * source.  If this is invalid we can skip the address space check,
-        * thus avoiding the deadlock.
+        * exceptions table. lock_mm_and_find_vma() handles that logic.
         */
-       if (unlikely(!mmap_read_trylock(mm))) {
-               if (!is_user && !search_exception_tables(regs->nip))
-                       return bad_area_nosemaphore(regs, address);
-
 retry:
-               mmap_read_lock(mm);
-       } else {
-               /*
-                * The above down_read_trylock() might have succeeded in
-                * which case we'll have missed the might_sleep() from
-                * down_read():
-                */
-               might_sleep();
-       }
-
-       vma = find_vma(mm, address);
+       vma = lock_mm_and_find_vma(mm, address, regs);
        if (unlikely(!vma))
-               return bad_area(regs, address);
-
-       if (unlikely(vma->vm_start > address)) {
-               if (unlikely(!(vma->vm_flags & VM_GROWSDOWN)))
-                       return bad_area(regs, address);
-
-               if (unlikely(expand_stack(vma, address)))
-                       return bad_area(regs, address);
-       }
+               return bad_area_nosemaphore(regs, address);
 
        if (unlikely(access_pkey_error(is_write, is_exec,
                                       (error_code & DSISR_KEYFAULT), vma)))