arm/mm: Convert to using lock_mm_and_find_vma()
author Ben Hutchings <ben@decadent.org.uk>
Thu, 22 Jun 2023 19:24:30 +0000 (21:24 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sat, 1 Jul 2023 11:16:25 +0000 (13:16 +0200)
commit 8b35ca3e45e35a26a21427f35d4093606e93ad0a upstream.

arm has an additional check for address < FIRST_USER_ADDRESS before
expanding the stack.  Since FIRST_USER_ADDRESS is defined everywhere
(generally as 0), move that check to the generic expand_downwards().
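
For context, the generic helper this converts to behaves roughly like the
sketch below. This is a simplified illustration of its contract only, not
the exact mm/memory.c implementation (which, among other details, upgrades
to the mmap write lock before expanding the stack):

	/*
	 * Sketch only: returns the VMA with mmap_lock held for read on
	 * success, or NULL with the lock released on failure.
	 */
	struct vm_area_struct *lock_mm_and_find_vma(struct mm_struct *mm,
			unsigned long addr, struct pt_regs *regs)
	{
		struct vm_area_struct *vma;

		if (!mmap_read_trylock(mm)) {
			/* Kernel faults are valid only from exception-table sites. */
			if (!user_mode(regs) &&
			    !search_exception_tables(instruction_pointer(regs)))
				return NULL;
			mmap_read_lock(mm);
		}

		vma = find_vma(mm, addr);
		if (vma && vma->vm_start <= addr)
			return vma;

		/* Faulted in the gap below a stack: try to grow it. */
		if (vma && (vma->vm_flags & VM_GROWSDOWN) &&
		    !expand_stack(vma, addr))
			return vma;

		mmap_read_unlock(mm);
		return NULL;
	}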

Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Samuel Mendoza-Jonas <samjonas@amazon.com>
Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
arch/arm/Kconfig
arch/arm/mm/fault.c
mm/mmap.c

index a08c9d0..0202e48 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -122,6 +122,7 @@ config ARM
        select HAVE_UID16
        select HAVE_VIRT_CPU_ACCOUNTING_GEN
        select IRQ_FORCED_THREADING
+       select LOCK_MM_AND_FIND_VMA
        select MODULES_USE_ELF_REL
        select NEED_DMA_MAP_STATE
        select OF_EARLY_FLATTREE if OF
index de988cb..b0db853 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -231,37 +231,11 @@ static inline bool is_permission_fault(unsigned int fsr)
        return false;
 }
 
-static vm_fault_t __kprobes
-__do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int flags,
-               unsigned long vma_flags, struct pt_regs *regs)
-{
-       struct vm_area_struct *vma = find_vma(mm, addr);
-       if (unlikely(!vma))
-               return VM_FAULT_BADMAP;
-
-       if (unlikely(vma->vm_start > addr)) {
-               if (!(vma->vm_flags & VM_GROWSDOWN))
-                       return VM_FAULT_BADMAP;
-               if (addr < FIRST_USER_ADDRESS)
-                       return VM_FAULT_BADMAP;
-               if (expand_stack(vma, addr))
-                       return VM_FAULT_BADMAP;
-       }
-
-       /*
-        * ok, we have a good vm_area for this memory access, check the
-        * permissions on the VMA allow for the fault which occurred.
-        */
-       if (!(vma->vm_flags & vma_flags))
-               return VM_FAULT_BADACCESS;
-
-       return handle_mm_fault(vma, addr & PAGE_MASK, flags, regs);
-}
-
 static int __kprobes
 do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 {
        struct mm_struct *mm = current->mm;
+       struct vm_area_struct *vma;
        int sig, code;
        vm_fault_t fault;
        unsigned int flags = FAULT_FLAG_DEFAULT;
@@ -300,31 +274,21 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 
        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
 
-       /*
-        * As per x86, we may deadlock here.  However, since the kernel only
-        * validly references user space from well defined areas of the code,
-        * we can bug out early if this is from code which shouldn't.
-        */
-       if (!mmap_read_trylock(mm)) {
-               if (!user_mode(regs) && !search_exception_tables(regs->ARM_pc))
-                       goto no_context;
 retry:
-               mmap_read_lock(mm);
-       } else {
-               /*
-                * The above down_read_trylock() might have succeeded in
-                * which case, we'll have missed the might_sleep() from
-                * down_read()
-                */
-               might_sleep();
-#ifdef CONFIG_DEBUG_VM
-               if (!user_mode(regs) &&
-                   !search_exception_tables(regs->ARM_pc))
-                       goto no_context;
-#endif
+       vma = lock_mm_and_find_vma(mm, addr, regs);
+       if (unlikely(!vma)) {
+               fault = VM_FAULT_BADMAP;
+               goto bad_area;
        }
 
-       fault = __do_page_fault(mm, addr, flags, vm_flags, regs);
+       /*
+        * ok, we have a good vm_area for this memory access, check the
+        * permissions on the VMA allow for the fault which occurred.
+        */
+       if (!(vma->vm_flags & vm_flags))
+               fault = VM_FAULT_BADACCESS;
+       else
+               fault = handle_mm_fault(vma, addr & PAGE_MASK, flags, regs);
 
        /* If we need to retry but a fatal signal is pending, handle the
         * signal first. We do not need to release the mmap_lock because
@@ -355,6 +319,7 @@ retry:
        if (likely(!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP | VM_FAULT_BADACCESS))))
                return 0;
 
+bad_area:
        /*
         * If we are in kernel mode at this point, we
         * have no context to handle this fault with.
index f031fa2..fc7d09d 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2045,7 +2045,7 @@ int expand_downwards(struct vm_area_struct *vma, unsigned long address)
        int error = 0;
 
        address &= PAGE_MASK;
-       if (address < mmap_min_addr)
+       if (address < mmap_min_addr || address < FIRST_USER_ADDRESS)
                return -EPERM;
 
        /* Enforce stack_guard_gap */
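
With all three pieces applied, the hot path of arm's do_page_fault()
reduces to the lines below (reconstructed from the hunks above), and the
FIRST_USER_ADDRESS test that used to live in __do_page_fault() is now
performed once, in the generic expand_downwards():

	retry:
		vma = lock_mm_and_find_vma(mm, addr, regs);
		if (unlikely(!vma)) {
			fault = VM_FAULT_BADMAP;
			goto bad_area;
		}

		/*
		 * ok, we have a good vm_area for this memory access, check the
		 * permissions on the VMA allow for the fault which occurred.
		 */
		if (!(vma->vm_flags & vm_flags))
			fault = VM_FAULT_BADACCESS;
		else
			fault = handle_mm_fault(vma, addr & PAGE_MASK, flags, regs);

Because FIRST_USER_ADDRESS is an unsigned constant that is 0 on most
architectures, the added "address < FIRST_USER_ADDRESS" comparison is
folded away by the compiler everywhere except on architectures, such as
arm, that define it to a nonzero value.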