mm: run the fault-around code under the VMA lock
Author: Matthew Wilcox (Oracle) <willy@infradead.org>
Mon, 24 Jul 2023 18:54:08 +0000 (19:54 +0100)
Committer: Andrew Morton <akpm@linux-foundation.org>
Fri, 18 Aug 2023 17:12:52 +0000 (10:12 -0700)
The map_pages fs method should be safe to run under the VMA lock instead
of the mmap lock.  This should have a measurable reduction in contention
on the mmap lock.

Link: https://lkml.kernel.org/r/20230724185410.1124082-9-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Suren Baghdasaryan <surenb@google.com>
Cc: Arjun Roy <arjunroy@google.com>
Cc: Eric Dumazet <edumazet@google.com>
Cc: Punit Agrawal <punit.agrawal@bytedance.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/memory.c

index 23a20b7..52235aa 100644 (file)
@@ -4533,11 +4533,6 @@ static vm_fault_t do_read_fault(struct vm_fault *vmf)
        vm_fault_t ret = 0;
        struct folio *folio;
 
-       if (vmf->flags & FAULT_FLAG_VMA_LOCK) {
-               vma_end_read(vmf->vma);
-               return VM_FAULT_RETRY;
-       }
-
        /*
         * Let's call ->map_pages() first and use ->fault() as fallback
         * if page by the offset is not ready to be mapped (cold cache or
@@ -4549,6 +4544,11 @@ static vm_fault_t do_read_fault(struct vm_fault *vmf)
                        return ret;
        }
 
+       if (vmf->flags & FAULT_FLAG_VMA_LOCK) {
+               vma_end_read(vmf->vma);
+               return VM_FAULT_RETRY;
+       }
+
        ret = __do_fault(vmf);
        if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
                return ret;