mm: handle swap and NUMA PTE faults under the VMA lock
author	Matthew Wilcox (Oracle) <willy@infradead.org>
	Mon, 24 Jul 2023 18:54:09 +0000 (19:54 +0100)
committer	Andrew Morton <akpm@linux-foundation.org>
	Fri, 18 Aug 2023 17:12:52 +0000 (10:12 -0700)
Move the FAULT_FLAG_VMA_LOCK check down in handle_pte_fault().  This is
probably not a huge win in its own right, but is a nicely separable bit
from the next patch.

Link: https://lkml.kernel.org/r/20230724185410.1124082-10-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Arjun Roy <arjunroy@google.com>
Cc: Eric Dumazet <edumazet@google.com>
Cc: Punit Agrawal <punit.agrawal@bytedance.com>
Cc: Suren Baghdasaryan <surenb@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/memory.c

index 52235aa..c122adc 100644
@@ -4979,18 +4979,18 @@ static vm_fault_t handle_pte_fault(struct vm_fault *vmf)
        if (!vmf->pte)
                return do_pte_missing(vmf);
 
-       if ((vmf->flags & FAULT_FLAG_VMA_LOCK) && !vma_is_anonymous(vmf->vma)) {
-               pte_unmap(vmf->pte);
-               vma_end_read(vmf->vma);
-               return VM_FAULT_RETRY;
-       }
-
        if (!pte_present(vmf->orig_pte))
                return do_swap_page(vmf);
 
        if (pte_protnone(vmf->orig_pte) && vma_is_accessible(vmf->vma))
                return do_numa_page(vmf);
 
+       if ((vmf->flags & FAULT_FLAG_VMA_LOCK) && !vma_is_anonymous(vmf->vma)) {
+               pte_unmap(vmf->pte);
+               vma_end_read(vmf->vma);
+               return VM_FAULT_RETRY;
+       }
+
        spin_lock(vmf->ptl);
        entry = vmf->orig_pte;
        if (unlikely(!pte_same(ptep_get(vmf->pte), entry))) {
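
For readability, the head of handle_pte_fault() after this patch ends up looking roughly like the sketch below. This is condensed directly from the hunk above; the local declarations, the earlier PMD/PTE setup, and everything past the spin_lock() are elided:

	static vm_fault_t handle_pte_fault(struct vm_fault *vmf)
	{
		...
		if (!vmf->pte)
			return do_pte_missing(vmf);

		/* Swap faults now run before the VMA-lock bailout ... */
		if (!pte_present(vmf->orig_pte))
			return do_swap_page(vmf);

		/* ... as do NUMA hinting faults on accessible VMAs. */
		if (pte_protnone(vmf->orig_pte) && vma_is_accessible(vmf->vma))
			return do_numa_page(vmf);

		/* Other file-backed faults still retry under the mmap_lock. */
		if ((vmf->flags & FAULT_FLAG_VMA_LOCK) && !vma_is_anonymous(vmf->vma)) {
			pte_unmap(vmf->pte);
			vma_end_read(vmf->vma);
			return VM_FAULT_RETRY;
		}

		spin_lock(vmf->ptl);
		...
	}

The net effect, per the title: a fault taken with only the VMA read lock held reaches do_swap_page() and do_numa_page() instead of immediately falling back to VM_FAULT_RETRY, so swap and NUMA PTE faults are handled under the VMA lock.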