mm: move FAULT_FLAG_VMA_LOCK check down in handle_pte_fault()
author: Matthew Wilcox (Oracle) <willy@infradead.org>
Mon, 24 Jul 2023 18:54:06 +0000 (19:54 +0100)
committer: Andrew Morton <akpm@linux-foundation.org>
Fri, 18 Aug 2023 17:12:52 +0000 (10:12 -0700)
Call do_pte_missing() under the VMA lock ...  then immediately retry in
do_fault().

Link: https://lkml.kernel.org/r/20230724185410.1124082-7-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Suren Baghdasaryan <surenb@google.com>
Cc: Arjun Roy <arjunroy@google.com>
Cc: Eric Dumazet <edumazet@google.com>
Cc: Punit Agrawal <punit.agrawal@bytedance.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/memory.c

index 932fc62..d947d8d 100644 (file)
@@ -4647,6 +4647,11 @@ static vm_fault_t do_fault(struct vm_fault *vmf)
        struct mm_struct *vm_mm = vma->vm_mm;
        vm_fault_t ret;
 
+       if (vmf->flags & FAULT_FLAG_VMA_LOCK) {
+               vma_end_read(vma);
+               return VM_FAULT_RETRY;
+       }
+
        /*
         * The VMA was not fully populated on mmap() or missing VM_DONTEXPAND
         */
@@ -4932,11 +4937,6 @@ static vm_fault_t handle_pte_fault(struct vm_fault *vmf)
 {
        pte_t entry;
 
-       if ((vmf->flags & FAULT_FLAG_VMA_LOCK) && !vma_is_anonymous(vmf->vma)) {
-               vma_end_read(vmf->vma);
-               return VM_FAULT_RETRY;
-       }
-
        if (unlikely(pmd_none(*vmf->pmd))) {
                /*
                 * Leave __pte_alloc() until later: because vm_ops->fault may
@@ -4969,6 +4969,12 @@ static vm_fault_t handle_pte_fault(struct vm_fault *vmf)
        if (!vmf->pte)
                return do_pte_missing(vmf);
 
+       if ((vmf->flags & FAULT_FLAG_VMA_LOCK) && !vma_is_anonymous(vmf->vma)) {
+               pte_unmap(vmf->pte);
+               vma_end_read(vmf->vma);
+               return VM_FAULT_RETRY;
+       }
+
        if (!pte_present(vmf->orig_pte))
                return do_swap_page(vmf);