mm: rework handling in do_wp_page() based on private vs. shared mappings
author: David Hildenbrand <david@redhat.com>
Wed, 16 Nov 2022 10:26:45 +0000 (11:26 +0100)
committer: Andrew Morton <akpm@linux-foundation.org>
Wed, 30 Nov 2022 23:58:57 +0000 (15:58 -0800)
We want to extend FAULT_FLAG_UNSHARE support to anything mapped into a
COW mapping (pagecache page, zeropage, PFN, ...), not just anonymous pages.
Let's prepare for that by handling shared mappings first such that we can
handle private mappings last.

While at it, use folio-based functions instead of page-based functions
where we touch the code either way.

Link: https://lkml.kernel.org/r/20221116102659.70287-7-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/memory.c

index 5e4df6b..5d4b42f 100644 (file)
@@ -3341,7 +3341,7 @@ static vm_fault_t do_wp_page(struct vm_fault *vmf)
 {
        const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
        struct vm_area_struct *vma = vmf->vma;
-       struct folio *folio;
+       struct folio *folio = NULL;
 
        if (likely(!unshare)) {
                if (userfaultfd_pte_wp(vma, *vmf->pte)) {
@@ -3359,13 +3359,12 @@ static vm_fault_t do_wp_page(struct vm_fault *vmf)
        }
 
        vmf->page = vm_normal_page(vma, vmf->address, vmf->orig_pte);
-       if (!vmf->page) {
-               if (unlikely(unshare)) {
-                       /* No anonymous page -> nothing to do. */
-                       pte_unmap_unlock(vmf->pte, vmf->ptl);
-                       return 0;
-               }
 
+       /*
+        * Shared mapping: we are guaranteed to have VM_WRITE and
+        * FAULT_FLAG_WRITE set at this point.
+        */
+       if (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) {
                /*
                 * VM_MIXEDMAP !pfn_valid() case, or VM_SOFTDIRTY clear on a
                 * VM_PFNMAP VMA.
@@ -3373,20 +3372,19 @@ static vm_fault_t do_wp_page(struct vm_fault *vmf)
                 * We should not cow pages in a shared writeable mapping.
                 * Just mark the pages writable and/or call ops->pfn_mkwrite.
                 */
-               if ((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
-                                    (VM_WRITE|VM_SHARED))
+               if (!vmf->page)
                        return wp_pfn_shared(vmf);
-
-               pte_unmap_unlock(vmf->pte, vmf->ptl);
-               return wp_page_copy(vmf);
+               return wp_page_shared(vmf);
        }
 
+       if (vmf->page)
+               folio = page_folio(vmf->page);
+
        /*
-        * Take out anonymous pages first, anonymous shared vmas are
-        * not dirty accountable.
+        * Private mapping: create an exclusive anonymous page copy if reuse
+        * is impossible. We might miss VM_WRITE for FOLL_FORCE handling.
         */
-       folio = page_folio(vmf->page);
-       if (folio_test_anon(folio)) {
+       if (folio && folio_test_anon(folio)) {
                /*
                 * If the page is exclusive to this process we must reuse the
                 * page without further checks.
@@ -3437,19 +3435,17 @@ reuse:
                /* No anonymous page -> nothing to do. */
                pte_unmap_unlock(vmf->pte, vmf->ptl);
                return 0;
-       } else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
-                                       (VM_WRITE|VM_SHARED))) {
-               return wp_page_shared(vmf);
        }
 copy:
        /*
         * Ok, we need to copy. Oh, well..
         */
-       get_page(vmf->page);
+       if (folio)
+               folio_get(folio);
 
        pte_unmap_unlock(vmf->pte, vmf->ptl);
 #ifdef CONFIG_KSM
-       if (PageKsm(vmf->page))
+       if (folio && folio_test_ksm(folio))
                count_vm_event(COW_KSM);
 #endif
        return wp_page_copy(vmf);