mm: convert do_wp_page() to use a folio
Author:     Matthew Wilcox (Oracle) <willy@infradead.org>
AuthorDate: Fri, 2 Sep 2022 19:46:37 +0000 (20:46 +0100)
Commit:     Andrew Morton <akpm@linux-foundation.org>
CommitDate: Mon, 3 Oct 2022 21:02:52 +0000 (14:02 -0700)
Saves many calls to compound_head().

Link: https://lkml.kernel.org/r/20220902194653.1739778-42-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
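
For context (not part of the patch): page-based helpers such as PageAnon(), PageKsm(), page_count() and PageSwapCache() each map the passed page to its head page/folio internally on every call, whereas page_folio() does that lookup once and the folio_* helpers then operate on the folio directly. A minimal sketch of the pattern, using hypothetical can_reuse_page()/can_reuse_folio() wrappers and assuming the usual <linux/mm.h> definitions:

	/* Sketch only, not from the patch: hypothetical wrappers showing the pattern. */

	/* Before: every helper call re-derives the head page/folio internally. */
	static bool can_reuse_page(struct page *page)
	{
		if (PageKsm(page) || page_count(page) > 3)
			return false;
		if (page_count(page) > 1 + PageSwapCache(page))
			return false;
		return true;
	}

	/* After: resolve the folio once, then use folio-native tests. */
	static bool can_reuse_folio(struct page *page)
	{
		struct folio *folio = page_folio(page);	/* one lookup up front */

		if (folio_test_ksm(folio) || folio_ref_count(folio) > 3)
			return false;
		if (folio_ref_count(folio) > 1 + folio_test_swapcache(folio))
			return false;
		return true;
	}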
diff --git a/mm/memory.c b/mm/memory.c
index 0018df3f0cc241ff52bd779953a634c209fb4b1a..2f1397b7c77dc815f79b4307346ae14eb55b8e87 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3368,6 +3368,7 @@ static vm_fault_t do_wp_page(struct vm_fault *vmf)
 {
        const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
        struct vm_area_struct *vma = vmf->vma;
+       struct folio *folio;
 
        VM_BUG_ON(unshare && (vmf->flags & FAULT_FLAG_WRITE));
        VM_BUG_ON(!unshare && !(vmf->flags & FAULT_FLAG_WRITE));
@@ -3414,48 +3415,47 @@ static vm_fault_t do_wp_page(struct vm_fault *vmf)
         * Take out anonymous pages first, anonymous shared vmas are
         * not dirty accountable.
         */
-       if (PageAnon(vmf->page)) {
-               struct page *page = vmf->page;
-
+       folio = page_folio(vmf->page);
+       if (folio_test_anon(folio)) {
                /*
                 * If the page is exclusive to this process we must reuse the
                 * page without further checks.
                 */
-               if (PageAnonExclusive(page))
+               if (PageAnonExclusive(vmf->page))
                        goto reuse;
 
                /*
-                * We have to verify under page lock: these early checks are
-                * just an optimization to avoid locking the page and freeing
+                * We have to verify under folio lock: these early checks are
+                * just an optimization to avoid locking the folio and freeing
                 * the swapcache if there is little hope that we can reuse.
                 *
-                * PageKsm() doesn't necessarily raise the page refcount.
+                * KSM doesn't necessarily raise the folio refcount.
                 */
-               if (PageKsm(page) || page_count(page) > 3)
+               if (folio_test_ksm(folio) || folio_ref_count(folio) > 3)
                        goto copy;
-               if (!PageLRU(page))
+               if (!folio_test_lru(folio))
                        /*
                         * Note: We cannot easily detect+handle references from
-                        * remote LRU pagevecs or references to PageLRU() pages.
+                        * remote LRU pagevecs or references to LRU folios.
                         */
                        lru_add_drain();
-               if (page_count(page) > 1 + PageSwapCache(page))
+               if (folio_ref_count(folio) > 1 + folio_test_swapcache(folio))
                        goto copy;
-               if (!trylock_page(page))
+               if (!folio_trylock(folio))
                        goto copy;
-               if (PageSwapCache(page))
-                       try_to_free_swap(page);
-               if (PageKsm(page) || page_count(page) != 1) {
-                       unlock_page(page);
+               if (folio_test_swapcache(folio))
+                       folio_free_swap(folio);
+               if (folio_test_ksm(folio) || folio_ref_count(folio) != 1) {
+                       folio_unlock(folio);
                        goto copy;
                }
                /*
-                * Ok, we've got the only page reference from our mapping
-                * and the page is locked, it's dark out, and we're wearing
+                * Ok, we've got the only folio reference from our mapping
+                * and the folio is locked, it's dark out, and we're wearing
                 * sunglasses. Hit it.
                 */
-               page_move_anon_rmap(page, vma);
-               unlock_page(page);
+               page_move_anon_rmap(vmf->page, vma);
+               folio_unlock(folio);
 reuse:
                if (unlikely(unshare)) {
                        pte_unmap_unlock(vmf->pte, vmf->ptl);