mm/gup.c: reorganize try_grab_folio()
author Vishal Moola (Oracle) <vishal.moola@gmail.com>
Wed, 14 Jun 2023 02:13:11 +0000 (19:13 -0700)
committer Andrew Morton <akpm@linux-foundation.org>
Mon, 19 Jun 2023 23:19:34 +0000 (16:19 -0700)
try_grab_folio() takes in a page, then chooses which folio operations to
perform based on the flags (either FOLL_GET or FOLL_PIN).  We can rewrite
this function to be more purpose-oriented.

Now, if neither FOLL_GET nor FOLL_PIN is set, warn and fail before doing
anything else.  Otherwise, call try_get_folio().  If FOLL_GET is set we
can return the result directly.  If FOLL_GET is not set then FOLL_PIN
must be, so we go on to pin the folio.

This change assists with folio conversions and makes the function more
readable.
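
For reference, a condensed sketch of the resulting control flow (the PCI
P2PDMA check and most comments are elided here; see the full diff below):

    struct folio *try_grab_folio(struct page *page, int refs,
                                 unsigned int flags)
    {
            struct folio *folio;

            if (WARN_ON_ONCE((flags & (FOLL_GET | FOLL_PIN)) == 0))
                    return NULL;            /* neither flag: warn and fail */

            folio = try_get_folio(page, refs);
            if (flags & FOLL_GET)
                    return folio;           /* FOLL_GET: hand back the result */

            /* FOLL_PIN is set: bail if !folio, then account the pin */
            ...
            return folio;
    }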

Link: https://lkml.kernel.org/r/20230614021312.34085-5-vishal.moola@gmail.com
Signed-off-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
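
For context, callers in the GUP fast path take one reference per page
roughly like this (simplified from gup_pte_range() in the same file; the
error label is illustrative):

    folio = try_grab_folio(page, 1, flags);
    if (!folio)
            goto pte_unmap;     /* fall back to the slow path */
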
diff --git a/mm/gup.c b/mm/gup.c
index 38986e5..ce14d4d 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -124,58 +124,58 @@ retry:
  */
 struct folio *try_grab_folio(struct page *page, int refs, unsigned int flags)
 {
+       struct folio *folio;
+
+       if (WARN_ON_ONCE((flags & (FOLL_GET | FOLL_PIN)) == 0))
+               return NULL;
+
        if (unlikely(!(flags & FOLL_PCI_P2PDMA) && is_pci_p2pdma_page(page)))
                return NULL;
 
-       if (flags & FOLL_GET)
-               return try_get_folio(page, refs);
-       else if (flags & FOLL_PIN) {
-               struct folio *folio;
+       folio = try_get_folio(page, refs);
 
-               /*
-                * Can't do FOLL_LONGTERM + FOLL_PIN gup fast path if not in a
-                * right zone, so fail and let the caller fall back to the slow
-                * path.
-                */
-               if (unlikely((flags & FOLL_LONGTERM) &&
-                            !is_longterm_pinnable_page(page)))
-                       return NULL;
+       if (flags & FOLL_GET)
+               return folio;
 
-               /*
-                * CAUTION: Don't use compound_head() on the page before this
-                * point, the result won't be stable.
-                */
-               folio = try_get_folio(page, refs);
-               if (!folio)
-                       return NULL;
+       /* FOLL_PIN is set */
+       if (!folio)
+               return NULL;
 
-               /*
-                * When pinning a large folio, use an exact count to track it.
-                *
-                * However, be sure to *also* increment the normal folio
-                * refcount field at least once, so that the folio really
-                * is pinned.  That's why the refcount from the earlier
-                * try_get_folio() is left intact.
-                */
-               if (folio_test_large(folio))
-                       atomic_add(refs, &folio->_pincount);
-               else
-                       folio_ref_add(folio,
-                                       refs * (GUP_PIN_COUNTING_BIAS - 1));
-               /*
-                * Adjust the pincount before re-checking the PTE for changes.
-                * This is essentially a smp_mb() and is paired with a memory
-                * barrier in page_try_share_anon_rmap().
-                */
-               smp_mb__after_atomic();
+       /*
+        * Can't do FOLL_LONGTERM + FOLL_PIN gup fast path if not in a
+        * right zone, so fail and let the caller fall back to the slow
+        * path.
+        */
+       if (unlikely((flags & FOLL_LONGTERM) &&
+                    !folio_is_longterm_pinnable(folio))) {
+               if (!put_devmap_managed_page_refs(&folio->page, refs))
+                       folio_put_refs(folio, refs);
+               return NULL;
+       }
 
-               node_stat_mod_folio(folio, NR_FOLL_PIN_ACQUIRED, refs);
+       /*
+        * When pinning a large folio, use an exact count to track it.
+        *
+        * However, be sure to *also* increment the normal folio
+        * refcount field at least once, so that the folio really
+        * is pinned.  That's why the refcount from the earlier
+        * try_get_folio() is left intact.
+        */
+       if (folio_test_large(folio))
+               atomic_add(refs, &folio->_pincount);
+       else
+               folio_ref_add(folio,
+                               refs * (GUP_PIN_COUNTING_BIAS - 1));
+       /*
+        * Adjust the pincount before re-checking the PTE for changes.
+        * This is essentially a smp_mb() and is paired with a memory
+        * barrier in page_try_share_anon_rmap().
+        */
+       smp_mb__after_atomic();
 
-               return folio;
-       }
+       node_stat_mod_folio(folio, NR_FOLL_PIN_ACQUIRED, refs);
 
-       WARN_ON_ONCE(1);
-       return NULL;
+       return folio;
 }
 
 static void gup_put_folio(struct folio *folio, int refs, unsigned int flags)
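
As background for the FOLL_PIN accounting above: a pin on a small folio
is recorded by inflating the normal refcount by GUP_PIN_COUNTING_BIAS
(1024, defined in include/linux/mm.h), while a large folio keeps an
exact count in folio->_pincount plus one normal reference.  A rough
accounting sketch for refs = 1:

    /*
     * small folio:  try_get_folio()    refcount  += 1
     *               folio_ref_add()    refcount  += GUP_PIN_COUNTING_BIAS - 1
     *               net effect:        refcount  += 1024
     *
     * large folio:  try_get_folio()    refcount  += 1
     *               atomic_add()       _pincount += 1
     */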