mm: mlock: update the interface to use folios
author Lorenzo Stoakes <lstoakes@gmail.com>
Thu, 12 Jan 2023 12:39:31 +0000 (12:39 +0000)
committer Andrew Morton <akpm@linux-foundation.org>
Fri, 3 Feb 2023 06:33:04 +0000 (22:33 -0800)
Update the mlock interface to accept folios rather than pages, bringing
the interface in line with the internal implementation.

munlock_vma_page() still requires a page_folio() conversion; however, this
is consistent with the existing mlock_vma_page() implementation and is a
product of rmap still dealing in pages rather than folios.
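
The renamed entry points are:

	mlock_new_page()          -> mlock_new_folio()
	munlock_page()            -> munlock_folio()
	need_mlock_page_drain()   -> need_mlock_drain()
	mlock_page_drain_local()  -> mlock_drain_local()
	mlock_page_drain_remote() -> mlock_drain_remote()

For example, where mlock_pte_range() previously looked up a page and
called munlock_page():

	page = vm_normal_page(vma, addr, *pte);
	if (!page || is_zone_device_page(page))
		continue;
	munlock_page(page);

it now looks up the folio and calls munlock_folio():

	folio = vm_normal_folio(vma, addr, *pte);
	if (!folio || folio_is_zone_device(folio))
		continue;
	munlock_folio(folio);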

Link: https://lkml.kernel.org/r/cba12777c5544305014bc0cbec56bb4cc71477d8.1673526881.git.lstoakes@gmail.com
Signed-off-by: Lorenzo Stoakes <lstoakes@gmail.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Christian Brauner <brauner@kernel.org>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Hugh Dickins <hughd@google.com>
Cc: Joel Fernandes (Google) <joel@joelfernandes.org>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Liam R. Howlett <Liam.Howlett@oracle.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Mike Rapoport (IBM) <rppt@kernel.org>
Cc: William Kucharski <william.kucharski@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/internal.h
mm/migrate.c
mm/mlock.c
mm/page_alloc.c
mm/rmap.c
mm/swap.c

index 583e153..973b48e 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -533,10 +533,9 @@ extern int mlock_future_check(struct mm_struct *mm, unsigned long flags,
  * should be called with vma's mmap_lock held for read or write,
  * under page table lock for the pte/pmd being added or removed.
  *
- * mlock is usually called at the end of page_add_*_rmap(),
- * munlock at the end of page_remove_rmap(); but new anon
- * pages are managed by lru_cache_add_inactive_or_unevictable()
- * calling mlock_new_page().
+ * mlock is usually called at the end of page_add_*_rmap(), munlock at
+ * the end of page_remove_rmap(); but new anon folios are managed by
+ * folio_add_lru_vma() calling mlock_new_folio().
  *
  * @compound is used to include pmd mappings of THPs, but filter out
  * pte mappings of THPs, which cannot be consistently counted: a pte
@@ -565,18 +564,25 @@ static inline void mlock_vma_page(struct page *page,
        mlock_vma_folio(page_folio(page), vma, compound);
 }
 
-void munlock_page(struct page *page);
-static inline void munlock_vma_page(struct page *page,
+void munlock_folio(struct folio *folio);
+
+static inline void munlock_vma_folio(struct folio *folio,
                        struct vm_area_struct *vma, bool compound)
 {
        if (unlikely(vma->vm_flags & VM_LOCKED) &&
-           (compound || !PageTransCompound(page)))
-               munlock_page(page);
+           (compound || !folio_test_large(folio)))
+               munlock_folio(folio);
+}
+
+static inline void munlock_vma_page(struct page *page,
+                       struct vm_area_struct *vma, bool compound)
+{
+       munlock_vma_folio(page_folio(page), vma, compound);
 }
-void mlock_new_page(struct page *page);
-bool need_mlock_page_drain(int cpu);
-void mlock_page_drain_local(void);
-void mlock_page_drain_remote(int cpu);
+void mlock_new_folio(struct folio *folio);
+bool need_mlock_drain(int cpu);
+void mlock_drain_local(void);
+void mlock_drain_remote(int cpu);
 
 extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);
 
@@ -665,10 +671,10 @@ static inline void mlock_vma_page(struct page *page,
                        struct vm_area_struct *vma, bool compound) { }
 static inline void munlock_vma_page(struct page *page,
                        struct vm_area_struct *vma, bool compound) { }
-static inline void mlock_new_page(struct page *page) { }
-static inline bool need_mlock_page_drain(int cpu) { return false; }
-static inline void mlock_page_drain_local(void) { }
-static inline void mlock_page_drain_remote(int cpu) { }
+static inline void mlock_new_folio(struct folio *folio) { }
+static inline bool need_mlock_drain(int cpu) { return false; }
+static inline void mlock_drain_local(void) { }
+static inline void mlock_drain_remote(int cpu) { }
 static inline void vunmap_range_noflush(unsigned long start, unsigned long end)
 {
 }
index 98de7ce..206fcdb 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -265,7 +265,7 @@ static bool remove_migration_pte(struct folio *folio,
                        set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
                }
                if (vma->vm_flags & VM_LOCKED)
-                       mlock_page_drain_local();
+                       mlock_drain_local();
 
                trace_remove_migration_pte(pvmw.address, pte_val(pte),
                                           compound_order(new));
index f8e8d30..9e9c8be 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -210,7 +210,7 @@ static void mlock_folio_batch(struct folio_batch *fbatch)
        folio_batch_reinit(fbatch);
 }
 
-void mlock_page_drain_local(void)
+void mlock_drain_local(void)
 {
        struct folio_batch *fbatch;
 
@@ -221,7 +221,7 @@ void mlock_page_drain_local(void)
        local_unlock(&mlock_fbatch.lock);
 }
 
-void mlock_page_drain_remote(int cpu)
+void mlock_drain_remote(int cpu)
 {
        struct folio_batch *fbatch;
 
@@ -231,7 +231,7 @@ void mlock_page_drain_remote(int cpu)
                mlock_folio_batch(fbatch);
 }
 
-bool need_mlock_page_drain(int cpu)
+bool need_mlock_drain(int cpu)
 {
        return folio_batch_count(&per_cpu(mlock_fbatch.fbatch, cpu));
 }
@@ -262,13 +262,12 @@ void mlock_folio(struct folio *folio)
 }
 
 /**
- * mlock_new_page - mlock a newly allocated page not yet on LRU
- * @page: page to be mlocked, either a normal page or a THP head.
+ * mlock_new_folio - mlock a newly allocated folio not yet on LRU
+ * @folio: folio to be mlocked, either normal or a THP head.
  */
-void mlock_new_page(struct page *page)
+void mlock_new_folio(struct folio *folio)
 {
        struct folio_batch *fbatch;
-       struct folio *folio = page_folio(page);
        int nr_pages = folio_nr_pages(folio);
 
        local_lock(&mlock_fbatch.lock);
@@ -286,13 +285,12 @@ void mlock_new_page(struct page *page)
 }
 
 /**
- * munlock_page - munlock a page
- * @page: page to be munlocked, either a normal page or a THP head.
+ * munlock_folio - munlock a folio
+ * @folio: folio to be munlocked, either normal or a THP head.
  */
-void munlock_page(struct page *page)
+void munlock_folio(struct folio *folio)
 {
        struct folio_batch *fbatch;
-       struct folio *folio = page_folio(page);
 
        local_lock(&mlock_fbatch.lock);
        fbatch = this_cpu_ptr(&mlock_fbatch.fbatch);
@@ -314,7 +312,7 @@ static int mlock_pte_range(pmd_t *pmd, unsigned long addr,
        struct vm_area_struct *vma = walk->vma;
        spinlock_t *ptl;
        pte_t *start_pte, *pte;
-       struct page *page;
+       struct folio *folio;
 
        ptl = pmd_trans_huge_lock(pmd, vma);
        if (ptl) {
@@ -322,11 +320,11 @@ static int mlock_pte_range(pmd_t *pmd, unsigned long addr,
                        goto out;
                if (is_huge_zero_pmd(*pmd))
                        goto out;
-               page = pmd_page(*pmd);
+               folio = page_folio(pmd_page(*pmd));
                if (vma->vm_flags & VM_LOCKED)
-                       mlock_folio(page_folio(page));
+                       mlock_folio(folio);
                else
-                       munlock_page(page);
+                       munlock_folio(folio);
                goto out;
        }
 
@@ -334,15 +332,15 @@ static int mlock_pte_range(pmd_t *pmd, unsigned long addr,
        for (pte = start_pte; addr != end; pte++, addr += PAGE_SIZE) {
                if (!pte_present(*pte))
                        continue;
-               page = vm_normal_page(vma, addr, *pte);
-               if (!page || is_zone_device_page(page))
+               folio = vm_normal_folio(vma, addr, *pte);
+               if (!folio || folio_is_zone_device(folio))
                        continue;
-               if (PageTransCompound(page))
+               if (folio_test_large(folio))
                        continue;
                if (vma->vm_flags & VM_LOCKED)
-                       mlock_folio(page_folio(page));
+                       mlock_folio(folio);
                else
-                       munlock_page(page);
+                       munlock_folio(folio);
        }
        pte_unmap(start_pte);
 out:
index 88494e8..83be3b5 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -8587,7 +8587,7 @@ static int page_alloc_cpu_dead(unsigned int cpu)
        struct zone *zone;
 
        lru_add_drain_cpu(cpu);
-       mlock_page_drain_remote(cpu);
+       mlock_drain_remote(cpu);
        drain_pages(cpu);
 
        /*
index a079d99..073999f 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1764,7 +1764,7 @@ discard:
                 */
                page_remove_rmap(subpage, vma, folio_test_hugetlb(folio));
                if (vma->vm_flags & VM_LOCKED)
-                       mlock_page_drain_local();
+                       mlock_drain_local();
                folio_put(folio);
        }
 
@@ -2105,7 +2105,7 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
                 */
                page_remove_rmap(subpage, vma, folio_test_hugetlb(folio));
                if (vma->vm_flags & VM_LOCKED)
-                       mlock_page_drain_local();
+                       mlock_drain_local();
                folio_put(folio);
        }
 
index e54e2a2..42d67f9 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -562,7 +562,7 @@ void folio_add_lru_vma(struct folio *folio, struct vm_area_struct *vma)
        VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
 
        if (unlikely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) == VM_LOCKED))
-               mlock_new_page(&folio->page);
+               mlock_new_folio(folio);
        else
                folio_add_lru(folio);
 }
@@ -781,7 +781,7 @@ void lru_add_drain(void)
        local_lock(&cpu_fbatches.lock);
        lru_add_drain_cpu(smp_processor_id());
        local_unlock(&cpu_fbatches.lock);
-       mlock_page_drain_local();
+       mlock_drain_local();
 }
 
 /*
@@ -796,7 +796,7 @@ static void lru_add_and_bh_lrus_drain(void)
        lru_add_drain_cpu(smp_processor_id());
        local_unlock(&cpu_fbatches.lock);
        invalidate_bh_lrus_cpu();
-       mlock_page_drain_local();
+       mlock_drain_local();
 }
 
 void lru_add_drain_cpu_zone(struct zone *zone)
@@ -805,7 +805,7 @@ void lru_add_drain_cpu_zone(struct zone *zone)
        lru_add_drain_cpu(smp_processor_id());
        drain_local_pages(zone);
        local_unlock(&cpu_fbatches.lock);
-       mlock_page_drain_local();
+       mlock_drain_local();
 }
 
 #ifdef CONFIG_SMP
@@ -828,7 +828,7 @@ static bool cpu_needs_drain(unsigned int cpu)
                folio_batch_count(&fbatches->lru_deactivate) ||
                folio_batch_count(&fbatches->lru_lazyfree) ||
                folio_batch_count(&fbatches->activate) ||
-               need_mlock_page_drain(cpu) ||
+               need_mlock_drain(cpu) ||
                has_bh_in_lru(cpu, NULL);
 }