mm: remove references to pagevec
author    Matthew Wilcox (Oracle) <willy@infradead.org>
          Wed, 21 Jun 2023 16:45:56 +0000 (17:45 +0100)
committer Andrew Morton <akpm@linux-foundation.org>
          Fri, 23 Jun 2023 23:59:30 +0000 (16:59 -0700)
Most of these should just refer to the LRU cache rather than the data
structure used to implement the LRU cache.

Link: https://lkml.kernel.org/r/20230621164557.3510324-13-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/huge_memory.c
mm/khugepaged.c
mm/ksm.c
mm/memory.c
mm/migrate_device.c
mm/swap.c
mm/truncate.c

index e94fe29..eb36783 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1344,7 +1344,7 @@ vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf)
        /*
         * See do_wp_page(): we can only reuse the folio exclusively if
         * there are no additional references. Note that we always drain
-        * the LRU pagevecs immediately after adding a THP.
+        * the LRU cache immediately after adding a THP.
         */
        if (folio_ref_count(folio) >
                        1 + folio_test_swapcache(folio) * folio_nr_pages(folio))
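
[Illustration, not part of the patch.] The check above permits in-place reuse only when nobody but the fault path holds a reference: one reference for the caller plus, if the THP sits in the swapcache, one per subpage. A minimal userspace C model of that arithmetic, with invented toy_folio fields standing in for folio_ref_count(), folio_test_swapcache() and folio_nr_pages():

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-in for struct folio; all field names are invented. */
struct toy_folio {
	unsigned int ref_count;    /* folio_ref_count()      */
	bool in_swapcache;         /* folio_test_swapcache() */
	unsigned int nr_pages;     /* folio_nr_pages()       */
};

/*
 * Mirror of the condition in do_huge_pmd_wp_page(): the folio may be
 * reused in place only if the expected references are the only ones -
 * one for the fault path, plus one per subpage if it is in the
 * swapcache.  Any additional reference forces a copy instead.
 */
static bool can_reuse_exclusively(const struct toy_folio *f)
{
	unsigned int expected = 1 + (f->in_swapcache ? f->nr_pages : 0);

	return f->ref_count <= expected;
}

int main(void)
{
	struct toy_folio thp = { .ref_count = 513, .in_swapcache = true, .nr_pages = 512 };

	printf("reuse: %s\n", can_reuse_exclusively(&thp) ? "yes" : "no"); /* yes */
	thp.ref_count++;        /* e.g. still sitting in a per-CPU LRU cache */
	printf("reuse: %s\n", can_reuse_exclusively(&thp) ? "yes" : "no"); /* no  */
	return 0;
}
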
index 5ef1e08..3beb4ad 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1051,7 +1051,7 @@ static int __collapse_huge_page_swapin(struct mm_struct *mm,
        if (pte)
                pte_unmap(pte);
 
-       /* Drain LRU add pagevec to remove extra pin on the swapped in pages */
+       /* Drain LRU cache to remove extra pin on the swapped in pages */
        if (swapped_in)
                lru_add_drain();
 
@@ -1972,7 +1972,7 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
                                        result = SCAN_FAIL;
                                        goto xa_unlocked;
                                }
-                               /* drain pagevecs to help isolate_lru_page() */
+                               /* drain lru cache to help isolate_lru_page() */
                                lru_add_drain();
                                page = folio_file_page(folio, index);
                        } else if (trylock_page(page)) {
@@ -1988,7 +1988,7 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
                                page_cache_sync_readahead(mapping, &file->f_ra,
                                                          file, index,
                                                          end - index);
-                               /* drain pagevecs to help isolate_lru_page() */
+                               /* drain lru cache to help isolate_lru_page() */
                                lru_add_drain();
                                page = find_lock_page(mapping, index);
                                if (unlikely(page == NULL)) {
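
[Illustration, not part of the patch.] The khugepaged hunks call lru_add_drain() because a folio still parked in a CPU's LRU-add cache carries one extra reference, which makes isolate_lru_page() and the later refcount checks see an unexpected pin. A rough userspace sketch of that bookkeeping; the fixed-size pending array and all names here are invented stand-ins for the kernel's real per-CPU folio batches:

#include <stdio.h>

#define CACHE_SLOTS 15          /* size chosen for the sketch only */

struct toy_page {
	int refcount;
	int on_lru;
};

/* Pages waiting to be moved onto the LRU list in one batch. */
static struct toy_page *pending[CACHE_SLOTS];
static int npending;

/* Flush the cache: put every pending page on the LRU, drop the cache's ref. */
static void cache_drain(void)
{
	for (int i = 0; i < npending; i++) {
		pending[i]->on_lru = 1;
		pending[i]->refcount--;
	}
	npending = 0;
}

/* Queue a page: the cache itself keeps a reference until it is drained. */
static void cache_add(struct toy_page *p)
{
	if (npending == CACHE_SLOTS)
		cache_drain();
	p->refcount++;
	pending[npending++] = p;
}

int main(void)
{
	struct toy_page p = { .refcount = 1, .on_lru = 0 }; /* one ref: page cache */

	cache_add(&p);
	printf("before drain: refcount=%d on_lru=%d\n", p.refcount, p.on_lru); /* 2, 0 */
	cache_drain();
	printf("after drain:  refcount=%d on_lru=%d\n", p.refcount, p.on_lru); /* 1, 1 */
	return 0;
}
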
index d995779..ba26635 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -932,7 +932,7 @@ static int remove_stable_node(struct ksm_stable_node *stable_node)
                 * The stable node did not yet appear stale to get_ksm_page(),
                 * since that allows for an unmapped ksm page to be recognized
                 * right up until it is freed; but the node is safe to remove.
-                * This page might be in a pagevec waiting to be freed,
+                * This page might be in an LRU cache waiting to be freed,
                 * or it might be PageSwapCache (perhaps under writeback),
                 * or it might have been removed from swapcache a moment ago.
                 */
@@ -2303,8 +2303,8 @@ static struct ksm_rmap_item *scan_get_next_rmap_item(struct page **page)
                trace_ksm_start_scan(ksm_scan.seqnr, ksm_rmap_items);
 
                /*
-                * A number of pages can hang around indefinitely on per-cpu
-                * pagevecs, raised page count preventing write_protect_page
+                * A number of pages can hang around indefinitely in per-cpu
+                * LRU cache, raised page count preventing write_protect_page
                 * from merging them.  Though it doesn't really matter much,
                 * it is puzzling to see some stuck in pages_volatile until
                 * other activity jostles them out, and they also prevented
index 3d78b55..f758f59 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3401,8 +3401,8 @@ static vm_fault_t do_wp_page(struct vm_fault *vmf)
                        goto copy;
                if (!folio_test_lru(folio))
                        /*
-                        * Note: We cannot easily detect+handle references from
-                        * remote LRU pagevecs or references to LRU folios.
+                        * We cannot easily detect+handle references from
+                        * remote LRU caches or references to LRU folios.
                         */
                        lru_add_drain();
                if (folio_ref_count(folio) > 1 + folio_test_swapcache(folio))
@@ -3880,7 +3880,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
                 * If we want to map a page that's in the swapcache writable, we
                 * have to detect via the refcount if we're really the exclusive
                 * owner. Try removing the extra reference from the local LRU
-                * pagevecs if required.
+                * caches if required.
                 */
                if ((vmf->flags & FAULT_FLAG_WRITE) && folio == swapcache &&
                    !folio_test_ksm(folio) && !folio_test_lru(folio))
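
[Illustration, not part of the patch.] The common pattern in the two memory.c hunks: lru_add_drain() only flushes the current CPU's LRU caches, so a locally cached reference can be removed before the exclusivity test, while a reference held in a remote CPU's cache cannot and forces a copy. A hedged sketch of that decision in plain C, with every type and helper invented for the example:

#include <stdbool.h>
#include <stdio.h>

/* Invented model of a folio, for illustration only. */
struct toy_folio {
	int refcount;
	bool on_lru;
	bool in_swapcache;
	int local_cache_refs;   /* refs held by this CPU's LRU cache */
	int remote_cache_refs;  /* refs held by other CPUs' caches   */
};

/* Model of lru_add_drain(): only the local CPU's cached references go away. */
static void drain_local_lru_cache(struct toy_folio *f)
{
	f->refcount -= f->local_cache_refs;
	f->local_cache_refs = 0;
	f->on_lru = true;
}

/* Decide, as do_wp_page() does, whether the folio can be reused for writing. */
static bool reuse_or_copy(struct toy_folio *f)
{
	if (!f->on_lru)
		drain_local_lru_cache(f);   /* remote caches stay untouched */

	/* one ref for us, plus one if the folio is in the swapcache */
	return f->refcount <= 1 + (f->in_swapcache ? 1 : 0);
}

int main(void)
{
	struct toy_folio a = { .refcount = 2, .local_cache_refs = 1 };
	struct toy_folio b = { .refcount = 2, .remote_cache_refs = 1 };

	printf("a: %s\n", reuse_or_copy(&a) ? "reuse" : "copy"); /* reuse */
	printf("b: %s\n", reuse_or_copy(&b) ? "reuse" : "copy"); /* copy  */
	return 0;
}
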
index 02d272b..8365158 100644
--- a/mm/migrate_device.c
+++ b/mm/migrate_device.c
@@ -376,7 +376,7 @@ static unsigned long migrate_device_unmap(unsigned long *src_pfns,
                /* ZONE_DEVICE pages are not on LRU */
                if (!is_zone_device_page(page)) {
                        if (!PageLRU(page) && allow_drain) {
-                               /* Drain CPU's pagevec */
+                               /* Drain CPU's lru cache */
                                lru_add_drain_all();
                                allow_drain = false;
                        }
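
[Illustration, not part of the patch.] migrate_device_unmap() cannot tell which CPU's cache is pinning a page, so it falls back to lru_add_drain_all(), which flushes every CPU's LRU cache; because that is expensive, the allow_drain flag ensures it happens at most once per call. A toy model of that drain-everything-but-only-once pattern (all names invented):

#include <stdbool.h>
#include <stdio.h>

struct toy_page {
	bool on_lru;
};

/* Model of lru_add_drain_all(): moves every cached page onto the LRU. */
static void drain_all_cpus(struct toy_page **pages, int n)
{
	printf("draining every CPU's LRU cache (expensive)\n");
	for (int i = 0; i < n; i++)
		pages[i]->on_lru = true;
}

static int isolate_for_migration(struct toy_page **pages, int n)
{
	bool allow_drain = true;    /* same idea as the flag in the hunk above */
	int isolated = 0;

	for (int i = 0; i < n; i++) {
		if (!pages[i]->on_lru && allow_drain) {
			drain_all_cpus(pages, n);
			allow_drain = false;    /* never pay the cost twice */
		}
		if (pages[i]->on_lru)
			isolated++;
	}
	return isolated;
}

int main(void)
{
	struct toy_page p0 = { .on_lru = true }, p1 = { 0 }, p2 = { 0 };
	struct toy_page *pages[] = { &p0, &p1, &p2 };

	printf("isolated %d of 3\n", isolate_for_migration(pages, 3));
	return 0;
}
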
index 10348c1..cd8f015 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -76,7 +76,7 @@ static DEFINE_PER_CPU(struct cpu_fbatches, cpu_fbatches) = {
 
 /*
  * This path almost never happens for VM activity - pages are normally freed
- * via pagevecs.  But it gets used by networking - and for compound pages.
+ * in batches.  But it gets used by networking - and for compound pages.
  */
 static void __page_cache_release(struct folio *folio)
 {
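
[Illustration, not part of the patch.] The comment refers to the normal free path, where folios are gathered into per-CPU batches and released together so the per-folio locking and bookkeeping are amortized over the whole batch; __page_cache_release() is the rarely taken single-folio exception. A toy userspace version of the batching idea (sizes and names are arbitrary):

#include <stdio.h>
#include <stdlib.h>

#define BATCH_SIZE 15   /* arbitrary for this sketch */

/* Collect pointers and free them in one go, amortizing per-free overhead. */
struct release_batch {
	void *slots[BATCH_SIZE];
	int count;
};

static void batch_flush(struct release_batch *b)
{
	/* In the kernel, per-batch work such as taking the LRU lock goes here. */
	for (int i = 0; i < b->count; i++)
		free(b->slots[i]);
	b->count = 0;
}

static void batch_release(struct release_batch *b, void *obj)
{
	b->slots[b->count++] = obj;
	if (b->count == BATCH_SIZE)
		batch_flush(b);
}

int main(void)
{
	struct release_batch batch = { .count = 0 };

	for (int i = 0; i < 40; i++)
		batch_release(&batch, malloc(64));
	batch_flush(&batch);    /* release whatever is still pending */
	printf("done\n");
	return 0;
}
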
index 4a91757..95d1291 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -565,7 +565,7 @@ EXPORT_SYMBOL(invalidate_mapping_pages);
  * refcount.  We do this because invalidate_inode_pages2() needs stronger
  * invalidation guarantees, and cannot afford to leave pages behind because
  * shrink_page_list() has a temp ref on them, or because they're transiently
- * sitting in the folio_add_lru() pagevecs.
+ * sitting in the folio_add_lru() caches.
  */
 static int invalidate_complete_folio2(struct address_space *mapping,
                                        struct folio *folio)