mm: remove check_move_unevictable_pages()
Author:     Matthew Wilcox (Oracle) <willy@infradead.org>
AuthorDate: Wed, 21 Jun 2023 16:45:50 +0000 (17:45 +0100)
Commit:     Andrew Morton <akpm@linux-foundation.org>
CommitDate: Fri, 23 Jun 2023 23:59:29 +0000 (16:59 -0700)
All callers have now been converted to call
check_move_unevictable_folios().
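
For illustration, a converted caller that walks a mapping looks roughly
like the sketch below; example_unlock_mapping() is a hypothetical name,
and the real conversions happened in the preceding patches of this
series.  Because filemap_get_folios() fills the batch with folios
rather than pages, the PageTransTail() filtering that the shim removed
here performed is not needed in converted callers:

	#include <linux/pagemap.h>
	#include <linux/pagevec.h>
	#include <linux/sched.h>
	#include <linux/swap.h>

	/* Hypothetical caller sketch, not part of this patch. */
	static void example_unlock_mapping(struct address_space *mapping)
	{
		struct folio_batch fbatch;
		pgoff_t index = 0;

		folio_batch_init(&fbatch);
		/*
		 * filemap_get_folios() returns folios, never tail
		 * pages, so no PageTransTail() check is needed.
		 */
		while (filemap_get_folios(mapping, &index, ~0UL, &fbatch)) {
			check_move_unevictable_folios(&fbatch);
			folio_batch_release(&fbatch);
			cond_resched();
		}
	}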

Link: https://lkml.kernel.org/r/20230621164557.3510324-7-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
 include/linux/swap.h |  1 -
 mm/vmscan.c          | 17 -----------------
 2 files changed, 18 deletions(-)

diff --git a/include/linux/swap.h b/include/linux/swap.h
index ce7e82c..4565464 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -439,7 +439,6 @@ static inline bool node_reclaim_enabled(void)
 }
 
 void check_move_unevictable_folios(struct folio_batch *fbatch);
-void check_move_unevictable_pages(struct pagevec *pvec);
 
 extern void __meminit kswapd_run(int nid);
 extern void __meminit kswapd_stop(int nid);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 27f9089..049342b 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -8075,23 +8075,6 @@ int node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order)
 }
 #endif
 
-void check_move_unevictable_pages(struct pagevec *pvec)
-{
-       struct folio_batch fbatch;
-       unsigned i;
-
-       folio_batch_init(&fbatch);
-       for (i = 0; i < pvec->nr; i++) {
-               struct page *page = pvec->pages[i];
-
-               if (PageTransTail(page))
-                       continue;
-               folio_batch_add(&fbatch, page_folio(page));
-       }
-       check_move_unevictable_folios(&fbatch);
-}
-EXPORT_SYMBOL_GPL(check_move_unevictable_pages);
-
 /**
  * check_move_unevictable_folios - Move evictable folios to appropriate zone
  * lru list