khugepage: replace try_to_release_page() with filemap_release_folio()
author		Vishal Moola (Oracle) <vishal.moola@gmail.com>
		Fri, 18 Nov 2022 07:30:53 +0000 (23:30 -0800)
committer	Andrew Morton <akpm@linux-foundation.org>
		Wed, 30 Nov 2022 23:59:02 +0000 (15:59 -0800)
Replace the page-based calls in collapse_file() with their folio
equivalents.  This change removes 4 calls to compound_head() and is in
preparation for the removal of the try_to_release_page() wrapper.

Link: https://lkml.kernel.org/r/20221118073055.55694-3-vishal.moola@gmail.com
Signed-off-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Naoya Horiguchi <naoya.horiguchi@nec.com>
Cc: Theodore Ts'o <tytso@mit.edu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
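
For context, try_to_release_page() is only a thin page-to-folio
compatibility shim around filemap_release_folio(), which is why callers
such as khugepaged can switch to the folio call directly.  A minimal
sketch of the wrapper being phased out (simplified from
mm/folio-compat.c; an illustration, not the exact tree contents):

	/* Legacy wrapper: convert the page to its folio, then defer. */
	bool try_to_release_page(struct page *page, gfp_t gfp)
	{
		return filemap_release_folio(page_folio(page), gfp);
	}

Once every caller operates on folios, the wrapper (and its per-call
page_folio() lookup) can be deleted outright.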
mm/khugepaged.c

index 0d8f548..913b0f4 100644
@@ -1789,6 +1789,7 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
        xas_set(&xas, start);
        for (index = start; index < end; index++) {
                struct page *page = xas_next(&xas);
+               struct folio *folio;
 
                VM_BUG_ON(index != xas.xa_index);
                if (is_shmem) {
@@ -1815,8 +1816,6 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
                        }
 
                        if (xa_is_value(page) || !PageUptodate(page)) {
-                               struct folio *folio;
-
                                xas_unlock_irq(&xas);
                                /* swap in or instantiate fallocated page */
                                if (shmem_get_folio(mapping->host, index,
@@ -1904,13 +1903,15 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
                        goto out_unlock;
                }
 
-               if (page_mapping(page) != mapping) {
+               folio = page_folio(page);
+
+               if (folio_mapping(folio) != mapping) {
                        result = SCAN_TRUNCATED;
                        goto out_unlock;
                }
 
-               if (!is_shmem && (PageDirty(page) ||
-                                 PageWriteback(page))) {
+               if (!is_shmem && (folio_test_dirty(folio) ||
+                                 folio_test_writeback(folio))) {
                        /*
                         * khugepaged only works on read-only fd, so this
                         * page is dirty because it hasn't been flushed
@@ -1920,20 +1921,20 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
                        goto out_unlock;
                }
 
-               if (isolate_lru_page(page)) {
+               if (folio_isolate_lru(folio)) {
                        result = SCAN_DEL_PAGE_LRU;
                        goto out_unlock;
                }
 
-               if (page_has_private(page) &&
-                   !try_to_release_page(page, GFP_KERNEL)) {
+               if (folio_has_private(folio) &&
+                   !filemap_release_folio(folio, GFP_KERNEL)) {
                        result = SCAN_PAGE_HAS_PRIVATE;
-                       putback_lru_page(page);
+                       folio_putback_lru(folio);
                        goto out_unlock;
                }
 
-               if (page_mapped(page))
-                       try_to_unmap(page_folio(page),
+               if (folio_mapped(folio))
+                       try_to_unmap(folio,
                                        TTU_IGNORE_MLOCK | TTU_BATCH_FLUSH);
 
                xas_lock_irq(&xas);
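
The compound_head() savings come from how the page-flag helpers are
generated: page-based tests such as PageDirty() first resolve a
possibly-tail page to its head page on every call, whereas the
folio_test_*() variants operate on the folio's flags directly.  A
simplified sketch of the difference (assumed, condensed from the
include/linux/page-flags.h macro machinery rather than its exact
expansion):

	/* Page-based test: resolves to the head page on every call. */
	static inline bool page_dirty_legacy(struct page *page)
	{
		return test_bit(PG_dirty, &compound_head(page)->flags);
	}

	/* Folio-based test: a folio is never a tail page, so no lookup. */
	static inline bool folio_dirty_sketch(struct folio *folio)
	{
		return test_bit(PG_dirty, &folio->flags);
	}

Looking up the folio once with page_folio() and reusing it for the
mapping, dirty, writeback, LRU, private and mapped checks is what lets
this hunk drop the repeated head-page resolutions.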