hugetlb: Convert to migrate_folio
author Matthew Wilcox (Oracle) <willy@infradead.org>
Mon, 6 Jun 2022 14:47:21 +0000 (10:47 -0400)
committer Matthew Wilcox (Oracle) <willy@infradead.org>
Tue, 2 Aug 2022 16:34:04 +0000 (12:34 -0400)
This involves converting migrate_huge_page_move_mapping().  We also need a
folio variant of hugetlb_set_page_subpool(), but that's for a later patch.
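
Until then, the hugetlbfs hunk below transfers the subpool through
&folio->page.  A minimal sketch of what such a helper might look like
(the name hugetlb_set_folio_subpool and its placement are illustrative,
not part of this patch):

	static inline void hugetlb_set_folio_subpool(struct folio *folio,
					struct hugepage_subpool *subpool)
	{
		/* Illustrative only: forward to the existing page-based API. */
		hugetlb_set_page_subpool(&folio->page, subpool);
	}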

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Acked-by: Muchun Song <songmuchun@bytedance.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
fs/hugetlbfs/inode.c
include/linux/migrate.h
mm/migrate.c

diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 14d33f7..eca1d0f 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -954,28 +954,33 @@ static int hugetlbfs_symlink(struct user_namespace *mnt_userns,
        return error;
 }
 
-static int hugetlbfs_migrate_page(struct address_space *mapping,
-                               struct page *newpage, struct page *page,
+#ifdef CONFIG_MIGRATION
+static int hugetlbfs_migrate_folio(struct address_space *mapping,
+                               struct folio *dst, struct folio *src,
                                enum migrate_mode mode)
 {
        int rc;
 
-       rc = migrate_huge_page_move_mapping(mapping, newpage, page);
+       rc = migrate_huge_page_move_mapping(mapping, dst, src);
        if (rc != MIGRATEPAGE_SUCCESS)
                return rc;
 
-       if (hugetlb_page_subpool(page)) {
-               hugetlb_set_page_subpool(newpage, hugetlb_page_subpool(page));
-               hugetlb_set_page_subpool(page, NULL);
+       if (hugetlb_page_subpool(&src->page)) {
+               hugetlb_set_page_subpool(&dst->page,
+                                       hugetlb_page_subpool(&src->page));
+               hugetlb_set_page_subpool(&src->page, NULL);
        }
 
        if (mode != MIGRATE_SYNC_NO_COPY)
-               migrate_page_copy(newpage, page);
+               folio_migrate_copy(dst, src);
        else
-               migrate_page_states(newpage, page);
+               folio_migrate_flags(dst, src);
 
        return MIGRATEPAGE_SUCCESS;
 }
+#else
+#define hugetlbfs_migrate_folio NULL
+#endif
 
 static int hugetlbfs_error_remove_page(struct address_space *mapping,
                                struct page *page)
@@ -1142,7 +1147,7 @@ static const struct address_space_operations hugetlbfs_aops = {
        .write_begin    = hugetlbfs_write_begin,
        .write_end      = hugetlbfs_write_end,
        .dirty_folio    = noop_dirty_folio,
-       .migratepage    = hugetlbfs_migrate_page,
+       .migrate_folio  = hugetlbfs_migrate_folio,
        .error_remove_page      = hugetlbfs_error_remove_page,
 };
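
With this, hugetlbfs migration is reached through the new aops method.
For context, the core migration path dispatches to it roughly like this
(a simplified sketch of move_to_new_folio() as it looks after the whole
migrate_folio conversion series, not part of this diff):

	struct address_space *mapping = folio_mapping(src);

	if (mapping && mapping->a_ops->migrate_folio)
		/* hugetlbfs folios end up in hugetlbfs_migrate_folio() */
		rc = mapping->a_ops->migrate_folio(mapping, dst, src, mode);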
 
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index c9986d5..13f7933 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -72,8 +72,8 @@ extern int isolate_movable_page(struct page *page, isolate_mode_t mode);
 
 extern void migrate_page_states(struct page *newpage, struct page *page);
 extern void migrate_page_copy(struct page *newpage, struct page *page);
-extern int migrate_huge_page_move_mapping(struct address_space *mapping,
-                                 struct page *newpage, struct page *page);
+int migrate_huge_page_move_mapping(struct address_space *mapping,
+               struct folio *dst, struct folio *src);
 extern int migrate_page_move_mapping(struct address_space *mapping,
                struct page *newpage, struct page *page, int extra_count);
 void migration_entry_wait_on_locked(swp_entry_t entry, pte_t *ptep,
@@ -104,7 +104,7 @@ static inline void migrate_page_copy(struct page *newpage,
                                     struct page *page) {}
 
 static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
-                                 struct page *newpage, struct page *page)
+                                 struct folio *dst, struct folio *src)
 {
        return -ENOSYS;
 }
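
Filesystems whose folios carry no fs-private state do not need a custom
method the way hugetlbfs does; assuming the generic migrate_folio()
helper introduced earlier in this series, they can wire it up directly
(example_aops is a made-up name):

	static const struct address_space_operations example_aops = {
		.migrate_folio	= migrate_folio, /* no private data to move */
	};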
diff --git a/mm/migrate.c b/mm/migrate.c
index 4ed8f0d..0dd3ec9 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -474,26 +474,26 @@ EXPORT_SYMBOL(folio_migrate_mapping);
  * of folio_migrate_mapping().
  */
 int migrate_huge_page_move_mapping(struct address_space *mapping,
-                                  struct page *newpage, struct page *page)
+                                  struct folio *dst, struct folio *src)
 {
-       XA_STATE(xas, &mapping->i_pages, page_index(page));
+       XA_STATE(xas, &mapping->i_pages, folio_index(src));
        int expected_count;
 
        xas_lock_irq(&xas);
-       expected_count = 2 + page_has_private(page);
-       if (!page_ref_freeze(page, expected_count)) {
+       expected_count = 2 + folio_has_private(src);
+       if (!folio_ref_freeze(src, expected_count)) {
                xas_unlock_irq(&xas);
                return -EAGAIN;
        }
 
-       newpage->index = page->index;
-       newpage->mapping = page->mapping;
+       dst->index = src->index;
+       dst->mapping = src->mapping;
 
-       get_page(newpage);
+       folio_get(dst);
 
-       xas_store(&xas, newpage);
+       xas_store(&xas, dst);
 
-       page_ref_unfreeze(page, expected_count - 1);
+       folio_ref_unfreeze(src, expected_count - 1);
 
        xas_unlock_irq(&xas);
 
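
A note on the reference arithmetic in this hunk, as I read it:

	/*
	 * expected_count = 2 + folio_has_private(src):
	 *   one reference held by the page cache (the i_pages slot),
	 *   one held by the migration caller, plus one if fs-private
	 *   data is attached.  folio_ref_freeze() succeeds only when
	 *   nobody else holds a transient reference.  xas_store() then
	 *   replaces src with dst in the xarray, so src is unfrozen at
	 *   expected_count - 1: the page-cache reference now belongs
	 *   to dst, pinned by the folio_get(dst) above.
	 */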