mm/hugetlb: convert move_hugetlb_state() to folios
author     Sidhartha Kumar <sidhartha.kumar@oracle.com>
           Tue, 1 Nov 2022 22:30:59 +0000 (15:30 -0700)
committer  Andrew Morton <akpm@linux-foundation.org>
           Wed, 30 Nov 2022 23:58:43 +0000 (15:58 -0800)
Clean up unmap_and_move_huge_page() by converting move_hugetlb_state() to
take in folios.

[akpm@linux-foundation.org: fix CONFIG_HUGETLB_PAGE=n build]
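
A minimal sketch of what the conversion means at a call site, assuming a
caller that still holds struct page pointers (the example_* helper and its
hpage/new_hpage parameters are illustrative only; the prototypes and the
page_folio() conversion follow the diffs below):

    /* Old prototype: raw struct page pointers. */
    void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason);

    /* New prototype: folios. */
    void move_hugetlb_state(struct folio *old_folio, struct folio *new_folio, int reason);

    /* Hypothetical caller: convert pages to folios with page_folio() first. */
    static void example_finish_hugetlb_migration(struct page *hpage,
                                                 struct page *new_hpage, int reason)
    {
            struct folio *src = page_folio(hpage);
            struct folio *dst = page_folio(new_hpage);

            move_hugetlb_state(src, dst, reason);
    }
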
Link: https://lkml.kernel.org/r/20221101223059.460937-10-sidhartha.kumar@oracle.com
Signed-off-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Reviewed-by: Muchun Song <songmuchun@bytedance.com>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Cc: Bui Quang Minh <minhquangbui99@gmail.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Mina Almasry <almasrymina@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/hugetlb.h
mm/hugetlb.c
mm/migrate.c

diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 65ea340..58a3093 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -187,7 +187,7 @@ int get_hwpoison_huge_page(struct page *page, bool *hugetlb, bool unpoison);
 int get_huge_page_for_hwpoison(unsigned long pfn, int flags,
                                bool *migratable_cleared);
 void putback_active_hugepage(struct page *page);
-void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason);
+void move_hugetlb_state(struct folio *old_folio, struct folio *new_folio, int reason);
 void free_huge_page(struct page *page);
 void hugetlb_fix_reserve_counts(struct inode *inode);
 extern struct mutex *hugetlb_fault_mutex_table;
@@ -407,8 +407,8 @@ static inline void putback_active_hugepage(struct page *page)
 {
 }
 
-static inline void move_hugetlb_state(struct page *oldpage,
-                                       struct page *newpage, int reason)
+static inline void move_hugetlb_state(struct folio *old_folio,
+                                       struct folio *new_folio, int reason)
 {
 }
 
@@ -991,6 +991,11 @@ void hugetlb_unregister_node(struct node *node);
 #else  /* CONFIG_HUGETLB_PAGE */
 struct hstate {};
 
+static inline struct hugepage_subpool *hugetlb_folio_subpool(struct folio *folio)
+{
+       return NULL;
+}
+
 static inline struct hugepage_subpool *hugetlb_page_subpool(struct page *hpage)
 {
        return NULL;
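
The hugetlb_folio_subpool() stub added just above is the
CONFIG_HUGETLB_PAGE=n build fix noted in the changelog: common migration
code calls the helper without any #ifdef, so a NULL-returning stub is
needed when hugetlb is compiled out.  A rough sketch of the pattern it
keeps building (the example_* wrapper is hypothetical; the condition
mirrors the mm/migrate.c hunk further down):

    /*
     * Compiles with or without CONFIG_HUGETLB_PAGE; with the stub,
     * hugetlb_folio_subpool() returns NULL and the check is always false.
     */
    static bool example_hugetlb_src_busy(struct folio *src)
    {
            return hugetlb_folio_subpool(src) && !folio_mapping(src);
    }
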
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index e1950ff..76ebefe 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -7324,15 +7324,15 @@ void putback_active_hugepage(struct page *page)
        put_page(page);
 }
 
-void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason)
+void move_hugetlb_state(struct folio *old_folio, struct folio *new_folio, int reason)
 {
-       struct hstate *h = page_hstate(oldpage);
+       struct hstate *h = folio_hstate(old_folio);
 
-       hugetlb_cgroup_migrate(page_folio(oldpage), page_folio(newpage));
-       set_page_owner_migrate_reason(newpage, reason);
+       hugetlb_cgroup_migrate(old_folio, new_folio);
+       set_page_owner_migrate_reason(&new_folio->page, reason);
 
        /*
-        * transfer temporary state of the new huge page. This is
+        * transfer temporary state of the new hugetlb folio. This is
         * reverse to other transitions because the newpage is going to
         * be final while the old one will be freed so it takes over
         * the temporary status.
@@ -7341,12 +7341,14 @@ void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason)
         * here as well otherwise the global surplus count will not match
         * the per-node's.
         */
-       if (HPageTemporary(newpage)) {
-               int old_nid = page_to_nid(oldpage);
-               int new_nid = page_to_nid(newpage);
+       if (folio_test_hugetlb_temporary(new_folio)) {
+               int old_nid = folio_nid(old_folio);
+               int new_nid = folio_nid(new_folio);
+
+
+               folio_set_hugetlb_temporary(old_folio);
+               folio_clear_hugetlb_temporary(new_folio);
 
-               SetHPageTemporary(oldpage);
-               ClearHPageTemporary(newpage);
 
                /*
                 * There is no need to transfer the per-node surplus state
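
For readability, the page-based -> folio-based replacements exercised by
the hunk above (all pairs taken directly from its -/+ lines):

    /*
     * page_hstate(oldpage)          ->  folio_hstate(old_folio)
     * page_to_nid(oldpage)          ->  folio_nid(old_folio)
     * HPageTemporary(newpage)       ->  folio_test_hugetlb_temporary(new_folio)
     * SetHPageTemporary(oldpage)    ->  folio_set_hugetlb_temporary(old_folio)
     * ClearHPageTemporary(newpage)  ->  folio_clear_hugetlb_temporary(new_folio)
     * hugetlb_cgroup_migrate(page_folio(oldpage), page_folio(newpage))
     *                               ->  hugetlb_cgroup_migrate(old_folio, new_folio)
     * set_page_owner_migrate_reason(newpage, reason)
     *                               ->  set_page_owner_migrate_reason(&new_folio->page, reason)
     *     (page owner still takes a struct page, hence &new_folio->page)
     */
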
diff --git a/mm/migrate.c b/mm/migrate.c
index f8c85b4..4aea647 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1298,7 +1298,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
         * folio_mapping() set, hugetlbfs specific move page routine will not
         * be called and we could leak usage counts for subpools.
         */
-       if (hugetlb_page_subpool(hpage) && !folio_mapping(src)) {
+       if (hugetlb_folio_subpool(src) && !folio_mapping(src)) {
                rc = -EBUSY;
                goto out_unlock;
        }
@@ -1348,7 +1348,7 @@ put_anon:
                put_anon_vma(anon_vma);
 
        if (rc == MIGRATEPAGE_SUCCESS) {
-               move_hugetlb_state(hpage, new_hpage, reason);
+               move_hugetlb_state(src, dst, reason);
                put_new_page = NULL;
        }