mm/hugetlb_cgroup: convert hugetlb_cgroup_migrate to folios
author: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Tue, 1 Nov 2022 22:30:54 +0000 (15:30 -0700)
committer: Andrew Morton <akpm@linux-foundation.org>
Wed, 30 Nov 2022 23:58:42 +0000 (15:58 -0800)
Cleans up intermediate page to folio conversion code in
hugetlb_cgroup_migrate() by changing its arguments from pages to folios.

Link: https://lkml.kernel.org/r/20221101223059.460937-5-sidhartha.kumar@oracle.com
Signed-off-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Reviewed-by: Muchun Song <songmuchun@bytedance.com>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Cc: Bui Quang Minh <minhquangbui99@gmail.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Mina Almasry <almasrymina@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/hugetlb_cgroup.h
mm/hugetlb.c
mm/hugetlb_cgroup.c

index a7e3540..789b6fe 100644 (file)
@@ -177,8 +177,8 @@ extern void hugetlb_cgroup_uncharge_file_region(struct resv_map *resv,
                                                bool region_del);
 
 extern void hugetlb_cgroup_file_init(void) __init;
-extern void hugetlb_cgroup_migrate(struct page *oldhpage,
-                                  struct page *newhpage);
+extern void hugetlb_cgroup_migrate(struct folio *old_folio,
+                                  struct folio *new_folio);
 
 #else
 static inline void hugetlb_cgroup_uncharge_file_region(struct resv_map *resv,
@@ -286,8 +286,8 @@ static inline void hugetlb_cgroup_file_init(void)
 {
 }
 
-static inline void hugetlb_cgroup_migrate(struct page *oldhpage,
-                                         struct page *newhpage)
+static inline void hugetlb_cgroup_migrate(struct folio *old_folio,
+                                         struct folio *new_folio)
 {
 }
 
index 01ea43b..05a8328 100644 (file)
@@ -7325,7 +7325,7 @@ void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason)
 {
        struct hstate *h = page_hstate(oldpage);
 
-       hugetlb_cgroup_migrate(oldpage, newpage);
+       hugetlb_cgroup_migrate(page_folio(oldpage), page_folio(newpage));
        set_page_owner_migrate_reason(newpage, reason);
 
        /*
index 87a1125..b1b1833 100644 (file)
@@ -885,13 +885,11 @@ void __init hugetlb_cgroup_file_init(void)
  * hugetlb_lock will make sure a parallel cgroup rmdir won't happen
  * when we migrate hugepages
  */
-void hugetlb_cgroup_migrate(struct page *oldhpage, struct page *newhpage)
+void hugetlb_cgroup_migrate(struct folio *old_folio, struct folio *new_folio)
 {
        struct hugetlb_cgroup *h_cg;
        struct hugetlb_cgroup *h_cg_rsvd;
-       struct hstate *h = page_hstate(oldhpage);
-       struct folio *old_folio = page_folio(oldhpage);
-       struct folio *new_folio = page_folio(newhpage);
+       struct hstate *h = folio_hstate(old_folio);
 
        if (hugetlb_cgroup_disabled())
                return;
@@ -905,7 +903,7 @@ void hugetlb_cgroup_migrate(struct page *oldhpage, struct page *newhpage)
        /* move the h_cg details to new cgroup */
        set_hugetlb_cgroup(new_folio, h_cg);
        set_hugetlb_cgroup_rsvd(new_folio, h_cg_rsvd);
-       list_move(&newhpage->lru, &h->hugepage_activelist);
+       list_move(&new_folio->lru, &h->hugepage_activelist);
        spin_unlock_irq(&hugetlb_lock);
        return;
 }