mm/memcg: Convert mem_cgroup_uncharge() to take a folio
author: Matthew Wilcox (Oracle) <willy@infradead.org>
Sun, 2 May 2021 00:42:23 +0000 (20:42 -0400)
committer: Matthew Wilcox (Oracle) <willy@infradead.org>
Mon, 27 Sep 2021 13:27:31 +0000 (09:27 -0400)
Convert all the callers to call page_folio().  Most of them were already
using a head page, but a few of them I can't prove were, so this may
actually fix a bug.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Acked-by: Mike Rapoport <rppt@linux.ibm.com>
Reviewed-by: David Howells <dhowells@redhat.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
include/linux/memcontrol.h
mm/filemap.c
mm/khugepaged.c
mm/memcontrol.c
mm/memory-failure.c
mm/memremap.c
mm/page_alloc.c
mm/swap.c

index 19a5172..b4bc052 100644 (file)
@@ -722,12 +722,19 @@ int mem_cgroup_swapin_charge_page(struct page *page, struct mm_struct *mm,
                                  gfp_t gfp, swp_entry_t entry);
 void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry);
 
-void __mem_cgroup_uncharge(struct page *page);
-static inline void mem_cgroup_uncharge(struct page *page)
+void __mem_cgroup_uncharge(struct folio *folio);
+
+/**
+ * mem_cgroup_uncharge - Uncharge a folio.
+ * @folio: Folio to uncharge.
+ *
+ * Uncharge a folio previously charged with mem_cgroup_charge().
+ */
+static inline void mem_cgroup_uncharge(struct folio *folio)
 {
        if (mem_cgroup_disabled())
                return;
-       __mem_cgroup_uncharge(page);
+       __mem_cgroup_uncharge(folio);
 }
 
 void __mem_cgroup_uncharge_list(struct list_head *page_list);
@@ -1229,7 +1236,7 @@ static inline void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry)
 {
 }
 
-static inline void mem_cgroup_uncharge(struct page *page)
+static inline void mem_cgroup_uncharge(struct folio *folio)
 {
 }
 
index 816af22..44fcd9d 100644 (file)
@@ -940,7 +940,7 @@ unlock:
        if (xas_error(&xas)) {
                error = xas_error(&xas);
                if (charged)
-                       mem_cgroup_uncharge(page);
+                       mem_cgroup_uncharge(page_folio(page));
                goto error;
        }
 
index 8480a3b..6d56e7a 100644 (file)
@@ -1211,7 +1211,7 @@ out_up_write:
        mmap_write_unlock(mm);
 out_nolock:
        if (!IS_ERR_OR_NULL(*hpage))
-               mem_cgroup_uncharge(*hpage);
+               mem_cgroup_uncharge(page_folio(*hpage));
        trace_mm_collapse_huge_page(mm, isolated, result);
        return;
 }
@@ -1975,7 +1975,7 @@ xa_unlocked:
 out:
        VM_BUG_ON(!list_empty(&pagelist));
        if (!IS_ERR_OR_NULL(*hpage))
-               mem_cgroup_uncharge(*hpage);
+               mem_cgroup_uncharge(page_folio(*hpage));
        /* TODO: tracepoints */
 }
 
index 64eac15..6321ed6 100644 (file)
@@ -6858,22 +6858,16 @@ static void uncharge_folio(struct folio *folio, struct uncharge_gather *ug)
        css_put(&memcg->css);
 }
 
-/**
- * __mem_cgroup_uncharge - uncharge a page
- * @page: page to uncharge
- *
- * Uncharge a page previously charged with __mem_cgroup_charge().
- */
-void __mem_cgroup_uncharge(struct page *page)
+void __mem_cgroup_uncharge(struct folio *folio)
 {
        struct uncharge_gather ug;
 
-       /* Don't touch page->lru of any random page, pre-check: */
-       if (!page_memcg(page))
+       /* Don't touch folio->lru of any random page, pre-check: */
+       if (!folio_memcg(folio))
                return;
 
        uncharge_gather_clear(&ug);
-       uncharge_folio(page_folio(page), &ug);
+       uncharge_folio(folio, &ug);
        uncharge_batch(&ug);
 }
 
index 3e6449f..fffe4af 100644 (file)
@@ -762,7 +762,7 @@ static int delete_from_lru_cache(struct page *p)
                 * Poisoned page might never drop its ref count to 0 so we have
                 * to uncharge it manually from its memcg.
                 */
-               mem_cgroup_uncharge(p);
+               mem_cgroup_uncharge(page_folio(p));
 
                /*
                 * drop the page count elevated by isolate_lru_page()
index ed593bf..5a66a71 100644 (file)
@@ -505,7 +505,7 @@ void free_devmap_managed_page(struct page *page)
 
        __ClearPageWaiters(page);
 
-       mem_cgroup_uncharge(page);
+       mem_cgroup_uncharge(page_folio(page));
 
        /*
         * When a device_private page is freed, the page->mapping field
index b37435c..869d0b0 100644 (file)
@@ -724,7 +724,7 @@ static inline void free_the_page(struct page *page, unsigned int order)
 
 void free_compound_page(struct page *page)
 {
-       mem_cgroup_uncharge(page);
+       mem_cgroup_uncharge(page_folio(page));
        free_the_page(page, compound_order(page));
 }
 
index 0edbcb9..5679ce5 100644 (file)
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -94,7 +94,7 @@ static void __page_cache_release(struct page *page)
 static void __put_single_page(struct page *page)
 {
        __page_cache_release(page);
-       mem_cgroup_uncharge(page);
+       mem_cgroup_uncharge(page_folio(page));
        free_unref_page(page, 0);
 }