mm: swap: memcg: fix memcg stats for huge pages
Author:     Shakeel Butt <shakeelb@google.com>
AuthorDate: Wed, 3 Jun 2020 23:03:19 +0000 (16:03 -0700)
Commit:     Linus Torvalds <torvalds@linux-foundation.org>
CommitDate: Thu, 4 Jun 2020 03:09:49 +0000 (20:09 -0700)
Commit 2262185c5b28 ("mm: per-cgroup memory reclaim stats") added
PGLAZYFREE, PGACTIVATE and PGDEACTIVATE stats for cgroups, but missed a
couple of places, and the PGLAZYFREE accounting missed huge page
handling. Fix that. Also, for PGLAZYFREE, switch the update to the
irq-unsafe __count_memcg_events(), as interrupts are already disabled
at these call sites (a sketch of the two counting helpers follows the
tags below).

Fixes: 2262185c5b28 ("mm: per-cgroup memory reclaim stats")
Signed-off-by: Shakeel Butt <shakeelb@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Link: http://lkml.kernel.org/r/20200527182947.251343-1-shakeelb@google.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
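
For reference, the irq-safe helper is just the irq-unsafe primitive
bracketed by local_irq_save()/local_irq_restore(). The pagevec move
functions patched below already run with interrupts disabled (their
caller takes the lru_lock with spin_lock_irqsave()), so the bare
__count_memcg_events() is the correct variant there. A simplified
sketch of the two helpers, mirroring the pattern in
include/linux/memcontrol.h with the stat-update internals elided:

	/* irq-unsafe primitive: caller must have interrupts disabled */
	void __count_memcg_events(struct mem_cgroup *memcg,
				  enum vm_event_item idx,
				  unsigned long count);

	/* irq-safe wrapper: safe to call from any context */
	static inline void count_memcg_events(struct mem_cgroup *memcg,
					      enum vm_event_item idx,
					      unsigned long count)
	{
		unsigned long flags;

		local_irq_save(flags);
		__count_memcg_events(memcg, idx, count);
		local_irq_restore(flags);
	}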
diff --git a/mm/swap.c b/mm/swap.c
index fa07d31..dbcab84 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -320,6 +320,7 @@ static void __activate_page(struct page *page, struct lruvec *lruvec,
 {
        if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
                int lru = page_lru_base_type(page);
+               int nr_pages = hpage_nr_pages(page);
 
                del_page_from_lru_list(page, lruvec, lru);
                SetPageActive(page);
@@ -327,7 +328,9 @@ static void __activate_page(struct page *page, struct lruvec *lruvec,
                add_page_to_lru_list(page, lruvec, lru);
                trace_mm_lru_activate(page);
 
-               __count_vm_events(PGACTIVATE, hpage_nr_pages(page));
+               __count_vm_events(PGACTIVATE, nr_pages);
+               __count_memcg_events(lruvec_memcg(lruvec), PGACTIVATE,
+                                    nr_pages);
        }
 }
 
@@ -565,8 +568,11 @@ static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec,
                __count_vm_events(PGROTATED, nr_pages);
        }
 
-       if (active)
+       if (active) {
                __count_vm_events(PGDEACTIVATE, nr_pages);
+               __count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE,
+                                    nr_pages);
+       }
 }
 
 static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec,
@@ -574,13 +580,16 @@ static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec,
 {
        if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) {
                int lru = page_lru_base_type(page);
+               int nr_pages = hpage_nr_pages(page);
 
                del_page_from_lru_list(page, lruvec, lru + LRU_ACTIVE);
                ClearPageActive(page);
                ClearPageReferenced(page);
                add_page_to_lru_list(page, lruvec, lru);
 
-               __count_vm_events(PGDEACTIVATE, hpage_nr_pages(page));
+               __count_vm_events(PGDEACTIVATE, nr_pages);
+               __count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE,
+                                    nr_pages);
        }
 }
 
@@ -590,6 +599,7 @@ static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec,
        if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) &&
            !PageSwapCache(page) && !PageUnevictable(page)) {
                bool active = PageActive(page);
+               int nr_pages = hpage_nr_pages(page);
 
                del_page_from_lru_list(page, lruvec,
                                       LRU_INACTIVE_ANON + active);
@@ -603,8 +613,9 @@ static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec,
                ClearPageSwapBacked(page);
                add_page_to_lru_list(page, lruvec, LRU_INACTIVE_FILE);
 
-               __count_vm_events(PGLAZYFREE, hpage_nr_pages(page));
-               count_memcg_page_event(page, PGLAZYFREE);
+               __count_vm_events(PGLAZYFREE, nr_pages);
+               __count_memcg_events(lruvec_memcg(lruvec), PGLAZYFREE,
+                                    nr_pages);
        }
 }
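
Each hunk follows the same pattern: cache hpage_nr_pages(page) in a
local before the page is moved between LRU lists, then bump the global
vm_event and the memcg event counter by the same amount, so a THP is
accounted as HPAGE_PMD_NR base pages rather than 1. For context, a
paraphrased sketch of the helper as it existed at the time (see
include/linux/huge_mm.h; the !CONFIG_TRANSPARENT_HUGEPAGE stub simply
returns 1):

	static inline int hpage_nr_pages(struct page *page)
	{
		/* a PMD-mapped THP covers HPAGE_PMD_NR (e.g. 512) base pages */
		if (unlikely(PageTransHuge(page)))
			return HPAGE_PMD_NR;
		return 1;
	}

Reading the count into a local once also keeps the paired global and
memcg updates trivially consistent.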