mm: memcontrol: fix slub memory accounting
authorMuchun Song <songmuchun@bytedance.com>
Wed, 24 Feb 2021 20:04:26 +0000 (12:04 -0800)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 4 Mar 2021 10:38:19 +0000 (11:38 +0100)
[ Upstream commit 96403bfe50c344b587ea53894954a9d152af1c9d ]

SLUB currently accounts kmalloc() and kmalloc_node() allocations larger
than order-1 page per-node.  But it forgets to update the per-memcg
vmstats.  So it can lead to inaccurate statistics of "slab_unreclaimable"
which is from memory.stat.  Fix it by using mod_lruvec_page_state instead
of mod_node_page_state.

Link: https://lkml.kernel.org/r/20210223092423.42420-1-songmuchun@bytedance.com
Fixes: 6a486c0ad4dc ("mm, sl[ou]b: improve memory accounting")
Signed-off-by: Muchun Song <songmuchun@bytedance.com>
Reviewed-by: Shakeel Butt <shakeelb@google.com>
Reviewed-by: Roman Gushchin <guro@fb.com>
Reviewed-by: Michal Koutný <mkoutny@suse.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Vladimir Davydov <vdavydov.dev@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
mm/slab_common.c
mm/slub.c

index f9ccd5d..8d96679 100644 (file)
@@ -836,8 +836,8 @@ void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
        page = alloc_pages(flags, order);
        if (likely(page)) {
                ret = page_address(page);
-               mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE_B,
-                                   PAGE_SIZE << order);
+               mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B,
+                                     PAGE_SIZE << order);
        }
        ret = kasan_kmalloc_large(ret, size, flags);
        /* As ret might get tagged, call kmemleak hook after KASAN. */
index 071e410..7b378e2 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3984,8 +3984,8 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
        page = alloc_pages_node(node, flags, order);
        if (page) {
                ptr = page_address(page);
-               mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE_B,
-                                   PAGE_SIZE << order);
+               mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B,
+                                     PAGE_SIZE << order);
        }
 
        return kmalloc_large_node_hook(ptr, size, flags);
@@ -4116,8 +4116,8 @@ void kfree(const void *x)
 
                BUG_ON(!PageCompound(page));
                kfree_hook(object);
-               mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE_B,
-                                   -(PAGE_SIZE << order));
+               mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B,
+                                     -(PAGE_SIZE << order));
                __free_pages(page, order);
                return;
        }