mm/memcg: rename mem_cgroup_split_huge_fixup to split_page_memcg and add nr_pages argument
author Zhou Guanghui <zhouguanghui1@huawei.com>
Sat, 13 Mar 2021 05:08:30 +0000 (21:08 -0800)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 30 Mar 2021 12:31:47 +0000 (14:31 +0200)
commit be6c8982e4ab9a41907555f601b711a7e2a17d4c upstream.

Rename mem_cgroup_split_huge_fixup to split_page_memcg and explicitly
pass the number of pages as an argument.

This makes the interface name more generic, so other potential users
can call it.  In addition, the complete memcg info (the memcg and its
flags) needs to be set on the tail pages.
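
As an illustration of the kind of non-THP user the rename enables, a
page splitter can propagate the head page's memcg to the tails in the
same way.  The sketch below is modeled on split_page() in
mm/page_alloc.c and is an assumption about how such a caller might
look, not a quote from any follow-up patch:

	void split_page(struct page *page, unsigned int order)
	{
		int i;

		VM_BUG_ON_PAGE(PageCompound(page), page);
		VM_BUG_ON_PAGE(!page_count(page), page);

		for (i = 1; i < (1 << order); i++)
			set_page_refcounted(page + i);
		split_page_owner(page, 1 << order);
		/* Hypothetical new call: copy head's memcg to the 2^order - 1 tails. */
		split_page_memcg(page, 1 << order);
	}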

Link: https://lkml.kernel.org/r/20210304074053.65527-2-zhouguanghui1@huawei.com
Signed-off-by: Zhou Guanghui <zhouguanghui1@huawei.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Reviewed-by: Shakeel Butt <shakeelb@google.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: Hanjun Guo <guohanjun@huawei.com>
Cc: Tianhong Ding <dingtianhong@huawei.com>
Cc: Weilong Chen <chenweilong@huawei.com>
Cc: Rui Xiang <rui.xiang@huawei.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Hugh Dickins <hughd@google.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
include/linux/memcontrol.h
mm/huge_memory.c
mm/memcontrol.c

diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 922a7f6..c691b1a 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -937,9 +937,7 @@ static inline void memcg_memory_event_mm(struct mm_struct *mm,
        rcu_read_unlock();
 }
 
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-void mem_cgroup_split_huge_fixup(struct page *head);
-#endif
+void split_page_memcg(struct page *head, unsigned int nr);
 
 #else /* CONFIG_MEMCG */
 
@@ -1267,7 +1265,7 @@ unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
        return 0;
 }
 
-static inline void mem_cgroup_split_huge_fixup(struct page *head)
+static inline void split_page_memcg(struct page *head, unsigned int nr)
 {
 }
 
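With the declaration no longer guarded by CONFIG_TRANSPARENT_HUGEPAGE,
and the empty stub above covering !CONFIG_MEMCG builds, call sites need
no conditional compilation at all; illustratively:

	/* Compiles to nothing when CONFIG_MEMCG=n; no #ifdef at the call site. */
	split_page_memcg(page, 1 << order);
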
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 4a78514..d9ade23 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2433,7 +2433,7 @@ static void __split_huge_page(struct page *page, struct list_head *list,
        lruvec = mem_cgroup_page_lruvec(head, pgdat);
 
        /* complete memcg works before add pages to LRU */
-       mem_cgroup_split_huge_fixup(head);
+       split_page_memcg(head, nr);
 
        if (PageAnon(head) && PageSwapCache(head)) {
                swp_entry_t entry = { .val = page_private(head) };
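
For context, the nr passed at this call site is the compound page's
subpage count, computed near the top of __split_huge_page(); assuming
the thp_nr_pages() helper of this kernel series, the relationship is:

	unsigned int nr = thp_nr_pages(head);	/* HPAGE_PMD_NR for a PMD-mapped THP */
	/* ... remainder of __split_huge_page() ... */
	split_page_memcg(head, nr);
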
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index d6966f1..dda4223 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3268,26 +3268,21 @@ void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size)
 
 #endif /* CONFIG_MEMCG_KMEM */
 
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-
 /*
- * Because tail pages are not marked as "used", set it. We're under
- * pgdat->lru_lock and migration entries setup in all page mappings.
+ * Because head->mem_cgroup is not set on tails, set it now.
  */
-void mem_cgroup_split_huge_fixup(struct page *head)
+void split_page_memcg(struct page *head, unsigned int nr)
 {
        struct mem_cgroup *memcg = head->mem_cgroup;
        int i;
 
-       if (mem_cgroup_disabled())
+       if (mem_cgroup_disabled() || !memcg)
                return;
 
-       for (i = 1; i < HPAGE_PMD_NR; i++) {
-               css_get(&memcg->css);
+       for (i = 1; i < nr; i++)
                head[i].mem_cgroup = memcg;
-       }
+       css_get_many(&memcg->css, nr - 1);
 }
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
 #ifdef CONFIG_MEMCG_SWAP
 /**
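
A note on the refcounting change visible in the new body: instead of
nr - 1 separate css_get() calls, all tail references are now taken in
one batched css_get_many(), a single atomic add on the css refcount.
From memory of the cgroup header of this era (an approximation, not a
quote), the helper is roughly:

	static inline void css_get_many(struct cgroup_subsys_state *css, unsigned int n)
	{
		if (!(css->flags & CSS_NO_REF))
			percpu_ref_get_many(&css->refcnt, n);
	}

The added "|| !memcg" early return also makes the function safe to call
on pages that were never charged to any cgroup.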