mm: vmscan: harmonize writeback congestion tracking for nodes & memcgs
author    Johannes Weiner <hannes@cmpxchg.org>
          Sun, 1 Dec 2019 01:55:52 +0000 (17:55 -0800)
committer Marek Szyprowski <m.szyprowski@samsung.com>
          Wed, 17 Jan 2024 17:15:53 +0000 (18:15 +0100)
The current writeback congestion tracking has separate flags for kswapd
reclaim (node level) and cgroup limit reclaim (memcg-node level).  This is
unnecessarily complicated: the lruvec is an existing abstraction layer for
that node-memcg intersection.

Introduce lruvec->flags and LRUVEC_CONGESTED.  Then track that at the
reclaim root level, which is either the NUMA node for global reclaim, or
the cgroup-node intersection for cgroup reclaim.
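As a stand-alone illustration (not part of the patch, and using plain C
in place of the kernel's atomic bitops and real struct layouts), the
tracking model after this change boils down to one flags word per
reclaim root, looked up through a single helper for both reclaim types:

    #include <stdio.h>

    #define LRUVEC_CONGESTED 0  /* bit index, as in enum lruvec_flags */
    #define MEMCGS 3            /* memcg 0 stands in for root_mem_cgroup */
    #define NODES  2

    struct lruvec { unsigned long flags; };

    /* one lruvec per memcg-node intersection */
    static struct lruvec lruvecs[MEMCGS][NODES];

    /* mirrors the patched mem_cgroup_lruvec(): no memcg (-1 here,
     * NULL in the kernel) falls back to the root memcg, so global
     * reclaim is simply reclaim rooted at the node-level lruvec */
    static struct lruvec *reclaim_root(int memcg, int node)
    {
            if (memcg < 0)
                    memcg = 0;
            return &lruvecs[memcg][node];
    }

    int main(void)
    {
            /* kswapd on node 1: tag the node-level (root memcg) lruvec */
            reclaim_root(-1, 1)->flags |= 1UL << LRUVEC_CONGESTED;
            /* limit reclaim of memcg 2 on node 1: tag that intersection */
            reclaim_root(2, 1)->flags |= 1UL << LRUVEC_CONGESTED;

            /* one lookup serves both; no separate congested fields */
            printf("root@node1=%lu memcg2@node1=%lu memcg1@node1=%lu\n",
                   reclaim_root(-1, 1)->flags & 1,
                   reclaim_root(2, 1)->flags & 1,
                   reclaim_root(1, 1)->flags & 1);
            return 0;
    }

In the kernel the flags word is manipulated with set_bit()/test_bit()/
clear_bit(), and kswapd clears the node-level bit again in
clear_pgdat_congested() once the node is balanced, as the diff below
shows.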

Link: http://lkml.kernel.org/r/20191022144803.302233-9-hannes@cmpxchg.org
Change-Id: Iaa10ac1d8ee7de5c6f2563ac5ff36ae9bae876a2
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Roman Gushchin <guro@fb.com>
Reviewed-by: Shakeel Butt <shakeelb@google.com>
Cc: Michal Hocko <mhocko@suse.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
[backport of the commit 1b05117df78e035afb5f66ef50bf8750d976ef08 from mainline]
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
include/linux/memcontrol.h
include/linux/mmzone.h
mm/vmscan.c

diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 832c32a..d9b3592 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -134,9 +134,6 @@ struct mem_cgroup_per_node {
        unsigned long           usage_in_excess;/* Set to the value by which */
                                                /* the soft limit is exceeded*/
        bool                    on_tree;
-       bool                    congested;      /* memcg has many dirty pages */
-                                               /* backed by a congested BDI */
-
        struct mem_cgroup       *memcg;         /* Back pointer, we cannot */
                                                /* use container_of        */
 };
@@ -412,6 +409,9 @@ static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
                goto out;
        }
 
+       if (!memcg)
+               memcg = root_mem_cgroup;
+
        mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
        lruvec = &mz->lruvec;
 out:
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index ed333ee..92d6fb2 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -295,6 +295,12 @@ struct zone_reclaim_stat {
        unsigned long           recent_scanned[2];
 };
 
+enum lruvec_flags {
+       LRUVEC_CONGESTED,               /* lruvec has many dirty pages
+                                        * backed by a congested BDI
+                                        */
+};
+
 struct lruvec {
        struct list_head                lists[NR_LRU_LISTS];
        struct zone_reclaim_stat        reclaim_stat;
@@ -302,6 +308,8 @@ struct lruvec {
        atomic_long_t                   inactive_age;
        /* Refaults at the time of last reclaim cycle */
        unsigned long                   refaults;
+       /* Various lruvec state flags (enum lruvec_flags) */
+       unsigned long                   flags;
 #ifdef CONFIG_MEMCG
        struct pglist_data *pgdat;
 #endif
@@ -571,9 +579,6 @@ struct zone {
 } ____cacheline_internodealigned_in_smp;
 
 enum pgdat_flags {
-       PGDAT_CONGESTED,                /* pgdat has many dirty pages backed by
-                                        * a congested BDI
-                                        */
        PGDAT_DIRTY,                    /* reclaim scanning has recently found
                                         * many dirty file pages at the tail
                                         * of the LRU.
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 84d84bb..a79dc51 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -267,29 +267,6 @@ static bool writeback_throttling_sane(struct scan_control *sc)
 #endif
        return false;
 }
-
-static void set_memcg_congestion(pg_data_t *pgdat,
-                               struct mem_cgroup *memcg,
-                               bool congested)
-{
-       struct mem_cgroup_per_node *mn;
-
-       if (!memcg)
-               return;
-
-       mn = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
-       WRITE_ONCE(mn->congested, congested);
-}
-
-static bool memcg_congested(pg_data_t *pgdat,
-                       struct mem_cgroup *memcg)
-{
-       struct mem_cgroup_per_node *mn;
-
-       mn = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
-       return READ_ONCE(mn->congested);
-
-}
 #else
 static int prealloc_memcg_shrinker(struct shrinker *shrinker)
 {
@@ -309,18 +286,6 @@ static bool writeback_throttling_sane(struct scan_control *sc)
 {
        return true;
 }
-
-static inline void set_memcg_congestion(struct pglist_data *pgdat,
-                               struct mem_cgroup *memcg, bool congested)
-{
-}
-
-static inline bool memcg_congested(struct pglist_data *pgdat,
-                       struct mem_cgroup *memcg)
-{
-       return false;
-
-}
 #endif
 
 /*
@@ -2720,12 +2685,6 @@ static inline bool should_continue_reclaim(struct pglist_data *pgdat,
        return inactive_lru_pages > pages_for_compaction;
 }
 
-static bool pgdat_memcg_congested(pg_data_t *pgdat, struct mem_cgroup *memcg)
-{
-       return test_bit(PGDAT_CONGESTED, &pgdat->flags) ||
-               (memcg && memcg_congested(pgdat, memcg));
-}
-
 static void shrink_node_memcgs(pg_data_t *pgdat, struct scan_control *sc)
 {
        struct mem_cgroup *target_memcg = sc->target_mem_cgroup;
@@ -2787,10 +2746,12 @@ static void shrink_node_memcgs(pg_data_t *pgdat, struct scan_control *sc)
 static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
 {
        struct reclaim_state *reclaim_state = current->reclaim_state;
-       struct mem_cgroup *target_memcg = sc->target_mem_cgroup;
        unsigned long nr_reclaimed, nr_scanned;
+       struct lruvec *target_lruvec;
        bool reclaimable = false;
 
+       target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat);
+
 again:
        memset(&sc->nr, 0, sizeof(sc->nr));
 
@@ -2805,7 +2766,7 @@ again:
        }
 
        /* Record the subtree's reclaim efficiency */
-       vmpressure(sc->gfp_mask, target_memcg, true,
+       vmpressure(sc->gfp_mask, sc->target_mem_cgroup, true,
                   sc->nr_scanned - nr_scanned,
                   sc->nr_reclaimed - nr_reclaimed);
 
@@ -2833,14 +2794,6 @@ again:
                if (sc->nr.writeback && sc->nr.writeback == sc->nr.taken)
                        set_bit(PGDAT_WRITEBACK, &pgdat->flags);
 
-               /*
-                * Tag a node as congested if all the dirty pages
-                * scanned were backed by a congested BDI and
-                * wait_iff_congested will stall.
-                */
-               if (sc->nr.dirty && sc->nr.dirty == sc->nr.congested)
-                       set_bit(PGDAT_CONGESTED, &pgdat->flags);
-
                /* Allow kswapd to start writing pages during reclaim.*/
                if (sc->nr.unqueued_dirty == sc->nr.file_taken)
                        set_bit(PGDAT_DIRTY, &pgdat->flags);
@@ -2856,12 +2809,17 @@ again:
        }
 
        /*
+        * Tag a node/memcg as congested if all the dirty pages
+        * scanned were backed by a congested BDI and
+        * wait_iff_congested will stall.
+        *
         * Legacy memcg will stall in page writeback so avoid forcibly
         * stalling in wait_iff_congested().
         */
-       if (cgroup_reclaim(sc) && writeback_throttling_sane(sc) &&
+       if ((current_is_kswapd() ||
+            (cgroup_reclaim(sc) && writeback_throttling_sane(sc))) &&
            sc->nr.dirty && sc->nr.dirty == sc->nr.congested)
-               set_memcg_congestion(pgdat, target_memcg, true);
+               set_bit(LRUVEC_CONGESTED, &target_lruvec->flags);
 
        /*
         * Stall direct reclaim for IO completions if underlying BDIs
@@ -2869,9 +2827,9 @@ again:
         * starts encountering unqueued dirty pages or cycling through
         * the LRU too quickly.
         */
-       if (!sc->hibernation_mode && !current_is_kswapd() &&
-           current_may_throttle() &&
-           pgdat_memcg_congested(pgdat, target_memcg))
+       if (!current_is_kswapd() && current_may_throttle() &&
+           !sc->hibernation_mode &&
+           test_bit(LRUVEC_CONGESTED, &target_lruvec->flags))
                wait_iff_congested(BLK_RW_ASYNC, HZ/10);
 
        if (should_continue_reclaim(pgdat, sc->nr_reclaimed - nr_reclaimed,
@@ -3085,8 +3043,16 @@ retry:
                if (zone->zone_pgdat == last_pgdat)
                        continue;
                last_pgdat = zone->zone_pgdat;
+
                snapshot_refaults(sc->target_mem_cgroup, zone->zone_pgdat);
-               set_memcg_congestion(last_pgdat, sc->target_mem_cgroup, false);
+
+               if (cgroup_reclaim(sc)) {
+                       struct lruvec *lruvec;
+
+                       lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup,
+                                                  zone->zone_pgdat);
+                       clear_bit(LRUVEC_CONGESTED, &lruvec->flags);
+               }
        }
 
        delayacct_freepages_end();
@@ -3458,7 +3424,9 @@ static bool pgdat_balanced(pg_data_t *pgdat, int order, int classzone_idx)
 /* Clear pgdat state for congested, dirty or under writeback. */
 static void clear_pgdat_congested(pg_data_t *pgdat)
 {
-       clear_bit(PGDAT_CONGESTED, &pgdat->flags);
+       struct lruvec *lruvec = mem_cgroup_lruvec(NULL, pgdat);
+
+       clear_bit(LRUVEC_CONGESTED, &lruvec->flags);
        clear_bit(PGDAT_DIRTY, &pgdat->flags);
        clear_bit(PGDAT_WRITEBACK, &pgdat->flags);
 }