mm: vmscan: naming fixes: global_reclaim() and sane_reclaim()
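
global_reclaim() is a confusing name: "global reclaim" is just regular
reclaim invoked from the page allocator, while reclaim on behalf of a
cgroup limit is the special case and should be the explicit one.
Invert the meaning and rename it to cgroup_reclaim().

sane_reclaim() answers whether the usual dirty throttling mechanism in
balance_dirty_pages() is available to the current reclaim run, so
rename it to writeback_throttling_sane(). No functional change.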
diff --git a/mm/vmscan.c b/mm/vmscan.c
index bdb3105..666610c 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -239,13 +239,13 @@ static void unregister_memcg_shrinker(struct shrinker *shrinker)
        up_write(&shrinker_rwsem);
 }
 
-static bool global_reclaim(struct scan_control *sc)
+static bool cgroup_reclaim(struct scan_control *sc)
 {
-       return !sc->target_mem_cgroup;
+       return sc->target_mem_cgroup;
 }
 
 /**
- * sane_reclaim - is the usual dirty throttling mechanism operational?
+ * writeback_throttling_sane - is the usual dirty throttling mechanism available?
  * @sc: scan_control in question
  *
  * The normal page dirty throttling mechanism in balance_dirty_pages() is
@@ -257,11 +257,9 @@ static bool global_reclaim(struct scan_control *sc)
  * This function tests whether the vmscan currently in progress can assume
  * that the normal dirty throttling mechanism is operational.
  */
-static bool sane_reclaim(struct scan_control *sc)
+static bool writeback_throttling_sane(struct scan_control *sc)
 {
-       struct mem_cgroup *memcg = sc->target_mem_cgroup;
-
-       if (!memcg)
+       if (!cgroup_reclaim(sc))
                return true;
 #ifdef CONFIG_CGROUP_WRITEBACK
        if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
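
For reference (not part of the patch), the complete helper after this
change, including the unchanged closing lines that fall outside the
hunk context above:

    static bool writeback_throttling_sane(struct scan_control *sc)
    {
            /* Global reclaim can always use balance_dirty_pages() */
            if (!cgroup_reclaim(sc))
                    return true;
    #ifdef CONFIG_CGROUP_WRITEBACK
            /* cgroup2 has per-cgroup dirty throttling via cgroup writeback */
            if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
                    return true;
    #endif
            /* cgroup1 (legacy hierarchy) has no working dirty throttling */
            return false;
    }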
@@ -302,12 +300,12 @@ static void unregister_memcg_shrinker(struct shrinker *shrinker)
 {
 }
 
-static bool global_reclaim(struct scan_control *sc)
+static bool cgroup_reclaim(struct scan_control *sc)
 {
-       return true;
+       return false;
 }
 
-static bool sane_reclaim(struct scan_control *sc)
+static bool writeback_throttling_sane(struct scan_control *sc)
 {
        return true;
 }
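
With the CONFIG_MEMCG and !CONFIG_MEMCG definitions above in place, the
remaining hunks are a mechanical rename; every call site flips polarity
as follows (a summary of the hunks below, not itself part of the patch):

    global_reclaim(sc)    ->  !cgroup_reclaim(sc)
    !global_reclaim(sc)   ->  cgroup_reclaim(sc)
    sane_reclaim(sc)      ->  writeback_throttling_sane(sc)
    !sane_reclaim(sc)     ->  !writeback_throttling_sane(sc)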
@@ -1228,7 +1226,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                                goto activate_locked;
 
                        /* Case 2 above */
-                       } else if (sane_reclaim(sc) ||
+                       } else if (writeback_throttling_sane(sc) ||
                            !PageReclaim(page) || !may_enter_fs) {
                                /*
                                 * This is slightly racy - end_page_writeback()
@@ -1822,7 +1820,7 @@ static int too_many_isolated(struct pglist_data *pgdat, int file,
        if (current_is_kswapd())
                return 0;
 
-       if (!sane_reclaim(sc))
+       if (!writeback_throttling_sane(sc))
                return 0;
 
        if (file) {
@@ -1972,7 +1970,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
        reclaim_stat->recent_scanned[file] += nr_taken;
 
        item = current_is_kswapd() ? PGSCAN_KSWAPD : PGSCAN_DIRECT;
-       if (global_reclaim(sc))
+       if (!cgroup_reclaim(sc))
                __count_vm_events(item, nr_scanned);
        __count_memcg_events(lruvec_memcg(lruvec), item, nr_scanned);
        spin_unlock_irq(&pgdat->lru_lock);
@@ -1986,7 +1984,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
        spin_lock_irq(&pgdat->lru_lock);
 
        item = current_is_kswapd() ? PGSTEAL_KSWAPD : PGSTEAL_DIRECT;
-       if (global_reclaim(sc))
+       if (!cgroup_reclaim(sc))
                __count_vm_events(item, nr_reclaimed);
        __count_memcg_events(lruvec_memcg(lruvec), item, nr_reclaimed);
        reclaim_stat->recent_rotated[0] += stat.nr_activate[0];
@@ -2310,7 +2308,7 @@ static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg,
         * using the memory controller's swap limit feature would be
         * too expensive.
         */
-       if (!global_reclaim(sc) && !swappiness) {
+       if (cgroup_reclaim(sc) && !swappiness) {
                scan_balance = SCAN_FILE;
                goto out;
        }
@@ -2334,7 +2332,7 @@ static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg,
         * thrashing file LRU becomes infinitely more attractive than
         * anon pages.  Try to detect this based on file LRU size.
         */
-       if (global_reclaim(sc)) {
+       if (!cgroup_reclaim(sc)) {
                unsigned long pgdatfile;
                unsigned long pgdatfree;
                int z;
@@ -2568,7 +2566,7 @@ static void shrink_node_memcg(struct pglist_data *pgdat, struct mem_cgroup *memc
         * abort proportional reclaim if either the file or anon lru has already
         * dropped to zero at the first pass.
         */
-       scan_adjusted = (global_reclaim(sc) && !current_is_kswapd() &&
+       scan_adjusted = (!cgroup_reclaim(sc) && !current_is_kswapd() &&
                         sc->priority == DEF_PRIORITY);
 
        blk_start_plug(&plug);
@@ -2857,7 +2855,7 @@ static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
                 * Legacy memcg will stall in page writeback so avoid forcibly
                 * stalling in wait_iff_congested().
                 */
-               if (!global_reclaim(sc) && sane_reclaim(sc) &&
+               if (cgroup_reclaim(sc) && writeback_throttling_sane(sc) &&
                    sc->nr.dirty && sc->nr.dirty == sc->nr.congested)
                        set_memcg_congestion(pgdat, root, true);
 
@@ -2952,7 +2950,7 @@ static void shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
                 * Take care memory controller reclaiming has small influence
                 * to global LRU.
                 */
-               if (global_reclaim(sc)) {
+               if (!cgroup_reclaim(sc)) {
                        if (!cpuset_zone_allowed(zone,
                                                 GFP_KERNEL | __GFP_HARDWALL))
                                continue;
@@ -3052,7 +3050,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
 retry:
        delayacct_freepages_start();
 
-       if (global_reclaim(sc))
+       if (!cgroup_reclaim(sc))
                __count_zid_vm_events(ALLOCSTALL, sc->reclaim_idx, 1);
 
        do {