mm: vmscan: improve comment on low-page cache handling
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 196709f..259f820 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1656,7 +1656,6 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
        struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
        u64 fraction[2], denominator;
        enum lru_list lru;
-       int noswap = 0;
        bool force_scan = false;
        struct zone *zone = lruvec_zone(lruvec);
 
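For orientation before the main hunk: every early exit in the rewritten get_scan_count() expresses its policy as a (fraction[0], fraction[1]) pair over a denominator, where {0, 1} means scan only file pages, {1, 0} only anon, and {1, 1} both equally. The standalone C sketch below restates those branches as a decision table; the boolean parameters are boiled-down stand-ins for the kernel's predicates (sc->may_swap and nr_swap_pages, global_reclaim(), vmscan_swappiness(), the high-watermark test, inactive_file_is_low()), not kernel code.

#include <stdbool.h>
#include <stdio.h>

struct split { unsigned long anon, file, denom; };

/*
 * Decision table for the early exits in get_scan_count().  Each
 * boolean stands in for one kernel predicate; a denom of 0 means
 * "no early exit, fall through to the swappiness weighting".
 */
static struct split pick_split(bool can_swap, bool memcg_swappiness_off,
			       bool near_oom, bool file_cache_low,
			       bool plenty_inactive_file)
{
	if (!can_swap)			/* !sc->may_swap || no swap pages */
		return (struct split){ 0, 1, 1 };
	if (memcg_swappiness_off)	/* memcg reclaim, swappiness == 0 */
		return (struct split){ 0, 1, 1 };
	if (near_oom)			/* priority 0, swappiness != 0 */
		return (struct split){ 1, 1, 1 };
	if (file_cache_low)		/* global: file + free <= high wmark */
		return (struct split){ 1, 0, 1 };
	if (plenty_inactive_file)	/* !inactive_file_is_low() */
		return (struct split){ 0, 1, 1 };
	return (struct split){ 0, 0, 0 };
}

int main(void)
{
	struct split s = pick_split(true, false, false, true, false);

	printf("anon=%lu file=%lu denom=%lu\n", s.anon, s.file, s.denom);
	return 0;
}

Note that the ordering matters: the near-OOM override wins over the file-cache checks, so a priority-0 scan always touches both lists whenever swap is usable.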
@@ -1677,42 +1676,71 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
 
        /* If we have no swap space, do not bother scanning anon pages. */
        if (!sc->may_swap || (nr_swap_pages <= 0)) {
-               noswap = 1;
                fraction[0] = 0;
                fraction[1] = 1;
                denominator = 1;
                goto out;
        }
 
+       /*
+        * Global reclaim will swap to prevent OOM even with no
+        * swappiness, but memcg users want to use this knob to
+        * disable swapping for individual groups completely when
+        * using the memory controller's swap limit feature would be
+        * too expensive.
+        */
+       if (!global_reclaim(sc) && !vmscan_swappiness(sc)) {
+               fraction[0] = 0;
+               fraction[1] = 1;
+               denominator = 1;
+               goto out;
+       }
+
+       /*
+        * Do not apply any pressure balancing cleverness when the
+        * system is close to OOM, scan both anon and file equally
+        * (unless the swappiness setting disagrees with swapping).
+        */
+       if (!sc->priority && vmscan_swappiness(sc)) {
+               fraction[0] = 1;
+               fraction[1] = 1;
+               denominator = 1;
+               goto out;
+       }
+
        anon  = get_lru_size(lruvec, LRU_ACTIVE_ANON) +
                get_lru_size(lruvec, LRU_INACTIVE_ANON);
        file  = get_lru_size(lruvec, LRU_ACTIVE_FILE) +
                get_lru_size(lruvec, LRU_INACTIVE_FILE);
 
+       /*
+        * If it's foreseeable that reclaiming the file cache won't be
+        * enough to get the zone back into a desirable shape, we have
+        * to swap.  Better start now and leave the - probably heavily
+        * thrashing - remaining file pages alone.
+        */
        if (global_reclaim(sc)) {
-               free  = zone_page_state(zone, NR_FREE_PAGES);
+               free = zone_page_state(zone, NR_FREE_PAGES);
                if (unlikely(file + free <= high_wmark_pages(zone))) {
-                       /*
-                        * If we have very few page cache pages, force-scan
-                        * anon pages.
-                        */
                        fraction[0] = 1;
                        fraction[1] = 0;
                        denominator = 1;
                        goto out;
-               } else if (!inactive_file_is_low_global(zone)) {
-                       /*
-                        * There is enough inactive page cache, do not
-                        * reclaim anything from the working set right now.
-                        */
-                       fraction[0] = 0;
-                       fraction[1] = 1;
-                       denominator = 1;
-                       goto out;
                }
        }
 
        /*
+        * There is enough inactive page cache, do not reclaim
+        * anything from the anonymous working set right now.
+        */
+       if (!inactive_file_is_low(lruvec)) {
+               fraction[0] = 0;
+               fraction[1] = 1;
+               denominator = 1;
+               goto out;
+       }
+
+       /*
         * With swappiness at 100, anonymous and file have the same priority.
         * This scanning priority is essentially the inverse of IO cost.
         */
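The hunk elides the arithmetic between here and the out: label. In this era of vmscan.c it weights anon by swappiness and file by 200 - swappiness, each side amplified when its recently scanned pages were not rotated back onto the active list. A rough userspace model of that weighting follows; the +1 terms and the 200-point split follow the kernel's scheme, while the scanned/rotated inputs are purely illustrative.

#include <stdio.h>

/*
 * Model of the swappiness weighting: anon gets "swappiness" base
 * points, file gets "200 - swappiness", and each side is scaled by
 * the ratio of its recently scanned pages to those rotated back
 * (the +1 terms guard against division by zero).
 */
static void weigh(unsigned int swappiness,
		  unsigned long anon_scanned, unsigned long anon_rotated,
		  unsigned long file_scanned, unsigned long file_rotated,
		  unsigned long fraction[2], unsigned long *denominator)
{
	unsigned long ap, fp;

	ap = swappiness * (anon_scanned + 1) / (anon_rotated + 1);
	fp = (200 - swappiness) * (file_scanned + 1) / (file_rotated + 1);

	fraction[0] = ap;		/* anon weight */
	fraction[1] = fp;		/* file weight */
	*denominator = ap + fp + 1;
}

int main(void)
{
	unsigned long fraction[2], denominator;

	/* anon rotates heavily (hard to reclaim), file barely rotates */
	weigh(60, 1000, 900, 1000, 100, fraction, &denominator);
	printf("anon=%lu file=%lu denom=%lu\n",
	       fraction[0], fraction[1], denominator);
	return 0;
}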
@@ -1759,15 +1787,14 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
 out:
        for_each_evictable_lru(lru) {
                int file = is_file_lru(lru);
+               unsigned long size;
                unsigned long scan;
 
-               scan = get_lru_size(lruvec, lru);
-               if (sc->priority || noswap || !vmscan_swappiness(sc)) {
-                       scan >>= sc->priority;
-                       if (!scan && force_scan)
-                               scan = SWAP_CLUSTER_MAX;
-                       scan = div64_u64(scan * fraction[file], denominator);
-               }
+               size = get_lru_size(lruvec, lru);
+               scan = size >> sc->priority;
+               if (!scan && force_scan)
+                       scan = min(size, SWAP_CLUSTER_MAX);
+               scan = div64_u64(scan * fraction[file], denominator);
                nr[lru] = scan;
        }
 }
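The out: loop change deserves a closer look. Previously, at priority 0 with swap available and nonzero swappiness, the whole conditional was skipped and nr[lru] became the raw list size; and when a forced scan rounded a tiny list up, it jumped straight to SWAP_CLUSTER_MAX even if the list held fewer pages than that. The new version always scales by priority and fraction, and clamps the forced minimum to the list size. A before/after sketch of just the rounding, assuming SWAP_CLUSTER_MAX is 32 as in this kernel:

#include <stdio.h>

#define SWAP_CLUSTER_MAX 32UL

/* Old rounding: an almost-empty list is bumped past its real size. */
static unsigned long old_force_round(unsigned long size, int priority)
{
	unsigned long scan = size >> priority;

	if (!scan)
		scan = SWAP_CLUSTER_MAX;
	return scan;
}

/* New rounding: never ask for more pages than the list contains. */
static unsigned long new_force_round(unsigned long size, int priority)
{
	unsigned long scan = size >> priority;

	if (!scan)
		scan = size < SWAP_CLUSTER_MAX ? size : SWAP_CLUSTER_MAX;
	return scan;
}

int main(void)
{
	/* a 10-page LRU list, forced scan at priority 12 */
	printf("old=%lu new=%lu\n",
	       old_force_round(10, 12), new_force_round(10, 12));
	/* prints old=32 new=10 */
	return 0;
}

The old priority-0 behavior is preserved indirectly: the new near-OOM branch sets fraction to {1, 1}, and size >> 0 over a denominator of 1 still yields the full list size.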