mm: vmscan: take page buffers dirty and locked state into account
[platform/adaptation/renesas_rcar/renesas_kernel.git] mm/vmscan.c
index d6c916d..c857943 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -669,6 +669,35 @@ static enum page_references page_check_references(struct page *page,
        return PAGEREF_RECLAIM;
 }
 
+/* Check if a page is dirty or under writeback */
+static void page_check_dirty_writeback(struct page *page,
+                                      bool *dirty, bool *writeback)
+{
+       struct address_space *mapping;
+
+       /*
+        * Anonymous pages are not handled by flushers and must be written
+        * from reclaim context. Do not stall reclaim based on them
+        */
+       if (!page_is_file_cache(page)) {
+               *dirty = false;
+               *writeback = false;
+               return;
+       }
+
+       /* By default assume that the page flags are accurate */
+       *dirty = PageDirty(page);
+       *writeback = PageWriteback(page);
+
+       /* Verify dirty/writeback state if the filesystem supports it */
+       if (!page_has_private(page))
+               return;
+
+       mapping = page_mapping(page);
+       if (mapping && mapping->a_ops->is_dirty_writeback)
+               mapping->a_ops->is_dirty_writeback(page, dirty, writeback);
+}
+
 /*
  * shrink_page_list() returns the number of reclaimed pages
  */
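
For reference (not part of this file's diff): the new ->is_dirty_writeback
address_space operation lets a buffer_head-backed filesystem report the true
dirty and writeback state of a page's buffers, which the page flags alone can
misstate. A minimal sketch of such an implementation follows; it is modelled on
the buffer_check_dirty_writeback() helper this series adds in fs/buffer.c, so
treat the function name and details here as illustrative, not authoritative.

    /* Illustrative sketch of a buffer_head-backed ->is_dirty_writeback */
    static void example_is_dirty_writeback(struct page *page,
                                           bool *dirty, bool *writeback)
    {
            struct buffer_head *head, *bh;

            *dirty = false;
            *writeback = false;

            /* The page itself may already be under writeback */
            if (PageWriteback(page))
                    *writeback = true;

            if (!page_has_buffers(page))
                    return;

            /* A locked buffer is assumed to be locked for IO */
            bh = head = page_buffers(page);
            do {
                    if (buffer_locked(bh))
                            *writeback = true;
                    if (buffer_dirty(bh))
                            *dirty = true;
                    bh = bh->b_this_page;
            } while (bh != head);
    }
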
@@ -676,8 +705,11 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                                      struct zone *zone,
                                      struct scan_control *sc,
                                      enum ttu_flags ttu_flags,
+                                     unsigned long *ret_nr_dirty,
                                      unsigned long *ret_nr_unqueued_dirty,
+                                     unsigned long *ret_nr_congested,
                                      unsigned long *ret_nr_writeback,
+                                     unsigned long *ret_nr_immediate,
                                      bool force_reclaim)
 {
        LIST_HEAD(ret_pages);
@@ -688,6 +720,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
        unsigned long nr_congested = 0;
        unsigned long nr_reclaimed = 0;
        unsigned long nr_writeback = 0;
+       unsigned long nr_immediate = 0;
 
        cond_resched();
 
@@ -697,6 +730,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                struct page *page;
                int may_enter_fs;
                enum page_references references = PAGEREF_RECLAIM_CLEAN;
+               bool dirty, writeback;
 
                cond_resched();
 
@@ -724,25 +758,77 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
                        (PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));
 
+               /*
+                * The number of dirty pages determines if a zone is marked
+                * reclaim_congested which affects wait_iff_congested. kswapd
+                * will stall and start writing pages if the tail of the LRU
+                * is all dirty unqueued pages.
+                */
+               page_check_dirty_writeback(page, &dirty, &writeback);
+               if (dirty || writeback)
+                       nr_dirty++;
+
+               if (dirty && !writeback)
+                       nr_unqueued_dirty++;
+
+               /*
+                * Treat this page as congested if the underlying BDI is or if
+                * pages are cycling through the LRU so quickly that the
+                * pages marked for immediate reclaim are making it to the
+                * end of the LRU a second time.
+                */
+               mapping = page_mapping(page);
+               if ((mapping && bdi_write_congested(mapping->backing_dev_info)) ||
+                   (writeback && PageReclaim(page)))
+                       nr_congested++;
+
+               /*
+                * If a page at the tail of the LRU is under writeback, there
+                * are three cases to consider.
+                *
+                * 1) If reclaim is encountering an excessive number of pages
+                *    under writeback and this page is both under writeback and
+                *    PageReclaim then it indicates that pages are being queued
+                *    for IO but are being recycled through the LRU before the
+                *    IO can complete. Waiting on the page itself risks an
+                *    indefinite stall if it is impossible to writeback the
+                *    page due to IO error or disconnected storage so instead
+                *    note that the LRU is being scanned too quickly and the
+                *    caller can stall after page list has been processed.
+                *
+                * 2) Global reclaim encounters a page, memcg encounters a
+                *    page that is not marked for immediate reclaim, or
+                *    the caller does not have __GFP_IO. In this case mark
+                *    the page for immediate reclaim and continue scanning.
+                *
+                *    __GFP_IO is checked  because a loop driver thread might
+                *    enter reclaim, and deadlock if it waits on a page for
+                *    which it is needed to do the write (loop masks off
+                *    __GFP_IO|__GFP_FS for this reason); but more thought
+                *    would probably show more reasons.
+                *
+                *    Don't require __GFP_FS, since we're not going into the
+                *    FS, just waiting on its writeback completion. Worryingly,
+                *    ext4 gfs2 and xfs allocate pages with
+                *    grab_cache_page_write_begin(,,AOP_FLAG_NOFS), so testing
+                *    may_enter_fs here is liable to OOM on them.
+                *
+                * 3) memcg encounters a page that is already marked
+                *    PageReclaim. memcg does not have any dirty pages
+                *    throttling so we could easily OOM just because too many
+                *    pages are in writeback and there is nothing else to
+                *    reclaim. Wait for the writeback to complete.
+                */
                if (PageWriteback(page)) {
-                       /*
-                        * memcg doesn't have any dirty pages throttling so we
-                        * could easily OOM just because too many pages are in
-                        * writeback and there is nothing else to reclaim.
-                        *
-                        * Check __GFP_IO, certainly because a loop driver
-                        * thread might enter reclaim, and deadlock if it waits
-                        * on a page for which it is needed to do the write
-                        * (loop masks off __GFP_IO|__GFP_FS for this reason);
-                        * but more thought would probably show more reasons.
-                        *
-                        * Don't require __GFP_FS, since we're not going into
-                        * the FS, just waiting on its writeback completion.
-                        * Worryingly, ext4 gfs2 and xfs allocate pages with
-                        * grab_cache_page_write_begin(,,AOP_FLAG_NOFS), so
-                        * testing may_enter_fs here is liable to OOM on them.
-                        */
-                       if (global_reclaim(sc) ||
+                       /* Case 1 above */
+                       if (current_is_kswapd() &&
+                           PageReclaim(page) &&
+                           zone_is_reclaim_writeback(zone)) {
+                               nr_immediate++;
+                               goto keep_locked;
+
+                       /* Case 2 above */
+                       } else if (global_reclaim(sc) ||
                            !PageReclaim(page) || !(sc->gfp_mask & __GFP_IO)) {
                                /*
                                 * This is slightly racy - end_page_writeback()
@@ -757,9 +843,13 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                                 */
                                SetPageReclaim(page);
                                nr_writeback++;
+
                                goto keep_locked;
+
+                       /* Case 3 above */
+                       } else {
+                               wait_on_page_writeback(page);
                        }
-                       wait_on_page_writeback(page);
                }
 
                if (!force_reclaim)
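
For reference (not part of this file's diff), the three cases handled by the
PageWriteback block above reduce to the following decision, restated here in
comment form:

    /*
     * Illustrative summary of the PageWriteback handling above:
     *
     *   current_is_kswapd() && PageReclaim(page) &&
     *   zone_is_reclaim_writeback(zone)
     *           -> case 1: count in nr_immediate, goto keep_locked
     *
     *   global_reclaim(sc) || !PageReclaim(page) || !(sc->gfp_mask & __GFP_IO)
     *           -> case 2: SetPageReclaim(), count in nr_writeback,
     *                      goto keep_locked
     *
     *   otherwise (memcg reclaim, PageReclaim already set, __GFP_IO allowed)
     *           -> case 3: wait_on_page_writeback()
     */
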
@@ -785,9 +875,10 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                        if (!add_to_swap(page, page_list))
                                goto activate_locked;
                        may_enter_fs = 1;
-               }
 
-               mapping = page_mapping(page);
+                       /* Adding to swap updated mapping */
+                       mapping = page_mapping(page);
+               }
 
                /*
                 * The page is mapped into the page tables of one or more
@@ -807,11 +898,6 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                }
 
                if (PageDirty(page)) {
-                       nr_dirty++;
-
-                       if (!PageWriteback(page))
-                               nr_unqueued_dirty++;
-
                        /*
                         * Only kswapd can writeback filesystem pages to
                         * avoid risk of stack overflow but only writeback
@@ -842,7 +928,6 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                        /* Page is dirty, try to write it out here */
                        switch (pageout(page, mapping, sc)) {
                        case PAGE_KEEP:
-                               nr_congested++;
                                goto keep_locked;
                        case PAGE_ACTIVATE:
                                goto activate_locked;
@@ -950,22 +1035,16 @@ keep:
                VM_BUG_ON(PageLRU(page) || PageUnevictable(page));
        }
 
-       /*
-        * Tag a zone as congested if all the dirty pages encountered were
-        * backed by a congested BDI. In this case, reclaimers should just
-        * back off and wait for congestion to clear because further reclaim
-        * will encounter the same problem
-        */
-       if (nr_dirty && nr_dirty == nr_congested && global_reclaim(sc))
-               zone_set_flag(zone, ZONE_CONGESTED);
-
        free_hot_cold_page_list(&free_pages, 1);
 
        list_splice(&ret_pages, page_list);
        count_vm_events(PGACTIVATE, pgactivate);
        mem_cgroup_uncharge_end();
+       *ret_nr_dirty += nr_dirty;
+       *ret_nr_congested += nr_congested;
        *ret_nr_unqueued_dirty += nr_unqueued_dirty;
        *ret_nr_writeback += nr_writeback;
+       *ret_nr_immediate += nr_immediate;
        return nr_reclaimed;
 }
 
@@ -977,7 +1056,7 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
                .priority = DEF_PRIORITY,
                .may_unmap = 1,
        };
-       unsigned long ret, dummy1, dummy2;
+       unsigned long ret, dummy1, dummy2, dummy3, dummy4, dummy5;
        struct page *page, *next;
        LIST_HEAD(clean_pages);
 
@@ -989,8 +1068,8 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
        }
 
        ret = shrink_page_list(&clean_pages, zone, &sc,
-                               TTU_UNMAP|TTU_IGNORE_ACCESS,
-                               &dummy1, &dummy2, true);
+                       TTU_UNMAP|TTU_IGNORE_ACCESS,
+                       &dummy1, &dummy2, &dummy3, &dummy4, &dummy5, true);
        list_splice(&clean_pages, page_list);
        __mod_zone_page_state(zone, NR_ISOLATED_FILE, -ret);
        return ret;
@@ -1285,7 +1364,10 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
        unsigned long nr_reclaimed = 0;
        unsigned long nr_taken;
        unsigned long nr_dirty = 0;
+       unsigned long nr_congested = 0;
+       unsigned long nr_unqueued_dirty = 0;
        unsigned long nr_writeback = 0;
+       unsigned long nr_immediate = 0;
        isolate_mode_t isolate_mode = 0;
        int file = is_file_lru(lru);
        struct zone *zone = lruvec_zone(lruvec);
@@ -1327,7 +1409,9 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
                return 0;
 
        nr_reclaimed = shrink_page_list(&page_list, zone, sc, TTU_UNMAP,
-                                       &nr_dirty, &nr_writeback, false);
+                               &nr_dirty, &nr_unqueued_dirty, &nr_congested,
+                               &nr_writeback, &nr_immediate,
+                               false);
 
        spin_lock_irq(&zone->lru_lock);
 
@@ -1361,7 +1445,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
         * same way balance_dirty_pages() manages.
         *
         * This scales the number of dirty pages that must be under writeback
-        * before throttling depending on priority. It is a simple backoff
+        * before a zone gets flagged ZONE_WRITEBACK. It is a simple backoff
         * function that has the most effect in the range DEF_PRIORITY to
         * DEF_PRIORITY-2 which is the priority range in which reclaim is
         * considered to be in trouble:
@@ -1372,19 +1456,54 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
         * ...
         * DEF_PRIORITY-6 For SWAP_CLUSTER_MAX isolated pages, throttle if any
         *                     isolated page is PageWriteback
+        *
+        * Once a zone is flagged ZONE_WRITEBACK, kswapd will count the number
+        * of pages under writeback that are flagged for immediate reclaim and
+        * stall if any are encountered in the nr_immediate check below.
         */
        if (nr_writeback && nr_writeback >=
                        (nr_taken >> (DEF_PRIORITY - sc->priority)))
-               wait_iff_congested(zone, BLK_RW_ASYNC, HZ/10);
+               zone_set_flag(zone, ZONE_WRITEBACK);
+
+       /*
+        * memcg will stall in page writeback so only consider forcibly
+        * stalling for global reclaim
+        */
+       if (global_reclaim(sc)) {
+               /*
+                * Tag a zone as congested if all the dirty pages scanned were
+                * backed by a congested BDI; wait_iff_congested will then stall.
+                */
+               if (nr_dirty && nr_dirty == nr_congested)
+                       zone_set_flag(zone, ZONE_CONGESTED);
+
+               /*
+                * If dirty pages are scanned that are not queued for IO, it
+                * implies that flushers are not keeping up. In this case, flag
+                * the zone ZONE_TAIL_LRU_DIRTY and kswapd will start writing
+                * pages from reclaim context. It will forcibly stall in the
+                * next check.
+                */
+               if (nr_unqueued_dirty == nr_taken)
+                       zone_set_flag(zone, ZONE_TAIL_LRU_DIRTY);
+
+               /*
+                * In addition, if kswapd scans pages marked for immediate
+                * reclaim and under writeback (nr_immediate), it implies that
+                * pages are cycling through the LRU faster than they are
+                * being written, so also forcibly stall.
+                */
+               if (nr_unqueued_dirty == nr_taken || nr_immediate)
+                       congestion_wait(BLK_RW_ASYNC, HZ/10);
+       }
 
        /*
-        * Similarly, if many dirty pages are encountered that are not
-        * currently being written then flag that kswapd should start
-        * writing back pages.
+        * Stall direct reclaim for IO completions if the underlying BDI or
+        * the zone is congested. Allow kswapd to continue until it starts
+        * encountering unqueued dirty pages or cycling through the LRU too
+        * quickly.
         */
-       if (global_reclaim(sc) && nr_dirty &&
-                       nr_dirty >= (nr_taken >> (DEF_PRIORITY - sc->priority)))
-               zone_set_flag(zone, ZONE_TAIL_LRU_DIRTY);
+       if (!sc->hibernation_mode && !current_is_kswapd())
+               wait_iff_congested(zone, BLK_RW_ASYNC, HZ/10);
 
        trace_mm_vmscan_lru_shrink_inactive(zone->zone_pgdat->node_id,
                zone_idx(zone),
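
For reference (not part of this file's diff), the flag-setting and stalling
added to shrink_inactive_list() above reduces to the following summary:

    /*
     * Illustrative summary of the decisions above:
     *
     *   nr_writeback && nr_writeback >= nr_taken >> (DEF_PRIORITY - priority)
     *           -> set ZONE_WRITEBACK
     *
     *   global reclaim && nr_dirty && nr_dirty == nr_congested
     *           -> set ZONE_CONGESTED
     *
     *   global reclaim && nr_unqueued_dirty == nr_taken
     *           -> set ZONE_TAIL_LRU_DIRTY and congestion_wait()
     *
     *   global reclaim && nr_immediate
     *           -> congestion_wait()
     *
     *   direct reclaim (!current_is_kswapd() && !sc->hibernation_mode)
     *           -> wait_iff_congested()
     */
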
@@ -2286,17 +2405,6 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
                                                WB_REASON_TRY_TO_FREE_PAGES);
                        sc->may_writepage = 1;
                }
-
-               /* Take a nap, wait for some writeback to complete */
-               if (!sc->hibernation_mode && sc->nr_scanned &&
-                   sc->priority < DEF_PRIORITY - 2) {
-                       struct zone *preferred_zone;
-
-                       first_zones_zonelist(zonelist, gfp_zone(sc->gfp_mask),
-                                               &cpuset_current_mems_allowed,
-                                               &preferred_zone);
-                       wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/10);
-               }
        } while (--sc->priority >= 0);
 
 out:
@@ -2669,22 +2777,57 @@ static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, long remaining,
  * the high watermark.
  *
  * Returns true if kswapd scanned at least the requested number of pages to
- * reclaim. This is used to determine if the scanning priority needs to be
- * raised.
+ * reclaim or if the lack of progress was due to pages under writeback.
+ * This is used to determine if the scanning priority needs to be raised.
  */
 static bool kswapd_shrink_zone(struct zone *zone,
+                              int classzone_idx,
                               struct scan_control *sc,
                               unsigned long lru_pages,
                               unsigned long *nr_attempted)
 {
        unsigned long nr_slab;
+       int testorder = sc->order;
+       unsigned long balance_gap;
        struct reclaim_state *reclaim_state = current->reclaim_state;
        struct shrink_control shrink = {
                .gfp_mask = sc->gfp_mask,
        };
+       bool lowmem_pressure;
 
        /* Reclaim above the high watermark. */
        sc->nr_to_reclaim = max(SWAP_CLUSTER_MAX, high_wmark_pages(zone));
+
+       /*
+        * Kswapd reclaims only single pages with compaction enabled. Trying
+        * too hard to reclaim until contiguous free pages have become
+        * available can hurt performance by evicting too much useful data
+        * from memory. Do not reclaim more than needed for compaction.
+        */
+       if (IS_ENABLED(CONFIG_COMPACTION) && sc->order &&
+                       compaction_suitable(zone, sc->order) !=
+                               COMPACT_SKIPPED)
+               testorder = 0;
+
+       /*
+        * We put equal pressure on every zone, unless one zone has way too
+        * many pages free already. The "too many pages" is defined as the
+        * high wmark plus a "gap" where the gap is either the low
+        * watermark or 1% of the zone, whichever is smaller.
+        */
+       balance_gap = min(low_wmark_pages(zone),
+               (zone->managed_pages + KSWAPD_ZONE_BALANCE_GAP_RATIO-1) /
+               KSWAPD_ZONE_BALANCE_GAP_RATIO);
+
+       /*
+        * If there is no low memory pressure or the zone is balanced then no
+        * reclaim is necessary
+        */
+       lowmem_pressure = (buffer_heads_over_limit && is_highmem(zone));
+       if (!lowmem_pressure && zone_balanced(zone, testorder,
+                                               balance_gap, classzone_idx))
+               return true;
+
        shrink_zone(zone, sc);
 
        reclaim_state->reclaimed_slab = 0;
@@ -2697,6 +2840,20 @@ static bool kswapd_shrink_zone(struct zone *zone,
        if (nr_slab == 0 && !zone_reclaimable(zone))
                zone->all_unreclaimable = 1;
 
+       zone_clear_flag(zone, ZONE_WRITEBACK);
+
+       /*
+        * If a zone reaches its high watermark, consider it to be no longer
+        * congested. It's possible there are dirty pages backed by congested
+        * BDIs but as pressure is relieved, speculatively avoid congestion
+        * waits.
+        */
+       if (!zone->all_unreclaimable &&
+           zone_balanced(zone, testorder, 0, classzone_idx)) {
+               zone_clear_flag(zone, ZONE_CONGESTED);
+               zone_clear_flag(zone, ZONE_TAIL_LRU_DIRTY);
+       }
+
        return sc->nr_scanned >= sc->nr_to_reclaim;
 }
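
As a sanity check on the balance_gap calculation that kswapd_shrink_zone() now
performs, a worked example follows. It is illustrative only: it assumes
KSWAPD_ZONE_BALANCE_GAP_RATIO is 100 (the "1% of the zone" in the comment) and
picks round numbers for the zone.

    /*
     * Worked example: a zone with managed_pages = 1048576 (4GB of 4KB pages)
     * and low_wmark_pages(zone) = 16384:
     *
     *   balance_gap = min(16384, (1048576 + 100 - 1) / 100)
     *               = min(16384, 10486)
     *               = 10486 pages (~41MB)
     *
     * so kswapd treats the zone as balanced, and skips reclaiming it, only
     * once free pages exceed the high watermark by roughly 41MB.
     */
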
 
@@ -2815,6 +2972,13 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
                }
 
                /*
+                * If we're getting trouble reclaiming, start doing writepage
+                * even in laptop mode.
+                */
+               if (sc.priority < DEF_PRIORITY - 2)
+                       sc.may_writepage = 1;
+
+               /*
                 * Now scan the zone in the dma->highmem direction, stopping
                 * at the last zone which needs scanning.
                 *
@@ -2825,8 +2989,6 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
                 */
                for (i = 0; i <= end_zone; i++) {
                        struct zone *zone = pgdat->node_zones + i;
-                       int testorder;
-                       unsigned long balance_gap;
 
                        if (!populated_zone(zone))
                                continue;
@@ -2847,68 +3009,14 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
                        sc.nr_reclaimed += nr_soft_reclaimed;
 
                        /*
-                        * We put equal pressure on every zone, unless
-                        * one zone has way too many pages free
-                        * already. The "too many pages" is defined
-                        * as the high wmark plus a "gap" where the
-                        * gap is either the low watermark or 1%
-                        * of the zone, whichever is smaller.
-                        */
-                       balance_gap = min(low_wmark_pages(zone),
-                               (zone->managed_pages +
-                                       KSWAPD_ZONE_BALANCE_GAP_RATIO-1) /
-                               KSWAPD_ZONE_BALANCE_GAP_RATIO);
-                       /*
-                        * Kswapd reclaims only single pages with compaction
-                        * enabled. Trying too hard to reclaim until contiguous
-                        * free pages have become available can hurt performance
-                        * by evicting too much useful data from memory.
-                        * Do not reclaim more than needed for compaction.
-                        */
-                       testorder = order;
-                       if (IS_ENABLED(CONFIG_COMPACTION) && order &&
-                                       compaction_suitable(zone, order) !=
-                                               COMPACT_SKIPPED)
-                               testorder = 0;
-
-                       if ((buffer_heads_over_limit && is_highmem_idx(i)) ||
-                           !zone_balanced(zone, testorder,
-                                          balance_gap, end_zone)) {
-                               /*
-                                * There should be no need to raise the
-                                * scanning priority if enough pages are
-                                * already being scanned that high
-                                * watermark would be met at 100% efficiency.
-                                */
-                               if (kswapd_shrink_zone(zone, &sc, lru_pages,
-                                                      &nr_attempted))
-                                       raise_priority = false;
-                       }
-
-                       /*
-                        * If we're getting trouble reclaiming, start doing
-                        * writepage even in laptop mode.
+                        * There should be no need to raise the scanning
+                        * priority if enough pages are already being scanned
+                        * that the high watermark would be met at 100%
+                        * efficiency.
                         */
-                       if (sc.priority < DEF_PRIORITY - 2)
-                               sc.may_writepage = 1;
-
-                       if (zone->all_unreclaimable) {
-                               if (end_zone && end_zone == i)
-                                       end_zone--;
-                               continue;
-                       }
-
-                       if (zone_balanced(zone, testorder, 0, end_zone))
-                               /*
-                                * If a zone reaches its high watermark,
-                                * consider it to be no longer congested. It's
-                                * possible there are dirty pages backed by
-                                * congested BDIs but as pressure is relieved,
-                                * speculatively avoid congestion waits
-                                * or writing pages from kswapd context.
-                                */
-                               zone_clear_flag(zone, ZONE_CONGESTED);
-                               zone_clear_flag(zone, ZONE_TAIL_LRU_DIRTY);
+                       if (kswapd_shrink_zone(zone, end_zone, &sc,
+                                       lru_pages, &nr_attempted))
+                               raise_priority = false;
                }
 
                /*