diff --git a/mm/vmscan.c b/mm/vmscan.c
index 74296c2..201acea 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1866,69 +1866,6 @@ unsigned int reclaim_clean_pages_from_list(struct zone *zone,
 }
 
 /*
- * Attempt to remove the specified page from its LRU.  Only take this page
- * if it is of the appropriate PageActive status.  Pages which are being
- * freed elsewhere are also ignored.
- *
- * page:       page to consider
- * mode:       one of the LRU isolation modes defined above
- *
- * returns true on success, false on failure.
- */
-bool __isolate_lru_page_prepare(struct page *page, isolate_mode_t mode)
-{
-       /* Only take pages on the LRU. */
-       if (!PageLRU(page))
-               return false;
-
-       /* Compaction should not handle unevictable pages but CMA can do so */
-       if (PageUnevictable(page) && !(mode & ISOLATE_UNEVICTABLE))
-               return false;
-
-       /*
-        * To minimise LRU disruption, the caller can indicate that it only
-        * wants to isolate pages it will be able to operate on without
-        * blocking - clean pages for the most part.
-        *
-        * ISOLATE_ASYNC_MIGRATE is used to indicate that it only wants to pages
-        * that it is possible to migrate without blocking
-        */
-       if (mode & ISOLATE_ASYNC_MIGRATE) {
-               /* All the caller can do on PageWriteback is block */
-               if (PageWriteback(page))
-                       return false;
-
-               if (PageDirty(page)) {
-                       struct address_space *mapping;
-                       bool migrate_dirty;
-
-                       /*
-                        * Only pages without mappings or that have a
-                        * ->migratepage callback are possible to migrate
-                        * without blocking. However, we can be racing with
-                        * truncation so it's necessary to lock the page
-                        * to stabilise the mapping as truncation holds
-                        * the page lock until after the page is removed
-                        * from the page cache.
-                        */
-                       if (!trylock_page(page))
-                               return false;
-
-                       mapping = page_mapping(page);
-                       migrate_dirty = !mapping || mapping->a_ops->migratepage;
-                       unlock_page(page);
-                       if (!migrate_dirty)
-                               return false;
-               }
-       }
-
-       if ((mode & ISOLATE_UNMAPPED) && page_mapped(page))
-               return false;
-
-       return true;
-}
-
-/*
  * Update LRU sizes after isolating pages. The LRU size updates must
  * be complete before mem_cgroup_update_lru_size due to a sanity check.
  */
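
The helper deleted above gated LRU isolation on four conditions: the page must be on the LRU, unevictable pages are only taken with ISOLATE_UNEVICTABLE, ISOLATE_ASYNC_MIGRATE refuses pages under writeback and dirty pages whose mapping lacks a ->migratepage callback, and ISOLATE_UNMAPPED refuses mapped pages. After this change the reclaim side only needs the first and last of these, open-coded in isolate_lru_pages() below; the ISOLATE_ASYNC_MIGRATE portion is open-coded on the compaction side, which this per-file diff does not show. The following is a minimal userspace sketch of the old decision table, with a hypothetical page_model struct standing in for struct page and the page-lock/truncation race reduced to a comment:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for the kernel's isolate_mode_t flags. */
#define ISOLATE_UNMAPPED	0x1
#define ISOLATE_ASYNC_MIGRATE	0x2
#define ISOLATE_UNEVICTABLE	0x4

/* Hypothetical page model; the kernel uses struct page flag helpers. */
struct page_model {
	bool lru, unevictable, writeback, dirty, mapped;
	bool mapping_can_migrate;	/* !mapping || a_ops->migratepage */
};

static bool isolate_prepare(const struct page_model *p, unsigned int mode)
{
	if (!p->lru)				/* only take pages on the LRU */
		return false;
	if (p->unevictable && !(mode & ISOLATE_UNEVICTABLE))
		return false;
	if (mode & ISOLATE_ASYNC_MIGRATE) {
		if (p->writeback)		/* all a caller could do is block */
			return false;
		/*
		 * The kernel takes the page lock here to stabilise the
		 * mapping against truncation before inspecting it.
		 */
		if (p->dirty && !p->mapping_can_migrate)
			return false;
	}
	if ((mode & ISOLATE_UNMAPPED) && p->mapped)
		return false;
	return true;
}

int main(void)
{
	struct page_model dirty_no_cb = { .lru = true, .dirty = true };

	/* prints 0: a dirty page without a migratepage callback is refused */
	printf("async isolate of dirty page w/o migratepage: %d\n",
	       isolate_prepare(&dirty_no_cb, ISOLATE_ASYNC_MIGRATE));
	return 0;
}
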
@@ -1979,11 +1916,11 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
        unsigned long skipped = 0;
        unsigned long scan, total_scan, nr_pages;
        LIST_HEAD(pages_skipped);
-       isolate_mode_t mode = (sc->may_unmap ? 0 : ISOLATE_UNMAPPED);
 
        total_scan = 0;
        scan = 0;
        while (scan < nr_to_scan && !list_empty(src)) {
+               struct list_head *move_to = src;
                struct page *page;
 
                page = lru_to_page(src);
@@ -1993,9 +1930,9 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
                total_scan += nr_pages;
 
                if (page_zonenum(page) > sc->reclaim_idx) {
-                       list_move(&page->lru, &pages_skipped);
                        nr_skipped[page_zonenum(page)] += nr_pages;
-                       continue;
+                       move_to = &pages_skipped;
+                       goto move;
                }
 
                /*
@@ -2003,37 +1940,34 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
                 * return with no isolated pages if the LRU mostly contains
                 * ineligible pages.  This causes the VM to not reclaim any
                 * pages, triggering a premature OOM.
-                *
-                * Account all tail pages of THP.  This would not cause
-                * premature OOM since __isolate_lru_page() returns -EBUSY
-                * only when the page is being freed somewhere else.
+                * Account all tail pages of THP.
                 */
                scan += nr_pages;
-               if (!__isolate_lru_page_prepare(page, mode)) {
-                       /* It is being freed elsewhere */
-                       list_move(&page->lru, src);
-                       continue;
-               }
+
+               if (!PageLRU(page))
+                       goto move;
+               if (!sc->may_unmap && page_mapped(page))
+                       goto move;
+
                /*
                 * Be careful not to clear PageLRU until after we're
                 * sure the page is not being freed elsewhere -- the
                 * page release code relies on it.
                 */
-               if (unlikely(!get_page_unless_zero(page))) {
-                       list_move(&page->lru, src);
-                       continue;
-               }
+               if (unlikely(!get_page_unless_zero(page)))
+                       goto move;
 
                if (!TestClearPageLRU(page)) {
                        /* Another thread is already isolating this page */
                        put_page(page);
-                       list_move(&page->lru, src);
-                       continue;
+                       goto move;
                }
 
                nr_taken += nr_pages;
                nr_zone_taken[page_zonenum(page)] += nr_pages;
-               list_move(&page->lru, dst);
+               move_to = dst;
+move:
+               list_move(&page->lru, move_to);
        }
 
        /*
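
The restructured loop now funnels every outcome through a single list_move(&page->lru, move_to): move_to starts as src (the page is rotated back), switches to &pages_skipped for pages above sc->reclaim_idx, and only becomes dst once the PageLRU, may_unmap, refcount and TestClearPageLRU checks all pass. A minimal standalone sketch of that single-exit pattern, with a toy list_move() standing in for the kernel's <linux/list.h> and a single eligible flag standing in for the individual checks:

#include <stdbool.h>

/* Toy circular doubly-linked list, standing in for <linux/list.h>. */
struct list_head { struct list_head *prev, *next; };

static void list_move(struct list_head *entry, struct list_head *head)
{
	/* unlink @entry ... */
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
	/* ... and reinsert it right after @head */
	entry->next = head->next;
	entry->prev = head;
	head->next->prev = entry;
	head->next = entry;
}

struct page_model {
	struct list_head lru;
	bool wrong_zone;	/* page_zonenum(page) > sc->reclaim_idx */
	bool eligible;		/* PageLRU, may_unmap, refcount, TestClearPageLRU */
};

/* One scan step: every outcome exits through the same list_move(). */
static void scan_one(struct page_model *page, struct list_head *src,
		     struct list_head *skipped, struct list_head *dst)
{
	struct list_head *move_to = src;	/* default: rotate back to src */

	if (page->wrong_zone) {
		move_to = skipped;		/* park on pages_skipped */
		goto move;
	}
	if (!page->eligible)
		goto move;

	move_to = dst;				/* isolate */
move:
	list_move(&page->lru, move_to);
}

int main(void)
{
	struct list_head src = { &src, &src };
	struct list_head skipped = { &skipped, &skipped };
	struct list_head dst = { &dst, &dst };
	struct page_model page = { .lru = { &page.lru, &page.lru }, .eligible = true };

	list_move(&page.lru, &src);		/* start on the source list */
	scan_one(&page, &src, &skipped, &dst);
	return dst.next == &page.lru ? 0 : 1;	/* 0: isolated to dst */
}
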
@@ -2057,7 +1991,8 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
        }
        *nr_scanned = total_scan;
        trace_mm_vmscan_lru_isolate(sc->reclaim_idx, sc->order, nr_to_scan,
-                                   total_scan, skipped, nr_taken, mode, lru);
+                                   total_scan, skipped, nr_taken,
+                                   sc->may_unmap ? 0 : ISOLATE_UNMAPPED, lru);
        update_lru_sizes(lruvec, lru, nr_zone_taken);
        return nr_taken;
 }
@@ -2791,8 +2726,8 @@ static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
        enum lru_list lru;
        unsigned long nr_reclaimed = 0;
        unsigned long nr_to_reclaim = sc->nr_to_reclaim;
+       bool proportional_reclaim;
        struct blk_plug plug;
-       bool scan_adjusted;
 
        get_scan_count(lruvec, sc, nr);
 
@@ -2810,8 +2745,8 @@ static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
         * abort proportional reclaim if either the file or anon lru has already
         * dropped to zero at the first pass.
         */
-       scan_adjusted = (!cgroup_reclaim(sc) && !current_is_kswapd() &&
-                        sc->priority == DEF_PRIORITY);
+       proportional_reclaim = (!cgroup_reclaim(sc) && !current_is_kswapd() &&
+                               sc->priority == DEF_PRIORITY);
 
        blk_start_plug(&plug);
        while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
@@ -2831,7 +2766,7 @@ static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
 
                cond_resched();
 
-               if (nr_reclaimed < nr_to_reclaim || scan_adjusted)
+               if (nr_reclaimed < nr_to_reclaim || proportional_reclaim)
                        continue;
 
                /*
@@ -2882,8 +2817,6 @@ static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
                nr_scanned = targets[lru] - nr[lru];
                nr[lru] = targets[lru] * (100 - percentage) / 100;
                nr[lru] -= min(nr[lru], nr_scanned);
-
-               scan_adjusted = true;
        }
        blk_finish_plug(&plug);
        sc->nr_reclaimed += nr_reclaimed;
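
With scan_adjusted gone, the proportional rescaling near the end of the loop (nr_scanned = targets[lru] - nr[lru]; nr[lru] = targets[lru] * (100 - percentage) / 100; nr[lru] -= min(nr[lru], nr_scanned);) can run on every iteration after nr_to_reclaim has been met when proportional reclaim is not in effect, rather than only once. A worked numeric example of that rescaling, as a small standalone C program with illustrative numbers (one LRU per type instead of the kernel's four lists; the +1 mirrors the kernel's divide-by-zero guard in scan_target):

#include <stdio.h>

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

int main(void)
{
	/* Illustrative numbers: original scan targets and what is left. */
	unsigned long target_anon = 500,  nr_anon = 200;	/* 300 scanned */
	unsigned long target_file = 1000, nr_file = 700;	/* 300 scanned */
	unsigned long percentage, nr_scanned;

	/* anon is the LRU being stopped: ~39% of its target still remains */
	percentage = nr_anon * 100 / (target_anon + 1);		/* 39 */

	/* rescale file so it also stops at ~61% of its original target */
	nr_scanned = target_file - nr_file;			/* 300 */
	nr_file = target_file * (100 - percentage) / 100;	/* 610 */
	nr_file -= min_ul(nr_file, nr_scanned);			/* 310 */

	printf("file pages still to scan: %lu\n", nr_file);	/* prints 310 */
	return 0;
}
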