SHM_LOCKED pages are unevictable
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 9588973..dfb342e 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -470,6 +470,84 @@ int remove_mapping(struct address_space *mapping, struct page *page)
        return 0;
 }
 
+/**
+ * putback_lru_page - put previously isolated page onto appropriate LRU list
+ * @page: page to be put back to appropriate lru list
+ *
+ * Add previously isolated @page to appropriate LRU list.
+ * Page may still be unevictable for other reasons.
+ *
+ * lru_lock must not be held, interrupts must be enabled.
+ */
+#ifdef CONFIG_UNEVICTABLE_LRU
+void putback_lru_page(struct page *page)
+{
+       int lru;
+       int active = !!TestClearPageActive(page);
+       int was_unevictable = PageUnevictable(page);
+
+       VM_BUG_ON(PageLRU(page));
+
+redo:
+       ClearPageUnevictable(page);
+
+       if (page_evictable(page, NULL)) {
+               /*
+                * For evictable pages, we can use the cache.
+                * In the event of a race, the worst case is that we end
+                * up with an unevictable page on the [in]active list.
+                * We know how to handle that.
+                */
+               lru = active + page_is_file_cache(page);
+               lru_cache_add_lru(page, lru);
+       } else {
+               /*
+                * Put unevictable pages directly on zone's unevictable
+                * list.
+                */
+               lru = LRU_UNEVICTABLE;
+               add_page_to_unevictable_list(page);
+       }
+       mem_cgroup_move_lists(page, lru);
+
+       /*
+        * The page's status can change while we move it among the LRUs.
+        * If an evictable page ends up on the unevictable list, it can
+        * never be freed. To avoid that, check again after adding it.
+        */
+       if (lru == LRU_UNEVICTABLE && page_evictable(page, NULL)) {
+               if (!isolate_lru_page(page)) {
+                       put_page(page);
+                       goto redo;
+               }
+               /* This means someone else dropped this page from the LRU,
+                * so it will be freed or put back to the LRU again. There
+                * is nothing to do here.
+                */
+       }
+
+       if (was_unevictable && lru != LRU_UNEVICTABLE)
+               count_vm_event(UNEVICTABLE_PGRESCUED);
+       else if (!was_unevictable && lru == LRU_UNEVICTABLE)
+               count_vm_event(UNEVICTABLE_PGCULLED);
+
+       put_page(page);         /* drop ref from isolate */
+}
+
+#else /* CONFIG_UNEVICTABLE_LRU */
+
+void putback_lru_page(struct page *page)
+{
+       int lru;
+       VM_BUG_ON(PageLRU(page));
+
+       lru = !!TestClearPageActive(page) + page_is_file_cache(page);
+       lru_cache_add_lru(page, lru);
+       mem_cgroup_move_lists(page, lru);
+       put_page(page);
+}
+#endif /* CONFIG_UNEVICTABLE_LRU */
+
 /*
  * shrink_page_list() returns the number of reclaimed pages
  */
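
For context, a sketch of the isolate/putback pairing that putback_lru_page() serves. The caller below is hypothetical; isolate_lru_page() and putback_lru_page() are assumed to be declared in mm/internal.h, and the caller must already hold a reference on the page:

static void isolate_then_putback(struct page *page)
{
        if (isolate_lru_page(page))
                return;         /* -EBUSY: page was not on an LRU list */

        /*
         * The page is now off the LRU, with the extra reference taken
         * by isolate_lru_page().  PageActive/PageUnevictable may still
         * be set; putback_lru_page() re-sorts the page onto the
         * appropriate list and drops the isolate reference.
         */
        putback_lru_page(page);
}
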
@@ -503,6 +582,12 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 
                sc->nr_scanned++;
 
+               if (unlikely(!page_evictable(page, NULL))) {
+                       unlock_page(page);
+                       putback_lru_page(page);
+                       continue;
+               }
+
                if (!sc->may_swap && page_mapped(page))
                        goto keep_locked;
 
@@ -602,7 +687,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                 * possible for a page to have PageDirty set, but it is actually
                 * clean (all its buffers are clean).  This happens if the
                 * buffers were written out directly, with submit_bh(). ext3
-                * will do this, as well as the blockdev mapping. 
+                * will do this, as well as the blockdev mapping.
                 * try_to_release_page() will discover that cleanness and will
                 * drop the buffers and mark the page clean - it can be freed.
                 *
@@ -650,6 +735,7 @@ activate_locked:
                /* Not a candidate for swapping, so reclaim swap space. */
                if (PageSwapCache(page) && vm_swap_full())
                        remove_exclusive_swap_page_ref(page);
+               VM_BUG_ON(PageActive(page));
                SetPageActive(page);
                pgactivate++;
 keep_locked:
@@ -699,6 +785,14 @@ int __isolate_lru_page(struct page *page, int mode, int file)
        if (mode != ISOLATE_BOTH && (!page_is_file_cache(page) != !file))
                return ret;
 
+       /*
+        * When this function is called for lumpy reclaim, we initially
+        * look at all LRU pages: active, inactive and unevictable; only
+        * evictable pages are handed to shrink_page_list.
+        */
+       if (PageUnevictable(page))
+               return ret;
+
        ret = -EBUSY;
        if (likely(get_page_unless_zero(page))) {
                /*
@@ -810,7 +904,7 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
                                /* else it is being freed elsewhere */
                                list_move(&cursor_page->lru, src);
                        default:
-                               break;
+                               break;  /* not on LRU, or on the wrong list */
                        }
                }
        }
@@ -870,8 +964,9 @@ static unsigned long clear_active_flags(struct list_head *page_list,
  * Returns -EBUSY if the page was not on an LRU list.
  *
  * The returned page will have PageLRU() cleared.  If it was found on
- * the active list, it will have PageActive set.  That flag may need
- * to be cleared by the caller before letting the page go.
+ * the active list, it will have PageActive set.  If it was found on
+ * the unevictable list, it will have the PageUnevictable bit set. Those
+ * flags may need to be cleared by the caller before letting the page go.
  *
  * The vmstat statistic corresponding to the list on which the page was
  * found will be decremented.
@@ -892,11 +987,10 @@ int isolate_lru_page(struct page *page)
 
                spin_lock_irq(&zone->lru_lock);
                if (PageLRU(page) && get_page_unless_zero(page)) {
-                       int lru = LRU_BASE;
+                       int lru = page_lru(page);
                        ret = 0;
                        ClearPageLRU(page);
 
-                       lru += page_is_file_cache(page) + !!PageActive(page);
                        del_page_from_lru_list(zone, page, lru);
                }
                spin_unlock_irq(&zone->lru_lock);
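
The removed open-coded arithmetic only knew the four [in]active lists; page_lru() also routes unevictable pages to LRU_UNEVICTABLE. Roughly its shape in this series (a sketch; the real helper lives in include/linux/mm_inline.h):

static inline enum lru_list page_lru(struct page *page)
{
        enum lru_list lru = LRU_BASE;

        if (PageUnevictable(page))
                lru = LRU_UNEVICTABLE;
        else {
                if (PageActive(page))
                        lru += LRU_ACTIVE;
                lru += page_is_file_cache(page);        /* 0, or LRU_FILE */
        }
        return lru;
}
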
@@ -909,7 +1003,8 @@ int isolate_lru_page(struct page *page)
  * of reclaimed pages
  */
 static unsigned long shrink_inactive_list(unsigned long max_scan,
-                       struct zone *zone, struct scan_control *sc, int file)
+                       struct zone *zone, struct scan_control *sc,
+                       int priority, int file)
 {
        LIST_HEAD(page_list);
        struct pagevec pvec;
@@ -927,8 +1022,19 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
                unsigned long nr_freed;
                unsigned long nr_active;
                unsigned int count[NR_LRU_LISTS] = { 0, };
-               int mode = (sc->order > PAGE_ALLOC_COSTLY_ORDER) ?
-                                       ISOLATE_BOTH : ISOLATE_INACTIVE;
+               int mode = ISOLATE_INACTIVE;
+
+               /*
+                * If we need a large contiguous chunk of memory, or have
+                * trouble getting a small set of contiguous pages, we
+                * will reclaim both active and inactive pages.
+                *
+                * We use the same threshold as pageout congestion_wait below.
+                */
+               if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
+                       mode = ISOLATE_BOTH;
+               else if (sc->order && priority < DEF_PRIORITY - 2)
+                       mode = ISOLATE_BOTH;
 
                nr_taken = sc->isolate_pages(sc->swap_cluster_max,
                             &page_list, &nr_scan, sc->order, mode,
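
Restating the isolation-mode choice above as a standalone helper (a sketch only; choose_isolate_mode() is a hypothetical name, and DEF_PRIORITY is 12 here, so the second test fires once an order > 0 reclaim has decayed below priority 10):

static int choose_isolate_mode(int order, int priority)
{
        /* Large contiguous allocation: take active pages too. */
        if (order > PAGE_ALLOC_COSTLY_ORDER)
                return ISOLATE_BOTH;
        /* Smaller order, but reclaim is struggling: same response. */
        if (order && priority < DEF_PRIORITY - 2)
                return ISOLATE_BOTH;
        return ISOLATE_INACTIVE;
}
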
@@ -996,11 +1102,20 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
                 * Put back any unfreeable pages.
                 */
                while (!list_empty(&page_list)) {
+                       int lru;
                        page = lru_to_page(&page_list);
                        VM_BUG_ON(PageLRU(page));
-                       SetPageLRU(page);
                        list_del(&page->lru);
-                       add_page_to_lru_list(zone, page, page_lru(page));
+                       if (unlikely(!page_evictable(page, NULL))) {
+                               spin_unlock_irq(&zone->lru_lock);
+                               putback_lru_page(page);
+                               spin_lock_irq(&zone->lru_lock);
+                               continue;
+                       }
+                       SetPageLRU(page);
+                       lru = page_lru(page);
+                       add_page_to_lru_list(zone, page, lru);
+                       mem_cgroup_move_lists(page, lru);
                        if (PageActive(page) && scan_global_lru(sc)) {
                                int file = !!page_is_file_cache(page);
                                zone->recent_rotated[file]++;
@@ -1095,6 +1210,11 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
                page = lru_to_page(&l_hold);
                list_del(&page->lru);
 
+               if (unlikely(!page_evictable(page, NULL))) {
+                       putback_lru_page(page);
+                       continue;
+               }
+
                /* page_referenced clears PageReferenced */
                if (page_mapping_inuse(page) &&
                    page_referenced(page, 0, sc->mem_cgroup))
@@ -1128,7 +1248,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
                ClearPageActive(page);
 
                list_move(&page->lru, &zone->lru[lru].list);
-               mem_cgroup_move_lists(page, false);
+               mem_cgroup_move_lists(page, lru);
                pgmoved++;
                if (!pagevec_add(&pvec, page)) {
                        __mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved);
@@ -1172,7 +1292,7 @@ static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
                shrink_active_list(nr_to_scan, zone, sc, priority, file);
                return 0;
        }
-       return shrink_inactive_list(nr_to_scan, zone, sc, file);
+       return shrink_inactive_list(nr_to_scan, zone, sc, priority, file);
 }
 
 /*
@@ -1274,7 +1394,7 @@ static unsigned long shrink_zone(int priority, struct zone *zone,
 
        get_scan_ratio(zone, sc, percent);
 
-       for_each_lru(l) {
+       for_each_evictable_lru(l) {
                if (scan_global_lru(sc)) {
                        int file = is_file_lru(l);
                        int scan;
@@ -1306,7 +1426,7 @@ static unsigned long shrink_zone(int priority, struct zone *zone,
 
        while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
                                        nr[LRU_INACTIVE_FILE]) {
-               for_each_lru(l) {
+               for_each_evictable_lru(l) {
                        if (nr[l]) {
                                nr_to_scan = min(nr[l],
                                        (unsigned long)sc->swap_cluster_max);
@@ -1863,8 +1983,8 @@ static unsigned long shrink_all_zones(unsigned long nr_pages, int prio,
                if (zone_is_all_unreclaimable(zone) && prio != DEF_PRIORITY)
                        continue;
 
-               for_each_lru(l) {
-                       /* For pass = 0 we don't shrink the active list */
+               for_each_evictable_lru(l) {
+                       /* For pass = 0, we don't shrink the active list */
                        if (pass == 0 &&
                                (l == LRU_ACTIVE || l == LRU_ACTIVE_FILE))
                                continue;
@@ -2201,3 +2321,116 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
        return ret;
 }
 #endif
+
+#ifdef CONFIG_UNEVICTABLE_LRU
+/*
+ * page_evictable - test whether a page is evictable
+ * @page: the page to test
+ * @vma: the VMA in which the page is or will be mapped, may be NULL
+ *
+ * Test whether page is evictable, i.e. whether it should be placed on the
+ * active/inactive lists rather than on the unevictable list.
+ *
+ * Reasons page might not be evictable:
+ * (1) page's mapping marked unevictable
+ *
+ * TODO - later patches
+ */
+int page_evictable(struct page *page, struct vm_area_struct *vma)
+{
+       if (mapping_unevictable(page_mapping(page)))
+               return 0;
+
+       /* TODO:  test page [!]evictable conditions */
+
+       return 1;
+}
+
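
page_evictable() keys off a flag on the page's address_space; note that page_mapping() can return NULL (e.g. for anonymous pages), so the flag test must tolerate that. A sketch of the helpers this relies on, added to include/linux/pagemap.h elsewhere in this series (the AS_UNEVICTABLE bit position is illustrative):

#define AS_UNEVICTABLE  (__GFP_BITS_SHIFT + 2)  /* illustrative bit position */

static inline void mapping_set_unevictable(struct address_space *mapping)
{
        set_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_clear_unevictable(struct address_space *mapping)
{
        clear_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline int mapping_unevictable(struct address_space *mapping)
{
        if (mapping)
                return test_bit(AS_UNEVICTABLE, &mapping->flags);
        return 0;       /* NULL mapping: treat as evictable */
}
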
+/**
+ * check_move_unevictable_page - check page for evictability and move to appropriate zone lru list
+ * @page: page to check evictability and move to appropriate lru list
+ * @zone: zone page is in
+ *
+ * Checks a page for evictability and moves the page to the appropriate
+ * zone lru list.
+ *
+ * Restrictions: zone->lru_lock must be held, page must be on LRU and must
+ * have PageUnevictable set.
+ */
+static void check_move_unevictable_page(struct page *page, struct zone *zone)
+{
+       VM_BUG_ON(PageActive(page));
+
+retry:
+       ClearPageUnevictable(page);
+       if (page_evictable(page, NULL)) {
+               enum lru_list l = LRU_INACTIVE_ANON + page_is_file_cache(page);
+               __dec_zone_state(zone, NR_UNEVICTABLE);
+               list_move(&page->lru, &zone->lru[l].list);
+               __inc_zone_state(zone, NR_INACTIVE_ANON + l);
+               __count_vm_event(UNEVICTABLE_PGRESCUED);
+       } else {
+               /*
+                * rotate unevictable list
+                */
+               SetPageUnevictable(page);
+               list_move(&page->lru, &zone->lru[LRU_UNEVICTABLE].list);
+               if (page_evictable(page, NULL))
+                       goto retry;
+       }
+}
+
+/**
+ * scan_mapping_unevictable_pages - scan an address space for evictable pages
+ * @mapping: struct address_space to scan for evictable pages
+ *
+ * Scan all pages in mapping.  Check unevictable pages for
+ * evictability and move them to the appropriate zone lru list.
+ */
+void scan_mapping_unevictable_pages(struct address_space *mapping)
+{
+       pgoff_t next = 0;
+       pgoff_t end   = (i_size_read(mapping->host) + PAGE_CACHE_SIZE - 1) >>
+                        PAGE_CACHE_SHIFT;
+       struct zone *zone;
+       struct pagevec pvec;
+
+       if (mapping->nrpages == 0)
+               return;
+
+       pagevec_init(&pvec, 0);
+       while (next < end &&
+               pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
+               int i;
+               int pg_scanned = 0;
+
+               zone = NULL;
+
+               for (i = 0; i < pagevec_count(&pvec); i++) {
+                       struct page *page = pvec.pages[i];
+                       pgoff_t page_index = page->index;
+                       struct zone *pagezone = page_zone(page);
+
+                       pg_scanned++;
+                       if (page_index > next)
+                               next = page_index;
+                       next++;
+
+                       if (pagezone != zone) {
+                               if (zone)
+                                       spin_unlock_irq(&zone->lru_lock);
+                               zone = pagezone;
+                               spin_lock_irq(&zone->lru_lock);
+                       }
+
+                       if (PageLRU(page) && PageUnevictable(page))
+                               check_move_unevictable_page(page, zone);
+               }
+               if (zone)
+                       spin_unlock_irq(&zone->lru_lock);
+               pagevec_release(&pvec);
+
+               count_vm_events(UNEVICTABLE_PGSCANNED, pg_scanned);
+       }
+}
+#endif
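
The SHM_LOCKED case in the commit title is the intended user of this scan: SHM_LOCK marks the shmem mapping unevictable, and SHM_UNLOCK rescues the pages stranded on the unevictable list. A simplified sketch of the shmem_lock() side (the real change lives in mm/shmem.c and also handles the RLIMIT_MEMLOCK accounting, omitted here):

int shmem_lock(struct file *file, int lock, struct user_struct *user)
{
        struct address_space *mapping = file->f_mapping;

        if (lock) {
                /* SHM_LOCK: vmscan culls these pages as it encounters them */
                mapping_set_unevictable(mapping);
        } else {
                /* SHM_UNLOCK: move stranded pages back onto the normal LRUs */
                mapping_clear_unevictable(mapping);
                scan_mapping_unevictable_pages(mapping);
        }
        return 0;
}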