mm: remove vma arg from page_evictable
diff --git a/mm/vmscan.c b/mm/vmscan.c
index b010efc..8b62730 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -553,7 +553,7 @@ void putback_lru_page(struct page *page)
 redo:
        ClearPageUnevictable(page);
 
-       if (page_evictable(page, NULL)) {
+       if (page_evictable(page)) {
                /*
                 * For evictable pages, we can use the cache.
                 * In event of a race, worst case is we end up with an
@@ -587,7 +587,7 @@ redo:
         * page is on unevictable list, it never be freed. To avoid that,
         * check after we added it to the list, again.
         */
-       if (lru == LRU_UNEVICTABLE && page_evictable(page, NULL)) {
+       if (lru == LRU_UNEVICTABLE && page_evictable(page)) {
                if (!isolate_lru_page(page)) {
                        put_page(page);
                        goto redo;
@@ -709,7 +709,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 
                sc->nr_scanned++;
 
-               if (unlikely(!page_evictable(page, NULL)))
+               if (unlikely(!page_evictable(page)))
                        goto cull_mlocked;
 
                if (!sc->may_unmap && page_mapped(page))
@@ -1217,7 +1217,7 @@ putback_inactive_pages(struct lruvec *lruvec, struct list_head *page_list)
 
                VM_BUG_ON(PageLRU(page));
                list_del(&page->lru);
-               if (unlikely(!page_evictable(page, NULL))) {
+               if (unlikely(!page_evictable(page))) {
                        spin_unlock_irq(&zone->lru_lock);
                        putback_lru_page(page);
                        spin_lock_irq(&zone->lru_lock);
@@ -1470,7 +1470,7 @@ static void shrink_active_list(unsigned long nr_to_scan,
                page = lru_to_page(&l_hold);
                list_del(&page->lru);
 
-               if (unlikely(!page_evictable(page, NULL))) {
+               if (unlikely(!page_evictable(page))) {
                        putback_lru_page(page);
                        continue;
                }
@@ -3414,27 +3414,18 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 /*
  * page_evictable - test whether a page is evictable
  * @page: the page to test
- * @vma: the VMA in which the page is or will be mapped, may be NULL
  *
  * Test whether page is evictable--i.e., should be placed on active/inactive
- * lists vs unevictable list.  The vma argument is !NULL when called from the
- * fault path to determine how to instantate a new page.
+ * lists vs unevictable list.
  *
  * Reasons page might not be evictable:
  * (1) page's mapping marked unevictable
  * (2) page is part of an mlocked VMA
  *
  */
-int page_evictable(struct page *page, struct vm_area_struct *vma)
+int page_evictable(struct page *page)
 {
-
-       if (mapping_unevictable(page_mapping(page)))
-               return 0;
-
-       if (PageMlocked(page) || (vma && mlocked_vma_newpage(vma, page)))
-               return 0;
-
-       return 1;
+       return !mapping_unevictable(page_mapping(page)) && !PageMlocked(page);
 }
 
 #ifdef CONFIG_SHMEM
@@ -3472,7 +3463,7 @@ void check_move_unevictable_pages(struct page **pages, int nr_pages)
                if (!PageLRU(page) || !PageUnevictable(page))
                        continue;
 
-               if (page_evictable(page, NULL)) {
+               if (page_evictable(page)) {
                        enum lru_list lru = page_lru_base_type(page);
 
                        VM_BUG_ON(PageActive(page));
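
For illustration only: a minimal userspace sketch of the evictability test as it reads after this patch. The struct page, struct address_space and helper stand-ins below are mocked up for the example and are not the kernel's real definitions; they only model the two checks that remain once the vma argument and the mlocked_vma_newpage() test are dropped from the helper.

#include <stdbool.h>
#include <stdio.h>

/* Mock stand-ins for the kernel's struct address_space / struct page,
 * reduced to the two properties page_evictable() still looks at. */
struct address_space {
	bool unevictable;		/* models the mapping's unevictable flag */
};

struct page {
	bool mlocked;			/* models PG_mlocked */
	struct address_space *mapping;	/* NULL for an unmapped/anonymous page here */
};

static struct address_space *page_mapping(struct page *page)
{
	return page->mapping;
}

static bool mapping_unevictable(struct address_space *mapping)
{
	return mapping && mapping->unevictable;
}

static bool PageMlocked(struct page *page)
{
	return page->mlocked;
}

/* After this patch: no vma argument, no mlocked_vma_newpage() check. */
static int page_evictable(struct page *page)
{
	return !mapping_unevictable(page_mapping(page)) && !PageMlocked(page);
}

int main(void)
{
	struct address_space locked_mapping = { .unevictable = true };
	struct page plain  = { .mlocked = false, .mapping = NULL };
	struct page locked = { .mlocked = true,  .mapping = NULL };
	struct page pinned = { .mlocked = false, .mapping = &locked_mapping };

	printf("plain=%d mlocked=%d unevictable-mapping=%d\n",
	       page_evictable(&plain), page_evictable(&locked),
	       page_evictable(&pinned));
	return 0;
}

Callers that used to pass a VMA (or NULL) now pass only the page: every page_evictable(page, NULL) site in the hunks above becomes page_evictable(page), and the helper no longer consults a VMA at all.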