mm: remove pages_to_free argument of move_active_pages_to_lru()
author Kirill Tkhai <ktkhai@virtuozzo.com>
Tue, 14 May 2019 00:16:57 +0000 (17:16 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Tue, 14 May 2019 16:47:45 +0000 (09:47 -0700)
We can use the input list argument as the output argument too, which makes
the separate pages_to_free parameter unnecessary and makes the function
more similar to putback_inactive_pages().

Link: http://lkml.kernel.org/r/155290129079.31489.16180612694090502942.stgit@localhost.localdomain
Signed-off-by: Kirill Tkhai <ktkhai@virtuozzo.com>
Reviewed-by: Daniel Jordan <daniel.m.jordan@oracle.com>
Cc: Michal Hocko <mhocko@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 5002cc4..4c5f4b8 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2004,10 +2004,10 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 
 static unsigned move_active_pages_to_lru(struct lruvec *lruvec,
                                     struct list_head *list,
-                                    struct list_head *pages_to_free,
                                     enum lru_list lru)
 {
        struct pglist_data *pgdat = lruvec_pgdat(lruvec);
+       LIST_HEAD(pages_to_free);
        struct page *page;
        int nr_pages;
        int nr_moved = 0;
@@ -2034,12 +2034,17 @@ static unsigned move_active_pages_to_lru(struct lruvec *lruvec,
                                (*get_compound_page_dtor(page))(page);
                                spin_lock_irq(&pgdat->lru_lock);
                        } else
-                               list_add(&page->lru, pages_to_free);
+                               list_add(&page->lru, &pages_to_free);
                } else {
                        nr_moved += nr_pages;
                }
        }
 
+       /*
+        * To save our caller's stack, reuse the input list for the pages to free.
+        */
+       list_splice(&pages_to_free, list);
+
        return nr_moved;
 }
 
@@ -2129,8 +2134,10 @@ static void shrink_active_list(unsigned long nr_to_scan,
         */
        reclaim_stat->recent_rotated[file] += nr_rotated;
 
-       nr_activate = move_active_pages_to_lru(lruvec, &l_active, &l_hold, lru);
-       nr_deactivate = move_active_pages_to_lru(lruvec, &l_inactive, &l_hold, lru - LRU_ACTIVE);
+       nr_activate = move_active_pages_to_lru(lruvec, &l_active, lru);
+       nr_deactivate = move_active_pages_to_lru(lruvec, &l_inactive, lru - LRU_ACTIVE);
+       /* Keep all free pages in l_active list */
+       list_splice(&l_inactive, &l_active);
 
        __count_vm_events(PGDEACTIVATE, nr_deactivate);
        __count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE, nr_deactivate);
@@ -2138,8 +2145,8 @@ static void shrink_active_list(unsigned long nr_to_scan,
        __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken);
        spin_unlock_irq(&pgdat->lru_lock);
 
-       mem_cgroup_uncharge_list(&l_hold);
-       free_unref_page_list(&l_hold);
+       mem_cgroup_uncharge_list(&l_active);
+       free_unref_page_list(&l_active);
        trace_mm_vmscan_lru_shrink_active(pgdat->node_id, nr_taken, nr_activate,
                        nr_deactivate, nr_rotated, sc->priority, file);
 }
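
To see the pattern outside the kernel, here is a minimal, self-contained
userspace sketch (not part of the commit). The list primitives below are
simplified stand-ins for the kernel's <linux/list.h>, and move_kept_items(),
struct item, and its "keep" flag are hypothetical names invented for the
example: a helper drains the caller's list, keeps some entries, and splices
the rejects back onto that same input list, so the caller needs no second
on-stack list for the entries to be freed.

#include <stdio.h>
#include <stddef.h>

/* Simplified stand-ins for the kernel's <linux/list.h> primitives. */
struct list_head { struct list_head *prev, *next; };

#define LIST_HEAD_INIT(name) { &(name), &(name) }
#define LIST_HEAD(name) struct list_head name = LIST_HEAD_INIT(name)

static void INIT_LIST_HEAD(struct list_head *h) { h->prev = h; h->next = h; }

static int list_empty(const struct list_head *h) { return h->next == h; }

static void list_add(struct list_head *entry, struct list_head *head)
{
        entry->next = head->next;
        entry->prev = head;
        head->next->prev = entry;
        head->next = entry;
}

static void list_del(struct list_head *entry)
{
        entry->prev->next = entry->next;
        entry->next->prev = entry->prev;
}

/* Move everything on @list to the front of @head and empty @list. */
static void list_splice(struct list_head *list, struct list_head *head)
{
        struct list_head *first, *last;

        if (list_empty(list))
                return;
        first = list->next;
        last = list->prev;
        first->prev = head;
        last->next = head->next;
        head->next->prev = last;
        head->next = first;
        INIT_LIST_HEAD(list);
}

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct item {
        int keep;                       /* hypothetical "stays on the LRU" flag */
        struct list_head lru;
};

/*
 * Shaped like move_active_pages_to_lru() after this commit: drain the
 * caller's list, count the kept entries, collect the rejects on a local
 * list, then splice the rejects back so the input list doubles as the
 * "to free" list.
 */
static unsigned int move_kept_items(struct list_head *list)
{
        LIST_HEAD(to_free);
        unsigned int nr_moved = 0;

        while (!list_empty(list)) {
                struct list_head *e = list->next;
                struct item *it = container_of(e, struct item, lru);

                list_del(e);
                if (it->keep)
                        nr_moved++;                     /* "moved to the LRU" */
                else
                        list_add(e, &to_free);          /* reject, freed by caller */
        }

        /* Reuse the caller's input list for the entries to free. */
        list_splice(&to_free, list);
        return nr_moved;
}

int main(void)
{
        struct item items[4] = {
                { .keep = 1 }, { .keep = 0 }, { .keep = 1 }, { .keep = 0 },
        };
        LIST_HEAD(l);
        struct list_head *pos;
        int i, leftovers = 0;

        for (i = 0; i < 4; i++)
                list_add(&items[i].lru, &l);

        printf("kept: %u\n", move_kept_items(&l));
        for (pos = l.next; pos != &l; pos = pos->next)
                leftovers++;
        printf("left on input list to free: %d\n", leftovers);
        return 0;
}

The caller-side splice in the commit works the same way: after the two
move_active_pages_to_lru() calls, list_splice(&l_inactive, &l_active)
merges the leftovers so a single free pass (mem_cgroup_uncharge_list()
plus free_unref_page_list() on l_active) handles everything.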