mm: swap: fix vmstats for huge pages
author Shakeel Butt <shakeelb@google.com>
Wed, 3 Jun 2020 23:03:16 +0000 (16:03 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Thu, 4 Jun 2020 03:09:49 +0000 (20:09 -0700)
Many of the callbacks called by pagevec_lru_move_fn() do not correctly
update the vmstats for huge pages. Fix that. Also make
__pagevec_lru_add_fn() use the irq-unsafe alternative to update the stat,
as irqs are already disabled.
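
For context, simplified sketches of the helpers involved (paraphrased
here, not the verbatim kernel definitions): hpage_nr_pages() reports how
many base pages a page represents, and the double-underscore vm_event
helpers skip irq protection, which is safe in these callbacks because
pagevec_lru_move_fn() runs them under spin_lock_irqsave():

	static inline int hpage_nr_pages(struct page *page)
	{
		if (unlikely(PageTransHuge(page)))
			return HPAGE_PMD_NR;	/* 512 with 4K pages, 2M THP */
		return 1;
	}

	/* irq-safe: may be called from any context */
	static inline void count_vm_events(enum vm_event_item item, long delta)
	{
		this_cpu_add(vm_event_states.event[item], delta);
	}

	/* irq-unsafe: caller must already have irqs disabled */
	static inline void __count_vm_events(enum vm_event_item item, long delta)
	{
		raw_cpu_add(vm_event_states.event[item], delta);
	}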

Signed-off-by: Shakeel Butt <shakeelb@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Link: http://lkml.kernel.org/r/20200527182916.249910-1-shakeelb@google.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/swap.c

index 343675d..fa07d31 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -241,7 +241,7 @@ static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec,
                del_page_from_lru_list(page, lruvec, page_lru(page));
                ClearPageActive(page);
                add_page_to_lru_list_tail(page, lruvec, page_lru(page));
-               (*pgmoved)++;
+               (*pgmoved) += hpage_nr_pages(page);
        }
 }
 
@@ -327,7 +327,7 @@ static void __activate_page(struct page *page, struct lruvec *lruvec,
                add_page_to_lru_list(page, lruvec, lru);
                trace_mm_lru_activate(page);
 
-               __count_vm_event(PGACTIVATE);
+               __count_vm_events(PGACTIVATE, hpage_nr_pages(page));
        }
 }
 
@@ -529,6 +529,7 @@ static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec,
 {
        int lru;
        bool active;
+       int nr_pages = hpage_nr_pages(page);
 
        if (!PageLRU(page))
                return;
@@ -561,11 +562,11 @@ static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec,
                 * We move the page to the tail of the inactive list.
                 */
                add_page_to_lru_list_tail(page, lruvec, lru);
-               __count_vm_event(PGROTATED);
+               __count_vm_events(PGROTATED, nr_pages);
        }
 
        if (active)
-               __count_vm_event(PGDEACTIVATE);
+               __count_vm_events(PGDEACTIVATE, nr_pages);
 }
 
 static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec,
@@ -960,6 +961,7 @@ static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
 {
        enum lru_list lru;
        int was_unevictable = TestClearPageUnevictable(page);
+       int nr_pages = hpage_nr_pages(page);
 
        VM_BUG_ON_PAGE(PageLRU(page), page);
 
@@ -995,13 +997,13 @@ static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
        if (page_evictable(page)) {
                lru = page_lru(page);
                if (was_unevictable)
-                       count_vm_event(UNEVICTABLE_PGRESCUED);
+                       __count_vm_events(UNEVICTABLE_PGRESCUED, nr_pages);
        } else {
                lru = LRU_UNEVICTABLE;
                ClearPageActive(page);
                SetPageUnevictable(page);
                if (!was_unevictable)
-                       count_vm_event(UNEVICTABLE_PGCULLED);
+                       __count_vm_events(UNEVICTABLE_PGCULLED, nr_pages);
        }
 
        add_page_to_lru_list(page, lruvec, lru);