mm/swap.c: don't pass "enum lru_list" to trace_mm_lru_insertion()
Author:     Yu Zhao <yuzhao@google.com>
AuthorDate: Wed, 24 Feb 2021 20:08:21 +0000 (12:08 -0800)
Commit:     Linus Torvalds <torvalds@linux-foundation.org>
CommitDate: Wed, 24 Feb 2021 21:38:33 +0000 (13:38 -0800)
The parameter is redundant in the sense that it can be extracted
from the "struct page" parameter by page_lru() correctly.
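
For context, page_lru() can recover the list from the page flags alone,
which is why the caller no longer has to spell it out. A simplified
sketch of that helper (modeled on include/linux/mm_inline.h around this
series; not part of this patch):

    static __always_inline enum lru_list page_lru(struct page *page)
    {
            enum lru_list lru;

            /* Unevictable pages always go on the unevictable list. */
            if (PageUnevictable(page))
                    return LRU_UNEVICTABLE;

            /* Pick the base list from the page type (file vs. anon)... */
            lru = page_is_file_lru(page) ? LRU_INACTIVE_FILE : LRU_INACTIVE_ANON;

            /* ...and bump it to the active variant if the page is active. */
            if (PageActive(page))
                    lru += LRU_ACTIVE;

            return lru;
    }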

Link: https://lore.kernel.org/linux-mm/20201207220949.830352-5-yuzhao@google.com/
Link: https://lkml.kernel.org/r/20210122220600.906146-5-yuzhao@google.com
Signed-off-by: Yu Zhao <yuzhao@google.com>
Reviewed-by: Alex Shi <alex.shi@linux.alibaba.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Roman Gushchin <guro@fb.com>
Cc: Vladimir Davydov <vdavydov.dev@gmail.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/trace/events/pagemap.h
mm/swap.c

index 8fd1babae761b901bc6627bb45e85f3631f4fa48..e1735fe7c76afd0900294a42c3f1e0e8b22998ce 100644
--- a/include/trace/events/pagemap.h
+++ b/include/trace/events/pagemap.h
 
 TRACE_EVENT(mm_lru_insertion,
 
-       TP_PROTO(
-               struct page *page,
-               int lru
-       ),
+       TP_PROTO(struct page *page),
 
-       TP_ARGS(page, lru),
+       TP_ARGS(page),
 
        TP_STRUCT__entry(
                __field(struct page *,  page    )
                __field(unsigned long,  pfn     )
-               __field(int,            lru     )
+               __field(enum lru_list,  lru     )
                __field(unsigned long,  flags   )
        ),
 
        TP_fast_assign(
                __entry->page   = page;
                __entry->pfn    = page_to_pfn(page);
-               __entry->lru    = lru;
+               __entry->lru    = page_lru(page);
                __entry->flags  = trace_pagemap_flags(page);
        ),
 
index 22fad34b9c1882322dc9c1efedcc92b0edd1c5f0..a26e3a4fe17bb99cd7fd433f44e69d985c100d93 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -957,7 +957,6 @@ EXPORT_SYMBOL(__pagevec_release);
 
 static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec)
 {
-       enum lru_list lru;
        int was_unevictable = TestClearPageUnevictable(page);
        int nr_pages = thp_nr_pages(page);
 
@@ -993,11 +992,9 @@ static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec)
        smp_mb__after_atomic();
 
        if (page_evictable(page)) {
-               lru = page_lru(page);
                if (was_unevictable)
                        __count_vm_events(UNEVICTABLE_PGRESCUED, nr_pages);
        } else {
-               lru = LRU_UNEVICTABLE;
                ClearPageActive(page);
                SetPageUnevictable(page);
                if (!was_unevictable)
@@ -1005,7 +1002,7 @@ static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec)
        }
 
        add_page_to_lru_list(page, lruvec);
-       trace_mm_lru_insertion(page, lru);
+       trace_mm_lru_insertion(page);
 }
 
 /*