mm: memcontrol: switch to native NR_ANON_MAPPED counter
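
In short, the hunks below switch the NR_ANON_MAPPED updates in the rmap code
from the node-only __mod_node_page_state() to __mod_lruvec_page_state(), which
also accounts the delta to the page's memcg lruvec, so the counter can serve as
a native memcg statistic. To keep the page's memcg association stable across
the mapcount transition and the counter update, the lock_page_memcg() critical
sections are rearranged as shown below.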
diff --git a/mm/rmap.c b/mm/rmap.c
index f79a206..150513d 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1114,6 +1114,11 @@ void do_page_add_anon_rmap(struct page *page,
        bool compound = flags & RMAP_COMPOUND;
        bool first;
 
+       if (unlikely(PageKsm(page)))
+               lock_page_memcg(page);
+       else
+               VM_BUG_ON_PAGE(!PageLocked(page), page);
+
        if (compound) {
                atomic_t *mapcount;
                VM_BUG_ON_PAGE(!PageLocked(page), page);
@@ -1134,12 +1139,13 @@ void do_page_add_anon_rmap(struct page *page,
                 */
                if (compound)
                        __inc_node_page_state(page, NR_ANON_THPS);
-               __mod_node_page_state(page_pgdat(page), NR_ANON_MAPPED, nr);
+               __mod_lruvec_page_state(page, NR_ANON_MAPPED, nr);
        }
-       if (unlikely(PageKsm(page)))
-               return;
 
-       VM_BUG_ON_PAGE(!PageLocked(page), page);
+       if (unlikely(PageKsm(page))) {
+               unlock_page_memcg(page);
+               return;
+       }
 
        /* address might be in next vma when migration races vma_adjust */
        if (first)
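
(Note on the two hunks above: rmap changes for regular anon pages happen under
the page lock, which is what VM_BUG_ON_PAGE(!PageLocked(page), page) asserts
and what keeps page->mem_cgroup stable here; KSM pages are the exception and
can be mapped without the page lock, so they take lock_page_memcg() explicitly
around the update instead.)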
@@ -1181,7 +1187,7 @@ void page_add_new_anon_rmap(struct page *page,
                /* increment count (starts at -1) */
                atomic_set(&page->_mapcount, 0);
        }
-       __mod_node_page_state(page_pgdat(page), NR_ANON_MAPPED, nr);
+       __mod_lruvec_page_state(page, NR_ANON_MAPPED, nr);
        __page_set_anon_rmap(page, vma, address, 1);
 }
 
@@ -1230,13 +1236,12 @@ static void page_remove_file_rmap(struct page *page, bool compound)
        int i, nr = 1;
 
        VM_BUG_ON_PAGE(compound && !PageHead(page), page);
-       lock_page_memcg(page);
 
        /* Hugepages are not counted in NR_FILE_MAPPED for now. */
        if (unlikely(PageHuge(page))) {
                /* hugetlb pages are always mapped with pmds */
                atomic_dec(compound_mapcount_ptr(page));
-               goto out;
+               return;
        }
 
        /* page still mapped by someone else? */
@@ -1246,14 +1251,14 @@ static void page_remove_file_rmap(struct page *page, bool compound)
                                nr++;
                }
                if (!atomic_add_negative(-1, compound_mapcount_ptr(page)))
-                       goto out;
+                       return;
                if (PageSwapBacked(page))
                        __dec_node_page_state(page, NR_SHMEM_PMDMAPPED);
                else
                        __dec_node_page_state(page, NR_FILE_PMDMAPPED);
        } else {
                if (!atomic_add_negative(-1, &page->_mapcount))
-                       goto out;
+                       return;
        }
 
        /*
@@ -1265,8 +1270,6 @@ static void page_remove_file_rmap(struct page *page, bool compound)
 
        if (unlikely(PageMlocked(page)))
                clear_page_mlock(page);
-out:
-       unlock_page_memcg(page);
 }
 
 static void page_remove_anon_compound_rmap(struct page *page)
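
(The page_remove_file_rmap() hunks above drop its lock_page_memcg()/
unlock_page_memcg() pair, along with the out: label, while the hunks below take
the same lock in the common caller, page_remove_rmap(), so both the file and
the anon unmap paths now update their counters under the memcg lock.)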
@@ -1310,7 +1313,7 @@ static void page_remove_anon_compound_rmap(struct page *page)
                clear_page_mlock(page);
 
        if (nr)
-               __mod_node_page_state(page_pgdat(page), NR_ANON_MAPPED, -nr);
+               __mod_lruvec_page_state(page, NR_ANON_MAPPED, -nr);
 }
 
 /**
@@ -1322,22 +1325,28 @@ static void page_remove_anon_compound_rmap(struct page *page)
  */
 void page_remove_rmap(struct page *page, bool compound)
 {
-       if (!PageAnon(page))
-               return page_remove_file_rmap(page, compound);
+       lock_page_memcg(page);
 
-       if (compound)
-               return page_remove_anon_compound_rmap(page);
+       if (!PageAnon(page)) {
+               page_remove_file_rmap(page, compound);
+               goto out;
+       }
+
+       if (compound) {
+               page_remove_anon_compound_rmap(page);
+               goto out;
+       }
 
        /* page still mapped by someone else? */
        if (!atomic_add_negative(-1, &page->_mapcount))
-               return;
+               goto out;
 
        /*
         * We use the irq-unsafe __{inc|mod}_zone_page_stat because
         * these counters are not modified in interrupt context, and
         * pte lock(a spinlock) is held, which implies preemption disabled.
         */
-       __dec_node_page_state(page, NR_ANON_MAPPED);
+       __dec_lruvec_page_state(page, NR_ANON_MAPPED);
 
        if (unlikely(PageMlocked(page)))
                clear_page_mlock(page);
@@ -1354,6 +1363,8 @@ void page_remove_rmap(struct page *page, bool compound)
         * Leaving it set also helps swapoff to reinstate ptes
         * faster for those pages still in swapcache.
         */
+out:
+       unlock_page_memcg(page);
 }
 
 /*
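
For reference, here is a simplified sketch of what the __mod_lruvec_page_state()
helper used throughout this patch does in kernels of this era; the real
definition lives in include/linux/memcontrol.h and has been reworked across
releases, so treat this as an approximation rather than verbatim source:

/*
 * Simplified sketch, assuming roughly the v5.7-era memcontrol.h
 * definitions; not the verbatim kernel source.
 */
static inline void __mod_lruvec_page_state(struct page *page,
					   enum node_stat_item idx, int val)
{
	pg_data_t *pgdat = page_pgdat(page);
	struct lruvec *lruvec;

	/* Uncharged pages have no memcg lruvec; update only the node. */
	if (!page->mem_cgroup) {
		__mod_node_page_state(pgdat, idx, val);
		return;
	}

	/* Charged pages update both the node and the memcg counters. */
	lruvec = mem_cgroup_lruvec(page->mem_cgroup, pgdat);
	__mod_lruvec_state(lruvec, idx, val);
}

This is also why the conversion needs the lock_page_memcg() rearrangement in
the paths above: page->mem_cgroup must not change between the mapcount
transition and the counter update.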