mm,thp,rmap: clean up the end of __split_huge_pmd_locked()
Author:     Hugh Dickins <hughd@google.com>
AuthorDate: Tue, 22 Nov 2022 09:51:50 +0000 (01:51 -0800)
Commit:     Andrew Morton <akpm@linux-foundation.org>
CommitDate: Wed, 30 Nov 2022 23:58:48 +0000 (15:58 -0800)
It's hard to add a page_add_anon_rmap() into __split_huge_pmd_locked()'s
HPAGE_PMD_NR set_pte_at() loop, without wincing at the "freeze" case's
HPAGE_PMD_NR page_remove_rmap() loop below it.

It's just a mistake to add rmaps in the "freeze" (insert migration entries
prior to splitting huge page) case: the pmd_migration case already avoids
doing that, so just follow its lead.  page_ref_add() versus put_page()
likewise.  But why is one more put_page() needed in the "freeze" case?
Because it's removing the pmd rmap, which was already removed in the
pmd_migration case (freeze and pmd_migration are mutually exclusive cases).
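
Condensed, the end of __split_huge_pmd_locked() then reads roughly as
below (a sketch only: pte mapping, pte flags and the construction of the
migration swp_entry are elided; the real code is in the diff that follows):

	if (!pmd_migration && !freeze)
		page_ref_add(page, HPAGE_PMD_NR - 1);

	for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) {
		if (freeze || pmd_migration) {
			/* migration entry: no rmap, no pte reference */
			entry = swp_entry_to_pte(swp_entry);
		} else {
			entry = mk_pte(page + i, READ_ONCE(vma->vm_page_prot));
			page_add_anon_rmap(page + i, vma, addr, false);
		}
		set_pte_at(mm, addr, pte, entry);
	}

	if (!pmd_migration)
		page_remove_rmap(page, vma, true);	/* the pmd rmap */
	if (freeze)
		put_page(page);	/* the ref that went with the pmd rmap */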

Link: https://lkml.kernel.org/r/d43748aa-fece-e0b9-c4ab-f23c9ebc9011@google.com
Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Dan Carpenter <error27@gmail.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: James Houghton <jthoughton@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Mina Almasry <almasrymina@google.com>
Cc: Muchun Song <songmuchun@bytedance.com>
Cc: Naoya Horiguchi <naoya.horiguchi@linux.dev>
Cc: Peter Xu <peterx@redhat.com>
Cc: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Yang Shi <shy828301@gmail.com>
Cc: Yu Zhao <yuzhao@google.com>
Cc: Zach O'Keefe <zokeefe@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 681253a..aba3406 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2141,7 +2141,6 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
                uffd_wp = pmd_uffd_wp(old_pmd);
 
                VM_BUG_ON_PAGE(!page_count(page), page);
-               page_ref_add(page, HPAGE_PMD_NR - 1);
 
                /*
                 * Without "freeze", we'll simply split the PMD, propagating the
@@ -2161,6 +2160,8 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
                anon_exclusive = PageAnon(page) && PageAnonExclusive(page);
                if (freeze && anon_exclusive && page_try_share_anon_rmap(page))
                        freeze = false;
+               if (!freeze)
+                       page_ref_add(page, HPAGE_PMD_NR - 1);
        }
 
        /*
@@ -2216,27 +2217,21 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
                                entry = pte_mksoft_dirty(entry);
                        if (uffd_wp)
                                entry = pte_mkuffd_wp(entry);
+                       page_add_anon_rmap(page + i, vma, addr, false);
                }
                pte = pte_offset_map(&_pmd, addr);
                BUG_ON(!pte_none(*pte));
                set_pte_at(mm, addr, pte, entry);
-               if (!pmd_migration)
-                       page_add_anon_rmap(page + i, vma, addr, false);
                pte_unmap(pte);
        }
 
        if (!pmd_migration)
                page_remove_rmap(page, vma, true);
+       if (freeze)
+               put_page(page);
 
        smp_wmb(); /* make pte visible before pmd */
        pmd_populate(mm, pmd, pgtable);
-
-       if (freeze) {
-               for (i = 0; i < HPAGE_PMD_NR; i++) {
-                       page_remove_rmap(page + i, vma, false);
-                       put_page(page + i);
-               }
-       }
 }
 
 void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,