diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 032fb0e..3fae2d2 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1838,10 +1838,10 @@ int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
        if (is_swap_pmd(*pmd)) {
                swp_entry_t entry = pmd_to_swp_entry(*pmd);
                struct page *page = pfn_swap_entry_to_page(entry);
+               pmd_t newpmd;
 
                VM_BUG_ON(!is_pmd_migration_entry(*pmd));
                if (is_writable_migration_entry(entry)) {
-                       pmd_t newpmd;
                        /*
                         * A protection check is difficult so
                         * just be safe and disable write
@@ -1855,8 +1855,16 @@ int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
                                newpmd = pmd_swp_mksoft_dirty(newpmd);
                        if (pmd_swp_uffd_wp(*pmd))
                                newpmd = pmd_swp_mkuffd_wp(newpmd);
-                       set_pmd_at(mm, addr, pmd, newpmd);
+               } else {
+                       newpmd = *pmd;
                }
+
+               if (uffd_wp)
+                       newpmd = pmd_swp_mkuffd_wp(newpmd);
+               else if (uffd_wp_resolve)
+                       newpmd = pmd_swp_clear_uffd_wp(newpmd);
+               if (!pmd_same(*pmd, newpmd))
+                       set_pmd_at(mm, addr, pmd, newpmd);
                goto unlock;
        }
 #endif
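
The two hunks above rework the swap-PMD branch of change_huge_pmd(). Previously, newpmd was scoped to the writable-migration-entry case, so a uffd-wp protect or unprotect request against a non-writable PMD migration entry was silently dropped. Now newpmd starts from *pmd for every migration entry, the uffd_wp / uffd_wp_resolve requests (derived from cp_flags) are applied on top, and set_pmd_at() is issued only when the entry actually changed.

From userspace this path is driven by UFFDIO_WRITEPROTECT on a registered range. A minimal sketch follows (my illustration, not part of this commit); note that actually landing on a PMD migration entry requires racing with concurrent THP migration, so this only shows the ioctl sequence that reaches change_huge_pmd() with uffd_wp or uffd_wp_resolve set:

	#include <fcntl.h>
	#include <linux/userfaultfd.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <sys/mman.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	int main(void)
	{
		size_t len = 2UL * 1024 * 1024;		/* one PMD-sized region */
		long uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
		struct uffdio_api api = { .api = UFFD_API };
		void *area;

		if (uffd < 0 || ioctl(uffd, UFFDIO_API, &api))
			return 1;

		area = mmap(NULL, len, PROT_READ | PROT_WRITE,
			    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (area == MAP_FAILED)
			return 1;
		madvise(area, len, MADV_HUGEPAGE);	/* ask for a THP mapping */
		memset(area, 1, len);			/* populate the range */

		struct uffdio_register reg = {
			.range = { .start = (unsigned long)area, .len = len },
			.mode = UFFDIO_REGISTER_MODE_WP,
		};
		if (ioctl(uffd, UFFDIO_REGISTER, &reg))
			return 1;

		/* Protect: ends up in change_huge_pmd() with uffd_wp set. */
		struct uffdio_writeprotect wp = {
			.range = { .start = (unsigned long)area, .len = len },
			.mode = UFFDIO_WRITEPROTECT_MODE_WP,
		};
		if (ioctl(uffd, UFFDIO_WRITEPROTECT, &wp))
			return 1;

		/* Unprotect: the uffd_wp_resolve case. */
		wp.mode = 0;
		return ioctl(uffd, UFFDIO_WRITEPROTECT, &wp) ? 1 : 0;
	}
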
@@ -2657,9 +2665,10 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
        VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
 
        is_hzp = is_huge_zero_page(&folio->page);
-       VM_WARN_ON_ONCE_FOLIO(is_hzp, folio);
-       if (is_hzp)
+       if (is_hzp) {
+               pr_warn_ratelimited("Called split_huge_page for huge zero page\n");
                return -EBUSY;
+       }
 
        if (folio_test_writeback(folio))
                return -EBUSY;
@@ -3251,6 +3260,8 @@ int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
        pmdswp = swp_entry_to_pmd(entry);
        if (pmd_soft_dirty(pmdval))
                pmdswp = pmd_swp_mksoft_dirty(pmdswp);
+       if (pmd_uffd_wp(pmdval))
+               pmdswp = pmd_swp_mkuffd_wp(pmdswp);
        set_pmd_at(mm, address, pvmw->pmd, pmdswp);
        page_remove_rmap(page, vma, true);
        put_page(page);
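
Finally, set_pmd_migration_entry() now propagates the uffd-wp bit into the swap PMD alongside soft-dirty, so write-protect state survives THP migration instead of being lost when the present mapping is torn down. The restore direction lives in remove_migration_pmd(); paraphrased from memory rather than quoted from this tree, it looks roughly like:

	/* In remove_migration_pmd(), when re-establishing the present PMD: */
	if (pmd_swp_soft_dirty(*pvmw->pmd))
		pmde = pmd_mksoft_dirty(pmde);
	if (pmd_swp_uffd_wp(*pvmw->pmd))
		pmde = pmd_mkuffd_wp(pmde);
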