diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 811d19b..b20fef2 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1805,10 +1805,10 @@ int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
        if (is_swap_pmd(*pmd)) {
                swp_entry_t entry = pmd_to_swp_entry(*pmd);
                struct page *page = pfn_swap_entry_to_page(entry);
+               pmd_t newpmd;
 
                VM_BUG_ON(!is_pmd_migration_entry(*pmd));
                if (is_writable_migration_entry(entry)) {
-                       pmd_t newpmd;
                        /*
                         * A protection check is difficult so
                         * just be safe and disable write
@@ -1822,8 +1822,16 @@ int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
                                newpmd = pmd_swp_mksoft_dirty(newpmd);
                        if (pmd_swp_uffd_wp(*pmd))
                                newpmd = pmd_swp_mkuffd_wp(newpmd);
-                       set_pmd_at(mm, addr, pmd, newpmd);
+               } else {
+                       newpmd = *pmd;
                }
+
+               if (uffd_wp)
+                       newpmd = pmd_swp_mkuffd_wp(newpmd);
+               else if (uffd_wp_resolve)
+                       newpmd = pmd_swp_clear_uffd_wp(newpmd);
+               if (!pmd_same(*pmd, newpmd))
+                       set_pmd_at(mm, addr, pmd, newpmd);
                goto unlock;
        }
 #endif
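
The two hunks above change how change_huge_pmd() treats pmd migration entries: newpmd is now built for every swap pmd (not only for writable migration entries), the pending userfaultfd write-protect request (uffd_wp / uffd_wp_resolve) is applied on top, and set_pmd_at() is skipped when the value did not change. A minimal stand-alone sketch of that control flow, with the pmd modeled as a plain bit mask and hypothetical flag names rather than real kernel API:

#include <stdbool.h>
#include <stdio.h>

#define SWP_WRITABLE   0x1u	/* writable migration entry */
#define SWP_SOFT_DIRTY 0x2u
#define SWP_UFFD_WP    0x4u

typedef unsigned int pmdval_t;

/* Mirrors the new swap-pmd branch: always compute newpmd, then decide
 * whether a store ("set_pmd_at") is needed at all. */
static pmdval_t change_swap_pmd(pmdval_t old, bool uffd_wp,
				bool uffd_wp_resolve, bool *need_store)
{
	pmdval_t newpmd;

	if (old & SWP_WRITABLE)
		newpmd = old & ~SWP_WRITABLE;	/* downgrade to read-only */
	else
		newpmd = old;			/* the new "else" branch */

	if (uffd_wp)
		newpmd |= SWP_UFFD_WP;
	else if (uffd_wp_resolve)
		newpmd &= ~SWP_UFFD_WP;

	*need_store = (newpmd != old);		/* the !pmd_same() check */
	return newpmd;
}

int main(void)
{
	bool store;
	pmdval_t p = change_swap_pmd(SWP_SOFT_DIRTY, true, false, &store);

	printf("newpmd=%#x store=%d\n", p, store);	/* newpmd=0x6 store=1 */
	return 0;
}

Before this change the swap-pmd path only preserved an already-set uffd-wp bit while downgrading a writable migration entry; a new wp or wp-resolve request was never applied to a pmd migration entry at all.
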
@@ -2004,7 +2012,7 @@ static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
 {
        struct mm_struct *mm = vma->vm_mm;
        pgtable_t pgtable;
-       pmd_t _pmd;
+       pmd_t _pmd, old_pmd;
        int i;
 
        /*
@@ -2015,7 +2023,7 @@ static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
         *
         * See Documentation/mm/mmu_notifier.rst
         */
-       pmdp_huge_clear_flush(vma, haddr, pmd);
+       old_pmd = pmdp_huge_clear_flush(vma, haddr, pmd);
 
        pgtable = pgtable_trans_huge_withdraw(mm, pmd);
        pmd_populate(mm, &_pmd, pgtable);
@@ -2024,6 +2032,8 @@ static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
                pte_t *pte, entry;
                entry = pfn_pte(my_zero_pfn(haddr), vma->vm_page_prot);
                entry = pte_mkspecial(entry);
+               if (pmd_uffd_wp(old_pmd))
+                       entry = pte_mkuffd_wp(entry);
                pte = pte_offset_map(&_pmd, haddr);
                VM_BUG_ON(!pte_none(*pte));
                set_pte_at(mm, haddr, pte, entry);
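
The three __split_huge_zero_page_pmd() hunks declare old_pmd, capture the value returned by pmdp_huge_clear_flush(), and, if that pmd carried the uffd-wp bit, mark every replacement zero-page PTE uffd-wp as well, so userfaultfd keeps receiving write faults on the range after the split. The idea, reduced to a toy loop (hypothetical types, not kernel code):

#include <stdbool.h>
#include <stddef.h>

#define NR_SUBPAGES 512			/* e.g. HPAGE_PMD_NR on x86-64 */

struct toy_pte { bool present; bool uffd_wp; };

/* Split one wp-tracked "huge" mapping into NR_SUBPAGES small ones,
 * carrying the write-protect marker to every subpage. */
static void split_zero_pmd(bool old_pmd_uffd_wp,
			   struct toy_pte ptes[NR_SUBPAGES])
{
	for (size_t i = 0; i < NR_SUBPAGES; i++) {
		ptes[i].present = true;
		ptes[i].uffd_wp = old_pmd_uffd_wp;	/* the preserved bit */
	}
}

int main(void)
{
	static struct toy_pte ptes[NR_SUBPAGES];

	split_zero_pmd(true, ptes);	/* every subpage stays wp-tracked */
	return 0;
}
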
@@ -2645,9 +2655,10 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
        VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
 
        is_hzp = is_huge_zero_page(&folio->page);
-       VM_WARN_ON_ONCE_FOLIO(is_hzp, folio);
-       if (is_hzp)
+       if (is_hzp) {
+               pr_warn_ratelimited("Called split_huge_page for huge zero page\n");
                return -EBUSY;
+       }
 
        if (folio_test_writeback(folio))
                return -EBUSY;
@@ -2818,6 +2829,9 @@ void deferred_split_huge_page(struct page *page)
        if (PageSwapCache(page))
                return;
 
+       if (!list_empty(page_deferred_list(page)))
+               return;
+
        spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
        if (list_empty(page_deferred_list(page))) {
                count_vm_event(THP_DEFERRED_SPLIT_PAGE);
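
The deferred_split_huge_page() hunk adds an unlocked early return when the page is already on its deferred-split list, so the common "already queued" case no longer takes split_queue_lock; the list_empty() test is still repeated under the lock, which is what keeps the queueing correct. The shape of that double-checked pattern, sketched with a pthread mutex and ignoring memory-ordering details (the kernel path relies on the locked recheck, not the unlocked peek, for correctness):

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

struct item {
	struct item *next;
	bool queued;			/* stands in for !list_empty(...) */
};

static struct item *queue_head;
static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;

static void deferred_add(struct item *it)
{
	if (it->queued)			/* unlocked fast path */
		return;

	pthread_mutex_lock(&queue_lock);
	if (!it->queued) {		/* recheck under the lock */
		it->next = queue_head;
		queue_head = it;
		it->queued = true;
	}
	pthread_mutex_unlock(&queue_lock);
}

int main(void)
{
	struct item a = { .next = NULL, .queued = false };

	deferred_add(&a);		/* queues it */
	deferred_add(&a);		/* second call returns early */
	return 0;
}
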
@@ -3228,6 +3242,8 @@ int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
        pmdswp = swp_entry_to_pmd(entry);
        if (pmd_soft_dirty(pmdval))
                pmdswp = pmd_swp_mksoft_dirty(pmdswp);
+       if (pmd_uffd_wp(pmdval))
+               pmdswp = pmd_swp_mkuffd_wp(pmdswp);
        set_pmd_at(mm, address, pvmw->pmd, pmdswp);
        page_remove_rmap(page, vma, true);
        put_page(page);
@@ -3253,8 +3269,6 @@ void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
        pmde = mk_huge_pmd(new, READ_ONCE(vma->vm_page_prot));
        if (pmd_swp_soft_dirty(*pvmw->pmd))
                pmde = pmd_mksoft_dirty(pmde);
-       if (is_writable_migration_entry(entry))
-               pmde = maybe_pmd_mkwrite(pmde, vma);
        if (pmd_swp_uffd_wp(*pvmw->pmd))
                pmde = pmd_wrprotect(pmd_mkuffd_wp(pmde));
        if (!is_migration_entry_young(entry))
@@ -3262,6 +3276,10 @@ void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
        /* NOTE: this may contain setting soft-dirty on some archs */
        if (PageDirty(new) && is_migration_entry_dirty(entry))
                pmde = pmd_mkdirty(pmde);
+       if (is_writable_migration_entry(entry))
+               pmde = maybe_pmd_mkwrite(pmde, vma);
+       else
+               pmde = pmd_wrprotect(pmde);
 
        if (PageAnon(new)) {
                rmap_t rmap_flags = RMAP_COMPOUND;
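
The last three hunks deal with THP migration. set_pmd_migration_entry() now copies the uffd-wp bit into the swap-format pmd, and remove_migration_pmd() restores write permission only after the soft-dirty, uffd-wp and dirty bits have been applied, explicitly write-protecting entries that were not writable. Deferring the write decision to the end matters because, presumably on architectures where marking an entry dirty can also make it effectively writable, the final maybe_pmd_mkwrite()/pmd_wrprotect() pair is what leaves the restored pmd with the intended permission. A compact round-trip sketch in the same toy bit-mask style as above (hypothetical names, not kernel API):

#include <stdbool.h>
#include <stdio.h>

#define P_WRITE      0x1u
#define P_DIRTY      0x2u
#define P_SOFT_DIRTY 0x4u
#define P_UFFD_WP    0x8u

typedef unsigned int pmdval_t;

/* unmap side: remember writability, carry soft-dirty and uffd-wp */
static pmdval_t to_migration_entry(pmdval_t pmdval, bool *writable)
{
	*writable = pmdval & P_WRITE;
	return pmdval & (P_SOFT_DIRTY | P_UFFD_WP);
}

/* restore side: write permission is decided last, as in the hunk above */
static pmdval_t from_migration_entry(pmdval_t swp, bool writable, bool dirty)
{
	pmdval_t pmde = swp & (P_SOFT_DIRTY | P_UFFD_WP);

	if (dirty)
		pmde |= P_DIRTY;
	if (writable)
		pmde |= P_WRITE;	/* maybe_pmd_mkwrite() */
	else
		pmde &= ~P_WRITE;	/* explicit pmd_wrprotect() */

	return pmde;
}

int main(void)
{
	bool writable;
	pmdval_t swp = to_migration_entry(P_SOFT_DIRTY | P_UFFD_WP, &writable);
	pmdval_t pmde = from_migration_entry(swp, writable, true);

	printf("restored=%#x\n", pmde);	/* 0xe: dirty, soft-dirty, uffd-wp, not writable */
	return 0;
}
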