diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index a8d5ef2..ef72d3d 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -561,6 +561,10 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
                        result = SCAN_PTE_NON_PRESENT;
                        goto out;
                }
+               if (pte_uffd_wp(pteval)) {
+                       result = SCAN_PTE_UFFD_WP;
+                       goto out;
+               }
                page = vm_normal_page(vma, address, pteval);
                if (unlikely(!page) || unlikely(is_zone_device_page(page))) {
                        result = SCAN_PAGE_NULL;
@@ -847,6 +851,10 @@ static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
        return SCAN_SUCCEED;
 }
 
+/*
+ * See pmd_trans_unstable() for how the result may change out from
+ * underneath us, even if we hold mmap_lock in read.
+ */
 static int find_pmd_or_thp_or_none(struct mm_struct *mm,
                                   unsigned long address,
                                   pmd_t **pmd)
@@ -865,8 +873,12 @@ static int find_pmd_or_thp_or_none(struct mm_struct *mm,
 #endif
        if (pmd_none(pmde))
                return SCAN_PMD_NONE;
+       if (!pmd_present(pmde))
+               return SCAN_PMD_NULL;
        if (pmd_trans_huge(pmde))
                return SCAN_PMD_MAPPED;
+       if (pmd_devmap(pmde))
+               return SCAN_PMD_NULL;
        if (pmd_bad(pmde))
                return SCAN_PMD_NULL;
        return SCAN_SUCCEED;
@@ -1051,6 +1063,7 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
        _pmd = pmdp_collapse_flush(vma, address, pmd);
        spin_unlock(pmd_ptl);
        mmu_notifier_invalidate_range_end(&range);
+       tlb_remove_table_sync_one();
 
        spin_lock(pte_ptl);
        result =  __collapse_huge_page_isolate(vma, address, pte, cc,
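
The tlb_remove_table_sync_one() call inserted above is what closes the race with lockless_pages_from_mm() (GUP-fast) on configurations where the TLB flush itself is not carried out by IPI: GUP-fast walks page tables with interrupts disabled, so waiting for a dummy IPI to be handled on every CPU guarantees that any walker which might still have seen the old PMD has finished before the collapsed page table is reused. A rough sketch of what the helper does (paired with the companion mm/mmu_gather.c change; simplified here, not a verbatim copy):

#include <linux/smp.h>

static void tlb_remove_table_smp_sync(void *arg)
{
        /* No work to do: receiving the IPI is the synchronization. */
}

void tlb_remove_table_sync_one(void)
{
        /*
         * Not an RCU grace period, but sufficient for software page-table
         * walkers (GUP-fast) that rely on disabling IRQs: once every CPU has
         * taken the IPI, no such walker can still be using the old tables.
         */
        smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
}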
@@ -1379,16 +1392,43 @@ static int set_huge_pmd(struct vm_area_struct *vma, unsigned long addr,
        return SCAN_SUCCEED;
 }
 
+/*
+ * A note about locking:
+ * Trying to take the page table spinlocks would be useless here because those
+ * are only used to synchronize:
+ *
+ *  - modifying terminal entries (ones that point to a data page, not to another
+ *    page table)
+ *  - installing *new* non-terminal entries
+ *
+ * Instead, we need roughly the same kind of protection as free_pgtables() or
+ * mm_take_all_locks() (but only for a single VMA):
+ * The mmap lock together with this VMA's rmap locks covers all paths towards
+ * the page table entries we're messing with here, except for hardware page
+ * table walks and lockless_pages_from_mm().
+ */
 static void collapse_and_free_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
                                  unsigned long addr, pmd_t *pmdp)
 {
-       spinlock_t *ptl;
        pmd_t pmd;
+       struct mmu_notifier_range range;
 
        mmap_assert_write_locked(mm);
-       ptl = pmd_lock(vma->vm_mm, pmdp);
+       if (vma->vm_file)
+               lockdep_assert_held_write(&vma->vm_file->f_mapping->i_mmap_rwsem);
+       /*
+        * All anon_vmas attached to the VMA have the same root and are
+        * therefore locked by the same lock.
+        */
+       if (vma->anon_vma)
+               lockdep_assert_held_write(&vma->anon_vma->root->rwsem);
+
+       mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL, mm, addr,
+                               addr + HPAGE_PMD_SIZE);
+       mmu_notifier_invalidate_range_start(&range);
        pmd = pmdp_collapse_flush(vma, addr, pmdp);
-       spin_unlock(ptl);
+       tlb_remove_table_sync_one();
+       mmu_notifier_invalidate_range_end(&range);
        mm_dec_nr_ptes(mm);
        page_table_check_pte_clear_range(mm, addr, pmd);
        pte_free(mm, pmd_pgtable(pmd));
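
The new assertions encode the caller-side contract described in the comment above. As applied later in this patch by collapse_pte_mapped_thp(), the expected sequence around a call looks roughly like this (condensed sketch; mm, vma, haddr and pmd are the caller's locals, error handling omitted):

        mmap_assert_write_locked(mm);

        /* Block rmap walks and concurrent retraction on the file mapping. */
        i_mmap_lock_write(vma->vm_file->f_mapping);

        /* All anon_vmas on the VMA share one root lock; take it if present. */
        if (vma->anon_vma)
                anon_vma_lock_write(vma->anon_vma);

        collapse_and_free_pmd(mm, vma, haddr, pmd);

        if (vma->anon_vma)
                anon_vma_unlock_write(vma->anon_vma);
        i_mmap_unlock_write(vma->vm_file->f_mapping);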
@@ -1472,6 +1512,20 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
                goto drop_hpage;
        }
 
+       /*
+        * We need to lock the mapping so that from here on, only GUP-fast and
+        * hardware page walks can access the parts of the page tables that
+        * we're operating on.
+        * See collapse_and_free_pmd().
+        */
+       i_mmap_lock_write(vma->vm_file->f_mapping);
+
+       /*
+        * This spinlock should be unnecessary: Nobody else should be accessing
+        * the page tables under spinlock protection here, only
+        * lockless_pages_from_mm() and the hardware page walker can access page
+        * tables while all the high-level locks are held in write mode.
+        */
        start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl);
        result = SCAN_FAIL;
 
@@ -1524,8 +1578,16 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
        }
 
        /* step 4: remove pte entries */
+       /* we make no change to anon, but protect concurrent anon page lookup */
+       if (vma->anon_vma)
+               anon_vma_lock_write(vma->anon_vma);
+
        collapse_and_free_pmd(mm, vma, haddr, pmd);
 
+       if (vma->anon_vma)
+               anon_vma_unlock_write(vma->anon_vma);
+       i_mmap_unlock_write(vma->vm_file->f_mapping);
+
 maybe_install_pmd:
        /* step 5: install pmd entry */
        result = install_pmd
@@ -1539,6 +1601,7 @@ drop_hpage:
 
 abort:
        pte_unmap_unlock(start_pte, ptl);
+       i_mmap_unlock_write(vma->vm_file->f_mapping);
        goto drop_hpage;
 }
 
@@ -1595,9 +1658,10 @@ static int retract_page_tables(struct address_space *mapping, pgoff_t pgoff,
                 * An alternative would be drop the check, but check that page
                 * table is clear before calling pmdp_collapse_flush() under
                 * ptl. It has higher chance to recover THP for the VMA, but
-                * has higher cost too.
+                * has higher cost too. It would also probably require locking
+                * the anon_vma.
                 */
-               if (vma->anon_vma) {
+               if (READ_ONCE(vma->anon_vma)) {
                        result = SCAN_PAGE_ANON;
                        goto next;
                }
@@ -1626,6 +1690,18 @@ static int retract_page_tables(struct address_space *mapping, pgoff_t pgoff,
                if ((cc->is_khugepaged || is_target) &&
                    mmap_write_trylock(mm)) {
                        /*
+                        * Re-check whether we have an ->anon_vma, because
+                        * collapse_and_free_pmd() requires that either no
+                        * ->anon_vma exists or the anon_vma is locked.
+                        * We already checked ->anon_vma above, but that check
+                        * is racy because ->anon_vma can be populated under the
+                        * mmap lock in read mode.
+                        */
+                       if (vma->anon_vma) {
+                               result = SCAN_PAGE_ANON;
+                               goto unlock_next;
+                       }
+                       /*
                         * When a vma is registered with uffd-wp, we can't
                         * recycle the pmd pgtable because there can be pte
                         * markers installed.  Skip it only, so the rest mm/vma
@@ -2536,6 +2612,7 @@ static int madvise_collapse_errno(enum scan_result r)
        case SCAN_CGROUP_CHARGE_FAIL:
                return -EBUSY;
        /* Resource temporary unavailable - trying again might succeed */
+       case SCAN_PAGE_COUNT:
        case SCAN_PAGE_LOCK:
        case SCAN_PAGE_LRU:
        case SCAN_DEL_PAGE_LRU:
@@ -2592,7 +2669,7 @@ int madvise_collapse(struct vm_area_struct *vma, struct vm_area_struct **prev,
                                goto out_nolock;
                        }
 
-                       hend = vma->vm_end & HPAGE_PMD_MASK;
+                       hend = min(hend, vma->vm_end & HPAGE_PMD_MASK);
                }
                mmap_assert_locked(mm);
                memset(cc->node_load, 0, sizeof(cc->node_load));
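
To illustrate the clamp with hypothetical addresses: if userspace asked MADV_COLLAPSE to cover a range ending at 0x60200000, hend starts out as 0x60200000; if, after the mmap lock is dropped and re-taken, revalidation returns a merged VMA whose vm_end is 0x60a00000, the old assignment would have raised hend to 0x60a00000 and collapsed memory beyond the requested range, whereas min() keeps the loop bounded by both the caller's request and the revalidated VMA.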