mm/khugepaged: use a nodemask for hugepage allocation fallback and fix page table collapse locking
[platform/kernel/linux-starfive.git] / mm / khugepaged.c
index 4734315..c982b25 100644
@@ -97,8 +97,8 @@ struct collapse_control {
        /* Num pages scanned per node */
        u32 node_load[MAX_NUMNODES];
 
-       /* Last target selected in hpage_collapse_find_target_node() */
-       int last_target_node;
+       /* nodemask for allocation fallback */
+       nodemask_t alloc_nmask;
 };
 
 /**
@@ -734,7 +734,6 @@ static void khugepaged_alloc_sleep(void)
 
 struct collapse_control khugepaged_collapse_control = {
        .is_khugepaged = true,
-       .last_target_node = NUMA_NO_NODE,
 };
 
 static bool hpage_collapse_scan_abort(int nid, struct collapse_control *cc)
@@ -783,16 +782,11 @@ static int hpage_collapse_find_target_node(struct collapse_control *cc)
                        target_node = nid;
                }
 
-       /* do some balance if several nodes have the same hit record */
-       if (target_node <= cc->last_target_node)
-               for (nid = cc->last_target_node + 1; nid < MAX_NUMNODES;
-                    nid++)
-                       if (max_value == cc->node_load[nid]) {
-                               target_node = nid;
-                               break;
-                       }
+       for_each_online_node(nid) {
+               if (max_value == cc->node_load[nid])
+                       node_set(nid, cc->alloc_nmask);
+       }
 
-       cc->last_target_node = target_node;
        return target_node;
 }
 #else
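
The hunk above drops the round-robin tie-breaking on last_target_node: every online node that matches the highest node_load count is set in cc->alloc_nmask, and that mask is later handed to the page allocator. The standalone C program below is a userspace model of that selection step, not kernel code; MAX_NODES, toy_find_target_node() and the sample load values are made up for illustration.

/*
 * Standalone userspace model of the selection above (illustration only,
 * not kernel code): the node with the most hits is the target, and every
 * node tied for that maximum is added to an "allowed" bitmask, mirroring
 * how the hunk fills cc->alloc_nmask instead of round-robining between
 * tied nodes.
 */
#include <stdio.h>

#define MAX_NODES 8

static int toy_find_target_node(const unsigned int load[MAX_NODES],
				unsigned long *allowed)
{
	unsigned int max_value = 0;
	int target = 0, nid;

	/* First pass: remember the node with the highest hit count. */
	for (nid = 0; nid < MAX_NODES; nid++) {
		if (load[nid] > max_value) {
			max_value = load[nid];
			target = nid;
		}
	}

	/* Second pass: allow fallback to every node tied for that maximum. */
	*allowed = 0;
	for (nid = 0; nid < MAX_NODES; nid++)
		if (load[nid] == max_value)
			*allowed |= 1UL << nid;

	return target;
}

int main(void)
{
	unsigned int load[MAX_NODES] = { 0, 200, 0, 200, 50, 0, 0, 0 };
	unsigned long allowed;
	int target = toy_find_target_node(load, &allowed);

	/* Nodes 1 and 3 tie, so both end up in the fallback mask (0xa). */
	printf("target=%d allowed=0x%lx\n", target, allowed);
	return 0;
}
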
@@ -802,9 +796,10 @@ static int hpage_collapse_find_target_node(struct collapse_control *cc)
 }
 #endif
 
-static bool hpage_collapse_alloc_page(struct page **hpage, gfp_t gfp, int node)
+static bool hpage_collapse_alloc_page(struct page **hpage, gfp_t gfp, int node,
+                                     nodemask_t *nmask)
 {
-       *hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER);
+       *hpage = __alloc_pages(gfp, HPAGE_PMD_ORDER, node, nmask);
        if (unlikely(!*hpage)) {
                count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
                return false;
@@ -955,12 +950,11 @@ static int __collapse_huge_page_swapin(struct mm_struct *mm,
 static int alloc_charge_hpage(struct page **hpage, struct mm_struct *mm,
                              struct collapse_control *cc)
 {
-       /* Only allocate from the target node */
        gfp_t gfp = (cc->is_khugepaged ? alloc_hugepage_khugepaged_gfpmask() :
-                    GFP_TRANSHUGE) | __GFP_THISNODE;
+                    GFP_TRANSHUGE);
        int node = hpage_collapse_find_target_node(cc);
 
-       if (!hpage_collapse_alloc_page(hpage, gfp, node))
+       if (!hpage_collapse_alloc_page(hpage, gfp, node, &cc->alloc_nmask))
                return SCAN_ALLOC_HUGE_PAGE_FAIL;
        if (unlikely(mem_cgroup_charge(page_folio(*hpage), mm, gfp)))
                return SCAN_CGROUP_CHARGE_FAIL;
@@ -1057,6 +1051,7 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
        _pmd = pmdp_collapse_flush(vma, address, pmd);
        spin_unlock(pmd_ptl);
        mmu_notifier_invalidate_range_end(&range);
+       tlb_remove_table_sync_one();
 
        spin_lock(pte_ptl);
        result =  __collapse_huge_page_isolate(vma, address, pte, cc,
@@ -1144,6 +1139,7 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
                goto out;
 
        memset(cc->node_load, 0, sizeof(cc->node_load));
+       nodes_clear(cc->alloc_nmask);
        pte = pte_offset_map_lock(mm, pmd, address, &ptl);
        for (_address = address, _pte = pte; _pte < pte + HPAGE_PMD_NR;
             _pte++, _address += PAGE_SIZE) {
@@ -1384,16 +1380,43 @@ static int set_huge_pmd(struct vm_area_struct *vma, unsigned long addr,
        return SCAN_SUCCEED;
 }
 
+/*
+ * A note about locking:
+ * Trying to take the page table spinlocks would be useless here because those
+ * are only used to synchronize:
+ *
+ *  - modifying terminal entries (ones that point to a data page, not to another
+ *    page table)
+ *  - installing *new* non-terminal entries
+ *
+ * Instead, we need roughly the same kind of protection as free_pgtables() or
+ * mm_take_all_locks() (but only for a single VMA):
+ * The mmap lock together with this VMA's rmap locks covers all paths towards
+ * the page table entries we're messing with here, except for hardware page
+ * table walks and lockless_pages_from_mm().
+ */
 static void collapse_and_free_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
                                  unsigned long addr, pmd_t *pmdp)
 {
-       spinlock_t *ptl;
        pmd_t pmd;
+       struct mmu_notifier_range range;
 
        mmap_assert_write_locked(mm);
-       ptl = pmd_lock(vma->vm_mm, pmdp);
+       if (vma->vm_file)
+               lockdep_assert_held_write(&vma->vm_file->f_mapping->i_mmap_rwsem);
+       /*
+        * All anon_vmas attached to the VMA have the same root and are
+        * therefore locked by the same lock.
+        */
+       if (vma->anon_vma)
+               lockdep_assert_held_write(&vma->anon_vma->root->rwsem);
+
+       mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL, mm, addr,
+                               addr + HPAGE_PMD_SIZE);
+       mmu_notifier_invalidate_range_start(&range);
        pmd = pmdp_collapse_flush(vma, addr, pmdp);
-       spin_unlock(ptl);
+       tlb_remove_table_sync_one();
+       mmu_notifier_invalidate_range_end(&range);
        mm_dec_nr_ptes(mm);
        page_table_check_pte_clear_range(mm, addr, pmd);
        pte_free(mm, pmd_pgtable(pmd));
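
The tlb_remove_table_sync_one() calls added above (here and after pmdp_collapse_flush() in collapse_huge_page()) deal with GUP-fast, which walks page tables locklessly with only interrupts disabled: after the PMD entry is cleared, the sync makes sure any lockless walker that might still hold a pointer into the old page table has finished before that table is freed, by broadcasting an IPI and waiting on configurations where the TLB flush itself does not already do so. The program below is a rough userspace analogue of that ordering, not kernel code: a per-thread flag stands in for "interrupts disabled" and a spin loop stands in for the IPI broadcast-and-wait; all names are invented for the example (build with -pthread).

/*
 * Rough userspace analogue (illustration only, not kernel code) of the
 * ordering the added tlb_remove_table_sync_one() enforces: a lockless
 * reader (standing in for GUP-fast) only touches the table while its
 * "in_walk" flag is set (standing in for running with IRQs disabled),
 * and the writer unpublishes the table, waits for the flag to clear,
 * and only then frees it.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

static _Atomic(int *) table;        /* stands in for the PMD entry */
static atomic_int in_walk;          /* stands in for "IRQs disabled" */
static atomic_int observed;         /* just to keep the reads alive */

static void *lockless_reader(void *arg)
{
	(void)arg;
	for (int i = 0; i < 1000000; i++) {
		atomic_store(&in_walk, 1);          /* enter the walk */
		int *t = atomic_load(&table);
		if (t)                              /* only deref while published */
			atomic_fetch_add(&observed, *t);
		atomic_store(&in_walk, 0);          /* leave the walk */
	}
	return NULL;
}

int main(void)
{
	pthread_t reader;
	int *pgtable = malloc(sizeof(*pgtable));

	*pgtable = 1;
	atomic_store(&table, pgtable);
	pthread_create(&reader, NULL, lockless_reader, NULL);
	usleep(1000);

	/* Writer: unpublish first (the pmdp_collapse_flush() analogue)... */
	atomic_store(&table, (int *)NULL);
	/* ...then wait out any in-flight walker (the IPI-sync analogue)... */
	while (atomic_load(&in_walk))
		;
	/* ...and only now is it safe to free the old page table. */
	free(pgtable);

	pthread_join(reader, NULL);
	printf("reader saw the table %d times\n", atomic_load(&observed));
	return 0;
}
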
@@ -1477,6 +1500,20 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
                goto drop_hpage;
        }
 
+       /*
+        * We need to lock the mapping so that from here on, only GUP-fast and
+        * hardware page walks can access the parts of the page tables that
+        * we're operating on.
+        * See collapse_and_free_pmd().
+        */
+       i_mmap_lock_write(vma->vm_file->f_mapping);
+
+       /*
+        * This spinlock should be unnecessary: Nobody else should be accessing
+        * the page tables under spinlock protection here, only
+        * lockless_pages_from_mm() and the hardware page walker can access page
+        * tables while all the high-level locks are held in write mode.
+        */
        start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl);
        result = SCAN_FAIL;
 
@@ -1529,8 +1566,16 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
        }
 
        /* step 4: remove pte entries */
+       /* we make no change to anon, but protect concurrent anon page lookup */
+       if (vma->anon_vma)
+               anon_vma_lock_write(vma->anon_vma);
+
        collapse_and_free_pmd(mm, vma, haddr, pmd);
 
+       if (vma->anon_vma)
+               anon_vma_unlock_write(vma->anon_vma);
+       i_mmap_unlock_write(vma->vm_file->f_mapping);
+
 maybe_install_pmd:
        /* step 5: install pmd entry */
        result = install_pmd
@@ -1544,6 +1589,7 @@ drop_hpage:
 
 abort:
        pte_unmap_unlock(start_pte, ptl);
+       i_mmap_unlock_write(vma->vm_file->f_mapping);
        goto drop_hpage;
 }
 
@@ -1600,7 +1646,8 @@ static int retract_page_tables(struct address_space *mapping, pgoff_t pgoff,
                 * An alternative would be drop the check, but check that page
                 * table is clear before calling pmdp_collapse_flush() under
                 * ptl. It has higher chance to recover THP for the VMA, but
-                * has higher cost too.
+                * has higher cost too. It would also probably require locking
+                * the anon_vma.
                 */
                if (vma->anon_vma) {
                        result = SCAN_PAGE_ANON;
@@ -2077,6 +2124,7 @@ static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
        present = 0;
        swap = 0;
        memset(cc->node_load, 0, sizeof(cc->node_load));
+       nodes_clear(cc->alloc_nmask);
        rcu_read_lock();
        xas_for_each(&xas, page, start + HPAGE_PMD_NR - 1) {
                if (xas_retry(&xas, page))
@@ -2157,8 +2205,7 @@ static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
                }
        }
 
-       trace_mm_khugepaged_scan_file(mm, page, file->f_path.dentry->d_iname,
-                                     present, swap, result);
+       trace_mm_khugepaged_scan_file(mm, page, file, present, swap, result);
        return result;
 }
 #else
@@ -2576,7 +2623,6 @@ int madvise_collapse(struct vm_area_struct *vma, struct vm_area_struct **prev,
        if (!cc)
                return -ENOMEM;
        cc->is_khugepaged = false;
-       cc->last_target_node = NUMA_NO_NODE;
 
        mmgrab(mm);
        lru_add_drain_all();
@@ -2598,10 +2644,11 @@ int madvise_collapse(struct vm_area_struct *vma, struct vm_area_struct **prev,
                                goto out_nolock;
                        }
 
-                       hend = vma->vm_end & HPAGE_PMD_MASK;
+                       hend = min(hend, vma->vm_end & HPAGE_PMD_MASK);
                }
                mmap_assert_locked(mm);
                memset(cc->node_load, 0, sizeof(cc->node_load));
+               nodes_clear(cc->alloc_nmask);
                if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) {
                        struct file *file = get_file(vma->vm_file);
                        pgoff_t pgoff = linear_page_index(vma, addr);
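
The min() clamp in the last hunk matters because madvise_collapse() drops and reacquires mmap_lock inside the loop; if the VMA grew in the meantime, refetching hend straight from vma->vm_end could push the collapse past the end the caller actually requested. The short program below only demonstrates the alignment and clamping arithmetic, assuming a 2 MiB PMD huge page and made-up sample addresses.

/*
 * Small arithmetic demo (illustration only) of the min() clamp above,
 * assuming a 2 MiB PMD huge page; the addresses are made-up sample values.
 */
#include <stdio.h>

#define HPAGE_PMD_SIZE	(2UL << 20)
#define HPAGE_PMD_MASK	(~(HPAGE_PMD_SIZE - 1))

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned long hend = 0x600000;       /* aligned end the caller asked for */
	unsigned long new_vm_end = 0xa00000; /* VMA grew while mmap_lock was dropped */

	/* Old behaviour: hend follows the (possibly expanded) VMA. */
	unsigned long old = new_vm_end & HPAGE_PMD_MASK;
	/* Fixed behaviour: never collapse past the originally requested end. */
	unsigned long fixed = min_ul(hend, new_vm_end & HPAGE_PMD_MASK);

	printf("old hend=0x%lx, clamped hend=0x%lx\n", old, fixed);
	return 0;
}
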