khugepaged: do not stop collapse if less than half PTEs are referenced
author Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Wed, 3 Jun 2020 23:00:09 +0000 (16:00 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Thu, 4 Jun 2020 03:09:46 +0000 (20:09 -0700)
__collapse_huge_page_swapin() checks the number of referenced PTEs to
decide whether the memory range is hot enough to justify swapin.

There are a few problems with this approach:

 - It happens way too late: we can do the check much earlier and save
   time.  khugepaged_scan_pmd() already knows whether we have any pages
   to swap in and the number of referenced pages.

 - It stops the collapse altogether if there are not enough referenced
   pages, rather than only skipping the swapin.

Fix it by making the right check early, in khugepaged_scan_pmd().  We
can also avoid the additional page table scan if khugepaged_scan_pmd()
hasn't found any swap entries.
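
The new scan-time decision condenses to the following (taken from the
khugepaged_scan_pmd() hunk below):

	if (!writable) {
		result = SCAN_PAGE_RO;
	} else if (!referenced || (unmapped && referenced < HPAGE_PMD_NR/2)) {
		/* Not hot enough: require at least half the PTEs young
		 * before paying the cost of swapping pages back in. */
		result = SCAN_LACK_REFERENCED_PAGE;
	} else {
		result = SCAN_SUCCEED;
		ret = 1;
	}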

Fixes: 0db501f7a34c ("mm, thp: convert from optimistic swapin collapsing to conservative")
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Tested-by: Zi Yan <ziy@nvidia.com>
Reviewed-by: William Kucharski <william.kucharski@oracle.com>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Acked-by: Yang Shi <yang.shi@linux.alibaba.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Ralph Campbell <rcampbell@nvidia.com>
Link: http://lkml.kernel.org/r/20200416160026.16538-3-kirill.shutemov@linux.intel.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/khugepaged.c

index cd280af..c436fd3 100644
@@ -899,11 +899,6 @@ static bool __collapse_huge_page_swapin(struct mm_struct *mm,
                .pgoff = linear_page_index(vma, address),
        };
 
-       /* we only decide to swapin, if there is enough young ptes */
-       if (referenced < HPAGE_PMD_NR/2) {
-               trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
-               return false;
-       }
        vmf.pte = pte_offset_map(pmd, address);
        for (; vmf.address < address + HPAGE_PMD_NR*PAGE_SIZE;
                        vmf.pte++, vmf.address += PAGE_SIZE) {
@@ -943,7 +938,7 @@ static bool __collapse_huge_page_swapin(struct mm_struct *mm,
 static void collapse_huge_page(struct mm_struct *mm,
                                   unsigned long address,
                                   struct page **hpage,
-                                  int node, int referenced)
+                                  int node, int referenced, int unmapped)
 {
        pmd_t *pmd, _pmd;
        pte_t *pte;
@@ -1000,7 +995,8 @@ static void collapse_huge_page(struct mm_struct *mm,
         * If it fails, we release mmap_sem and jump out_nolock.
         * Continuing to collapse causes inconsistency.
         */
-       if (!__collapse_huge_page_swapin(mm, vma, address, pmd, referenced)) {
+       if (unmapped && !__collapse_huge_page_swapin(mm, vma, address,
+                                                    pmd, referenced)) {
                mem_cgroup_cancel_charge(new_page, memcg, true);
                up_read(&mm->mmap_sem);
                goto out_nolock;
@@ -1233,22 +1229,21 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
                    mmu_notifier_test_young(vma->vm_mm, address))
                        referenced++;
        }
-       if (writable) {
-               if (referenced) {
-                       result = SCAN_SUCCEED;
-                       ret = 1;
-               } else {
-                       result = SCAN_LACK_REFERENCED_PAGE;
-               }
-       } else {
+       if (!writable) {
                result = SCAN_PAGE_RO;
+       } else if (!referenced || (unmapped && referenced < HPAGE_PMD_NR/2)) {
+               result = SCAN_LACK_REFERENCED_PAGE;
+       } else {
+               result = SCAN_SUCCEED;
+               ret = 1;
        }
 out_unmap:
        pte_unmap_unlock(pte, ptl);
        if (ret) {
                node = khugepaged_find_target_node();
                /* collapse_huge_page will return with the mmap_sem released */
-               collapse_huge_page(mm, address, hpage, node, referenced);
+               collapse_huge_page(mm, address, hpage, node,
+                               referenced, unmapped);
        }
 out:
        trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,