mm: thp: kill transparent_hugepage_active()
Author:     Yang Shi <shy828301@gmail.com>
AuthorDate: Thu, 16 Jun 2022 17:48:37 +0000 (10:48 -0700)
Commit:     akpm <akpm@linux-foundation.org>
CommitDate: Mon, 18 Jul 2022 00:14:33 +0000 (17:14 -0700)
transparent_hugepage_active() was introduced to show the THP eligibility
bit in smaps in proc, and smaps is its only user.  But it actually does a
similar check to hugepage_vma_check(), which is used by khugepaged.  We
definitely don't have to maintain two similar checks, so kill
transparent_hugepage_active().

This patch also fixes the wrong behavior for VM_NO_KHUGEPAGED vmas:
transparent_hugepage_active() did not check VM_NO_KHUGEPAGED, so smaps
could report such vmas as THP-eligible even though khugepaged never
collapses them.

Also move hugepage_vma_check() to huge_memory.c and huge_mm.h since it
is no longer used only by khugepaged.
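
To make the new smaps parameter concrete, here is a minimal userspace
sketch (not kernel code; vma_stub, check_tail, and their fields are
made-up stand-ins) modeling the tail of the new hugepage_vma_check():
with smaps=true a fresh anonymous vma with no anon_vma yet still reads
as eligible, while khugepaged (smaps=false) skips it.

	#include <stdbool.h>
	#include <stdio.h>

	/* Simplified stand-in for struct vm_area_struct. */
	struct vma_stub {
		bool anonymous;      /* would be vma_is_anonymous() */
		bool has_anon_vma;   /* would be vma->anon_vma != NULL */
	};

	/*
	 * Mirrors only the final checks of hugepage_vma_check();
	 * the temporary-stack check is omitted for brevity.
	 */
	static bool check_tail(const struct vma_stub *v, bool smaps)
	{
		if (!v->anonymous)
			return false;
		if (!v->has_anon_vma)
			return smaps;   /* eligible for reporting only */
		return true;
	}

	int main(void)
	{
		struct vma_stub fresh = { .anonymous = true, .has_anon_vma = false };

		printf("smaps view:      %d\n", check_tail(&fresh, true));  /* 1 */
		printf("khugepaged view: %d\n", check_tail(&fresh, false)); /* 0 */
		return 0;
	}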

[akpm@linux-foundation.org: check vma->vm_mm, per Zach]
[akpm@linux-foundation.org: add comment to vdso check]
Link: https://lkml.kernel.org/r/20220616174840.1202070-5-shy828301@gmail.com
Signed-off-by: Yang Shi <shy828301@gmail.com>
Reviewed-by: Zach O'Keefe <zokeefe@google.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
fs/proc/task_mmu.c
include/linux/huge_mm.h
include/linux/khugepaged.h
mm/huge_memory.c
mm/khugepaged.c

diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 1d7fd83..072cf77 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -863,7 +863,7 @@ static int show_smap(struct seq_file *m, void *v)
        __show_smap(m, &mss, false);
 
        seq_printf(m, "THPeligible:    %d\n",
-                  transparent_hugepage_active(vma));
+                  hugepage_vma_check(vma, vma->vm_flags, true));
 
        if (arch_pkeys_enabled())
                seq_printf(m, "ProtectionKey:  %8u\n", vma_pkey(vma));
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 8a5a8bf..64487bc 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -202,7 +202,9 @@ static inline bool file_thp_enabled(struct vm_area_struct *vma)
               !inode_is_open_for_write(inode) && S_ISREG(inode->i_mode);
 }
 
-bool transparent_hugepage_active(struct vm_area_struct *vma);
+bool hugepage_vma_check(struct vm_area_struct *vma,
+                       unsigned long vm_flags,
+                       bool smaps);
 
 #define transparent_hugepage_use_zero_page()                           \
        (transparent_hugepage_flags &                                   \
@@ -351,11 +353,6 @@ static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
        return false;
 }
 
-static inline bool transparent_hugepage_active(struct vm_area_struct *vma)
-{
-       return false;
-}
-
 static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
                unsigned long addr)
 {
@@ -368,6 +365,13 @@ static inline bool transhuge_vma_enabled(struct vm_area_struct *vma,
        return false;
 }
 
+static inline bool hugepage_vma_check(struct vm_area_struct *vma,
+                                      unsigned long vm_flags,
+                                      bool smaps)
+{
+       return false;
+}
+
 static inline void prep_transhuge_page(struct page *page) {}
 
 #define transparent_hugepage_flags 0UL
diff --git a/include/linux/khugepaged.h b/include/linux/khugepaged.h
index 31ca8a7..ea5fd4c 100644
--- a/include/linux/khugepaged.h
+++ b/include/linux/khugepaged.h
@@ -10,8 +10,6 @@ extern struct attribute_group khugepaged_attr_group;
 extern int khugepaged_init(void);
 extern void khugepaged_destroy(void);
 extern int start_stop_khugepaged(void);
-extern bool hugepage_vma_check(struct vm_area_struct *vma,
-                              unsigned long vm_flags);
 extern void __khugepaged_enter(struct mm_struct *mm);
 extern void __khugepaged_exit(struct mm_struct *mm);
 extern void khugepaged_enter_vma(struct vm_area_struct *vma,
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 2751649..8cbd21a 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -69,21 +69,56 @@ static atomic_t huge_zero_refcount;
 struct page *huge_zero_page __read_mostly;
 unsigned long huge_zero_pfn __read_mostly = ~0UL;
 
-bool transparent_hugepage_active(struct vm_area_struct *vma)
+bool hugepage_vma_check(struct vm_area_struct *vma,
+                       unsigned long vm_flags,
+                       bool smaps)
 {
-       /* The addr is used to check if the vma size fits */
-       unsigned long addr = (vma->vm_end & HPAGE_PMD_MASK) - HPAGE_PMD_SIZE;
+       if (!vma->vm_mm)                /* vdso */
+               return false;
+
+       if (!transhuge_vma_enabled(vma, vm_flags))
+               return false;
 
-       if (!transhuge_vma_suitable(vma, addr))
+       if (vm_flags & VM_NO_KHUGEPAGED)
                return false;
-       if (vma_is_anonymous(vma))
-               return __transparent_hugepage_enabled(vma);
-       if (vma_is_shmem(vma))
+
+       /* Don't run khugepaged against DAX vma */
+       if (vma_is_dax(vma))
+               return false;
+
+       /* Check alignment for file vma and size for both file and anon vma */
+       if (!transhuge_vma_suitable(vma, (vma->vm_end - HPAGE_PMD_SIZE)))
+               return false;
+
+       /* Enabled via shmem mount options or sysfs settings. */
+       if (shmem_file(vma->vm_file))
                return shmem_huge_enabled(vma);
-       if (transhuge_vma_enabled(vma, vma->vm_flags) && file_thp_enabled(vma))
+
+       if (!khugepaged_enabled())
+               return false;
+
+       /* THP settings require madvise. */
+       if (!(vm_flags & VM_HUGEPAGE) && !khugepaged_always())
+               return false;
+
+       /* Only regular file is valid */
+       if (file_thp_enabled(vma))
                return true;
 
-       return false;
+       if (!vma_is_anonymous(vma))
+               return false;
+
+       if (vma_is_temporary_stack(vma))
+               return false;
+
+       /*
+        * THPeligible bit of smaps should show 1 for proper VMAs even
+        * though anon_vma is not initialized yet.
+        */
+       if (!vma->anon_vma)
+               return smaps;
+
+       return true;
 }
 
 static bool get_huge_zero_page(void)
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 67e144e..6bbf3ad 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -430,46 +430,6 @@ static inline int khugepaged_test_exit(struct mm_struct *mm)
        return atomic_read(&mm->mm_users) == 0;
 }
 
-bool hugepage_vma_check(struct vm_area_struct *vma,
-                       unsigned long vm_flags)
-{
-       if (!transhuge_vma_enabled(vma, vm_flags))
-               return false;
-
-       if (vm_flags & VM_NO_KHUGEPAGED)
-               return false;
-
-       /* Don't run khugepaged against DAX vma */
-       if (vma_is_dax(vma))
-               return false;
-
-       /* Check alignment for file vma and size for both file and anon vma */
-       if (!transhuge_vma_suitable(vma, (vma->vm_end - HPAGE_PMD_SIZE)))
-               return false;
-
-       /* Enabled via shmem mount options or sysfs settings. */
-       if (shmem_file(vma->vm_file))
-               return shmem_huge_enabled(vma);
-
-       if (!khugepaged_enabled())
-               return false;
-
-       /* THP settings require madvise. */
-       if (!(vm_flags & VM_HUGEPAGE) && !khugepaged_always())
-               return false;
-
-       /* Only regular file is valid */
-       if (file_thp_enabled(vma))
-               return true;
-
-       if (!vma->anon_vma || !vma_is_anonymous(vma))
-               return false;
-       if (vma_is_temporary_stack(vma))
-               return false;
-
-       return true;
-}
-
 void __khugepaged_enter(struct mm_struct *mm)
 {
        struct mm_slot *mm_slot;
@@ -506,7 +466,7 @@ void khugepaged_enter_vma(struct vm_area_struct *vma,
 {
        if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags) &&
            khugepaged_enabled()) {
-               if (hugepage_vma_check(vma, vm_flags))
+               if (hugepage_vma_check(vma, vm_flags, false))
                        __khugepaged_enter(vma->vm_mm);
        }
 }
@@ -956,7 +916,7 @@ static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
 
        if (!transhuge_vma_suitable(vma, address))
                return SCAN_ADDRESS_RANGE;
-       if (!hugepage_vma_check(vma, vma->vm_flags))
+       if (!hugepage_vma_check(vma, vma->vm_flags, false))
                return SCAN_VMA_CHECK;
        /*
         * Anon VMA expected, the address may be unmapped then
@@ -1441,7 +1401,7 @@ void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr)
         * the valid THP. Add extra VM_HUGEPAGE so hugepage_vma_check()
         * will not fail the vma for missing VM_HUGEPAGE
         */
-       if (!hugepage_vma_check(vma, vma->vm_flags | VM_HUGEPAGE))
+       if (!hugepage_vma_check(vma, vma->vm_flags | VM_HUGEPAGE, false))
                return;
 
        /* Keep pmd pgtable for uffd-wp; see comment in retract_page_tables() */
@@ -2131,7 +2091,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
                        progress++;
                        break;
                }
-               if (!hugepage_vma_check(vma, vma->vm_flags)) {
+               if (!hugepage_vma_check(vma, vma->vm_flags, false)) {
 skip:
                        progress++;
                        continue;