mm: khugepaged: introduce khugepaged_enter_vma() helper
author Yang Shi <shy828301@gmail.com>
Thu, 19 May 2022 21:08:50 +0000 (14:08 -0700)
committer akpm <akpm@linux-foundation.org>
Thu, 19 May 2022 21:08:50 +0000 (14:08 -0700)
khugepaged_enter_vma_merge() actually does the same thing as the
khugepaged_enter() sequence open-coded in shmem_mmap(), so consolidate
them into one helper and rename it to khugepaged_enter_vma().
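
For context, here is roughly what the consolidated helper looks like
after this patch, pieced together from the mm/khugepaged.c hunk below
plus the PMD-alignment check lifted out of shmem_mmap() (a sketch, not
necessarily the exact upstream body):

void khugepaged_enter_vma(struct vm_area_struct *vma,
			  unsigned long vm_flags)
{
	/* Skip mms already registered with khugepaged. */
	if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags) &&
	    khugepaged_enabled() &&
	    /* Register only if the VMA spans at least one full PMD. */
	    (((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) <
	     (vma->vm_end & HPAGE_PMD_MASK))) {
		if (hugepage_vma_check(vma, vm_flags))
			__khugepaged_enter(vma->vm_mm);
	}
}

With this in place, vma_merge(), the stack expansion paths, and the
shmem mmap/zero-setup paths all funnel through the same entry point,
as the hunks below show.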

Link: https://lkml.kernel.org/r/20220510203222.24246-8-shy828301@gmail.com
Signed-off-by: Yang Shi <shy828301@gmail.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Rik van Riel <riel@surriel.com>
Cc: Song Liu <song@kernel.org>
Cc: Song Liu <songliubraving@fb.com>
Cc: Theodore Ts'o <tytso@mit.edu>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/khugepaged.h
mm/khugepaged.c
mm/mmap.c
mm/shmem.c

index c340f6b..392d34c 100644
--- a/include/linux/khugepaged.h
+++ b/include/linux/khugepaged.h
@@ -14,8 +14,8 @@ extern bool hugepage_vma_check(struct vm_area_struct *vma,
                               unsigned long vm_flags);
 extern void __khugepaged_enter(struct mm_struct *mm);
 extern void __khugepaged_exit(struct mm_struct *mm);
-extern void khugepaged_enter_vma_merge(struct vm_area_struct *vma,
-                                      unsigned long vm_flags);
+extern void khugepaged_enter_vma(struct vm_area_struct *vma,
+                                unsigned long vm_flags);
 extern void khugepaged_min_free_kbytes_update(void);
 #ifdef CONFIG_SHMEM
 extern void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr);
@@ -72,8 +72,8 @@ static inline void khugepaged_enter(struct vm_area_struct *vma,
                                    unsigned long vm_flags)
 {
 }
-static inline void khugepaged_enter_vma_merge(struct vm_area_struct *vma,
-                                             unsigned long vm_flags)
+static inline void khugepaged_enter_vma(struct vm_area_struct *vma,
+                                       unsigned long vm_flags)
 {
 }
 static inline void collapse_pte_mapped_thp(struct mm_struct *mm,
index 9ef626e..16be62d 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -365,7 +365,7 @@ int hugepage_madvise(struct vm_area_struct *vma,
                 * register it here without waiting a page fault that
                 * may not happen any time soon.
                 */
-               khugepaged_enter_vma_merge(vma, *vm_flags);
+               khugepaged_enter_vma(vma, *vm_flags);
                break;
        case MADV_NOHUGEPAGE:
                *vm_flags &= ~VM_HUGEPAGE;
@@ -505,8 +505,8 @@ void __khugepaged_enter(struct mm_struct *mm)
                wake_up_interruptible(&khugepaged_wait);
 }
 
-void khugepaged_enter_vma_merge(struct vm_area_struct *vma,
-                              unsigned long vm_flags)
+void khugepaged_enter_vma(struct vm_area_struct *vma,
+                         unsigned long vm_flags)
 {
        if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags) &&
            khugepaged_enabled() &&
index 7f7d982..4456bf8 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1223,7 +1223,7 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
                                         end, prev->vm_pgoff, NULL, prev);
                if (err)
                        return NULL;
-               khugepaged_enter_vma_merge(prev, vm_flags);
+               khugepaged_enter_vma(prev, vm_flags);
                return prev;
        }
 
@@ -1250,7 +1250,7 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
                }
                if (err)
                        return NULL;
-               khugepaged_enter_vma_merge(area, vm_flags);
+               khugepaged_enter_vma(area, vm_flags);
                return area;
        }
 
@@ -2450,7 +2450,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
                }
        }
        anon_vma_unlock_write(vma->anon_vma);
-       khugepaged_enter_vma_merge(vma, vma->vm_flags);
+       khugepaged_enter_vma(vma, vma->vm_flags);
        validate_mm(mm);
        return error;
 }
@@ -2528,7 +2528,7 @@ int expand_downwards(struct vm_area_struct *vma,
                }
        }
        anon_vma_unlock_write(vma->anon_vma);
-       khugepaged_enter_vma_merge(vma, vma->vm_flags);
+       khugepaged_enter_vma(vma, vma->vm_flags);
        validate_mm(mm);
        return error;
 }
index 29701be..89f6f4f 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2232,11 +2232,7 @@ static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
 
        file_accessed(file);
        vma->vm_ops = &shmem_vm_ops;
-       if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
-                       ((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) <
-                       (vma->vm_end & HPAGE_PMD_MASK)) {
-               khugepaged_enter(vma, vma->vm_flags);
-       }
+       khugepaged_enter_vma(vma, vma->vm_flags);
        return 0;
 }
 
@@ -4137,11 +4133,7 @@ int shmem_zero_setup(struct vm_area_struct *vma)
        vma->vm_file = file;
        vma->vm_ops = &shmem_vm_ops;
 
-       if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
-                       ((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) <
-                       (vma->vm_end & HPAGE_PMD_MASK)) {
-               khugepaged_enter(vma, vma->vm_flags);
-       }
+       khugepaged_enter_vma(vma, vma->vm_flags);
 
        return 0;
 }