mm/pagewalk: add walk_page_range_vma()
author    David Hildenbrand <david@redhat.com>
          Fri, 21 Oct 2022 10:11:39 +0000 (12:11 +0200)
committer Andrew Morton <akpm@linux-foundation.org>
          Mon, 12 Dec 2022 02:12:08 +0000 (18:12 -0800)
Let's add walk_page_range_vma(), which is similar to walk_page_vma() but
walks only a subset of the VMA range.

This will be used next by KSM code to stop using follow_page().
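For illustration only (not part of this patch): a minimal sketch of how a
caller such as KSM might use the new helper to inspect the PTE mapping a
single address within a VMA instead of calling follow_page(). The names
ksm_pte_entry, ksm_walk_ops and walk_one_page are made up for this sketch;
struct mm_walk_ops, the pte_entry callback and walk_page_range_vma() are the
interfaces touched by this patch.

#include <linux/mm.h>
#include <linux/pagewalk.h>

/* Hypothetical callback: invoked for each PTE in the walked range. */
static int ksm_pte_entry(pte_t *pte, unsigned long addr, unsigned long next,
			 struct mm_walk *walk)
{
	/* Inspect *pte here; walk->private carries caller state. */
	return 0;
}

static const struct mm_walk_ops ksm_walk_ops = {
	.pte_entry	= ksm_pte_entry,
};

/* Hypothetical caller: walk exactly one page of @vma at @addr. */
static int walk_one_page(struct vm_area_struct *vma, unsigned long addr,
			 void *priv)
{
	/* The caller must hold the mmap lock of vma->vm_mm. */
	return walk_page_range_vma(vma, addr, addr + PAGE_SIZE,
				   &ksm_walk_ops, priv);
}
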

Link: https://lkml.kernel.org/r/20221021101141.84170-8-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Jason Gunthorpe <jgg@nvidia.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Peter Xu <peterx@redhat.com>
Cc: Shuah Khan <shuah@kernel.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/pagewalk.h
mm/pagewalk.c

diff --git a/include/linux/pagewalk.h b/include/linux/pagewalk.h
index 37dc020..959f52e 100644
--- a/include/linux/pagewalk.h
+++ b/include/linux/pagewalk.h
@@ -101,6 +101,9 @@ int walk_page_range_novma(struct mm_struct *mm, unsigned long start,
                          unsigned long end, const struct mm_walk_ops *ops,
                          pgd_t *pgd,
                          void *private);
+int walk_page_range_vma(struct vm_area_struct *vma, unsigned long start,
+                       unsigned long end, const struct mm_walk_ops *ops,
+                       void *private);
 int walk_page_vma(struct vm_area_struct *vma, const struct mm_walk_ops *ops,
                void *private);
 int walk_page_mapping(struct address_space *mapping, pgoff_t first_index,
diff --git a/mm/pagewalk.c b/mm/pagewalk.c
index 0a5d71a..7f1c9b2 100644
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -517,6 +517,26 @@ int walk_page_range_novma(struct mm_struct *mm, unsigned long start,
        return walk_pgd_range(start, end, &walk);
 }
 
+int walk_page_range_vma(struct vm_area_struct *vma, unsigned long start,
+                       unsigned long end, const struct mm_walk_ops *ops,
+                       void *private)
+{
+       struct mm_walk walk = {
+               .ops            = ops,
+               .mm             = vma->vm_mm,
+               .vma            = vma,
+               .private        = private,
+       };
+
+       if (start >= end || !walk.mm)
+               return -EINVAL;
+       if (start < vma->vm_start || end > vma->vm_end)
+               return -EINVAL;
+
+       mmap_assert_locked(walk.mm);
+       return __walk_page_range(start, end, &walk);
+}
+
 int walk_page_vma(struct vm_area_struct *vma, const struct mm_walk_ops *ops,
                void *private)
 {