mm: Add DEFINE_PAGE_VMA_WALK and DEFINE_FOLIO_VMA_WALK
author		Matthew Wilcox (Oracle) <willy@infradead.org>
		Thu, 3 Feb 2022 14:06:08 +0000 (09:06 -0500)
committer	Matthew Wilcox (Oracle) <willy@infradead.org>
		Mon, 21 Mar 2022 16:59:02 +0000 (12:59 -0400)
Instead of declaring a struct page_vma_mapped_walk directly,
use these helpers to allow us to transition to a PFN approach in the
following patches.
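
For illustration only, a minimal before/after sketch of a hypothetical
caller (example_ref_one() is made up for this message; the real
conversions are in the diff below):

	/* Before: open-coded initializer, with .flags implicitly zero. */
	static bool example_ref_one(struct page *page,
			struct vm_area_struct *vma, unsigned long address)
	{
		struct page_vma_mapped_walk pvmw = {
			.page = page,
			.vma = vma,
			.address = address,
		};

		while (page_vma_mapped_walk(&pvmw)) {
			/* operate on pvmw.pte / pvmw.pmd */
		}
		return true;
	}

	/*
	 * After: only the macro names the struct members, so a later
	 * patch can switch the walk to a PFN representation in one
	 * place instead of at every caller.
	 */
	static bool example_ref_one(struct page *page,
			struct vm_area_struct *vma, unsigned long address)
	{
		DEFINE_PAGE_VMA_WALK(pvmw, page, vma, address, 0);

		while (page_vma_mapped_walk(&pvmw)) {
			/* operate on pvmw.pte / pvmw.pmd */
		}
		return true;
	}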

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
include/linux/rmap.h
kernel/events/uprobes.c
mm/damon/paddr.c
mm/ksm.c
mm/migrate.c
mm/page_idle.c
mm/rmap.c

diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index ac29b07..0d894a2 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -214,6 +214,22 @@ struct page_vma_mapped_walk {
        unsigned int flags;
 };
 
+#define DEFINE_PAGE_VMA_WALK(name, _page, _vma, _address, _flags)      \
+       struct page_vma_mapped_walk name = {                            \
+               .page = _page,                                          \
+               .vma = _vma,                                            \
+               .address = _address,                                    \
+               .flags = _flags,                                        \
+       }
+
+#define DEFINE_FOLIO_VMA_WALK(name, _folio, _vma, _address, _flags)    \
+       struct page_vma_mapped_walk name = {                            \
+               .page = &_folio->page,                                  \
+               .vma = _vma,                                            \
+               .address = _address,                                    \
+               .flags = _flags,                                        \
+       }
+
 static inline void page_vma_mapped_walk_done(struct page_vma_mapped_walk *pvmw)
 {
        /* HugeTLB pte is set to the relevant page table entry without pte_mapped. */
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index eed2f74..6418083 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -155,11 +155,7 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
                                struct page *old_page, struct page *new_page)
 {
        struct mm_struct *mm = vma->vm_mm;
-       struct page_vma_mapped_walk pvmw = {
-               .page = compound_head(old_page),
-               .vma = vma,
-               .address = addr,
-       };
+       DEFINE_FOLIO_VMA_WALK(pvmw, page_folio(old_page), vma, addr, 0);
        int err;
        struct mmu_notifier_range range;
 
diff --git a/mm/damon/paddr.c b/mm/damon/paddr.c
index 5e8244f..cb45d49 100644
--- a/mm/damon/paddr.c
+++ b/mm/damon/paddr.c
 static bool __damon_pa_mkold(struct page *page, struct vm_area_struct *vma,
                unsigned long addr, void *arg)
 {
-       struct page_vma_mapped_walk pvmw = {
-               .page = page,
-               .vma = vma,
-               .address = addr,
-       };
+       DEFINE_PAGE_VMA_WALK(pvmw, page, vma, addr, 0);
 
        while (page_vma_mapped_walk(&pvmw)) {
                addr = pvmw.address;
@@ -93,11 +89,7 @@ static bool __damon_pa_young(struct page *page, struct vm_area_struct *vma,
                unsigned long addr, void *arg)
 {
        struct damon_pa_access_chk_result *result = arg;
-       struct page_vma_mapped_walk pvmw = {
-               .page = page,
-               .vma = vma,
-               .address = addr,
-       };
+       DEFINE_PAGE_VMA_WALK(pvmw, page, vma, addr, 0);
 
        result->accessed = false;
        result->page_sz = PAGE_SIZE;
diff --git a/mm/ksm.c b/mm/ksm.c
index c5a4403..ea82fef 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1034,10 +1034,7 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page,
                              pte_t *orig_pte)
 {
        struct mm_struct *mm = vma->vm_mm;
-       struct page_vma_mapped_walk pvmw = {
-               .page = page,
-               .vma = vma,
-       };
+       DEFINE_PAGE_VMA_WALK(pvmw, page, vma, 0, 0);
        int swapped;
        int err = -EFAULT;
        struct mmu_notifier_range range;
diff --git a/mm/migrate.c b/mm/migrate.c
index f407609..71f92e8 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -174,12 +174,7 @@ void putback_movable_pages(struct list_head *l)
 static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma,
                                 unsigned long addr, void *old)
 {
-       struct page_vma_mapped_walk pvmw = {
-               .page = old,
-               .vma = vma,
-               .address = addr,
-               .flags = PVMW_SYNC | PVMW_MIGRATION,
-       };
+       DEFINE_PAGE_VMA_WALK(pvmw, old, vma, addr, PVMW_SYNC | PVMW_MIGRATION);
        struct page *new;
        pte_t pte;
        swp_entry_t entry;
diff --git a/mm/page_idle.c b/mm/page_idle.c
index edead6a..3e05bf1 100644
--- a/mm/page_idle.c
+++ b/mm/page_idle.c
@@ -48,11 +48,7 @@ static bool page_idle_clear_pte_refs_one(struct page *page,
                                        struct vm_area_struct *vma,
                                        unsigned long addr, void *arg)
 {
-       struct page_vma_mapped_walk pvmw = {
-               .page = page,
-               .vma = vma,
-               .address = addr,
-       };
+       DEFINE_PAGE_VMA_WALK(pvmw, page, vma, addr, 0);
        bool referenced = false;
 
        while (page_vma_mapped_walk(&pvmw)) {
diff --git a/mm/rmap.c b/mm/rmap.c
index 1a13d5d..a7f06b7 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -802,11 +802,7 @@ static bool page_referenced_one(struct page *page, struct vm_area_struct *vma,
                        unsigned long address, void *arg)
 {
        struct page_referenced_arg *pra = arg;
-       struct page_vma_mapped_walk pvmw = {
-               .page = page,
-               .vma = vma,
-               .address = address,
-       };
+       DEFINE_PAGE_VMA_WALK(pvmw, page, vma, address, 0);
        int referenced = 0;
 
        while (page_vma_mapped_walk(&pvmw)) {
@@ -934,12 +930,7 @@ int page_referenced(struct page *page,
 static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
                            unsigned long address, void *arg)
 {
-       struct page_vma_mapped_walk pvmw = {
-               .page = page,
-               .vma = vma,
-               .address = address,
-               .flags = PVMW_SYNC,
-       };
+       DEFINE_PAGE_VMA_WALK(pvmw, page, vma, address, PVMW_SYNC);
        struct mmu_notifier_range range;
        int *cleaned = arg;
 
@@ -1419,11 +1410,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                     unsigned long address, void *arg)
 {
        struct mm_struct *mm = vma->vm_mm;
-       struct page_vma_mapped_walk pvmw = {
-               .page = page,
-               .vma = vma,
-               .address = address,
-       };
+       DEFINE_PAGE_VMA_WALK(pvmw, page, vma, address, 0);
        pte_t pteval;
        struct page *subpage;
        bool ret = true;
@@ -1714,11 +1701,7 @@ static bool try_to_migrate_one(struct page *page, struct vm_area_struct *vma,
                     unsigned long address, void *arg)
 {
        struct mm_struct *mm = vma->vm_mm;
-       struct page_vma_mapped_walk pvmw = {
-               .page = page,
-               .vma = vma,
-               .address = address,
-       };
+       DEFINE_PAGE_VMA_WALK(pvmw, page, vma, address, 0);
        pte_t pteval;
        struct page *subpage;
        bool ret = true;
@@ -2001,11 +1984,7 @@ static bool page_make_device_exclusive_one(struct page *page,
                struct vm_area_struct *vma, unsigned long address, void *priv)
 {
        struct mm_struct *mm = vma->vm_mm;
-       struct page_vma_mapped_walk pvmw = {
-               .page = page,
-               .vma = vma,
-               .address = address,
-       };
+       DEFINE_PAGE_VMA_WALK(pvmw, page, vma, address, 0);
        struct make_exclusive_args *args = priv;
        pte_t pteval;
        struct page *subpage;