mm: add vma_alloc_zeroed_movable_folio()
author	Matthew Wilcox (Oracle) <willy@infradead.org>
Mon, 16 Jan 2023 19:18:09 +0000 (19:18 +0000)
committer	Andrew Morton <akpm@linux-foundation.org>
Fri, 3 Feb 2023 06:33:18 +0000 (22:33 -0800)
Replace alloc_zeroed_user_highpage_movable().  The main difference is
returning a folio containing a single page instead of returning the page,
but take the opportunity to rename the function to match other allocation
functions a little better and rewrite the documentation to place more
emphasis on the zeroing rather than the highmem aspect.
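For callers the conversion is mechanical.  A minimal before/after sketch
(the variable names and the oom label are illustrative, mirroring the
mm/memory.c hunks below):

	/* Before: the helper returned a bare page. */
	struct page *page;

	page = alloc_zeroed_user_highpage_movable(vma, vmf->address);
	if (!page)
		goto oom;

	/* After: the helper returns a single-page folio; a caller that
	 * still needs the struct page takes it from the folio. */
	struct folio *folio;

	folio = vma_alloc_zeroed_movable_folio(vma, vmf->address);
	if (!folio)
		goto oom;
	page = &folio->page;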

Link: https://lkml.kernel.org/r/20230116191813.2145215-2-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
arch/alpha/include/asm/page.h
arch/arm64/include/asm/page.h
arch/arm64/mm/fault.c
arch/ia64/include/asm/page.h
arch/m68k/include/asm/page_no.h
arch/s390/include/asm/page.h
arch/x86/include/asm/page.h
include/linux/highmem.h
mm/memory.c

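The arch conversions below all apply the same substitution.  Note the two
arguments vma_alloc_folio() takes that alloc_page_vma() did not (a sketch
of the pattern, not tied to any one architecture):

	/* Old arch macro: */
	#define alloc_zeroed_user_highpage_movable(vma, vaddr) \
		alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, vma, vaddr)

	/* New arch macro: order 0 asks for a single page; the trailing
	 * 'false' is vma_alloc_folio()'s THP/hugepage placement hint. */
	#define vma_alloc_zeroed_movable_folio(vma, vaddr) \
		vma_alloc_folio(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, 0, vma, vaddr, false)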
diff --git a/arch/alpha/include/asm/page.h b/arch/alpha/include/asm/page.h
index 8f3f5ee..bc5256f 100644
@@ -17,9 +17,8 @@
 extern void clear_page(void *page);
 #define clear_user_page(page, vaddr, pg)       clear_page(page)
 
-#define alloc_zeroed_user_highpage_movable(vma, vaddr) \
-       alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, vma, vaddr)
-#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE
+#define vma_alloc_zeroed_movable_folio(vma, vaddr) \
+       vma_alloc_folio(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, 0, vma, vaddr, false)
 
 extern void copy_page(void * _to, void * _from);
 #define copy_user_page(to, from, vaddr, pg)    copy_page(to, from)
diff --git a/arch/arm64/include/asm/page.h b/arch/arm64/include/asm/page.h
index 993a27e..2312e6e 100644
@@ -29,9 +29,9 @@ void copy_user_highpage(struct page *to, struct page *from,
 void copy_highpage(struct page *to, struct page *from);
 #define __HAVE_ARCH_COPY_HIGHPAGE
 
-struct page *alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
+struct folio *vma_alloc_zeroed_movable_folio(struct vm_area_struct *vma,
                                                unsigned long vaddr);
-#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE
+#define vma_alloc_zeroed_movable_folio vma_alloc_zeroed_movable_folio
 
 void tag_clear_highpage(struct page *to);
 #define __HAVE_ARCH_TAG_CLEAR_HIGHPAGE
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 596f46d..f4cb0f8 100644
@@ -925,7 +925,7 @@ NOKPROBE_SYMBOL(do_debug_exception);
 /*
  * Used during anonymous page fault handling.
  */
-struct page *alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
+struct folio *vma_alloc_zeroed_movable_folio(struct vm_area_struct *vma,
                                                unsigned long vaddr)
 {
        gfp_t flags = GFP_HIGHUSER_MOVABLE | __GFP_ZERO;
@@ -938,7 +938,7 @@ struct page *alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
        if (vma->vm_flags & VM_MTE)
                flags |= __GFP_ZEROTAGS;
 
-       return alloc_page_vma(flags, vma, vaddr);
+       return vma_alloc_folio(flags, 0, vma, vaddr, false);
 }
 
 void tag_clear_highpage(struct page *page)
diff --git a/arch/ia64/include/asm/page.h b/arch/ia64/include/asm/page.h
index 1b99046..ba0b365 100644
@@ -82,17 +82,15 @@ do {                                                \
 } while (0)
 
 
-#define alloc_zeroed_user_highpage_movable(vma, vaddr)                 \
+#define vma_alloc_zeroed_movable_folio(vma, vaddr)                     \
 ({                                                                     \
-       struct page *page = alloc_page_vma(                             \
-               GFP_HIGHUSER_MOVABLE | __GFP_ZERO, vma, vaddr);         \
-       if (page)                                                       \
-               flush_dcache_page(page);                                \
-       page;                                                           \
+       struct folio *folio = vma_alloc_folio(                          \
+               GFP_HIGHUSER_MOVABLE | __GFP_ZERO, 0, vma, vaddr, false); \
+       if (folio)                                                      \
+               flush_dcache_folio(folio);                              \
+       folio;                                                          \
 })
 
-#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE
-
 #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
 
 #include <asm-generic/memory_model.h>
diff --git a/arch/m68k/include/asm/page_no.h b/arch/m68k/include/asm/page_no.h
index c9d0d84..abd2c3a 100644
@@ -13,9 +13,8 @@ extern unsigned long memory_end;
 #define clear_user_page(page, vaddr, pg)       clear_page(page)
 #define copy_user_page(to, from, vaddr, pg)    copy_page(to, from)
 
-#define alloc_zeroed_user_highpage_movable(vma, vaddr) \
-       alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, vma, vaddr)
-#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE
+#define vma_alloc_zeroed_movable_folio(vma, vaddr) \
+       vma_alloc_folio(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, 0, vma, vaddr, false)
 
 #define __pa(vaddr)            ((unsigned long)(vaddr))
 #define __va(paddr)            ((void *)((unsigned long)(paddr)))
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index 61dea67..8a2a3b5 100644
@@ -73,9 +73,8 @@ static inline void copy_page(void *to, void *from)
 #define clear_user_page(page, vaddr, pg)       clear_page(page)
 #define copy_user_page(to, from, vaddr, pg)    copy_page(to, from)
 
-#define alloc_zeroed_user_highpage_movable(vma, vaddr) \
-       alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, vma, vaddr)
-#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE
+#define vma_alloc_zeroed_movable_folio(vma, vaddr) \
+       vma_alloc_folio(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, 0, vma, vaddr, false)
 
 /*
  * These are used to make use of C type-checking..
diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h
index 9cc82f3..d18e5c3 100644
@@ -34,9 +34,8 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
        copy_page(to, from);
 }
 
-#define alloc_zeroed_user_highpage_movable(vma, vaddr) \
-       alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, vma, vaddr)
-#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE
+#define vma_alloc_zeroed_movable_folio(vma, vaddr) \
+       vma_alloc_folio(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, 0, vma, vaddr, false)
 
 #ifndef __pa
 #define __pa(x)                __phys_addr((unsigned long)(x))
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index d7097b8..e225094 100644
@@ -207,31 +207,30 @@ static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
 }
 #endif
 
-#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE
+#ifndef vma_alloc_zeroed_movable_folio
 /**
- * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move
- * @vma: The VMA the page is to be allocated for
- * @vaddr: The virtual address the page will be inserted into
+ * vma_alloc_zeroed_movable_folio - Allocate a zeroed page for a VMA.
+ * @vma: The VMA the page is to be allocated for.
+ * @vaddr: The virtual address the page will be inserted into.
  *
- * Returns: The allocated and zeroed HIGHMEM page
+ * This function will allocate a page suitable for inserting into this
+ * VMA at this virtual address.  It may be allocated from highmem or
+ * the movable zone.  An architecture may provide its own implementation.
  *
- * This function will allocate a page for a VMA that the caller knows will
- * be able to migrate in the future using move_pages() or reclaimed
- *
- * An architecture may override this function by defining
- * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE and providing their own
- * implementation.
+ * Return: A folio containing one allocated and zeroed page or NULL if
+ * we are out of memory.
  */
-static inline struct page *
-alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
+static inline
+struct folio *vma_alloc_zeroed_movable_folio(struct vm_area_struct *vma,
                                   unsigned long vaddr)
 {
-       struct page *page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);
+       struct folio *folio;
 
-       if (page)
-               clear_user_highpage(page, vaddr);
+       folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, vaddr, false);
+       if (folio)
+               clear_user_highpage(&folio->page, vaddr);
 
-       return page;
+       return folio;
 }
 #endif
 
diff --git a/mm/memory.c b/mm/memory.c
index 87b33b4..b6358ff 100644
@@ -3056,10 +3056,12 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
                goto oom;
 
        if (is_zero_pfn(pte_pfn(vmf->orig_pte))) {
-               new_page = alloc_zeroed_user_highpage_movable(vma,
-                                                             vmf->address);
-               if (!new_page)
+               struct folio *new_folio;
+
+               new_folio = vma_alloc_zeroed_movable_folio(vma, vmf->address);
+               if (!new_folio)
                        goto oom;
+               new_page = &new_folio->page;
        } else {
                new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma,
                                vmf->address);
@@ -3995,6 +3997,7 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
 {
        struct vm_area_struct *vma = vmf->vma;
        struct page *page;
+       struct folio *folio;
        vm_fault_t ret = 0;
        pte_t entry;
 
@@ -4044,11 +4047,12 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
        /* Allocate our own private page. */
        if (unlikely(anon_vma_prepare(vma)))
                goto oom;
-       page = alloc_zeroed_user_highpage_movable(vma, vmf->address);
-       if (!page)
+       folio = vma_alloc_zeroed_movable_folio(vma, vmf->address);
+       if (!folio)
                goto oom;
 
-       if (mem_cgroup_charge(page_folio(page), vma->vm_mm, GFP_KERNEL))
+       page = &folio->page;
+       if (mem_cgroup_charge(folio, vma->vm_mm, GFP_KERNEL))
                goto oom_free_page;
        cgroup_throttle_swaprate(page, GFP_KERNEL);
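
An architecture that wants its own zeroing behaviour now overrides the
generic helper by defining the macro to its own name, which makes the
#ifndef block in highmem.h compile out, rather than by defining
__HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE.  A condensed sketch of
that pattern, modelled on the arm64 hunks above (the gfp adjustment is a
placeholder):

	/* arch/<arch>/include/asm/page.h */
	struct folio *vma_alloc_zeroed_movable_folio(struct vm_area_struct *vma,
						     unsigned long vaddr);
	#define vma_alloc_zeroed_movable_folio vma_alloc_zeroed_movable_folio

	/* arch/<arch>/mm/fault.c (or wherever the arch implements it) */
	struct folio *vma_alloc_zeroed_movable_folio(struct vm_area_struct *vma,
						     unsigned long vaddr)
	{
		gfp_t flags = GFP_HIGHUSER_MOVABLE | __GFP_ZERO;

		/* Arch-specific gfp tweaks go here, e.g. arm64 adds
		 * __GFP_ZEROTAGS for VM_MTE vmas. */
		return vma_alloc_folio(flags, 0, vma, vaddr, false);
	}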