mm/vmemmap: improve vmemmap_can_optimize and allow architectures to override
author    Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
          Mon, 24 Jul 2023 19:07:49 +0000 (00:37 +0530)
committer Andrew Morton <akpm@linux-foundation.org>
          Fri, 18 Aug 2023 17:12:53 +0000 (10:12 -0700)
The DAX vmemmap optimization requires a minimum of 2 PAGE_SIZE areas
within the vmemmap so that the tail page mapping can point to the second
PAGE_SIZE area.  Enforce that in the vmemmap_can_optimize() function.
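
As a worked example (assuming 4K base pages and a 64-byte struct page,
typical but not guaranteed values): a 2M device-dax compound page spans
512 base pages, whose page structs occupy 512 * 64 = 32K, i.e. 8 vmemmap
pages, which satisfies the 2-page minimum; a 64K compound page needs
only 1K of page structs, less than one vmemmap page, so it cannot be
optimized.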

Architectures like powerpc also want to enable the vmemmap optimization
conditionally (only with radix MMU translation).  Hence allow an
architecture override, as sketched below.
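
A minimal sketch of what such an override might look like (illustrative
only, not part of this patch; the radix_enabled() test and the exact
header/source placement are assumptions):

    /* in an arch-specific header seen before the generic fallback */
    #define vmemmap_can_optimize vmemmap_can_optimize
    bool vmemmap_can_optimize(struct vmem_altmap *altmap,
                              struct dev_pagemap *pgmap);

    /* in arch pgtable code */
    bool vmemmap_can_optimize(struct vmem_altmap *altmap,
                              struct dev_pagemap *pgmap)
    {
        /* Optimize only when radix MMU translation is active. */
        if (radix_enabled())
            return __vmemmap_can_optimize(altmap, pgmap);
        return false;
    }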

Link: https://lkml.kernel.org/r/20230724190759.483013-4-aneesh.kumar@linux.ibm.com
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Reviewed-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Joao Martins <joao.m.martins@oracle.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/mm.h
mm/mm_init.c

index 21299a0..d4ce73c 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -3632,13 +3632,32 @@ void vmemmap_free(unsigned long start, unsigned long end,
                struct vmem_altmap *altmap);
 #endif
 
+#define VMEMMAP_RESERVE_NR     2
 #ifdef CONFIG_ARCH_WANT_OPTIMIZE_VMEMMAP
-static inline bool vmemmap_can_optimize(struct vmem_altmap *altmap,
-                                          struct dev_pagemap *pgmap)
+static inline bool __vmemmap_can_optimize(struct vmem_altmap *altmap,
+                                         struct dev_pagemap *pgmap)
 {
-       return is_power_of_2(sizeof(struct page)) &&
-               pgmap && (pgmap_vmemmap_nr(pgmap) > 1) && !altmap;
+       unsigned long nr_pages;
+       unsigned long nr_vmemmap_pages;
+
+       if (!pgmap || !is_power_of_2(sizeof(struct page)))
+               return false;
+
+       nr_pages = pgmap_vmemmap_nr(pgmap);
+       nr_vmemmap_pages = ((nr_pages * sizeof(struct page)) >> PAGE_SHIFT);
+       /*
+        * For vmemmap optimization with DAX we need minimum 2 vmemmap
+        * pages. See layout diagram in Documentation/mm/vmemmap_dedup.rst
+        */
+       return !altmap && (nr_vmemmap_pages > VMEMMAP_RESERVE_NR);
 }
+/*
+ * If we don't have an architecture override, use the generic rule
+ */
+#ifndef vmemmap_can_optimize
+#define vmemmap_can_optimize __vmemmap_can_optimize
+#endif
+
 #else
 static inline bool vmemmap_can_optimize(struct vmem_altmap *altmap,
                                           struct dev_pagemap *pgmap)
index acb0ac1..641c56f 100644
--- a/mm/mm_init.c
+++ b/mm/mm_init.c
@@ -1020,7 +1020,7 @@ static inline unsigned long compound_nr_pages(struct vmem_altmap *altmap,
        if (!vmemmap_can_optimize(altmap, pgmap))
                return pgmap_vmemmap_nr(pgmap);
 
-       return 2 * (PAGE_SIZE / sizeof(struct page));
+       return VMEMMAP_RESERVE_NR * (PAGE_SIZE / sizeof(struct page));
 }
 
 static void __ref memmap_init_compound(struct page *head,
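
With the optimization in place, compound_nr_pages() tells
memmap_init_compound() to initialize only
VMEMMAP_RESERVE_NR * (PAGE_SIZE / sizeof(struct page)) page structs per
compound page, i.e. 2 * (4096 / 64) = 128 under the assumptions above,
instead of the full pgmap_vmemmap_nr() count (512 for a 2M compound
page, 262144 for 1G).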