mm/vmalloc: enable mapping of huge pages at pte level in vmalloc
author: Christophe Leroy <christophe.leroy@csgroup.eu>
Thu, 1 Jul 2021 01:48:09 +0000 (18:48 -0700)
committer: Linus Torvalds <torvalds@linux-foundation.org>
Thu, 1 Jul 2021 03:47:26 +0000 (20:47 -0700)
On some architectures like powerpc, there are huge pages that are mapped
at pte level.

Enable it in vmalloc.

For that, architectures can provide arch_vmap_pte_supported_shift() that
returns the shift for pages to map at pte level.

Link: https://lkml.kernel.org/r/2c717e3b1fba1894d890feb7669f83025bfa314d.1620795204.git.christophe.leroy@csgroup.eu
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Uladzislau Rezki <uladzislau.rezki@sony.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/vmalloc.h
mm/vmalloc.c

index 54ec073..1dabd6f 100644 (file)
@@ -112,6 +112,13 @@ static inline unsigned long arch_vmap_pte_range_map_size(unsigned long addr, uns
 }
 #endif
 
+#ifndef arch_vmap_pte_supported_shift
+static inline int arch_vmap_pte_supported_shift(unsigned long size)
+{
+       return PAGE_SHIFT;
+}
+#endif
+
 /*
  *     Highlevel APIs for driver use
  */
index fe0af8d..71dd29f 100644 (file)
@@ -2927,8 +2927,7 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
                return NULL;
        }
 
-       if (vmap_allow_huge && !(vm_flags & VM_NO_HUGE_VMAP) &&
-                       arch_vmap_pmd_supported(prot)) {
+       if (vmap_allow_huge && !(vm_flags & VM_NO_HUGE_VMAP)) {
                unsigned long size_per_node;
 
                /*
@@ -2941,11 +2940,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
                size_per_node = size;
                if (node == NUMA_NO_NODE)
                        size_per_node /= num_online_nodes();
-               if (size_per_node >= PMD_SIZE) {
+               if (arch_vmap_pmd_supported(prot) && size_per_node >= PMD_SIZE)
                        shift = PMD_SHIFT;
-                       align = max(real_align, 1UL << shift);
-                       size = ALIGN(real_size, 1UL << shift);
-               }
+               else
+                       shift = arch_vmap_pte_supported_shift(size_per_node);
+
+               align = max(real_align, 1UL << shift);
+               size = ALIGN(real_size, 1UL << shift);
        }
 
 again: