mm/memory_hotplug: allow architecture to override memmap on memory support check
author Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Tue, 8 Aug 2023 09:14:58 +0000 (14:44 +0530)
committer Andrew Morton <akpm@linux-foundation.org>
Mon, 21 Aug 2023 20:37:48 +0000 (13:37 -0700)
Some architectures want different restrictions on when memmap-on-memory can
be used.  Hence add an architecture-specific override,
arch_supports_memmap_on_memory().

The existing PMD_SIZE alignment check is moved into the default
implementation of that override.
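
With the override in place, an architecture that needs a different rule only
has to provide its own arch_supports_memmap_on_memory() before the generic
fallback below is seen.  A minimal, hypothetical sketch (the header location
and the relaxed PAGE_SIZE rule are illustrative only, not part of this
patch):

/* Hypothetical arch header pulled in by mm/memory_hotplug.c */
#define arch_supports_memmap_on_memory arch_supports_memmap_on_memory
static inline bool arch_supports_memmap_on_memory(unsigned long vmemmap_size)
{
	/*
	 * Illustrative relaxation: only require the memory block's vmemmap
	 * to cover whole base pages, e.g. on a configuration that maps the
	 * vmemmap with base pages anyway.
	 */
	return IS_ALIGNED(vmemmap_size, PAGE_SIZE);
}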

Link: https://lkml.kernel.org/r/20230808091501.287660-4-aneesh.kumar@linux.ibm.com
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Acked-by: David Hildenbrand <david@redhat.com>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Vishal Verma <vishal.l.verma@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/memory_hotplug.c

index eca32cc..746cb7c 100644
@@ -1247,10 +1247,26 @@ static int online_memory_block(struct memory_block *mem, void *arg)
        return device_online(&mem->dev);
 }
 
+static inline unsigned long memory_block_memmap_size(void)
+{
+       return PHYS_PFN(memory_block_size_bytes()) * sizeof(struct page);
+}
+
+#ifndef arch_supports_memmap_on_memory
+static inline bool arch_supports_memmap_on_memory(unsigned long vmemmap_size)
+{
+       /*
+        * As default, we want the vmemmap to span a complete PMD such that we
+        * can map the vmemmap using a single PMD if supported by the
+        * architecture.
+        */
+       return IS_ALIGNED(vmemmap_size, PMD_SIZE);
+}
+#endif
+
 static bool mhp_supports_memmap_on_memory(unsigned long size)
 {
-       unsigned long nr_vmemmap_pages = size / PAGE_SIZE;
-       unsigned long vmemmap_size = nr_vmemmap_pages * sizeof(struct page);
+       unsigned long vmemmap_size = memory_block_memmap_size();
        unsigned long remaining_size = size - vmemmap_size;
 
        /*
@@ -1281,8 +1297,8 @@ static bool mhp_supports_memmap_on_memory(unsigned long size)
         */
        return mhp_memmap_on_memory() &&
               size == memory_block_size_bytes() &&
-              IS_ALIGNED(vmemmap_size, PMD_SIZE) &&
-              IS_ALIGNED(remaining_size, (pageblock_nr_pages << PAGE_SHIFT));
+              IS_ALIGNED(remaining_size, (pageblock_nr_pages << PAGE_SHIFT)) &&
+              arch_supports_memmap_on_memory(vmemmap_size);
 }
 
 /*
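
Behaviour on a common x86-64 configuration is unchanged: assuming a 128 MiB
memory block, 4 KiB base pages and a 64-byte struct page,
memory_block_memmap_size() is 32768 * 64 bytes = 2 MiB, which equals PMD_SIZE
there, so the default arch_supports_memmap_on_memory() returns true just as
the open-coded IS_ALIGNED(vmemmap_size, PMD_SIZE) check did before.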