arm64: Force SPARSEMEM_VMEMMAP as the only memory management model
author	Catalin Marinas <catalin.marinas@arm.com>
	Tue, 20 Apr 2021 09:35:59 +0000 (10:35 +0100)
committer	Catalin Marinas <catalin.marinas@arm.com>
	Fri, 23 Apr 2021 13:18:21 +0000 (14:18 +0100)
Currently arm64 allows a choice of FLATMEM, SPARSEMEM and
SPARSEMEM_VMEMMAP. However, only the latter is tested regularly. FLATMEM
does not seem to boot in certain configurations (guest under KVM with
QEMU as a VMM). Since the reduction of SECTION_SIZE_BITS to 27 (4K
pages) or 29 (64K pages), there is little remaining concern about the
memory wasted on the mem_map array by SPARSEMEM.
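
For a sense of scale, a rough user-space sketch (not part of this patch;
it assumes the common 64-byte struct page size, which is really
configuration dependent) of the per-section mem_map overhead at the
reduced SECTION_SIZE_BITS:

#include <stdio.h>

int main(void)
{
	/*
	 * Illustration only, not kernel code: worst-case mem_map cost of
	 * a single (possibly mostly empty) section at the reduced
	 * SECTION_SIZE_BITS, assuming a 64-byte struct page.
	 */
	const unsigned long page_size[]    = { 4096, 65536 };
	const unsigned int  section_bits[] = { 27, 29 };
	const unsigned long struct_page_sz = 64;

	for (int i = 0; i < 2; i++) {
		unsigned long section = 1UL << section_bits[i];
		unsigned long pages   = section / page_size[i];
		unsigned long mem_map = pages * struct_page_sz;

		printf("%luK pages: %lu MB section -> %lu KB mem_map (%.2f%%)\n",
		       page_size[i] >> 10, section >> 20, mem_map >> 10,
		       100.0 * mem_map / section);
	}
	return 0;
}

With 128MB (4K pages) or 512MB (64K pages) sections, a sparsely
populated section costs at most ~2MB or ~512KB of mem_map respectively,
i.e. well under 2% of the section size.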

Make SPARSEMEM_VMEMMAP the only available option, non-selectable, and
remove the corresponding #ifdefs under arch/arm64/.

Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will@kernel.org>
Acked-by: Will Deacon <will@kernel.org>
Acked-by: Ard Biesheuvel <ardb@kernel.org>
Acked-by: Marc Zyngier <maz@kernel.org>
Reviewed-by: Anshuman Khandual <anshuman.khandual@arm.com>
Acked-by: Mike Rapoport <rppt@linux.ibm.com>
Link: https://lore.kernel.org/r/20210420093559.23168-1-catalin.marinas@arm.com
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
arch/arm64/Kconfig
arch/arm64/include/asm/kernel-pgtable.h
arch/arm64/include/asm/memory.h
arch/arm64/include/asm/sparsemem.h
arch/arm64/mm/init.c
arch/arm64/mm/mmu.c
arch/arm64/mm/ptdump.c

index 9b4d629f762882aace2196555058472f4d0d54b0..01c294035928b2bc96e0fb56dddce0ea341062f2 100644 (file)
@@ -1040,15 +1040,7 @@ source "kernel/Kconfig.hz"
 config ARCH_SPARSEMEM_ENABLE
        def_bool y
        select SPARSEMEM_VMEMMAP_ENABLE
-
-config ARCH_SPARSEMEM_DEFAULT
-       def_bool ARCH_SPARSEMEM_ENABLE
-
-config ARCH_SELECT_MEMORY_MODEL
-       def_bool ARCH_SPARSEMEM_ENABLE
-
-config ARCH_FLATMEM_ENABLE
-       def_bool !NUMA
+       select SPARSEMEM_VMEMMAP
 
 config HW_PERF_EVENTS
        def_bool y
index 587c504a4c8b2d5162a23c566283e6b019d60dff..d44df9d62fc9c2f9c1fcc492945c903c6baaeb6a 100644 (file)
  * has a direct correspondence, and needs to appear sufficiently aligned
  * in the virtual address space.
  */
-#if defined(CONFIG_SPARSEMEM_VMEMMAP) && ARM64_MEMSTART_SHIFT < SECTION_SIZE_BITS
+#if ARM64_MEMSTART_SHIFT < SECTION_SIZE_BITS
 #define ARM64_MEMSTART_ALIGN   (1UL << SECTION_SIZE_BITS)
 #else
 #define ARM64_MEMSTART_ALIGN   (1UL << ARM64_MEMSTART_SHIFT)
index b943879c1c242feeebf0adfce6fd13e509738edc..15018dc59554e1e80b5123f8129fe3dfc54f9c4b 100644 (file)
@@ -329,7 +329,7 @@ static inline void *phys_to_virt(phys_addr_t x)
  */
 #define ARCH_PFN_OFFSET                ((unsigned long)PHYS_PFN_OFFSET)
 
-#if !defined(CONFIG_SPARSEMEM_VMEMMAP) || defined(CONFIG_DEBUG_VIRTUAL)
+#if defined(CONFIG_DEBUG_VIRTUAL)
 #define page_to_virt(x)        ({                                              \
        __typeof__(x) __page = x;                                       \
        void *__addr = __va(page_to_phys(__page));                      \
@@ -349,7 +349,7 @@ static inline void *phys_to_virt(phys_addr_t x)
        u64 __addr = VMEMMAP_START + (__idx * sizeof(struct page));     \
        (struct page *)__addr;                                          \
 })
-#endif /* !CONFIG_SPARSEMEM_VMEMMAP || CONFIG_DEBUG_VIRTUAL */
+#endif /* CONFIG_DEBUG_VIRTUAL */
 
 #define virt_addr_valid(addr)  ({                                      \
        __typeof__(addr) __addr = __tag_reset(addr);                    \
index eb4a75d720ed85b3798fd887d50888b33edf8494..4b73463423c3108c1d14995aa43186e31601d633 100644 (file)
@@ -5,7 +5,6 @@
 #ifndef __ASM_SPARSEMEM_H
 #define __ASM_SPARSEMEM_H
 
-#ifdef CONFIG_SPARSEMEM
 #define MAX_PHYSMEM_BITS       CONFIG_ARM64_PA_BITS
 
 /*
@@ -27,6 +26,4 @@
 #define SECTION_SIZE_BITS 27
 #endif /* CONFIG_ARM64_64K_PAGES */
 
-#endif /* CONFIG_SPARSEMEM*/
-
 #endif
index 3685e12aba9b62b170cbd285dae412c339cae350..a205538aa1d5a4be2d41c453d9ea1dea5a9eaa40 100644 (file)
@@ -220,6 +220,7 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
 int pfn_valid(unsigned long pfn)
 {
        phys_addr_t addr = PFN_PHYS(pfn);
+       struct mem_section *ms;
 
        /*
         * Ensure the upper PAGE_SHIFT bits are clear in the
@@ -230,10 +231,6 @@ int pfn_valid(unsigned long pfn)
        if (PHYS_PFN(addr) != pfn)
                return 0;
 
-#ifdef CONFIG_SPARSEMEM
-{
-       struct mem_section *ms;
-
        if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
                return 0;
 
@@ -252,8 +249,7 @@ int pfn_valid(unsigned long pfn)
         */
        if (!early_section(ms))
                return pfn_section_valid(ms, pfn);
-}
-#endif
+
        return memblock_is_map_memory(addr);
 }
 EXPORT_SYMBOL(pfn_valid);
index fac957ff51872e143f8992743153a54ad6182470..af0ebcad3e1f6477aedde2e56f03b16066ae3e14 100644 (file)
@@ -1113,7 +1113,6 @@ static void free_empty_tables(unsigned long addr, unsigned long end,
 }
 #endif
 
-#ifdef CONFIG_SPARSEMEM_VMEMMAP
 #if !ARM64_SWAPPER_USES_SECTION_MAPS
 int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
                struct vmem_altmap *altmap)
@@ -1177,7 +1176,6 @@ void vmemmap_free(unsigned long start, unsigned long end,
        free_empty_tables(start, end, VMEMMAP_START, VMEMMAP_END);
 #endif
 }
-#endif /* CONFIG_SPARSEMEM_VMEMMAP */
 
 static inline pud_t *fixmap_pud(unsigned long addr)
 {
index a50e92ea1878e0e787771333e736cc1000d104df..a1937dfff31c37160310c9b1a408cd69b71a007f 100644 (file)
@@ -51,10 +51,8 @@ static struct addr_marker address_markers[] = {
        { FIXADDR_TOP,                  "Fixmap end" },
        { PCI_IO_START,                 "PCI I/O start" },
        { PCI_IO_END,                   "PCI I/O end" },
-#ifdef CONFIG_SPARSEMEM_VMEMMAP
        { VMEMMAP_START,                "vmemmap start" },
        { VMEMMAP_START + VMEMMAP_SIZE, "vmemmap end" },
-#endif
        { -1,                           NULL },
 };