arm64: mm: make vabits_actual a build time constant if possible
authorArd Biesheuvel <ardb@kernel.org>
Fri, 24 Jun 2022 15:06:32 +0000 (17:06 +0200)
committerWill Deacon <will@kernel.org>
Fri, 24 Jun 2022 16:18:09 +0000 (17:18 +0100)
Currently, we only support 52-bit virtual addressing on 64k-page
configurations, and in all other cases, vabits_actual is guaranteed to
equal VA_BITS (== VA_BITS_MIN). So get rid of the variable entirely in
that case.

While at it, move the assignment out of the asm entry code - it has no
need to be there.

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Link: https://lore.kernel.org/r/20220624150651.1358849-3-ardb@kernel.org
Signed-off-by: Will Deacon <will@kernel.org>
arch/arm64/include/asm/memory.h
arch/arm64/kernel/head.S
arch/arm64/mm/init.c
arch/arm64/mm/mmu.c

index 0af70d9abede3d9d65163101da185b4f9d749fa8..c751cd9b94f8c93ab391661642bae8493dd005eb 100644 (file)
 #include <linux/types.h>
 #include <asm/bug.h>
 
+#if VA_BITS > 48
 extern u64                     vabits_actual;
+#else
+#define vabits_actual          ((u64)VA_BITS)
+#endif
 
 extern s64                     memstart_addr;
 /* PHYS_OFFSET - the physical address of the start of memory. */
index 1cdecce552bb2ef9775bb8cde41b14e00be0c4b5..dc07858eb673d38ed9fd10b3685f841a1bfcd62e 100644 (file)
@@ -293,19 +293,6 @@ SYM_FUNC_START_LOCAL(__create_page_tables)
        adrp    x0, idmap_pg_dir
        adrp    x3, __idmap_text_start          // __pa(__idmap_text_start)
 
-#ifdef CONFIG_ARM64_VA_BITS_52
-       mrs_s   x6, SYS_ID_AA64MMFR2_EL1
-       and     x6, x6, #(0xf << ID_AA64MMFR2_LVA_SHIFT)
-       mov     x5, #52
-       cbnz    x6, 1f
-#endif
-       mov     x5, #VA_BITS_MIN
-1:
-       adr_l   x6, vabits_actual
-       str     x5, [x6]
-       dmb     sy
-       dc      ivac, x6                // Invalidate potentially stale cache line
-
        /*
         * VA_BITS may be too small to allow for an ID mapping to be created
         * that covers system RAM if that is located sufficiently high in the
@@ -713,7 +700,7 @@ SYM_FUNC_START(__enable_mmu)
 SYM_FUNC_END(__enable_mmu)
 
 SYM_FUNC_START(__cpu_secondary_check52bitva)
-#ifdef CONFIG_ARM64_VA_BITS_52
+#if VA_BITS > 48
        ldr_l   x0, vabits_actual
        cmp     x0, #52
        b.ne    2f
index 339ee84e5a61a0bf97e8d49ccd2bca824e467396..1faa6760895e898ce9bb1787344df474e45e4ea4 100644 (file)
@@ -265,7 +265,20 @@ early_param("mem", early_mem);
 
 void __init arm64_memblock_init(void)
 {
-       s64 linear_region_size = PAGE_END - _PAGE_OFFSET(vabits_actual);
+       s64 linear_region_size;
+
+#if VA_BITS > 48
+       if (cpuid_feature_extract_unsigned_field(
+                               read_sysreg_s(SYS_ID_AA64MMFR2_EL1),
+                               ID_AA64MMFR2_LVA_SHIFT))
+               vabits_actual = VA_BITS;
+
+       /* make the variable visible to secondaries with the MMU off */
+       dcache_clean_inval_poc((u64)&vabits_actual,
+                              (u64)&vabits_actual + sizeof(vabits_actual));
+#endif
+
+       linear_region_size = PAGE_END - _PAGE_OFFSET(vabits_actual);
 
        /*
         * Corner case: 52-bit VA capable systems running KVM in nVHE mode may
index fde2b326419a97859ab9010bb185f7110c64a873..88b4177254a0c732c2b70f5624c3b730f263d1f0 100644 (file)
 u64 idmap_t0sz = TCR_T0SZ(VA_BITS_MIN);
 u64 idmap_ptrs_per_pgd = PTRS_PER_PGD;
 
-u64 __section(".mmuoff.data.write") vabits_actual;
+#if VA_BITS > 48
+u64 vabits_actual __ro_after_init = VA_BITS_MIN;
 EXPORT_SYMBOL(vabits_actual);
+#endif
 
 u64 kimage_vaddr __ro_after_init = (u64)&_text;
 EXPORT_SYMBOL(kimage_vaddr);