From e8d13cced5c5038cc93de9561cf2cb4f22205061 Mon Sep 17 00:00:00 2001
From: Ard Biesheuvel <ardb@kernel.org>
Date: Fri, 24 Jun 2022 17:06:33 +0200
Subject: [PATCH] arm64: head: move assignment of idmap_t0sz to C code

Setting idmap_t0sz involves fiddling with the caches if done with the
MMU off. Since we will be creating an initial ID map with the MMU and
caches off, and the permanent ID map with the MMU and caches on, let's
move this assignment of idmap_t0sz out of the startup code, and replace
it with a macro that simply issues the three instructions needed to
calculate the value wherever it is needed before the MMU is turned on.

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Link: https://lore.kernel.org/r/20220624150651.1358849-4-ardb@kernel.org
Signed-off-by: Will Deacon <will@kernel.org>
---
 arch/arm64/include/asm/assembler.h   | 14 ++++++++++++++
 arch/arm64/include/asm/mmu_context.h |  2 +-
 arch/arm64/kernel/head.S             | 13 +------------
 arch/arm64/mm/mmu.c                  |  4 +++-
 arch/arm64/mm/proc.S                 |  2 +-
 5 files changed, 20 insertions(+), 15 deletions(-)

diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 8c5a61a..9468f45 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -360,6 +360,20 @@ alternative_cb_end
 	.endm
 
 /*
+ * idmap_get_t0sz - get the T0SZ value needed to cover the ID map
+ *
+ * Calculate the maximum allowed value for TCR_EL1.T0SZ so that the
+ * entire ID map region can be mapped. As T0SZ == (64 - #bits used),
+ * this number conveniently equals the number of leading zeroes in
+ * the physical address of _end.
+ */
+	.macro	idmap_get_t0sz, reg
+	adrp	\reg, _end
+	orr	\reg, \reg, #(1 << VA_BITS_MIN) - 1
+	clz	\reg, \reg
+	.endm
+
+/*
  * tcr_compute_pa_size - set TCR.(I)PS to the highest supported
  * ID_AA64MMFR0_EL1.PARange value
  *
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index 6770667..6ac0086 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -60,7 +60,7 @@ static inline void cpu_switch_mm(pgd_t *pgd, struct mm_struct *mm)
  * TCR_T0SZ(VA_BITS), unless system RAM is positioned very high in
  * physical memory, in which case it will be smaller.
  */
-extern u64 idmap_t0sz;
+extern int idmap_t0sz;
 extern u64 idmap_ptrs_per_pgd;
 
 /*
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index dc07858..7f361bc 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -299,22 +299,11 @@ SYM_FUNC_START_LOCAL(__create_page_tables)
 	 * physical address space. So for the ID map, use an extended virtual
 	 * range in that case, and configure an additional translation level
 	 * if needed.
-	 *
-	 * Calculate the maximum allowed value for TCR_EL1.T0SZ so that the
-	 * entire ID map region can be mapped. As T0SZ == (64 - #bits used),
-	 * this number conveniently equals the number of leading zeroes in
-	 * the physical address of __idmap_text_end.
 	 */
-	adrp	x5, __idmap_text_end
-	clz	x5, x5
+	idmap_get_t0sz x5
 	cmp	x5, TCR_T0SZ(VA_BITS_MIN) // default T0SZ small enough?
 	b.ge	1f			// .. then skip VA range extension
 
-	adr_l	x6, idmap_t0sz
-	str	x5, [x6]
-	dmb	sy
-	dc	ivac, x6		// Invalidate potentially stale cache line
-
 #if (VA_BITS < 48)
 #define EXTRA_SHIFT	(PGDIR_SHIFT + PAGE_SHIFT - 3)
 #define EXTRA_PTRS	(1 << (PHYS_MASK_SHIFT - EXTRA_SHIFT))
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 88b4177..9b4fc78 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -43,7 +43,7 @@
 #define NO_CONT_MAPPINGS	BIT(1)
 #define NO_EXEC_MAPPINGS	BIT(2)	/* assumes FEAT_HPDS is not used */
 
-u64 idmap_t0sz = TCR_T0SZ(VA_BITS_MIN);
+int idmap_t0sz __ro_after_init;
 u64 idmap_ptrs_per_pgd = PTRS_PER_PGD;
 
 #if VA_BITS > 48
@@ -771,6 +771,8 @@ void __init paging_init(void)
 {
 	pgd_t *pgdp = pgd_set_fixmap(__pa_symbol(swapper_pg_dir));
 
+	idmap_t0sz = 63UL - __fls(__pa_symbol(_end) | GENMASK(VA_BITS_MIN - 1, 0));
+
 	map_kernel(pgdp);
 	map_mem(pgdp);
 
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 50bbed9..e802bad 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -469,7 +469,7 @@ SYM_FUNC_START(__cpu_setup)
 	add		x9, x9, #64
 	tcr_set_t1sz	tcr, x9
 #else
-	ldr_l		x9, idmap_t0sz
+	idmap_get_t0sz	x9
 #endif
 	tcr_set_t0sz	tcr, x9
 
-- 
2.7.4
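
For readers who want to sanity-check the arithmetic: the adrp/orr/clz
sequence in the new idmap_get_t0sz macro and the C assignment added to
paging_init() compute the same value, since 63 - __fls(x) equals the
number of leading zeroes of any non-zero 64-bit x. Below is a minimal
userspace sketch of that computation, not kernel code: it assumes
VA_BITS_MIN is 48, uses an invented physical address for _end, and
defines small stand-ins for the kernel's GENMASK() and __fls() helpers.

#include <stdint.h>
#include <stdio.h>

#define VA_BITS_MIN	48

/* stand-in for the kernel's GENMASK(): bits h..l set */
#define GENMASK(h, l)	(((~0ULL) >> (63 - (h))) & (~0ULL << (l)))

/* stand-in for the kernel's __fls(): index of the most significant set bit */
static unsigned int __fls(uint64_t x)
{
	return 63 - __builtin_clzll(x);
}

int main(void)
{
	uint64_t pa_end = 0x40f2d000ULL;	/* invented PA of _end */

	/*
	 * ORing in bits [VA_BITS_MIN-1:0] guarantees that at least
	 * VA_BITS_MIN address bits count as "used", capping T0SZ at
	 * 64 - VA_BITS_MIN so the ID map range never shrinks below
	 * the default VA range.
	 */
	uint64_t masked = pa_end | GENMASK(VA_BITS_MIN - 1, 0);

	/* what the idmap_get_t0sz macro computes with adrp/orr/clz */
	unsigned int t0sz_asm = __builtin_clzll(masked);

	/* what paging_init() now computes in C */
	unsigned int t0sz_c = 63 - __fls(masked);

	printf("t0sz_asm=%u t0sz_c=%u\n", t0sz_asm, t0sz_c);	/* both 16 */
	return 0;
}

If _end landed above the 48-bit boundary (say at 0x1000000000000), both
expressions would yield a value below 16, and the head.S code would fall
through into the VA range extension path instead of branching past it.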