From: Jun Yao
Date: Mon, 24 Sep 2018 13:51:13 +0000 (+0100)
Subject: arm64/mm: Pass ttbr1 as a parameter to __enable_mmu()
X-Git-Tag: v5.15~7637^2~42
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=693d5639b44a8f3787444902d3600edc7e0105a2;p=platform%2Fkernel%2Flinux-starfive.git

arm64/mm: Pass ttbr1 as a parameter to __enable_mmu()

In subsequent patches we'll use a transient pgd during the primary cpu's
boot process. To make this work while allowing secondary cpus to use the
swapper_pg_dir, we need to pass the relevant TTBR1 pgd as a parameter
to __enable_mmu().

This patch updates __enable_mmu() to take this as a parameter, updating
callsites to pass swapper_pg_dir for now.

There should be no functional change as a result of this patch.

Signed-off-by: Jun Yao
Reviewed-by: James Morse
[Mark: simplify assembly, clarify commit message]
Signed-off-by: Mark Rutland
Signed-off-by: Catalin Marinas
---

diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index b085306..7983ddf 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -706,6 +706,7 @@ secondary_startup:
 	 * Common entry point for secondary CPUs.
 	 */
 	bl	__cpu_setup			// initialise processor
+	adrp	x1, swapper_pg_dir
 	bl	__enable_mmu
 	ldr	x8, =__secondary_switched
 	br	x8
@@ -748,6 +749,7 @@ ENDPROC(__secondary_switched)
  * Enable the MMU.
  *
  * x0  = SCTLR_EL1 value for turning on the MMU.
+ * x1  = TTBR1_EL1 value
  *
  * Returns to the caller via x30/lr. This requires the caller to be covered
  * by the .idmap.text section.
@@ -756,17 +758,16 @@ ENDPROC(__secondary_switched)
  * If it isn't, park the CPU
  */
 ENTRY(__enable_mmu)
-	mrs	x1, ID_AA64MMFR0_EL1
-	ubfx	x2, x1, #ID_AA64MMFR0_TGRAN_SHIFT, 4
+	mrs	x2, ID_AA64MMFR0_EL1
+	ubfx	x2, x2, #ID_AA64MMFR0_TGRAN_SHIFT, 4
 	cmp	x2, #ID_AA64MMFR0_TGRAN_SUPPORTED
 	b.ne	__no_granule_support
-	update_early_cpu_boot_status 0, x1, x2
-	adrp	x1, idmap_pg_dir
-	adrp	x2, swapper_pg_dir
-	phys_to_ttbr x3, x1
-	phys_to_ttbr x4, x2
-	msr	ttbr0_el1, x3			// load TTBR0
-	msr	ttbr1_el1, x4			// load TTBR1
+	update_early_cpu_boot_status 0, x2, x3
+	adrp	x2, idmap_pg_dir
+	phys_to_ttbr x1, x1
+	phys_to_ttbr x2, x2
+	msr	ttbr0_el1, x2			// load TTBR0
+	msr	ttbr1_el1, x1			// load TTBR1
 	isb
 	msr	sctlr_el1, x0
 	isb
@@ -823,6 +824,7 @@ __primary_switch:
 	mrs	x20, sctlr_el1			// preserve old SCTLR_EL1 value
 #endif

+	adrp	x1, swapper_pg_dir
 	bl	__enable_mmu
#ifdef CONFIG_RELOCATABLE
 	bl	__relocate_kernel
diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S
index bebec8e..3e53ffa 100644
--- a/arch/arm64/kernel/sleep.S
+++ b/arch/arm64/kernel/sleep.S
@@ -101,6 +101,7 @@ ENTRY(cpu_resume)
 	bl	el2_setup		// if in EL2 drop to EL1 cleanly
 	bl	__cpu_setup
 	/* enable the MMU early - so we can access sleep_save_stash by va */
+	adrp	x1, swapper_pg_dir
 	bl	__enable_mmu
 	ldr	x8, =_cpu_resume
 	br	x8
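
For illustration only, not part of the patch above: under the new calling convention, a caller leaves the SCTLR_EL1 value in x0 (as returned by __cpu_setup) and the physical address of the desired TTBR1 pgd in x1 before branching to __enable_mmu. The sketch below uses a hypothetical my_pg_dir symbol to stand in for whichever pgd the caller wants installed (swapper_pg_dir in this patch, a transient pgd in later patches of the series).

	bl	__cpu_setup		// initialise processor, x0 = SCTLR_EL1 value
	adrp	x1, my_pg_dir		// x1 = TTBR1_EL1 pgd, physical (hypothetical symbol)
	bl	__enable_mmu		// loads idmap_pg_dir into TTBR0, x1 into TTBR1

As the comment block in head.S notes, __enable_mmu returns via x30/lr, so the caller must be covered by the .idmap.text section.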