arm64: head: Move all finalise_el2 calls to after __enable_mmu
author Ard Biesheuvel <ardb@kernel.org>
Wed, 11 Jan 2023 10:22:31 +0000 (11:22 +0100)
committer Catalin Marinas <catalin.marinas@arm.com>
Tue, 24 Jan 2023 11:51:07 +0000 (11:51 +0000)
In the primary boot path, finalise_el2() is called much later than on
the secondary boot or resume-from-suspend paths, and this does not
appear to be intentional.

Since we aim to do as little as possible before enabling the MMU and
caches, align the secondary boot and resume-from-suspend paths with the
primary boot path, and defer the call to finalise_el2() until after the
MMU is turned on. This also removes the need to clean finalise_el2() to
the PoC once we enable support for booting with the MMU on.
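
For reference, the shape all three paths now converge on looks roughly
like this (a simplified sketch: labels, error handling and unrelated
code are elided, and the register holding the boot mode varies per
path, x20 for secondaries, x19 for resume):

	bl	init_kernel_el		// returns the boot mode in x0
	mov	x19, x0			// stash it in a callee-saved register
	...
	bl	__enable_mmu		// MMU and caches are on from here
	...
	mov	x0, x19			// finalise_el2() takes the boot mode
	bl	finalise_el2		// in x0, so reload it first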

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Link: https://lore.kernel.org/r/20230111102236.1430401-2-ardb@kernel.org
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
arch/arm64/kernel/head.S
arch/arm64/kernel/sleep.S

diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 952e17bd1c0b4f91a6c719afffc302e36c9e6f52..c4e12d466a5f35f0b73b73d320db52fd6a019868 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -587,7 +587,6 @@ SYM_FUNC_START_LOCAL(secondary_startup)
         * Common entry point for secondary CPUs.
         */
        mov     x20, x0                         // preserve boot mode
-       bl      finalise_el2
        bl      __cpu_secondary_check52bitva
 #if VA_BITS > 48
        ldr_l   x0, vabits_actual
@@ -603,6 +602,10 @@ SYM_FUNC_END(secondary_startup)
 SYM_FUNC_START_LOCAL(__secondary_switched)
        mov     x0, x20
        bl      set_cpu_boot_mode_flag
+
+       mov     x0, x20
+       bl      finalise_el2
+
        str_l   xzr, __early_cpu_boot_status, x3
        adr_l   x5, vectors
        msr     vbar_el1, x5
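
On the secondary path the boot mode is already stashed in x20 (the
"mov x20, x0" above), and it survives the intervening calls because
x19-x28 are callee-saved under AAPCS64, so the deferred call site only
needs to reload it into x0. For illustration, the callee plausibly
consumes that argument along these lines (a hypothetical sketch; the
real finalise_el2 lives in arch/arm64/kernel/hyp-stub.S and may
differ):

	SYM_FUNC_START(finalise_el2)
		cmp	x0, #BOOT_CPU_MODE_EL2	// nothing to finalise unless
		b.ne	1f			// we entered the kernel at EL2
		mov	x0, #HVC_FINALISE_EL2
		hvc	#0			// call into the hyp stub
	1:	ret
	SYM_FUNC_END(finalise_el2)
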
diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S
index 97c9de57725dfddb59ee4a19ccbd883dd3b24cc9..7b7c56e048346e97620cca1de6a61e106feed323 100644
--- a/arch/arm64/kernel/sleep.S
+++ b/arch/arm64/kernel/sleep.S
@@ -100,7 +100,7 @@ SYM_FUNC_END(__cpu_suspend_enter)
        .pushsection ".idmap.text", "awx"
 SYM_CODE_START(cpu_resume)
        bl      init_kernel_el
-       bl      finalise_el2
+       mov     x19, x0                 // preserve boot mode
 #if VA_BITS > 48
        ldr_l   x0, vabits_actual
 #endif
@@ -116,6 +116,9 @@ SYM_CODE_END(cpu_resume)
        .popsection
 
 SYM_FUNC_START(_cpu_resume)
+       mov     x0, x19
+       bl      finalise_el2
+
        mrs     x1, mpidr_el1
        adr_l   x8, mpidr_hash          // x8 = struct mpidr_hash virt address
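
The extra churn on the resume path stems from the section split visible
above: cpu_resume executes from .idmap.text with the MMU still off, and
x0 is clobbered right after init_kernel_el() returns (by the "ldr_l x0,
vabits_actual" above, for one), while _cpu_resume only runs once the
MMU is on and execution has moved to the kernel mapping. Hence the boot
mode is parked in callee-saved x19 across the transition; a minimal
sketch of the resulting flow, with unrelated code elided:

	SYM_CODE_START(cpu_resume)		// MMU off, running from the ID map
		bl	init_kernel_el		// x0 = boot mode
		mov	x19, x0			// x0 is about to be clobbered,
		...				// so keep the value in x19
	SYM_CODE_END(cpu_resume)

	SYM_FUNC_START(_cpu_resume)		// MMU on, kernel mapping
		mov	x0, x19			// hand the boot mode to
		bl	finalise_el2		// finalise_el2(), as before
		...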