arm64: head: avoid cache invalidation when entering with the MMU on
Author:     Ard Biesheuvel <ardb@kernel.org>
AuthorDate: Wed, 11 Jan 2023 10:22:34 +0000 (11:22 +0100)
Commit:     Catalin Marinas <catalin.marinas@arm.com>
CommitDate: Tue, 24 Jan 2023 11:51:07 +0000 (11:51 +0000)
If we enter with the MMU on, there is no need for explicit cache
invalidation for stores to memory, as they will be coherent with the
caches.

Let's take advantage of this, and create the ID map with the MMU still
enabled if that is how we entered, and avoid any cache invalidation
calls in that case.
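
The cbnz test added below relies on x19 holding the MMU state recorded
by the bl record_mmu_state call at the top of primary_entry (nonzero
when we entered with the MMU on). As a rough, hypothetical sketch of
that idea (not the verbatim helper from head.S), the recording could
look like this:

	SYM_CODE_START_LOCAL(record_mmu_state_sketch)	// hypothetical name
		mrs	x19, CurrentEL
		cmp	x19, #CurrentEL_EL2
		mrs	x19, sctlr_el1			// assume EL1 first
		b.ne	0f
		mrs	x19, sctlr_el2			// entered at EL2 instead
	0:	and	x19, x19, #SCTLR_ELx_M		// keep only the MMU enable bit
		ret
	SYM_CODE_END(record_mmu_state_sketch)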

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Link: https://lore.kernel.org/r/20230111102236.1430401-5-ardb@kernel.org
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
arch/arm64/kernel/head.S

diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index c3b898e..d75f419 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -89,9 +89,9 @@
 SYM_CODE_START(primary_entry)
        bl      record_mmu_state
        bl      preserve_boot_args
+       bl      create_idmap
        bl      init_kernel_el                  // w0=cpu_boot_mode
        mov     x20, x0
-       bl      create_idmap
 
        /*
         * The following calls CPU setup code, see arch/arm64/mm/proc.S for
@@ -377,12 +377,13 @@ SYM_FUNC_START_LOCAL(create_idmap)
         * accesses (MMU disabled), invalidate those tables again to
         * remove any speculatively loaded cache lines.
         */
+       cbnz    x19, 0f                         // skip cache invalidation if MMU is on
        dmb     sy
 
        adrp    x0, init_idmap_pg_dir
        adrp    x1, init_idmap_pg_end
        bl      dcache_inval_poc
-       ret     x28
+0:     ret     x28
 SYM_FUNC_END(create_idmap)
 
 SYM_FUNC_START_LOCAL(create_kernel_mapping)
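
For reference, with the second hunk applied the tail of create_idmap
reads roughly as follows (reassembled from the + and context lines
above; the unchanged comment above the cbnz is omitted):

	cbnz	x19, 0f				// skip cache invalidation if MMU is on
	dmb	sy

	adrp	x0, init_idmap_pg_dir
	adrp	x1, init_idmap_pg_end
	bl	dcache_inval_poc
0:	ret	x28
SYM_FUNC_END(create_idmap)

If x19 is nonzero (the MMU was on at entry), the dmb and the call to
dcache_inval_poc are skipped and the function returns directly to the
address preserved in x28.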