powerpc/book3s64/keys/kuap: Reset AMR/IAMR values on kexec
authorAneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Thu, 9 Jul 2020 03:29:41 +0000 (08:59 +0530)
committerMichael Ellerman <mpe@ellerman.id.au>
Mon, 20 Jul 2020 12:57:59 +0000 (22:57 +1000)
As we kexec across kernels that use AMR/IAMR for different purposes,
we need to ensure that new kernels get kexec'd with a reset value
of AMR/IAMR. For example, the new kernel can use key 0 for kernel mapping
while the old AMR value prevents access to key 0.

This patch also removes the reset of IAMR and AMOR in kexec_sequence. The
AMOR reset is not needed, and the IAMR reset is partial (it doesn't do the
reset on secondary CPUs) and is redundant with this patch.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20200709032946.881753-19-aneesh.kumar@linux.ibm.com
arch/powerpc/include/asm/book3s/64/kexec.h [new file with mode: 0644]
arch/powerpc/include/asm/kexec.h
arch/powerpc/kernel/misc_64.S
arch/powerpc/kexec/core_64.c
arch/powerpc/mm/book3s64/pgtable.c

diff --git a/arch/powerpc/include/asm/book3s/64/kexec.h b/arch/powerpc/include/asm/book3s/64/kexec.h
new file mode 100644 (file)
index 0000000..6b5c3a2
--- /dev/null
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _ASM_POWERPC_BOOK3S_64_KEXEC_H_
+#define _ASM_POWERPC_BOOK3S_64_KEXEC_H_
+
+
+#define reset_sprs reset_sprs
+static inline void reset_sprs(void)
+{
+       if (cpu_has_feature(CPU_FTR_ARCH_206)) {
+               mtspr(SPRN_AMR, 0);
+               mtspr(SPRN_UAMOR, 0);
+       }
+
+       if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
+               mtspr(SPRN_IAMR, 0);
+       }
+
+       /*  Do we need isync()? We are going via a kexec reset */
+       isync();
+}
+
+#endif
index c684768..89f7e34 100644 (file)
@@ -150,6 +150,18 @@ static inline void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
 }
 
 #endif /* CONFIG_KEXEC_CORE */
+
+#ifdef CONFIG_PPC_BOOK3S_64
+#include <asm/book3s/64/kexec.h>
+#endif
+
+#ifndef reset_sprs
+#define reset_sprs reset_sprs
+static inline void reset_sprs(void)
+{
+}
+#endif
+
 #endif /* ! __ASSEMBLY__ */
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_KEXEC_H */
index 1864605..7bb46ad 100644 (file)
@@ -413,20 +413,6 @@ _GLOBAL(kexec_sequence)
        li      r0,0
        std     r0,16(r1)
 
-BEGIN_FTR_SECTION
-       /*
-        * This is the best time to turn AMR/IAMR off.
-        * key 0 is used in radix for supervisor<->user
-        * protection, but on hash key 0 is reserved
-        * ideally we want to enter with a clean state.
-        * NOTE, we rely on r0 being 0 from above.
-        */
-       mtspr   SPRN_IAMR,r0
-BEGIN_FTR_SECTION_NESTED(42)
-       mtspr   SPRN_AMOR,r0
-END_FTR_SECTION_NESTED_IFSET(CPU_FTR_HVMODE, 42)
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
-
        /* save regs for local vars on new stack.
         * yes, we won't go back, but ...
         */
index b418409..8a449b2 100644 (file)
@@ -152,6 +152,8 @@ static void kexec_smp_down(void *arg)
        if (ppc_md.kexec_cpu_down)
                ppc_md.kexec_cpu_down(0, 1);
 
+       reset_sprs();
+
        kexec_smp_wait();
        /* NOTREACHED */
 }
index 85de5b5..e18ae50 100644 (file)
@@ -15,6 +15,7 @@
 #include <asm/powernv.h>
 #include <asm/firmware.h>
 #include <asm/ultravisor.h>
+#include <asm/kexec.h>
 
 #include <mm/mmu_decl.h>
 #include <trace/events/thp.h>
@@ -165,6 +166,8 @@ void mmu_cleanup_all(void)
                radix__mmu_cleanup_all();
        else if (mmu_hash_ops.hpte_clear_all)
                mmu_hash_ops.hpte_clear_all();
+
+       reset_sprs();
 }
 
 #ifdef CONFIG_MEMORY_HOTPLUG