From 000a42b35a54372597f0657f6b9875b38c641864 Mon Sep 17 00:00:00 2001
From: "Aneesh Kumar K.V" <aneesh.kumar@linux.ibm.com>
Date: Thu, 9 Jul 2020 08:59:41 +0530
Subject: [PATCH] powerpc/book3s64/keys/kuap: Reset AMR/IAMR values on kexec

As we kexec across kernels that use AMR/IAMR for different purposes,
we need to ensure that new kernels get kexec'd with reset values of
AMR/IAMR. For example, the new kernel can use key 0 for the kernel
mapping, while the old AMR value prevents access to key 0.

This patch also removes the reset of IAMR and AMOR in kexec_sequence.
The AMOR reset is not needed, and the IAMR reset is partial (it
doesn't do the reset on secondary CPUs) and is made redundant by this
patch.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20200709032946.881753-19-aneesh.kumar@linux.ibm.com
---
 arch/powerpc/include/asm/book3s/64/kexec.h | 23 +++++++++++++++++++++++
 arch/powerpc/include/asm/kexec.h           | 12 ++++++++++++
 arch/powerpc/kernel/misc_64.S              | 14 --------------
 arch/powerpc/kexec/core_64.c               |  2 ++
 arch/powerpc/mm/book3s64/pgtable.c         |  3 +++
 5 files changed, 40 insertions(+), 14 deletions(-)
 create mode 100644 arch/powerpc/include/asm/book3s/64/kexec.h

diff --git a/arch/powerpc/include/asm/book3s/64/kexec.h b/arch/powerpc/include/asm/book3s/64/kexec.h
new file mode 100644
index 0000000..6b5c3a2
--- /dev/null
+++ b/arch/powerpc/include/asm/book3s/64/kexec.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _ASM_POWERPC_BOOK3S_64_KEXEC_H_
+#define _ASM_POWERPC_BOOK3S_64_KEXEC_H_
+
+
+#define reset_sprs reset_sprs
+static inline void reset_sprs(void)
+{
+	if (cpu_has_feature(CPU_FTR_ARCH_206)) {
+		mtspr(SPRN_AMR, 0);
+		mtspr(SPRN_UAMOR, 0);
+	}
+
+	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
+		mtspr(SPRN_IAMR, 0);
+	}
+
+	/* Do we need isync()? We are going via a kexec reset */
+	isync();
+}
+
+#endif
diff --git a/arch/powerpc/include/asm/kexec.h b/arch/powerpc/include/asm/kexec.h
index c684768..89f7e34 100644
--- a/arch/powerpc/include/asm/kexec.h
+++ b/arch/powerpc/include/asm/kexec.h
@@ -150,6 +150,18 @@ static inline void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
 }
 #endif /* CONFIG_KEXEC_CORE */
 
+
+#ifdef CONFIG_PPC_BOOK3S_64
+#include <asm/book3s/64/kexec.h>
+#endif
+
+#ifndef reset_sprs
+#define reset_sprs reset_sprs
+static inline void reset_sprs(void)
+{
+}
+#endif
+
 #endif /* ! __ASSEMBLY__ */
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_KEXEC_H */
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index 1864605..7bb46ad 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -413,20 +413,6 @@ _GLOBAL(kexec_sequence)
 	li	r0,0
 	std	r0,16(r1)
 
-BEGIN_FTR_SECTION
-	/*
-	 * This is the best time to turn AMR/IAMR off.
-	 * key 0 is used in radix for supervisor<->user
-	 * protection, but on hash key 0 is reserved
-	 * ideally we want to enter with a clean state.
-	 * NOTE, we rely on r0 being 0 from above.
-	 */
-	mtspr	SPRN_IAMR,r0
-BEGIN_FTR_SECTION_NESTED(42)
-	mtspr	SPRN_AMOR,r0
-END_FTR_SECTION_NESTED_IFSET(CPU_FTR_HVMODE, 42)
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
-
 	/* save regs for local vars on new stack.
 	 * yes, we won't go back, but ...
 	 */
diff --git a/arch/powerpc/kexec/core_64.c b/arch/powerpc/kexec/core_64.c
index b41840921..8a449b2 100644
--- a/arch/powerpc/kexec/core_64.c
+++ b/arch/powerpc/kexec/core_64.c
@@ -152,6 +152,8 @@ static void kexec_smp_down(void *arg)
 	if (ppc_md.kexec_cpu_down)
 		ppc_md.kexec_cpu_down(0, 1);
 
+	reset_sprs();
+
 	kexec_smp_wait();
 	/* NOTREACHED */
 }
diff --git a/arch/powerpc/mm/book3s64/pgtable.c b/arch/powerpc/mm/book3s64/pgtable.c
index 85de5b5..e18ae50 100644
--- a/arch/powerpc/mm/book3s64/pgtable.c
+++ b/arch/powerpc/mm/book3s64/pgtable.c
@@ -15,6 +15,7 @@
 #include <asm/powernv.h>
 #include <asm/firmware.h>
 #include <asm/ultravisor.h>
+#include <asm/kexec.h>
 
 #include <mm/mmu_decl.h>
 #include <trace/events/thp.h>
@@ -165,6 +166,8 @@ void mmu_cleanup_all(void)
 		radix__mmu_cleanup_all();
 	else if (mmu_hash_ops.hpte_clear_all)
 		mmu_hash_ops.hpte_clear_all();
+
+	reset_sprs();
 }
 
 #ifdef CONFIG_MEMORY_HOTPLUG
-- 
2.7.4
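
A note on the reset_sprs() override pattern used above: asm/book3s/64/kexec.h
defines both the inline function and a macro of the same name, so the #ifndef
fallback in asm/kexec.h is compiled only on platforms that do not provide
their own version. Below is a minimal, standalone sketch of that idiom; the
printf() body and the flattened single-file layout are illustrative stand-ins,
not kernel code:

	#include <stdio.h>

	/* Stand-in for asm/book3s/64/kexec.h: the arch-specific override. */
	#define reset_sprs reset_sprs	/* the macro marks the override as present */
	static inline void reset_sprs(void)
	{
		/* The kernel version clears AMR/UAMOR/IAMR via mtspr() here. */
		printf("arch-specific reset_sprs() ran\n");
	}

	/* Stand-in for asm/kexec.h: generic fallback, skipped when overridden. */
	#ifndef reset_sprs
	#define reset_sprs reset_sprs
	static inline void reset_sprs(void)
	{
		/* no-op for platforms with no special SPR state to clear */
	}
	#endif

	int main(void)
	{
		reset_sprs();	/* resolves to the arch override in this build */
		return 0;
	}

With the two call sites added by the patch, kexec_smp_down() on the secondary
CPUs and mmu_cleanup_all() on the CPU performing the kexec, the intent is that
every CPU enters the new kernel with AMR, IAMR and UAMOR cleared.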