lkdtm/powerpc: Add SLB multihit test
authorGanesh Goudar <ganeshgr@linux.ibm.com>
Mon, 30 Nov 2020 08:30:57 +0000 (14:00 +0530)
committerMichael Ellerman <mpe@ellerman.id.au>
Thu, 3 Dec 2020 14:01:34 +0000 (01:01 +1100)
To test machine check handling, add support for injecting SLB
multihit errors.

Co-developed-by: Mahesh Salgaonkar <mahesh@linux.ibm.com>
Signed-off-by: Mahesh Salgaonkar <mahesh@linux.ibm.com>
Signed-off-by: Ganesh Goudar <ganeshgr@linux.ibm.com>
[mpe: Use CONFIG_PPC_BOOK3S_64 to fix compile errors reported by lkp@intel.com]
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20201130083057.135610-1-ganeshgr@linux.ibm.com
arch/powerpc/include/asm/book3s/64/mmu-hash.h
arch/powerpc/mm/book3s64/hash_utils.c
arch/powerpc/mm/book3s64/slb.c
drivers/misc/lkdtm/Makefile
drivers/misc/lkdtm/core.c
drivers/misc/lkdtm/lkdtm.h
drivers/misc/lkdtm/powerpc.c [new file with mode: 0644]
tools/testing/selftests/lkdtm/tests.txt

index 9192cb0..066b1d3 100644 (file)
@@ -843,6 +843,32 @@ static inline unsigned long get_kernel_vsid(unsigned long ea, int ssize)
 
 unsigned htab_shift_for_mem_size(unsigned long mem_size);
 
-#endif /* __ASSEMBLY__ */
+/* SLB slot indexes of the bolted kernel entries (moved here from slb.c). */
+enum slb_index {
+       LINEAR_INDEX    = 0, /* Kernel linear map  (0xc000000000000000) */
+       KSTACK_INDEX    = 1, /* Kernel stack map */
+};
 
+/* ESID mask appropriate for a 256M vs. 1T segment size. */
+#define slb_esid_mask(ssize)   \
+       (((ssize) == MMU_SEGSIZE_256M) ? ESID_MASK : ESID_MASK_1T)
+
+/* Build the ESID (effective segment ID) word of an SLB entry. */
+static inline unsigned long mk_esid_data(unsigned long ea, int ssize,
+                                        enum slb_index index)
+{
+       return (ea & slb_esid_mask(ssize)) | SLB_ESID_V | index;
+}
+
+/* Build the VSID word of an SLB entry from an already-computed VSID. */
+static inline unsigned long __mk_vsid_data(unsigned long vsid, int ssize,
+                                          unsigned long flags)
+{
+       return (vsid << slb_vsid_shift(ssize)) | flags |
+               ((unsigned long)ssize << SLB_VSID_SSIZE_SHIFT);
+}
+
+/* Build the VSID word of an SLB entry for a kernel effective address. */
+static inline unsigned long mk_vsid_data(unsigned long ea, int ssize,
+                                        unsigned long flags)
+{
+       return __mk_vsid_data(get_kernel_vsid(ea, ssize), ssize, flags);
+}
+
+#endif /* __ASSEMBLY__ */
 #endif /* _ASM_POWERPC_BOOK3S_64_MMU_HASH_H_ */
index e0fe1a4..73b06ad 100644 (file)
@@ -112,6 +112,7 @@ int mmu_linear_psize = MMU_PAGE_4K;
 EXPORT_SYMBOL_GPL(mmu_linear_psize);
 int mmu_virtual_psize = MMU_PAGE_4K;
 int mmu_vmalloc_psize = MMU_PAGE_4K;
+EXPORT_SYMBOL_GPL(mmu_vmalloc_psize);
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
 int mmu_vmemmap_psize = MMU_PAGE_4K;
 #endif
index 6d720c1..5845679 100644 (file)
 #include "internal.h"
 
 
-enum slb_index {
-       LINEAR_INDEX    = 0, /* Kernel linear map  (0xc000000000000000) */
-       KSTACK_INDEX    = 1, /* Kernel stack map */
-};
-
 static long slb_allocate_user(struct mm_struct *mm, unsigned long ea);
 
-#define slb_esid_mask(ssize)   \
-       (((ssize) == MMU_SEGSIZE_256M)? ESID_MASK: ESID_MASK_1T)
-
-static inline unsigned long mk_esid_data(unsigned long ea, int ssize,
-                                        enum slb_index index)
-{
-       return (ea & slb_esid_mask(ssize)) | SLB_ESID_V | index;
-}
-
-static inline unsigned long __mk_vsid_data(unsigned long vsid, int ssize,
-                                        unsigned long flags)
-{
-       return (vsid << slb_vsid_shift(ssize)) | flags |
-               ((unsigned long) ssize << SLB_VSID_SSIZE_SHIFT);
-}
-
-static inline unsigned long mk_vsid_data(unsigned long ea, int ssize,
-                                        unsigned long flags)
-{
-       return __mk_vsid_data(get_kernel_vsid(ea, ssize), ssize, flags);
-}
-
 bool stress_slb_enabled __initdata;
 
 static int __init parse_stress_slb(char *p)
index c70b382..5a92c74 100644 (file)
@@ -10,6 +10,7 @@ lkdtm-$(CONFIG_LKDTM)         += rodata_objcopy.o
 lkdtm-$(CONFIG_LKDTM)          += usercopy.o
 lkdtm-$(CONFIG_LKDTM)          += stackleak.o
 lkdtm-$(CONFIG_LKDTM)          += cfi.o
+lkdtm-$(CONFIG_PPC_BOOK3S_64)  += powerpc.o
 
 KASAN_SANITIZE_stackleak.o     := n
 KCOV_INSTRUMENT_rodata.o       := n
index 97803f2..1f612c7 100644 (file)
@@ -176,6 +176,9 @@ static const struct crashtype crashtypes[] = {
 #ifdef CONFIG_X86_32
        CRASHTYPE(DOUBLE_FAULT),
 #endif
+#ifdef CONFIG_PPC_BOOK3S_64
+       CRASHTYPE(PPC_SLB_MULTIHIT),
+#endif
 };
 
 
index 6dec4c9..79ec05c 100644 (file)
@@ -102,4 +102,7 @@ void lkdtm_STACKLEAK_ERASING(void);
 /* cfi.c */
 void lkdtm_CFI_FORWARD_PROTO(void);
 
+/* powerpc.c */
+void lkdtm_PPC_SLB_MULTIHIT(void);
+
 #endif
diff --git a/drivers/misc/lkdtm/powerpc.c b/drivers/misc/lkdtm/powerpc.c
new file mode 100644 (file)
index 0000000..077c9f9
--- /dev/null
@@ -0,0 +1,120 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "lkdtm.h"
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <asm/mmu.h>
+
+/*
+ * Insert two SLB entries that both translate the given effective address,
+ * so the next access to it provokes an SLB multihit machine check.
+ */
+static void insert_slb_entry(unsigned long p, int ssize, int page_size)
+{
+       unsigned long flags;
+
+       flags = SLB_VSID_KERNEL | mmu_psize_defs[page_size].sllp;
+       preempt_disable();
+
+       /* First entry, placed just past the bolted slots. */
+       asm volatile("slbmte %0,%1" :
+                    : "r" (mk_vsid_data(p, ssize, flags)),
+                      "r" (mk_esid_data(p, ssize, SLB_NUM_BOLTED))
+                    : "memory");
+
+       /* Identical mapping in the next slot creates the multihit. */
+       asm volatile("slbmte %0,%1" :
+                       : "r" (mk_vsid_data(p, ssize, flags)),
+                         "r" (mk_esid_data(p, ssize, SLB_NUM_BOLTED + 1))
+                       : "memory");
+       preempt_enable();
+}
+
+/* Inject an SLB multihit on a vmalloc-ed address, i.e. 0xD00... */
+static int inject_vmalloc_slb_multihit(void)
+{
+       char *p;
+
+       p = vmalloc(PAGE_SIZE);
+       if (!p)
+               return -ENOMEM;
+
+       insert_slb_entry((unsigned long)p, MMU_SEGSIZE_1T, mmu_vmalloc_psize);
+       /*
+        * This triggers an exception; if it is handled correctly we
+        * must recover from this error.
+        */
+       p[0] = '!';
+       vfree(p);
+       return 0;
+}
+
+/* Inject an SLB multihit on a kmalloc-ed address, i.e. 0xC00... */
+static int inject_kmalloc_slb_multihit(void)
+{
+       char *p;
+
+       p = kmalloc(2048, GFP_KERNEL);
+       if (!p)
+               return -ENOMEM;
+
+       insert_slb_entry((unsigned long)p, MMU_SEGSIZE_1T, mmu_linear_psize);
+       /*
+        * This triggers an exception; if it is handled correctly we
+        * must recover from this error.
+        */
+       p[0] = '!';
+       kfree(p);
+       return 0;
+}
+
+/*
+ * A few initial SLB entries are bolted. Inject a multihit on bolted
+ * entry 0 by copying its contents into two non-bolted slots.
+ */
+static void insert_dup_slb_entry_0(void)
+{
+       unsigned long test_address = PAGE_OFFSET, *test_ptr;
+       unsigned long esid, vsid;
+       unsigned long i = 0;
+
+       test_ptr = (unsigned long *)test_address;
+       preempt_disable();
+
+       /* Read back the ESID/VSID words of bolted entry 0. */
+       asm volatile("slbmfee  %0,%1" : "=r" (esid) : "r" (i));
+       asm volatile("slbmfev  %0,%1" : "=r" (vsid) : "r" (i));
+
+       /* for i !=0 we would need to mask out the old entry number */
+       asm volatile("slbmte %0,%1" :
+                       : "r" (vsid),
+                         "r" (esid | SLB_NUM_BOLTED)
+                       : "memory");
+
+       asm volatile("slbmfee  %0,%1" : "=r" (esid) : "r" (i));
+       asm volatile("slbmfev  %0,%1" : "=r" (vsid) : "r" (i));
+
+       /* for i !=0 we would need to mask out the old entry number */
+       asm volatile("slbmte %0,%1" :
+                       : "r" (vsid),
+                         "r" (esid | (SLB_NUM_BOLTED + 1))
+                       : "memory");
+
+       /* Touch the duplicated mapping; the machine must survive the MCE. */
+       pr_info("%s accessing test address 0x%lx: 0x%lx\n",
+               __func__, test_address, *test_ptr);
+
+       preempt_enable();
+}
+
+/* LKDTM entry point: inject SLB multihit errors (hash MMU only). */
+void lkdtm_PPC_SLB_MULTIHIT(void)
+{
+       if (!radix_enabled()) {
+               pr_info("Injecting SLB multihit errors\n");
+               /*
+                * These need not be separate tests, and they do pretty
+                * much the same thing. In any case we must recover from
+                * the errors introduced by these functions; the machine
+                * would not survive these tests if it failed to handle
+                * them.
+                */
+               inject_vmalloc_slb_multihit();
+               inject_kmalloc_slb_multihit();
+               insert_dup_slb_entry_0();
+               pr_info("Recovered from SLB multihit errors\n");
+       } else {
+               pr_err("XFAIL: This test is for ppc64 and with hash mode MMU only\n");
+       }
+}
index 74a8d32..18e4599 100644 (file)
@@ -68,3 +68,4 @@ USERCOPY_STACK_BEYOND
 USERCOPY_KERNEL
 STACKLEAK_ERASING OK: the rest of the thread stack is properly erased
 CFI_FORWARD_PROTO
+PPC_SLB_MULTIHIT Recovered