KVM: PPC: Book3S HV: Fix TLB management on SMT8 POWER9 and POWER10 processors
author Suraj Jitindar Singh <sjitindarsingh@gmail.com>
Wed, 2 Jun 2021 04:04:41 +0000 (14:04 +1000)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 14 Jul 2021 14:55:55 +0000 (16:55 +0200)
[ Upstream commit 77bbbc0cf84834ed130838f7ac1988567f4d0288 ]

The POWER9 vCPU TLB management code assumes all threads in a core share
a TLB, and that a TLBIEL executed by one thread will invalidate TLBs for
all threads. This is not the case for SMT8-capable POWER9 and POWER10
(big core) processors, where the TLB is split between groups of threads.
This results in TLB multi-hits, random data corruption, etc.

Fix this by introducing cpu_first_tlb_thread_sibling() etc. to determine
which siblings share TLBs, and use that in the guest TLB flushing code.
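
To make the split concrete, here is a minimal standalone sketch (not
part of the patch; the helpers are local stand-ins that mirror the
arithmetic of the new cputhreads.h helpers for the SMT8 big-core case,
i.e. CPU_FTR_ARCH_300 with threads_per_core == 8):

  #include <stdio.h>

  /* Local stand-ins for cpu_{first,last}_tlb_thread_sibling() and
   * cpu_tlb_thread_sibling_step() in the big-core case only. */
  static int first_tlb_thread_sibling(int cpu) { return cpu & ~0x6; }
  static int last_tlb_thread_sibling(int cpu)  { return cpu | 0x6; }
  static int tlb_thread_sibling_step(void)     { return 2; }

  int main(void)
  {
          int cpu, i;

          for (cpu = 0; cpu < 8; cpu++) {
                  printf("thread %d shares a TLB with {", cpu);
                  for (i = first_tlb_thread_sibling(cpu);
                       i <= last_tlb_thread_sibling(cpu);
                       i += tlb_thread_sibling_step())
                          printf(" %d", i);
                  printf(" }\n");
          }
          return 0;
  }

This prints two groups, {0, 2, 4, 6} and {1, 3, 5, 7}: the even and odd
threads of the big core form the two interleaved SMT4 halves, each with
its own TLB, which is exactly the split the old
cpu_first_thread_sibling()-based code did not respect.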

[npiggin@gmail.com: add changelog and comment]

Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Reviewed-by: Fabiano Rosas <farosas@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20210602040441.3984352-1-npiggin@gmail.com
Signed-off-by: Sasha Levin <sashal@kernel.org>
arch/powerpc/include/asm/cputhreads.h
arch/powerpc/kvm/book3s_hv.c
arch/powerpc/kvm/book3s_hv_builtin.c
arch/powerpc/kvm/book3s_hv_rm_mmu.c

index 98c8bd155bf9d28aaded8cb8310aed785181adcb..b167186aaee4af09e4743142c71cd752892d3d1e 100644 (file)
@@ -98,6 +98,36 @@ static inline int cpu_last_thread_sibling(int cpu)
        return cpu | (threads_per_core - 1);
 }
 
+/*
+ * tlb_thread_siblings are siblings which share a TLB. This is not
+ * architected, is not something a hypervisor could emulate and a future
+ * CPU may change behaviour even in compat mode, so this should only be
+ * used on PowerNV, and only with care.
+ */
+static inline int cpu_first_tlb_thread_sibling(int cpu)
+{
+       if (cpu_has_feature(CPU_FTR_ARCH_300) && (threads_per_core == 8))
+               return cpu & ~0x6;      /* Big Core */
+       else
+               return cpu_first_thread_sibling(cpu);
+}
+
+static inline int cpu_last_tlb_thread_sibling(int cpu)
+{
+       if (cpu_has_feature(CPU_FTR_ARCH_300) && (threads_per_core == 8))
+               return cpu | 0x6;       /* Big Core */
+       else
+               return cpu_last_thread_sibling(cpu);
+}
+
+static inline int cpu_tlb_thread_sibling_step(void)
+{
+       if (cpu_has_feature(CPU_FTR_ARCH_300) && (threads_per_core == 8))
+               return 2;               /* Big Core */
+       else
+               return 1;
+}
+
 static inline u32 get_tensr(void)
 {
 #ifdef CONFIG_BOOKE
index 965b702208d850881182374a176795563ef5b1ab..2325b7a6e95f89cd96f7a94a3c09513f210979b7 100644 (file)
@@ -2578,7 +2578,7 @@ static void radix_flush_cpu(struct kvm *kvm, int cpu, struct kvm_vcpu *vcpu)
        cpumask_t *cpu_in_guest;
        int i;
 
-       cpu = cpu_first_thread_sibling(cpu);
+       cpu = cpu_first_tlb_thread_sibling(cpu);
        if (nested) {
                cpumask_set_cpu(cpu, &nested->need_tlb_flush);
                cpu_in_guest = &nested->cpu_in_guest;
@@ -2592,9 +2592,10 @@ static void radix_flush_cpu(struct kvm *kvm, int cpu, struct kvm_vcpu *vcpu)
         * the other side is the first smp_mb() in kvmppc_run_core().
         */
        smp_mb();
-       for (i = 0; i < threads_per_core; ++i)
-               if (cpumask_test_cpu(cpu + i, cpu_in_guest))
-                       smp_call_function_single(cpu + i, do_nothing, NULL, 1);
+       for (i = cpu; i <= cpu_last_tlb_thread_sibling(cpu);
+                                       i += cpu_tlb_thread_sibling_step())
+               if (cpumask_test_cpu(i, cpu_in_guest))
+                       smp_call_function_single(i, do_nothing, NULL, 1);
 }
 
 static void kvmppc_prepare_radix_vcpu(struct kvm_vcpu *vcpu, int pcpu)
@@ -2625,8 +2626,8 @@ static void kvmppc_prepare_radix_vcpu(struct kvm_vcpu *vcpu, int pcpu)
         */
        if (prev_cpu != pcpu) {
                if (prev_cpu >= 0 &&
-                   cpu_first_thread_sibling(prev_cpu) !=
-                   cpu_first_thread_sibling(pcpu))
+                   cpu_first_tlb_thread_sibling(prev_cpu) !=
+                   cpu_first_tlb_thread_sibling(pcpu))
                        radix_flush_cpu(kvm, prev_cpu, vcpu);
                if (nested)
                        nested->prev_cpu[vcpu->arch.nested_vcpu_id] = pcpu;
index 8f58dd20b362a9cb4727b8b7bb9eb97621ca189f..4621905bdd9ea05bbe0cfaf578958d1e3f6fb7a1 100644 (file)
@@ -893,7 +893,7 @@ void kvmppc_check_need_tlb_flush(struct kvm *kvm, int pcpu,
         * Thus we make all 4 threads use the same bit.
         */
        if (cpu_has_feature(CPU_FTR_ARCH_300))
-               pcpu = cpu_first_thread_sibling(pcpu);
+               pcpu = cpu_first_tlb_thread_sibling(pcpu);
 
        if (nested)
                need_tlb_flush = &nested->need_tlb_flush;
index 88da2764c1bb9c63d87a795bedaa5caf08593e75..3ddc83d2e8493eca06ec0ae0bbcb692fb49e5db1 100644 (file)
@@ -67,7 +67,7 @@ static int global_invalidates(struct kvm *kvm)
                 * so use the bit for the first thread to represent the core.
                 */
                if (cpu_has_feature(CPU_FTR_ARCH_300))
-                       cpu = cpu_first_thread_sibling(cpu);
+                       cpu = cpu_first_tlb_thread_sibling(cpu);
                cpumask_clear_cpu(cpu, &kvm->arch.need_tlb_flush);
        }
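
For reference, the net effect on the kick loop in radix_flush_cpu() can
be sketched as follows (illustrative fragment only, assuming
threads_per_core == 8 on a big core; flush_one() is a hypothetical
stand-in for the smp_call_function_single() IPI in the real code):

  /* Before: the target was rounded down with cpu_first_thread_sibling()
   * and the loop kicked all eight threads of the core, on the (wrong)
   * assumption that they all share one TLB. */
  for (i = 0; i < threads_per_core; ++i)
          flush_one(cpu + i);

  /* After: the target is rounded down with cpu_first_tlb_thread_sibling()
   * and the loop steps through cpu, cpu + 2, cpu + 4, cpu + 6: only the
   * threads that actually share the target's TLB. */
  for (i = cpu; i <= cpu_last_tlb_thread_sibling(cpu);
       i += cpu_tlb_thread_sibling_step())
          flush_one(i);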