KVM: PPC: Book3S HV: Hold kvm->mmu_lock across updating nested pte rc bits
author Suraj Jitindar Singh <sjitindarsingh@gmail.com>
Fri, 21 Dec 2018 03:28:39 +0000 (14:28 +1100)
committer Paul Mackerras <paulus@ozlabs.org>
Fri, 21 Dec 2018 03:37:43 +0000 (14:37 +1100)
We already hold the kvm->mmu_lock spin lock across updating the rc bits
in the pte for the L1 guest. Continue to hold the lock across updating
the rc bits in the pte for the nested guest as well, so that the shadow
pte cannot be invalidated between the two updates.
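
For illustration, a condensed sketch of the locking pattern this change
establishes in kvmhv_handle_nested_set_rc(). The wrapper name
set_rc_sketch and its argument list are made up for this sketch, and the
L1 pte translation and flag computation that precede the lock in the real
function are omitted:

        /* Illustrative sketch only, not the real function signature. */
        static long set_rc_sketch(struct kvm *kvm, struct kvm_nested_guest *gp,
                                  bool writing, unsigned long l1_gpa,
                                  unsigned long n_gpa)
        {
                long ret;

                spin_lock(&kvm->mmu_lock);

                /* rc bits in our (L0) pte mapping the L1 guest */
                ret = kvmppc_hv_handle_set_rc(kvm, kvm->arch.pgtable, writing,
                                              l1_gpa, kvm->arch.lpid);
                if (!ret) {
                        ret = -EINVAL;
                        goto out_unlock;
                }

                /* rc bits in the shadow pte mapping the nested guest */
                ret = kvmppc_hv_handle_set_rc(kvm, gp->shadow_pgtable, writing,
                                              n_gpa, gp->shadow_lpid);
                ret = ret ? 0 : -EINVAL;

        out_unlock:
                /* only now can an invalidation of either pte proceed */
                spin_unlock(&kvm->mmu_lock);
                return ret;
        }

The point is that spin_unlock() now happens only after both pte updates,
which is why ret becomes a long: it must carry -EINVAL to the single
unlock-and-return path.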

Signed-off-by: Suraj Jitindar Singh <sjitindarsingh@gmail.com>
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
arch/powerpc/kvm/book3s_hv_nested.c

index 9dce4b9..606c392 100644
@@ -1150,7 +1150,7 @@ static long kvmhv_handle_nested_set_rc(struct kvm_vcpu *vcpu,
        struct kvm *kvm = vcpu->kvm;
        bool writing = !!(dsisr & DSISR_ISSTORE);
        u64 pgflags;
-       bool ret;
+       long ret;
 
        /* Are the rc bits set in the L1 partition scoped pte? */
        pgflags = _PAGE_ACCESSED;
@@ -1163,16 +1163,22 @@ static long kvmhv_handle_nested_set_rc(struct kvm_vcpu *vcpu,
        /* Set the rc bit in the pte of our (L0) pgtable for the L1 guest */
        ret = kvmppc_hv_handle_set_rc(kvm, kvm->arch.pgtable, writing,
                                     gpte.raddr, kvm->arch.lpid);
-       spin_unlock(&kvm->mmu_lock);
-       if (!ret)
-               return -EINVAL;
+       if (!ret) {
+               ret = -EINVAL;
+               goto out_unlock;
+       }
 
        /* Set the rc bit in the pte of the shadow_pgtable for the nested guest */
        ret = kvmppc_hv_handle_set_rc(kvm, gp->shadow_pgtable, writing, n_gpa,
                                      gp->shadow_lpid);
        if (!ret)
-               return -EINVAL;
-       return 0;
+               ret = -EINVAL;
+       else
+               ret = 0;
+
+out_unlock:
+       spin_unlock(&kvm->mmu_lock);
+       return ret;
 }
 
 static inline int kvmppc_radix_level_to_shift(int level)