KVM: PPC: Book3S HV: Don't take kvm->lock around kvm_for_each_vcpu
Author:     Paul Mackerras <paulus@ozlabs.org>
AuthorDate: Thu, 23 May 2019 06:36:32 +0000 (16:36 +1000)
Commit:     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
CommitDate: Sat, 22 Jun 2019 06:17:24 +0000 (08:17 +0200)
[ Upstream commit 5a3f49364c3ffa1107bd88f8292406e98c5d206c ]

Currently the HV KVM code takes the kvm->lock around calls to
kvm_for_each_vcpu() and kvm_get_vcpu_by_id() (which can call
kvm_for_each_vcpu() internally).  However, that leads to a lock
order inversion problem, because these are called in contexts where
the vcpu mutex is held, but the vcpu mutexes nest within kvm->lock
according to Documentation/virtual/kvm/locking.txt.  Hence there
is a possibility of deadlock.
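
For illustration, this is the classic AB-BA pattern; a minimal sketch
of the two lock orderings (schematic code, not verbatim call chains
from the tree):

	#include <linux/kvm_host.h>	/* struct kvm, struct kvm_vcpu */

	/* Thread A: vcpu ioctl path */
	static void thread_a(struct kvm_vcpu *vcpu)
	{
		mutex_lock(&vcpu->mutex);	/* as in kvm_vcpu_ioctl() */
		mutex_lock(&vcpu->kvm->lock);	/* as in the old kvmppc_set_lpcr() */
		/* ... */
		mutex_unlock(&vcpu->kvm->lock);
		mutex_unlock(&vcpu->mutex);
	}

	/* Thread B: a path following the documented order */
	static void thread_b(struct kvm_vcpu *vcpu)
	{
		mutex_lock(&vcpu->kvm->lock);	/* kvm->lock taken first */
		mutex_lock(&vcpu->mutex);	/* vcpu mutex nests inside */
		/* ... */
		mutex_unlock(&vcpu->mutex);
		mutex_unlock(&vcpu->kvm->lock);
	}

If A holds vcpu->mutex while B holds kvm->lock, each then blocks on
the lock the other holds.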

To fix this, we simply don't take the kvm->lock mutex around these
calls.  This is safe because the implementations of kvm_for_each_vcpu()
and kvm_get_vcpu_by_id() have been designed to be able to be called
locklessly.
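
For reference, this works because kvm->vcpus[] only ever grows while
the VM is alive, and new entries are published before kvm->online_vcpus
is bumped.  In the kernels this commit targets, the iterator and
accessor in include/linux/kvm_host.h are approximately:

	#define kvm_for_each_vcpu(idx, vcpup, kvm) \
		for (idx = 0; \
		     idx < atomic_read(&kvm->online_vcpus) && \
		     (vcpup = kvm_get_vcpu(kvm, idx)) != NULL; \
		     idx++)

	static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
	{
		int num_vcpus = atomic_read(&kvm->online_vcpus);

		i = array_index_nospec(i, num_vcpus);
		/* Pairs with smp_wmb() in kvm_vm_ioctl_create_vcpu(). */
		smp_rmb();
		return kvm->vcpus[i];
	}

A reader that races with vcpu creation may miss the newest vcpu, but it
never sees a half-initialized pointer, so no lock is needed.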

Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
Reviewed-by: Cédric Le Goater <clg@kaod.org>
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>

diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 0a2b247..e840f94 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -374,12 +374,7 @@ static void kvmppc_dump_regs(struct kvm_vcpu *vcpu)
 
 static struct kvm_vcpu *kvmppc_find_vcpu(struct kvm *kvm, int id)
 {
-       struct kvm_vcpu *ret;
-
-       mutex_lock(&kvm->lock);
-       ret = kvm_get_vcpu_by_id(kvm, id);
-       mutex_unlock(&kvm->lock);
-       return ret;
+       return kvm_get_vcpu_by_id(kvm, id);
 }
 
 static void init_vpa(struct kvm_vcpu *vcpu, struct lppaca *vpa)
@@ -1098,7 +1093,6 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
        struct kvmppc_vcore *vc = vcpu->arch.vcore;
        u64 mask;
 
-       mutex_lock(&kvm->lock);
        spin_lock(&vc->lock);
        /*
         * If ILE (interrupt little-endian) has changed, update the
@@ -1132,7 +1126,6 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
                mask &= 0xFFFFFFFF;
        vc->lpcr = (vc->lpcr & ~mask) | (new_lpcr & mask);
        spin_unlock(&vc->lock);
-       mutex_unlock(&kvm->lock);
 }
 
 static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,