KVM: MMU: cleanup locking in mmu_free_roots()
authorGleb Natapov <gleb@redhat.com>
Thu, 16 May 2013 08:55:51 +0000 (11:55 +0300)
committerGleb Natapov <gleb@redhat.com>
Thu, 16 May 2013 08:55:51 +0000 (11:55 +0300)
Do locking around each case separately instead of having one lock and two
unlocks. Move root_hpa assignment out of the lock.

Signed-off-by: Gleb Natapov <gleb@redhat.com>
arch/x86/kvm/mmu.c

index 40d7b2d..f8ca2f3 100644 (file)
@@ -2869,22 +2869,25 @@ static void mmu_free_roots(struct kvm_vcpu *vcpu)
 
        if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
                return;
-       spin_lock(&vcpu->kvm->mmu_lock);
+
        if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL &&
            (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL ||
             vcpu->arch.mmu.direct_map)) {
                hpa_t root = vcpu->arch.mmu.root_hpa;
 
+               spin_lock(&vcpu->kvm->mmu_lock);
                sp = page_header(root);
                --sp->root_count;
                if (!sp->root_count && sp->role.invalid) {
                        kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list);
                        kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
                }
-               vcpu->arch.mmu.root_hpa = INVALID_PAGE;
                spin_unlock(&vcpu->kvm->mmu_lock);
+               vcpu->arch.mmu.root_hpa = INVALID_PAGE;
                return;
        }
+
+       spin_lock(&vcpu->kvm->mmu_lock);
        for (i = 0; i < 4; ++i) {
                hpa_t root = vcpu->arch.mmu.pae_root[i];