KVM: async_pf: Cleanup kvm_setup_async_pf()
authorVitaly Kuznetsov <vkuznets@redhat.com>
Wed, 10 Jun 2020 17:55:31 +0000 (19:55 +0200)
committerPaolo Bonzini <pbonzini@redhat.com>
Thu, 11 Jun 2020 16:35:19 +0000 (12:35 -0400)
schedule_work() returns 'false' only when the work is already on the queue
and this can't happen as kvm_setup_async_pf() always allocates a new one.
Also, to avoid a potential race, it makes sense to call schedule_work() at the
very end, after we've added the work item to the queue.

While at it, do some minor cleanup. gfn_to_pfn_async() mentioned in a
comment does not currently exist and, moreover, we can check
kvm_is_error_hva() at the very beginning, before we try to allocate work so
'retry_sync' label can go away completely.

Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Message-Id: <20200610175532.779793-1-vkuznets@redhat.com>
Reviewed-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
virt/kvm/async_pf.c

index f1e07fa..ba08008 100644 (file)
@@ -164,7 +164,9 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
        if (vcpu->async_pf.queued >= ASYNC_PF_PER_VCPU)
                return 0;
 
-       /* setup delayed work */
+       /* Arch specific code should not do async PF in this case */
+       if (unlikely(kvm_is_error_hva(hva)))
+               return 0;
 
        /*
         * do alloc nowait since if we are going to sleep anyway we
@@ -183,24 +185,15 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
        mmget(work->mm);
        kvm_get_kvm(work->vcpu->kvm);
 
-       /* this can't really happen otherwise gfn_to_pfn_async
-          would succeed */
-       if (unlikely(kvm_is_error_hva(work->addr)))
-               goto retry_sync;
-
        INIT_WORK(&work->work, async_pf_execute);
-       if (!schedule_work(&work->work))
-               goto retry_sync;
 
        list_add_tail(&work->queue, &vcpu->async_pf.queue);
        vcpu->async_pf.queued++;
        kvm_arch_async_page_not_present(vcpu, work);
+
+       schedule_work(&work->work);
+
        return 1;
-retry_sync:
-       kvm_put_kvm(work->vcpu->kvm);
-       mmput(work->mm);
-       kmem_cache_free(async_pf_cache, work);
-       return 0;
 }
 
 int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu)