KVM: async_pf: Cleanup kvm_setup_async_pf()
diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
index 10b533f..ba08008 100644
--- a/virt/kvm/async_pf.c
+++ b/virt/kvm/async_pf.c
@@ -51,6 +51,7 @@ static void async_pf_execute(struct work_struct *work)
        unsigned long addr = apf->addr;
        gpa_t cr2_or_gpa = apf->cr2_or_gpa;
        int locked = 1;
+       bool first;
 
        might_sleep();
 
@@ -69,10 +70,14 @@ static void async_pf_execute(struct work_struct *work)
                kvm_arch_async_page_present(vcpu, apf);
 
        spin_lock(&vcpu->async_pf.lock);
+       first = list_empty(&vcpu->async_pf.done);
        list_add_tail(&apf->link, &vcpu->async_pf.done);
        apf->vcpu = NULL;
        spin_unlock(&vcpu->async_pf.lock);
 
+       if (!IS_ENABLED(CONFIG_KVM_ASYNC_PF_SYNC) && first)
+               kvm_arch_async_page_present_queued(vcpu);
+
        /*
         * apf may be freed by kvm_check_async_pf_completion() after
         * this point
@@ -134,7 +139,7 @@ void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu)
        struct kvm_async_pf *work;
 
        while (!list_empty_careful(&vcpu->async_pf.done) &&
-             kvm_arch_can_inject_async_page_present(vcpu)) {
+             kvm_arch_can_dequeue_async_page_present(vcpu)) {
                spin_lock(&vcpu->async_pf.lock);
                work = list_first_entry(&vcpu->async_pf.done, typeof(*work),
                                              link);
@@ -159,7 +164,9 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
        if (vcpu->async_pf.queued >= ASYNC_PF_PER_VCPU)
                return 0;
 
-       /* setup delayed work */
+       /* Arch specific code should not do async PF in this case */
+       if (unlikely(kvm_is_error_hva(hva)))
+               return 0;
 
        /*
         * do alloc nowait since if we are going to sleep anyway we
@@ -178,29 +185,21 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
        mmget(work->mm);
        kvm_get_kvm(work->vcpu->kvm);
 
-       /* this can't really happen otherwise gfn_to_pfn_async
-          would succeed */
-       if (unlikely(kvm_is_error_hva(work->addr)))
-               goto retry_sync;
-
        INIT_WORK(&work->work, async_pf_execute);
-       if (!schedule_work(&work->work))
-               goto retry_sync;
 
        list_add_tail(&work->queue, &vcpu->async_pf.queue);
        vcpu->async_pf.queued++;
        kvm_arch_async_page_not_present(vcpu, work);
+
+       schedule_work(&work->work);
+
        return 1;
-retry_sync:
-       kvm_put_kvm(work->vcpu->kvm);
-       mmput(work->mm);
-       kmem_cache_free(async_pf_cache, work);
-       return 0;
 }
 
 int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu)
 {
        struct kvm_async_pf *work;
+       bool first;
 
        if (!list_empty_careful(&vcpu->async_pf.done))
                return 0;
@@ -213,9 +212,13 @@ int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu)
        INIT_LIST_HEAD(&work->queue); /* for list_del to work */
 
        spin_lock(&vcpu->async_pf.lock);
+       first = list_empty(&vcpu->async_pf.done);
        list_add_tail(&work->link, &vcpu->async_pf.done);
        spin_unlock(&vcpu->async_pf.lock);
 
+       if (!IS_ENABLED(CONFIG_KVM_ASYNC_PF_SYNC) && first)
+               kvm_arch_async_page_present_queued(vcpu);
+
        vcpu->async_pf.queued++;
        return 0;
 }
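
For context on the notification half of this diff: kvm_arch_async_page_present_queued() tells the arch code that a completed async PF was just added to an empty 'done' list, so the guest can be notified right away instead of waiting for the next kvm_check_async_pf_completion() poll. A minimal sketch of such a hook, modeled on x86's interrupt-based 'page ready' delivery (KVM_REQ_APF_READY and vcpu->arch.apf.pageready_pending come from that x86 series and are assumptions here, not part of this file):

	void kvm_arch_async_page_present_queued(struct kvm_vcpu *vcpu)
	{
		/* Ask the vCPU to drain vcpu->async_pf.done on next entry. */
		kvm_make_request(KVM_REQ_APF_READY, vcpu);

		/*
		 * Kick the vCPU out of guest mode, unless a 'page ready'
		 * notification interrupt is already pending anyway.
		 */
		if (!vcpu->arch.apf.pageready_pending)
			kvm_vcpu_kick(vcpu);
	}

Two details worth noting. First, sampling list_empty(&vcpu->async_pf.done) under async_pf.lock before the list_add_tail() means the hook fires only on the empty-to-non-empty transition, so a burst of completions produces one kick rather than one per work item; the !IS_ENABLED(CONFIG_KVM_ASYNC_PF_SYNC) guard skips the notification on architectures (s390) that inject 'page ready' synchronously from the worker itself. Second, in kvm_setup_async_pf() the dropped retry_sync path was dead code: schedule_work() only returns false when the work item is already queued, which cannot happen for a freshly INIT_WORK()ed allocation, and moving the call after list_add_tail(&work->queue, ...) ensures the worker can never complete before the request is visible on vcpu->async_pf.queue.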