KVM: do not release the error page
author    Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Fri, 3 Aug 2012 07:42:52 +0000 (15:42 +0800)
committer Avi Kivity <avi@redhat.com>
Mon, 6 Aug 2012 13:04:58 +0000 (16:04 +0300)
After commit a2766325cf9f9, the error page is replaced by an error code,
so it no longer needs to be released.

[ The patch has been compile-tested for powerpc ]

Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
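
A minimal sketch of the calling convention after this change, modeled on the
nested_get_page() hunk in vmx.c below; kernel context is assumed, and the
wrapper name get_guest_page() is illustrative only, not part of the patch:

static struct page *get_guest_page(struct kvm_vcpu *vcpu, gpa_t addr)
{
	/* gfn_to_page() now hands back a dedicated error page on failure */
	struct page *page = gfn_to_page(vcpu->kvm, addr >> PAGE_SHIFT);

	/*
	 * The error page is not a real, refcounted page, so callers must not
	 * pass it to kvm_release_page_clean(); the kvm_main.c hunk below
	 * turns that into a WARN_ON().
	 */
	if (is_error_page(page))
		return NULL;

	return page;
}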
arch/powerpc/kvm/44x_tlb.c
arch/powerpc/kvm/book3s_pr.c
arch/x86/kvm/svm.c
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c
include/linux/kvm_host.h
virt/kvm/async_pf.c
virt/kvm/kvm_main.c

diff --git a/arch/powerpc/kvm/44x_tlb.c b/arch/powerpc/kvm/44x_tlb.c
index 33aa715..5dd3ab4 100644
@@ -319,7 +319,6 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr,
        if (is_error_page(new_page)) {
                printk(KERN_ERR "Couldn't get guest page for gfn %llx!\n",
                        (unsigned long long)gfn);
-               kvm_release_page_clean(new_page);
                return;
        }
        hpaddr = page_to_phys(new_page);
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index a1baec3..05c28f5 100644
@@ -242,10 +242,8 @@ static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
        int i;
 
        hpage = gfn_to_page(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
-       if (is_error_page(hpage)) {
-               kvm_release_page_clean(hpage);
+       if (is_error_page(hpage))
                return;
-       }
 
        hpage_offset = pte->raddr & ~PAGE_MASK;
        hpage_offset &= ~0xFFFULL;
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 687d0c3..31be4a5 100644
@@ -2105,7 +2105,6 @@ static void *nested_svm_map(struct vcpu_svm *svm, u64 gpa, struct page **_page)
        return kmap(page);
 
 error:
-       kvm_release_page_clean(page);
        kvm_inject_gp(&svm->vcpu, 0);
 
        return NULL;
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index d6e4cbc..cc8ad98 100644
@@ -596,10 +596,9 @@ static inline struct vmcs12 *get_vmcs12(struct kvm_vcpu *vcpu)
 static struct page *nested_get_page(struct kvm_vcpu *vcpu, gpa_t addr)
 {
        struct page *page = gfn_to_page(vcpu->kvm, addr >> PAGE_SHIFT);
-       if (is_error_page(page)) {
-               kvm_release_page_clean(page);
+       if (is_error_page(page))
                return NULL;
-       }
+
        return page;
 }
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index ebf2109..7953a9e 100644
@@ -1639,10 +1639,9 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
                vcpu->arch.time_page =
                                gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);
 
-               if (is_error_page(vcpu->arch.time_page)) {
-                       kvm_release_page_clean(vcpu->arch.time_page);
+               if (is_error_page(vcpu->arch.time_page))
                        vcpu->arch.time_page = NULL;
-               }
+
                break;
        }
        case MSR_KVM_ASYNC_PF_EN:
@@ -3945,10 +3944,8 @@ static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
                goto emul_write;
 
        page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
-       if (is_error_page(page)) {
-               kvm_release_page_clean(page);
+       if (is_error_page(page))
                goto emul_write;
-       }
 
        kaddr = kmap_atomic(page);
        kaddr += offset_in_page(gpa);
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index ce7c329..07226f8 100644
@@ -457,7 +457,7 @@ pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
 pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
                      bool *writable);
 pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
-void kvm_release_pfn_dirty(pfn_t);
+void kvm_release_pfn_dirty(pfn_t pfn);
 void kvm_release_pfn_clean(pfn_t pfn);
 void kvm_set_pfn_dirty(pfn_t pfn);
 void kvm_set_pfn_accessed(pfn_t pfn);
diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
index 56f5533..ea475cd 100644
@@ -111,7 +111,7 @@ void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
                        list_entry(vcpu->async_pf.done.next,
                                   typeof(*work), link);
                list_del(&work->link);
-               if (work->page)
+               if (!is_error_page(work->page))
                        kvm_release_page_clean(work->page);
                kmem_cache_free(async_pf_cache, work);
        }
@@ -138,7 +138,7 @@ void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu)
 
                list_del(&work->queue);
                vcpu->async_pf.queued--;
-               if (work->page)
+               if (!is_error_page(work->page))
                        kvm_release_page_clean(work->page);
                kmem_cache_free(async_pf_cache, work);
        }
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index eafba99..a2e85af 100644
@@ -1186,8 +1186,9 @@ EXPORT_SYMBOL_GPL(gfn_to_page);
 
 void kvm_release_page_clean(struct page *page)
 {
-       if (!is_error_page(page))
-               kvm_release_pfn_clean(page_to_pfn(page));
+       WARN_ON(is_error_page(page));
+
+       kvm_release_pfn_clean(page_to_pfn(page));
 }
 EXPORT_SYMBOL_GPL(kvm_release_page_clean);