From: Avi Kivity
Date: Sat, 6 Jan 2007 00:36:39 +0000 (-0800)
Subject: [PATCH] KVM: MMU: Load the pae pdptrs on cr3 change like the processor does
X-Git-Tag: v2.6.20-rc4~54
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=1342d3536d6a12541ceb276da15f043db90716eb;p=platform%2Fkernel%2Flinux-exynos.git

[PATCH] KVM: MMU: Load the pae pdptrs on cr3 change like the processor does

In pae mode, a load of cr3 loads the four third-level page table entries
in addition to cr3 itself.

Signed-off-by: Avi Kivity
Acked-by: Ingo Molnar
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---

diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
index b65511e..8323f40 100644
--- a/drivers/kvm/kvm.h
+++ b/drivers/kvm/kvm.h
@@ -185,6 +185,7 @@ struct kvm_vcpu {
 	unsigned long cr3;
 	unsigned long cr4;
 	unsigned long cr8;
+	u64 pdptrs[4]; /* pae */
 	u64 shadow_efer;
 	u64 apic_base;
 	int nmsrs;
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index f2a6b6f..4512d8c 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -298,14 +298,17 @@ static void inject_gp(struct kvm_vcpu *vcpu)
 	kvm_arch_ops->inject_gp(vcpu, 0);
 }
 
-static int pdptrs_have_reserved_bits_set(struct kvm_vcpu *vcpu,
-					 unsigned long cr3)
+/*
+ * Load the pae pdptrs.  Return true if they are all valid.
+ */
+static int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
 {
 	gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
-	unsigned offset = (cr3 & (PAGE_SIZE-1)) >> 5;
+	unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
 	int i;
 	u64 pdpte;
 	u64 *pdpt;
+	int ret;
 	struct kvm_memory_slot *memslot;
 
 	spin_lock(&vcpu->kvm->lock);
@@ -313,16 +316,23 @@ static int pdptrs_have_reserved_bits_set(struct kvm_vcpu *vcpu,
 	/* FIXME: !memslot - emulate? 0xff? */
 	pdpt = kmap_atomic(gfn_to_page(memslot, pdpt_gfn), KM_USER0);
 
+	ret = 1;
 	for (i = 0; i < 4; ++i) {
 		pdpte = pdpt[offset + i];
-		if ((pdpte & 1) && (pdpte & 0xfffffff0000001e6ull))
-			break;
+		if ((pdpte & 1) && (pdpte & 0xfffffff0000001e6ull)) {
+			ret = 0;
+			goto out;
+		}
 	}
 
+	for (i = 0; i < 4; ++i)
+		vcpu->pdptrs[i] = pdpt[offset + i];
+
+out:
 	kunmap_atomic(pdpt, KM_USER0);
 	spin_unlock(&vcpu->kvm->lock);
 
-	return i != 4;
+	return ret;
 }
 
 void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
@@ -368,8 +378,7 @@ void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 		}
 	} else
 #endif
-	if (is_pae(vcpu) &&
-	    pdptrs_have_reserved_bits_set(vcpu, vcpu->cr3)) {
+	if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->cr3)) {
 		printk(KERN_DEBUG "set_cr0: #GP, pdptrs "
 		       "reserved bits\n");
 		inject_gp(vcpu);
@@ -411,7 +420,7 @@ void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 			return;
 		}
 	} else if (is_paging(vcpu) && !is_pae(vcpu) && (cr4 & CR4_PAE_MASK)
-		   && pdptrs_have_reserved_bits_set(vcpu, vcpu->cr3)) {
+		   && !load_pdptrs(vcpu, vcpu->cr3)) {
 		printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n");
 		inject_gp(vcpu);
 	}
@@ -443,7 +452,7 @@ void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 			return;
 		}
 		if (is_paging(vcpu) && is_pae(vcpu) &&
-		    pdptrs_have_reserved_bits_set(vcpu, cr3)) {
+		    !load_pdptrs(vcpu, cr3)) {
 			printk(KERN_DEBUG "set_cr3: #GP, pdptrs "
 			       "reserved bits\n");
 			inject_gp(vcpu);
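
To see the check the patch performs in isolation: in pae mode the pdpt is a 32-byte-aligned block of four 64-bit entries inside the page that cr3 points at, and an entry is rejected when it is present (bit 0 set) but has any bit from the reserved mask 0xfffffff0000001e6ull set. The stand-alone C sketch below mirrors that check-then-cache logic under simplified assumptions; the function name load_pdptrs_sketch, the PDPTE_* macros, and the caller-supplied mapping of the guest page are illustrative and are not part of the KVM code above.

#include <stdint.h>
#include <stdbool.h>

#define PAGE_SIZE		4096u
#define PDPTE_PRESENT		0x1ull
/* Reserved-bit mask the patch applies to a present pae pdpte. */
#define PDPTE_RESERVED_MASK	0xfffffff0000001e6ull

/*
 * Sketch of what load_pdptrs() does: given a (page-aligned) mapping of
 * the guest page that cr3 points into, validate the four pdptes and,
 * only if all are valid, copy them into the caller's cache (the patch
 * caches them in vcpu->pdptrs).  Returns true when all four pass.
 */
static bool load_pdptrs_sketch(const uint8_t *guest_page, unsigned long cr3,
			       uint64_t pdptrs[4])
{
	/* cr3 bits 11:5 select a 32-byte-aligned pdpt within the page;
	 * this is the byte-offset form of the patch's
	 * ((cr3 & (PAGE_SIZE-1)) >> 5) << 2 index arithmetic. */
	const uint64_t *pdpt = (const uint64_t *)
		(guest_page + ((cr3 & (PAGE_SIZE - 1)) & ~0x1ful));
	int i;

	for (i = 0; i < 4; ++i) {
		uint64_t pdpte = pdpt[i];

		if ((pdpte & PDPTE_PRESENT) && (pdpte & PDPTE_RESERVED_MASK))
			return false;	/* present entry with reserved bits */
	}

	/* Commit the cached copy only after all four entries are known
	 * good, mirroring the goto-out structure in the patch. */
	for (i = 0; i < 4; ++i)
		pdptrs[i] = pdpt[i];
	return true;
}

On the processor, the four pdptes are loaded into internal registers only when cr3 is written or when paging/pae is enabled through cr0/cr4, not on every page walk; that is why set_cr0, set_cr4 and set_cr3 above all call load_pdptrs at those points and keep the result in vcpu->pdptrs instead of rereading guest memory later.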