kvm: nVMX: Add support for fast unprotection of nested guest page tables
author: Paolo Bonzini <pbonzini@redhat.com>
Mon, 28 Nov 2016 13:39:58 +0000 (14:39 +0100)
committer: Paolo Bonzini <pbonzini@redhat.com>
Thu, 10 Aug 2017 14:44:04 +0000 (16:44 +0200)
This is the same as commit 147277540bbc ("kvm: svm: Add support for
additional SVM NPF error codes", 2016-11-23), but for Intel processors.
In this case, the exit qualification field's bit 8 says whether the
EPT violation occurred while translating the guest's final physical
address or rather while translating the guest page tables.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/mmu.c
arch/x86/kvm/vmx.c

index 1679aab..9e4862e 100644 (file)
@@ -204,7 +204,6 @@ enum {
 #define PFERR_GUEST_PAGE_MASK (1ULL << PFERR_GUEST_PAGE_BIT)
 
 #define PFERR_NESTED_GUEST_PAGE (PFERR_GUEST_PAGE_MASK |       \
-                                PFERR_USER_MASK |              \
                                 PFERR_WRITE_MASK |             \
                                 PFERR_PRESENT_MASK)
 
index 454d81d..7ee21c0 100644 (file)
@@ -4836,12 +4836,9 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code,
         * This can occur when using nested virtualization with nested
         * paging in both guests. If true, we simply unprotect the page
         * and resume the guest.
-        *
-        * Note: AMD only (since it supports the PFERR_GUEST_PAGE_MASK used
-        *       in PFERR_NEXT_GUEST_PAGE)
         */
        if (vcpu->arch.mmu.direct_map &&
-               error_code == PFERR_NESTED_GUEST_PAGE) {
+           (error_code & PFERR_NESTED_GUEST_PAGE) == PFERR_NESTED_GUEST_PAGE) {
                kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(cr2));
                return 1;
        }
index c7cf5b1..ed1074e 100644 (file)
@@ -6358,7 +6358,7 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu)
 {
        unsigned long exit_qualification;
        gpa_t gpa;
-       u32 error_code;
+       u64 error_code;
 
        exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
 
@@ -6390,6 +6390,9 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu)
                        EPT_VIOLATION_EXECUTABLE))
                      ? PFERR_PRESENT_MASK : 0;
 
+       error_code |= (exit_qualification & 0x100) != 0 ?
+              PFERR_GUEST_FINAL_MASK : PFERR_GUEST_PAGE_MASK;
+
        vcpu->arch.gpa_available = true;
        vcpu->arch.exit_qualification = exit_qualification;