KVM: x86: Sync SPTEs when injecting page/EPT fault into L1
author	Junaid Shahid <junaids@google.com>
Fri, 20 Mar 2020 21:28:03 +0000 (14:28 -0700)
committer	Paolo Bonzini <pbonzini@redhat.com>
Mon, 20 Apr 2020 21:26:05 +0000 (17:26 -0400)
When injecting a page fault or EPT violation/misconfiguration, KVM is
not syncing any shadow PTEs associated with the faulting address,
including those in previous MMUs that are associated with L1's current
EPTP (in a nested EPT scenario), nor is it flushing any hardware TLB
entries.  All of this is handled by kvm_mmu_invalidate_gva(), so call it
from kvm_inject_emulated_page_fault() before delivering the fault.

Page faults that are either !PRESENT or RSVD are exempt from the flushing,
as the CPU is not allowed to cache such translations.
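
In effect, the emulated fault injection path becomes the following (a
condensed restatement of the x86.c hunk below, shown only to make the
flow easier to follow):

    fault_mmu = fault->nested_page_fault ? vcpu->arch.mmu :
                                           vcpu->arch.walk_mmu;

    /* Only PRESENT && !RSVD translations can be cached by the CPU. */
    if ((fault->error_code & PFERR_PRESENT_MASK) &&
        !(fault->error_code & PFERR_RSVD_MASK))
        kvm_mmu_invalidate_gva(vcpu, fault_mmu, fault->address,
                               fault_mmu->root_hpa);

    fault_mmu->inject_page_fault(vcpu, fault);

!PRESENT and RSVD faults skip the invalidation, per the exemption above.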

Signed-off-by: Junaid Shahid <junaids@google.com>
Co-developed-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Message-Id: <20200320212833.3507-8-sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/vmx/nested.c
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/x86.c

diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index 8f3ff07..d0857bf 100644
@@ -4560,7 +4560,7 @@ static int nested_vmx_get_vmptr(struct kvm_vcpu *vcpu, gpa_t *vmpointer)
                return 1;
 
        if (kvm_read_guest_virt(vcpu, gva, vmpointer, sizeof(*vmpointer), &e)) {
-               kvm_inject_page_fault(vcpu, &e);
+               kvm_inject_emulated_page_fault(vcpu, &e);
                return 1;
        }
 
@@ -4869,7 +4869,7 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
                        return 1;
                /* _system ok, nested_vmx_check_permission has verified cpl=0 */
                if (kvm_write_guest_virt_system(vcpu, gva, &value, len, &e)) {
-                       kvm_inject_page_fault(vcpu, &e);
+                       kvm_inject_emulated_page_fault(vcpu, &e);
                        return 1;
                }
        }
@@ -4943,7 +4943,7 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
                                        instr_info, false, len, &gva))
                        return 1;
                if (kvm_read_guest_virt(vcpu, gva, &value, len, &e)) {
-                       kvm_inject_page_fault(vcpu, &e);
+                       kvm_inject_emulated_page_fault(vcpu, &e);
                        return 1;
                }
        }
@@ -5108,7 +5108,7 @@ static int handle_vmptrst(struct kvm_vcpu *vcpu)
        /* *_system ok, nested_vmx_check_permission has verified cpl=0 */
        if (kvm_write_guest_virt_system(vcpu, gva, (void *)&current_vmptr,
                                        sizeof(gpa_t), &e)) {
-               kvm_inject_page_fault(vcpu, &e);
+               kvm_inject_emulated_page_fault(vcpu, &e);
                return 1;
        }
        return nested_vmx_succeed(vcpu);
@@ -5152,7 +5152,7 @@ static int handle_invept(struct kvm_vcpu *vcpu)
                        vmx_instruction_info, false, sizeof(operand), &gva))
                return 1;
        if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
-               kvm_inject_page_fault(vcpu, &e);
+               kvm_inject_emulated_page_fault(vcpu, &e);
                return 1;
        }
 
@@ -5220,7 +5220,7 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
                        vmx_instruction_info, false, sizeof(operand), &gva))
                return 1;
        if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
-               kvm_inject_page_fault(vcpu, &e);
+               kvm_inject_emulated_page_fault(vcpu, &e);
                return 1;
        }
        if (operand.vpid >> 16)
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 0133009..e671bcf 100644
@@ -5393,7 +5393,7 @@ static int handle_invpcid(struct kvm_vcpu *vcpu)
                return 1;
 
        if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
-               kvm_inject_page_fault(vcpu, &e);
+               kvm_inject_emulated_page_fault(vcpu, &e);
                return 1;
        }
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 2ab821f..3984574 100644
@@ -619,8 +619,17 @@ bool kvm_inject_emulated_page_fault(struct kvm_vcpu *vcpu,
 
        fault_mmu = fault->nested_page_fault ? vcpu->arch.mmu :
                                               vcpu->arch.walk_mmu;
-       fault_mmu->inject_page_fault(vcpu, fault);
 
+       /*
+        * Invalidate the TLB entry for the faulting address, if it exists,
+        * else the access will fault indefinitely (and to emulate hardware).
+        */
+       if ((fault->error_code & PFERR_PRESENT_MASK) &&
+           !(fault->error_code & PFERR_RSVD_MASK))
+               kvm_mmu_invalidate_gva(vcpu, fault_mmu, fault->address,
+                                      fault_mmu->root_hpa);
+
+       fault_mmu->inject_page_fault(vcpu, fault);
        return fault->nested_page_fault;
 }
 EXPORT_SYMBOL_GPL(kvm_inject_emulated_page_fault);
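
For reference, every call site converted above follows the same pattern; a
representative example (illustrative only, not an additional hunk):

    if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
        /*
         * The emulated read faulted in the guest: sync SPTEs and flush
         * the TLB for the faulting address, then inject into L1.
         */
        kvm_inject_emulated_page_fault(vcpu, &e);
        return 1;
    }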