KVM: nVMX: Emulate L1 individual-address invvpid by L0 individual-address invvpid
author: Liran Alon <liran.alon@oracle.com>
Tue, 22 May 2018 14:16:15 +0000 (17:16 +0300)
committer: Radim Krčmář <rkrcmar@redhat.com>
Thu, 24 May 2018 17:45:45 +0000 (19:45 +0200)
When vmcs12 uses VPID, all TLB entries populated by L2 are tagged with
vmx->nested.vpid02. Currently, INVVPID executed by L1 is emulated by L0
by using INVVPID single/global-context to flush all TLB entries
tagged with vmx->nested.vpid02 regardless of INVVPID type executed by
L1.

However, we can easily optimize the case of an L1 INVVPID on an
individual address: simply execute INVVPID on the given individual
address, tagged with vmx->nested.vpid02.

Reviewed-by: Liam Merwick <liam.merwick@oracle.com>
Signed-off-by: Liran Alon <liran.alon@oracle.com>
Reviewed-by: Jim Mattson <jmattson@google.com>
[Squashed with a preparatory patch that added the !operand.vpid line.]
Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
arch/x86/kvm/vmx.c

index 98f05e2..e50beb7 100644 (file)
@@ -1572,6 +1572,11 @@ static inline bool cpu_has_vmx_invept_global(void)
        return vmx_capability.ept & VMX_EPT_EXTENT_GLOBAL_BIT;
 }
 
+static inline bool cpu_has_vmx_invvpid_individual_addr(void)
+{
+       return vmx_capability.vpid & VMX_VPID_EXTENT_INDIVIDUAL_ADDR_BIT;
+}
+
 static inline bool cpu_has_vmx_invvpid_single(void)
 {
        return vmx_capability.vpid & VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT;
@@ -8513,12 +8518,19 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
 
        switch (type) {
        case VMX_VPID_EXTENT_INDIVIDUAL_ADDR:
-               if (is_noncanonical_address(operand.gla, vcpu)) {
+               if (!operand.vpid ||
+                   is_noncanonical_address(operand.gla, vcpu)) {
                        nested_vmx_failValid(vcpu,
                                VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
                        return kvm_skip_emulated_instruction(vcpu);
                }
-               /* fall through */
+               if (cpu_has_vmx_invvpid_individual_addr() &&
+                   vmx->nested.vpid02) {
+                       __invvpid(VMX_VPID_EXTENT_INDIVIDUAL_ADDR,
+                               vmx->nested.vpid02, operand.gla);
+               } else
+                       __vmx_flush_tlb(vcpu, vmx->nested.vpid02, true);
+               break;
        case VMX_VPID_EXTENT_SINGLE_CONTEXT:
        case VMX_VPID_EXTENT_SINGLE_NON_GLOBAL:
                if (!operand.vpid) {
@@ -8526,15 +8538,16 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
                                VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
                        return kvm_skip_emulated_instruction(vcpu);
                }
+               __vmx_flush_tlb(vcpu, vmx->nested.vpid02, true);
                break;
        case VMX_VPID_EXTENT_ALL_CONTEXT:
+               __vmx_flush_tlb(vcpu, vmx->nested.vpid02, true);
                break;
        default:
                WARN_ON_ONCE(1);
                return kvm_skip_emulated_instruction(vcpu);
        }
 
-       __vmx_flush_tlb(vcpu, vmx->nested.vpid02, true);
        nested_vmx_succeed(vcpu);
 
        return kvm_skip_emulated_instruction(vcpu);