KVM: SVM: Add support for CR4 write traps for an SEV-ES guest
author: Tom Lendacky <thomas.lendacky@amd.com>
Thu, 10 Dec 2020 17:09:57 +0000 (11:09 -0600)
committer: Paolo Bonzini <pbonzini@redhat.com>
Tue, 15 Dec 2020 10:20:53 +0000 (05:20 -0500)
For SEV-ES guests, the interception of control register write access
is not recommended. Control register interception occurs prior to the
control register being modified and the hypervisor is unable to modify
the control register itself because the register is located in the
encrypted register state.

SEV-ES guests introduce new control register write traps. These traps
provide intercept support of a control register write after the control
register has been modified. The new control register value is provided in
the VMCB EXITINFO1 field, allowing the hypervisor to track the setting
of the guest control registers.

Add support to track the value of the guest CR4 register using the control
register write trap so that the hypervisor understands the guest operating
mode.

Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
Message-Id: <c3880bf2db8693aa26f648528fbc6e967ab46e25.1607620209.git.thomas.lendacky@amd.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/include/asm/kvm_host.h
arch/x86/include/uapi/asm/svm.h
arch/x86/kvm/svm/svm.c
arch/x86/kvm/x86.c

index f04d4c6f28f00f9ff43b5f97ff422f2dc23127a7..8ae099b48f0048cfd4abcbade28d1a655bde4f10 100644 (file)
@@ -1478,6 +1478,7 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index,
                    int reason, bool has_error_code, u32 error_code);
 
 void kvm_post_set_cr0(struct kvm_vcpu *vcpu, unsigned long old_cr0, unsigned long cr0);
+void kvm_post_set_cr4(struct kvm_vcpu *vcpu, unsigned long old_cr4, unsigned long cr4);
 int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
 int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
 int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
index 14b0d97b50e2a43e2e4e6ff1da76e3e5a4a0363e..c4152689ea936fcdd73a29d4fdedd070780e99de 100644 (file)
        { SVM_EXIT_XSETBV,      "xsetbv" }, \
        { SVM_EXIT_EFER_WRITE_TRAP,     "write_efer_trap" }, \
        { SVM_EXIT_CR0_WRITE_TRAP,      "write_cr0_trap" }, \
+       { SVM_EXIT_CR4_WRITE_TRAP,      "write_cr4_trap" }, \
        { SVM_EXIT_INVPCID,     "invpcid" }, \
        { SVM_EXIT_NPF,         "npf" }, \
        { SVM_EXIT_AVIC_INCOMPLETE_IPI,         "avic_incomplete_ipi" }, \
index ddcb7390bb0e74ce9851432d6706e45d82222a7c..4b3d935a1325ebb14a25d1860196a7d2ae736455 100644 (file)
@@ -2466,6 +2466,12 @@ static int cr_trap(struct vcpu_svm *svm)
 
                kvm_post_set_cr0(vcpu, old_value, new_value);
                break;
+       case 4:
+               old_value = kvm_read_cr4(vcpu);
+               svm_set_cr4(vcpu, new_value);
+
+               kvm_post_set_cr4(vcpu, old_value, new_value);
+               break;
        default:
                WARN(1, "unhandled CR%d write trap", cr);
                kvm_queue_exception(vcpu, UD_VECTOR);
@@ -3023,6 +3029,7 @@ static int (*const svm_exit_handlers[])(struct vcpu_svm *svm) = {
        [SVM_EXIT_RDPRU]                        = rdpru_interception,
        [SVM_EXIT_EFER_WRITE_TRAP]              = efer_trap,
        [SVM_EXIT_CR0_WRITE_TRAP]               = cr_trap,
+       [SVM_EXIT_CR4_WRITE_TRAP]               = cr_trap,
        [SVM_EXIT_INVPCID]                      = invpcid_interception,
        [SVM_EXIT_NPF]                          = npf_interception,
        [SVM_EXIT_RSM]                          = rsm_interception,
index efa70e30d23f742f9d8c119a2c17dada9ab1b2d8..c3686233508b831621006588bfd16399b4649b7f 100644 (file)
@@ -983,12 +983,22 @@ bool kvm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 }
 EXPORT_SYMBOL_GPL(kvm_is_valid_cr4);
 
+void kvm_post_set_cr4(struct kvm_vcpu *vcpu, unsigned long old_cr4, unsigned long cr4)
+{
+       unsigned long mmu_role_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE |
+                                     X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE;
+
+       if (((cr4 ^ old_cr4) & mmu_role_bits) ||
+           (!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE)))
+               kvm_mmu_reset_context(vcpu);
+}
+EXPORT_SYMBOL_GPL(kvm_post_set_cr4);
+
 int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 {
        unsigned long old_cr4 = kvm_read_cr4(vcpu);
        unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE |
                                   X86_CR4_SMEP;
-       unsigned long mmu_role_bits = pdptr_bits | X86_CR4_SMAP | X86_CR4_PKE;
 
        if (!kvm_is_valid_cr4(vcpu, cr4))
                return 1;
@@ -1015,9 +1025,7 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 
        kvm_x86_ops.set_cr4(vcpu, cr4);
 
-       if (((cr4 ^ old_cr4) & mmu_role_bits) ||
-           (!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE)))
-               kvm_mmu_reset_context(vcpu);
+       kvm_post_set_cr4(vcpu, old_cr4, cr4);
 
        return 0;
 }