KVM: x86: Update __get_sregs() / __set_sregs() to support SEV-ES
author Tom Lendacky <thomas.lendacky@amd.com>
Thu, 10 Dec 2020 17:09:59 +0000 (11:09 -0600)
committer Paolo Bonzini <pbonzini@redhat.com>
Tue, 15 Dec 2020 10:20:54 +0000 (05:20 -0500)
Since many of the registers used by an SEV-ES guest are encrypted and cannot
be read or written, adjust __get_sregs() and __set_sregs() to take into
account whether the VMSA/guest state is encrypted.

For __get_sregs(), return the actual values that are in use by the guest for
the registers that are tracked using the write trap support (CR0, CR4, CR8
and EFER).

For __set_sregs(), skip setting all guest register values.
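
For reference, the gating this patch introduces can be sketched as a small
standalone C program. The struct and field names below (vcpu_sketch,
sregs_sketch, guest_state_protected as a plain bool) are simplified stand-ins
for the real KVM types, not the kernel definitions; only the control flow
mirrors the patched __get_sregs():

    #include <stdbool.h>
    #include <stdio.h>

    /* Simplified stand-ins for the real KVM structures. */
    struct sregs_sketch { unsigned long cr0, cr2, cr3, cr4, cr8, efer; };
    struct vcpu_sketch  { bool guest_state_protected;
                          unsigned long cr0, cr2, cr3, cr4, cr8, efer; };

    static void get_sregs_sketch(const struct vcpu_sketch *vcpu,
                                 struct sregs_sketch *s)
    {
            if (vcpu->guest_state_protected)
                    goto skip_protected_regs;

            /* State that lives in the encrypted VMSA: readable only for
             * unprotected guests. */
            s->cr2 = vcpu->cr2;
            s->cr3 = vcpu->cr3;

    skip_protected_regs:
            /* Values tracked via the write trap support, so they can be
             * reported even when the VMSA is encrypted. */
            s->cr0 = vcpu->cr0;
            s->cr4 = vcpu->cr4;
            s->cr8 = vcpu->cr8;
            s->efer = vcpu->efer;
    }

    int main(void)
    {
            struct vcpu_sketch vcpu = { .guest_state_protected = true,
                                        .cr0 = 0x80050033, .cr3 = 0x1000 };
            struct sregs_sketch s = { 0 };

            get_sregs_sketch(&vcpu, &s);
            /* cr3 stays 0: it was skipped for the protected guest. */
            printf("cr0=%#lx cr3=%#lx\n", s.cr0, s.cr3);
            return 0;
    }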

Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
Message-Id: <23051868db76400a9b07a2020525483a1e62dbcf.1607620209.git.thomas.lendacky@amd.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/x86.c

index c368623..86947e7 100644
@@ -9439,6 +9439,9 @@ static void __get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
 {
        struct desc_ptr dt;
 
+       if (vcpu->arch.guest_state_protected)
+               goto skip_protected_regs;
+
        kvm_get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
        kvm_get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
        kvm_get_segment(vcpu, &sregs->es, VCPU_SREG_ES);
@@ -9456,9 +9459,11 @@ static void __get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
        sregs->gdt.limit = dt.size;
        sregs->gdt.base = dt.address;
 
-       sregs->cr0 = kvm_read_cr0(vcpu);
        sregs->cr2 = vcpu->arch.cr2;
        sregs->cr3 = kvm_read_cr3(vcpu);
+
+skip_protected_regs:
+       sregs->cr0 = kvm_read_cr0(vcpu);
        sregs->cr4 = kvm_read_cr4(vcpu);
        sregs->cr8 = kvm_get_cr8(vcpu);
        sregs->efer = vcpu->arch.efer;
@@ -9595,6 +9600,9 @@ static int __set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
        if (kvm_set_apic_base(vcpu, &apic_base_msr))
                goto out;
 
+       if (vcpu->arch.guest_state_protected)
+               goto skip_protected_regs;
+
        dt.size = sregs->idt.limit;
        dt.address = sregs->idt.base;
        kvm_x86_ops.set_idt(vcpu, &dt);
@@ -9629,14 +9637,6 @@ static int __set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
        if (mmu_reset_needed)
                kvm_mmu_reset_context(vcpu);
 
-       max_bits = KVM_NR_INTERRUPTS;
-       pending_vec = find_first_bit(
-               (const unsigned long *)sregs->interrupt_bitmap, max_bits);
-       if (pending_vec < max_bits) {
-               kvm_queue_interrupt(vcpu, pending_vec, false);
-               pr_debug("Set back pending irq %d\n", pending_vec);
-       }
-
        kvm_set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
        kvm_set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
        kvm_set_segment(vcpu, &sregs->es, VCPU_SREG_ES);
@@ -9655,6 +9655,15 @@ static int __set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
            !is_protmode(vcpu))
                vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
 
+skip_protected_regs:
+       max_bits = KVM_NR_INTERRUPTS;
+       pending_vec = find_first_bit(
+               (const unsigned long *)sregs->interrupt_bitmap, max_bits);
+       if (pending_vec < max_bits) {
+               kvm_queue_interrupt(vcpu, pending_vec, false);
+               pr_debug("Set back pending irq %d\n", pending_vec);
+       }
+
        kvm_make_request(KVM_REQ_EVENT, vcpu);
 
        ret = 0;