KVM: x86: stubs for SMM support
author     Paolo Bonzini <pbonzini@redhat.com>
           Thu, 7 May 2015 09:36:11 +0000 (11:36 +0200)
committer  Paolo Bonzini <pbonzini@redhat.com>
           Thu, 4 Jun 2015 14:01:45 +0000 (16:01 +0200)
This patch adds the interface between x86.c and the emulator: the
SMBASE register, the new SMM emulator flags, and a stub for the RSM
instruction.  It also adds a new request bit, KVM_REQ_SMI, that will be
used by the KVM_SMI ioctl.

Reviewed-by: Radim Krčmář <rkrcmar@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
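
The KVM_SMI ioctl itself arrives later in this series; as a rough sketch of
the intended userspace side (vcpu_fd is assumed to come from KVM_CREATE_VCPU,
nothing below is part of this patch), injecting an SMI is an argument-less
vCPU ioctl that ends up setting the new request bit:

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Sketch only: queue an SMI on a vCPU once KVM_SMI is available.
 * The ioctl takes no argument; the handler just raises KVM_REQ_SMI. */
static int inject_smi(int vcpu_fd)
{
	return ioctl(vcpu_fd, KVM_SMI);
}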
arch/x86/include/asm/kvm_emulate.h
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/emulate.c
arch/x86/kvm/lapic.c
arch/x86/kvm/svm.c
arch/x86/kvm/x86.c
include/linux/kvm_host.h

diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index 7410879..e16466e 100644
@@ -193,6 +193,8 @@ struct x86_emulate_ops {
        int (*cpl)(struct x86_emulate_ctxt *ctxt);
        int (*get_dr)(struct x86_emulate_ctxt *ctxt, int dr, ulong *dest);
        int (*set_dr)(struct x86_emulate_ctxt *ctxt, int dr, ulong value);
+       u64 (*get_smbase)(struct x86_emulate_ctxt *ctxt);
+       void (*set_smbase)(struct x86_emulate_ctxt *ctxt, u64 smbase);
        int (*set_msr)(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 data);
        int (*get_msr)(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 *pdata);
        int (*check_pmc)(struct x86_emulate_ctxt *ctxt, u32 pmc);
@@ -264,6 +266,8 @@ enum x86emul_mode {
 
 /* These match some of the HF_* flags defined in kvm_host.h  */
 #define X86EMUL_GUEST_MASK           (1 << 5) /* VCPU is in guest-mode */
+#define X86EMUL_SMM_MASK             (1 << 6)
+#define X86EMUL_SMM_INSIDE_NMI_MASK  (1 << 7)
 
 struct x86_emulate_ctxt {
        const struct x86_emulate_ops *ops;
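
The two new callbacks sit next to get_dr/set_dr and are reached the same way,
through ctxt->ops.  A hypothetical emulator-side caller (the helper below is
invented for illustration; only ->get_smbase and ->set_smbase come from this
patch) would look like:

/* Illustration only: read SMBASE through the new hook and write it back
 * if a (hypothetical) caller wants to change it. */
static void smbase_update_example(struct x86_emulate_ctxt *ctxt, u64 new_smbase)
{
	u64 old_smbase = ctxt->ops->get_smbase(ctxt);

	if (old_smbase != new_smbase)
		ctxt->ops->set_smbase(ctxt, new_smbase);
}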
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index d52d7ae..12a7318 100644
@@ -368,6 +368,7 @@ struct kvm_vcpu_arch {
        int32_t apic_arb_prio;
        int mp_state;
        u64 ia32_misc_enable_msr;
+       u64 smbase;
        bool tpr_access_reporting;
        u64 ia32_xss;
 
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index a1c6c25..e763a9b 100644
@@ -2259,6 +2259,14 @@ static int em_lseg(struct x86_emulate_ctxt *ctxt)
        return rc;
 }
 
+static int em_rsm(struct x86_emulate_ctxt *ctxt)
+{
+       if ((ctxt->emul_flags & X86EMUL_SMM_MASK) == 0)
+               return emulate_ud(ctxt);
+
+       return X86EMUL_UNHANDLEABLE;
+}
+
 static void
 setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
                        struct desc_struct *cs, struct desc_struct *ss)
@@ -4197,7 +4205,7 @@ static const struct opcode twobyte_table[256] = {
        F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N,
        /* 0xA8 - 0xAF */
        I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
-       DI(ImplicitOps, rsm),
+       II(No64 | EmulateOnUD | ImplicitOps, em_rsm, rsm),
        F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
        F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
        F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index c789e00..b8e47e2 100644
@@ -808,7 +808,9 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
                break;
 
        case APIC_DM_SMI:
-               apic_debug("Ignoring guest SMI\n");
+               result = 1;
+               kvm_make_request(KVM_REQ_SMI, vcpu);
+               kvm_vcpu_kick(vcpu);
                break;
 
        case APIC_DM_NMI:
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index a08df41..c48748c 100644
@@ -3394,6 +3394,7 @@ static int (*const svm_exit_handlers[])(struct vcpu_svm *svm) = {
        [SVM_EXIT_MWAIT]                        = mwait_interception,
        [SVM_EXIT_XSETBV]                       = xsetbv_interception,
        [SVM_EXIT_NPF]                          = pf_interception,
+       [SVM_EXIT_RSM]                          = emulate_on_interception,
 };
 
 static void dump_vmcb(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index aa46ac1..ab977e7 100644
@@ -954,6 +954,7 @@ static u32 emulated_msrs[] = {
        MSR_IA32_MISC_ENABLE,
        MSR_IA32_MCG_STATUS,
        MSR_IA32_MCG_CTL,
+       MSR_IA32_SMBASE,
 };
 
 static unsigned num_emulated_msrs;
@@ -2282,6 +2283,11 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
        case MSR_IA32_MISC_ENABLE:
                vcpu->arch.ia32_misc_enable_msr = data;
                break;
+       case MSR_IA32_SMBASE:
+               if (!msr_info->host_initiated)
+                       return 1;
+               vcpu->arch.smbase = data;
+               break;
        case MSR_KVM_WALL_CLOCK_NEW:
        case MSR_KVM_WALL_CLOCK:
                vcpu->kvm->arch.wall_clock = data;
@@ -2679,6 +2685,11 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
        case MSR_IA32_MISC_ENABLE:
                msr_info->data = vcpu->arch.ia32_misc_enable_msr;
                break;
+       case MSR_IA32_SMBASE:
+               if (!msr_info->host_initiated)
+                       return 1;
+               msr_info->data = vcpu->arch.smbase;
+               break;
        case MSR_IA32_PERF_STATUS:
                /* TSC increment by tick */
                msr_info->data = 1000ULL;
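
Since both accessors reject non-host-initiated accesses, SMBASE is not visible
to the guest via RDMSR/WRMSR; it can only be read or written through
KVM_GET_MSRS/KVM_SET_MSRS, which is what a migration-aware userspace would
use.  A rough sketch of such a read follows (vcpu_fd and the MSR number
definition are assumptions on the editor's part, not part of this patch):

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

#ifndef MSR_IA32_SMBASE
#define MSR_IA32_SMBASE 0x0000009e	/* assumed; see arch/x86/include/asm/msr-index.h */
#endif

/* Sketch only: host-initiated read of the new SMBASE MSR. */
static void dump_smbase(int vcpu_fd)
{
	struct {
		struct kvm_msrs hdr;
		struct kvm_msr_entry entry;
	} msrs = {
		.hdr.nmsrs   = 1,
		.entry.index = MSR_IA32_SMBASE,
	};

	if (ioctl(vcpu_fd, KVM_GET_MSRS, &msrs) == 1)
		printf("SMBASE = 0x%llx\n", (unsigned long long)msrs.entry.data);
}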
@@ -3103,6 +3114,8 @@ static int kvm_vcpu_ioctl_nmi(struct kvm_vcpu *vcpu)
 
 static int kvm_vcpu_ioctl_smi(struct kvm_vcpu *vcpu)
 {
+       kvm_make_request(KVM_REQ_SMI, vcpu);
+
        return 0;
 }
 
@@ -5129,6 +5142,20 @@ static int emulator_set_msr(struct x86_emulate_ctxt *ctxt,
        return kvm_set_msr(emul_to_vcpu(ctxt), &msr);
 }
 
+static u64 emulator_get_smbase(struct x86_emulate_ctxt *ctxt)
+{
+       struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
+
+       return vcpu->arch.smbase;
+}
+
+static void emulator_set_smbase(struct x86_emulate_ctxt *ctxt, u64 smbase)
+{
+       struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
+
+       vcpu->arch.smbase = smbase;
+}
+
 static int emulator_check_pmc(struct x86_emulate_ctxt *ctxt,
                              u32 pmc)
 {
@@ -5214,6 +5241,8 @@ static const struct x86_emulate_ops emulate_ops = {
        .cpl                 = emulator_get_cpl,
        .get_dr              = emulator_get_dr,
        .set_dr              = emulator_set_dr,
+       .get_smbase          = emulator_get_smbase,
+       .set_smbase          = emulator_set_smbase,
        .set_msr             = emulator_set_msr,
        .get_msr             = emulator_get_msr,
        .check_pmc           = emulator_check_pmc,
@@ -5276,6 +5305,8 @@ static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
                     cs_db                              ? X86EMUL_MODE_PROT32 :
                                                          X86EMUL_MODE_PROT16;
        BUILD_BUG_ON(HF_GUEST_MASK != X86EMUL_GUEST_MASK);
+       BUILD_BUG_ON(HF_SMM_MASK != X86EMUL_SMM_MASK);
+       BUILD_BUG_ON(HF_SMM_INSIDE_NMI_MASK != X86EMUL_SMM_INSIDE_NMI_MASK);
        ctxt->emul_flags = vcpu->arch.hflags;
 
        init_decode_cache(ctxt);
@@ -5445,9 +5476,24 @@ static bool retry_instruction(struct x86_emulate_ctxt *ctxt,
 static int complete_emulated_mmio(struct kvm_vcpu *vcpu);
 static int complete_emulated_pio(struct kvm_vcpu *vcpu);
 
-void kvm_set_hflags(struct kvm_vcpu *vcpu, unsigned emul_flags)
+static void kvm_smm_changed(struct kvm_vcpu *vcpu)
 {
+       if (!(vcpu->arch.hflags & HF_SMM_MASK)) {
+               if (unlikely(vcpu->arch.smi_pending)) {
+                       kvm_make_request(KVM_REQ_SMI, vcpu);
+                       vcpu->arch.smi_pending = 0;
+               }
+       }
+}
+
+static void kvm_set_hflags(struct kvm_vcpu *vcpu, unsigned emul_flags)
+{
+       unsigned changed = vcpu->arch.hflags ^ emul_flags;
+
        vcpu->arch.hflags = emul_flags;
+
+       if (changed & HF_SMM_MASK)
+               kvm_smm_changed(vcpu);
 }
 
 static int kvm_vcpu_check_hw_bp(unsigned long addr, u32 type, u32 dr7,
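
kvm_set_hflags() can become static because its only caller is the emul_flags
write-back after emulation, added earlier in this series; any emulator-side
change to the SMM bits therefore funnels through kvm_smm_changed(), which
re-posts an SMI that was latched while the vCPU was still in SMM.  For
context only (that hunk is not part of this patch), the write-back in
x86_emulate_instruction() is along these lines:

	/* Existing write-back path, shown here only for context: */
	if (vcpu->arch.hflags != ctxt->emul_flags)
		kvm_set_hflags(vcpu, ctxt->emul_flags);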
@@ -6341,6 +6387,16 @@ static void process_nmi(struct kvm_vcpu *vcpu)
        kvm_make_request(KVM_REQ_EVENT, vcpu);
 }
 
+static void process_smi(struct kvm_vcpu *vcpu)
+{
+       if (is_smm(vcpu)) {
+               vcpu->arch.smi_pending = true;
+               return;
+       }
+
+       printk_once(KERN_DEBUG "Ignoring guest SMI\n");
+}
+
 static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
 {
        u64 eoi_exit_bitmap[4];
@@ -6449,6 +6505,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
                }
                if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu))
                        record_steal_time(vcpu);
+               if (kvm_check_request(KVM_REQ_SMI, vcpu))
+                       process_smi(vcpu);
                if (kvm_check_request(KVM_REQ_NMI, vcpu))
                        process_nmi(vcpu);
                if (kvm_check_request(KVM_REQ_PMU, vcpu))
@@ -7363,8 +7421,10 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
        kvm_async_pf_hash_reset(vcpu);
        vcpu->arch.apf.halted = false;
 
-       if (!init_event)
+       if (!init_event) {
                kvm_pmu_reset(vcpu);
+               vcpu->arch.smbase = 0x30000;
+       }
 
        memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs));
        vcpu->arch.regs_avail = ~0;
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index a8bcbc9..b019fee 100644
@@ -134,6 +134,7 @@ static inline bool is_error_page(struct page *page)
 #define KVM_REQ_ENABLE_IBS        23
 #define KVM_REQ_DISABLE_IBS       24
 #define KVM_REQ_APIC_PAGE_RELOAD  25
+#define KVM_REQ_SMI               26
 
 #define KVM_USERSPACE_IRQ_SOURCE_ID            0
 #define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID       1