KVM: x86: advertise KVM_CAP_X86_SMM
author Paolo Bonzini <pbonzini@redhat.com>
Wed, 1 Apr 2015 12:25:33 +0000 (14:25 +0200)
committer Paolo Bonzini <pbonzini@redhat.com>
Fri, 5 Jun 2015 15:26:38 +0000 (17:26 +0200)
... and we're done. :)

Because SMBASE is usually relocated above 1M on modern chipsets, and
SMM handlers might indeed rely on 4G segment limits, we only expose it
if KVM is able to run the guest in big real mode.  This includes any
of VMX+emulate_invalid_guest_state, VMX+unrestricted_guest, or SVM.
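
As a quick illustration (not part of the patch, error handling omitted), userspace
can probe the new capability with the existing KVM_CHECK_EXTENSION ioctl before it
wires up any SMM state.  This hypothetical sketch checks on /dev/kvm, though the
same query works on a VM fd:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    int main(void)
    {
            int kvm = open("/dev/kvm", O_RDWR);

            if (kvm < 0)
                    return 1;

            /*
             * Returns 1 only when big real mode is available: always on SVM,
             * and on VMX only with unrestricted_guest or
             * emulate_invalid_guest_state.
             */
            if (ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_X86_SMM) > 0)
                    printf("KVM_CAP_X86_SMM supported\n");
            else
                    printf("KVM_CAP_X86_SMM not supported\n");
            return 0;
    }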

Reviewed-by: Radim Krčmář <rkrcmar@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/svm.c
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 4700668..8ca32cf 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -709,6 +709,7 @@ struct kvm_x86_ops {
        int (*hardware_setup)(void);               /* __init */
        void (*hardware_unsetup)(void);            /* __exit */
        bool (*cpu_has_accelerated_tpr)(void);
+       bool (*cpu_has_high_real_mode_segbase)(void);
        void (*cpuid_update)(struct kvm_vcpu *vcpu);
 
        /* Create, but do not attach this VCPU */
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 6ff1faf..6807531 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -4080,6 +4080,11 @@ static bool svm_cpu_has_accelerated_tpr(void)
        return false;
 }
 
+static bool svm_has_high_real_mode_segbase(void)
+{
+       return true;
+}
+
 static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
 {
        return 0;
@@ -4353,6 +4358,7 @@ static struct kvm_x86_ops svm_x86_ops = {
        .hardware_enable = svm_hardware_enable,
        .hardware_disable = svm_hardware_disable,
        .cpu_has_accelerated_tpr = svm_cpu_has_accelerated_tpr,
+       .cpu_has_high_real_mode_segbase = svm_has_high_real_mode_segbase,
 
        .vcpu_create = svm_create_vcpu,
        .vcpu_free = svm_free_vcpu,
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 862fa8f..06a186b 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -8139,6 +8139,11 @@ static void vmx_handle_external_intr(struct kvm_vcpu *vcpu)
                local_irq_enable();
 }
 
+static bool vmx_has_high_real_mode_segbase(void)
+{
+       return enable_unrestricted_guest || emulate_invalid_guest_state;
+}
+
 static bool vmx_mpx_supported(void)
 {
        return (vmcs_config.vmexit_ctrl & VM_EXIT_CLEAR_BNDCFGS) &&
@@ -10296,6 +10301,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
        .hardware_enable = hardware_enable,
        .hardware_disable = hardware_disable,
        .cpu_has_accelerated_tpr = report_flexpriority,
+       .cpu_has_high_real_mode_segbase = vmx_has_high_real_mode_segbase,
 
        .vcpu_create = vmx_create_vcpu,
        .vcpu_free = vmx_free_vcpu,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 7489871..43f0df7 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2900,6 +2900,17 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 #endif
                r = 1;
                break;
+       case KVM_CAP_X86_SMM:
+               /* SMBASE is usually relocated above 1M on modern chipsets,
+                * and SMM handlers might indeed rely on 4G segment limits,
+                * so do not report SMM to be available if real mode is
+                * emulated via vm86 mode.  Still, do not go to great lengths
+                * to avoid userspace's usage of the feature, because it is a
+                * fringe case that is not enabled except via specific settings
+                * of the module parameters.
+                */
+               r = kvm_x86_ops->cpu_has_high_real_mode_segbase();
+               break;
        case KVM_CAP_COALESCED_MMIO:
                r = KVM_COALESCED_MMIO_PAGE_OFFSET;
                break;
@@ -4299,6 +4310,10 @@ static void kvm_init_msr_list(void)
 
        for (i = j = 0; i < ARRAY_SIZE(emulated_msrs); i++) {
                switch (emulated_msrs[i]) {
+               case MSR_IA32_SMBASE:
+                       if (!kvm_x86_ops->cpu_has_high_real_mode_segbase())
+                               continue;
+                       break;
                default:
                        break;
                }
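
A consequence of the kvm_init_msr_list() hunk above is that MSR_IA32_SMBASE only
appears in the index list reported to userspace when the capability is available.
A minimal, hypothetical sketch of that query via KVM_GET_MSR_INDEX_LIST follows
(the MSR number is defined locally in case the system headers do not provide it,
and error handling is mostly omitted):

    #include <fcntl.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    #define MSR_IA32_SMBASE 0x0000009e  /* may not be in userspace headers */

    int main(void)
    {
            int kvm = open("/dev/kvm", O_RDWR);
            struct kvm_msr_list probe = { .nmsrs = 0 };
            struct kvm_msr_list *list;
            unsigned int i;

            if (kvm < 0)
                    return 1;

            /* The first call fails with E2BIG but writes back the count. */
            ioctl(kvm, KVM_GET_MSR_INDEX_LIST, &probe);

            list = calloc(1, sizeof(*list) + probe.nmsrs * sizeof(__u32));
            if (!list)
                    return 1;
            list->nmsrs = probe.nmsrs;
            if (ioctl(kvm, KVM_GET_MSR_INDEX_LIST, list) < 0)
                    return 1;

            for (i = 0; i < list->nmsrs; i++)
                    if (list->indices[i] == MSR_IA32_SMBASE)
                            printf("SMBASE is in the MSR index list\n");

            free(list);
            return 0;
    }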