KVM: SVM: Check that the current CPU supports SVM in kvm_is_svm_supported()
[platform/kernel/linux-starfive.git] arch/x86/kvm/svm/svm.c
index 54089f9..9e44916 100644
@@ -39,6 +39,7 @@
 #include <asm/spec-ctrl.h>
 #include <asm/cpu_device_id.h>
 #include <asm/traps.h>
+#include <asm/reboot.h>
 #include <asm/fpu/api.h>
 
 #include <asm/virtext.h>
@@ -244,15 +245,6 @@ static u8 rsm_ins_bytes[] = "\x0f\xaa";
 
 static unsigned long iopm_base;
 
-struct kvm_ldttss_desc {
-       u16 limit0;
-       u16 base0;
-       unsigned base1:8, type:5, dpl:2, p:1;
-       unsigned limit1:4, zero0:3, g:1, base2:8;
-       u32 base3;
-       u32 zero1;
-} __attribute__((packed));
-
 DEFINE_PER_CPU(struct svm_cpu_data, svm_data);
 
 /*
@@ -526,14 +518,21 @@ static void svm_init_osvw(struct kvm_vcpu *vcpu)
                vcpu->arch.osvw.status |= 1;
 }
 
-static bool kvm_is_svm_supported(void)
+static bool __kvm_is_svm_supported(void)
 {
-       int cpu = raw_smp_processor_id();
-       const char *msg;
+       int cpu = smp_processor_id();
+       struct cpuinfo_x86 *c = &cpu_data(cpu);
+
        u64 vm_cr;
 
-       if (!cpu_has_svm(&msg)) {
-               pr_err("SVM not supported by CPU %d, %s\n", cpu, msg);
+       if (c->x86_vendor != X86_VENDOR_AMD &&
+           c->x86_vendor != X86_VENDOR_HYGON) {
+               pr_err("CPU %d isn't AMD or Hygon\n", cpu);
+               return false;
+       }
+
+       if (!cpu_has(c, X86_FEATURE_SVM)) {
+               pr_err("SVM not supported by CPU %d\n", cpu);
                return false;
        }
 
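The subject line's point is visible in this hunk: the replaced cpu_has_svm()
helper keyed off boot_cpu_data, so it always answered for the boot CPU no
matter which CPU it ran on. For contrast, a rough sketch of the old helper
from <asm/virtext.h> (from memory, details may differ; not part of this
patch):

    static inline int cpu_has_svm(const char **msg)
    {
            if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
                boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) {
                    if (msg)
                            *msg = "not amd or hygon";
                    return 0;
            }
            if (!boot_cpu_has(X86_FEATURE_SVM)) {
                    if (msg)
                            *msg = "svm not available";
                    return 0;
            }
            return 1;
    }

Reading cpu_data(smp_processor_id()) instead makes the answer genuinely
per-CPU, which is what svm_check_processor_compat() below depends on.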
@@ -551,9 +550,20 @@ static bool kvm_is_svm_supported(void)
        return true;
 }
 
+static bool kvm_is_svm_supported(void)
+{
+       bool supported;
+
+       migrate_disable();
+       supported = __kvm_is_svm_supported();
+       migrate_enable();
+
+       return supported;
+}
+
 static int svm_check_processor_compat(void)
 {
-       if (!kvm_is_svm_supported())
+       if (!__kvm_is_svm_supported())
                return -EIO;
 
        return 0;
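Why two variants: __kvm_is_svm_supported() reads per-CPU state through
smp_processor_id() and cpu_data(), which is only valid while the caller
cannot migrate. A sketch of the resulting calling contexts (the common-code
plumbing is assumed, not shown in this diff):

    /*
     * svm_check_processor_compat() - invoked by common KVM code on each
     *     online CPU with preemption already disabled, so it can call
     *     __kvm_is_svm_supported() directly.
     * kvm_is_svm_supported() - module-init path, ordinary task context;
     *     migrate_disable() pins the task so the per-CPU reads stay on
     *     one CPU (and keeps CONFIG_DEBUG_PREEMPT checks quiet).
     */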
@@ -572,6 +582,11 @@ out:
        preempt_enable();
 }
 
+static void svm_emergency_disable(void)
+{
+       cpu_svm_disable();
+}
+
 static void svm_hardware_disable(void)
 {
        /* Make sure we clean up behind us */
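svm_emergency_disable() gives the emergency reboot/crash path a hook that
forcibly leaves SVM mode. cpu_svm_disable() comes from <asm/virtext.h>;
roughly (a sketch from memory, details may differ):

    static inline void cpu_svm_disable(void)
    {
            uint64_t efer;

            wrmsrl(MSR_VM_HSAVE_PA, 0);
            rdmsrl(MSR_EFER, efer);
            if (efer & EFER_SVME) {
                    /* GIF must be set before EFER.SVME can be cleared */
                    stgi();
                    wrmsrl(MSR_EFER, efer & ~EFER_SVME);
            }
    }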
@@ -588,7 +603,6 @@ static int svm_hardware_enable(void)
 
        struct svm_cpu_data *sd;
        uint64_t efer;
-       struct desc_struct *gdt;
        int me = raw_smp_processor_id();
 
        rdmsrl(MSR_EFER, efer);
@@ -601,9 +615,6 @@ static int svm_hardware_enable(void)
        sd->next_asid = sd->max_asid + 1;
        sd->min_asid = max_sev_asid + 1;
 
-       gdt = get_current_gdt_rw();
-       sd->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);
-
        wrmsrl(MSR_EFER, efer | EFER_SVME);
 
        wrmsrl(MSR_VM_HSAVE_PA, sd->save_area_pa);
@@ -752,7 +763,7 @@ static bool msr_write_intercepted(struct kvm_vcpu *vcpu, u32 msr)
 
        BUG_ON(offset == MSR_INVALID);
 
-       return !!test_bit(bit_write,  &tmp);
+       return test_bit(bit_write, &tmp);
 }
 
 static void set_msr_interception_bitmap(struct kvm_vcpu *vcpu, u32 *msrpm,
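The dropped !! was redundant rather than wrong: msr_write_intercepted() is
declared bool (see the hunk header above), and C's conversion to _Bool
already normalizes any nonzero value. A one-line standalone illustration:

    _Bool to_bool(int x) { return x; }   /* to_bool(2) == 1 */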
@@ -2939,9 +2950,10 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
 
                break;
        case MSR_IA32_CR_PAT:
-               if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data))
-                       return 1;
-               vcpu->arch.pat = data;
+               ret = kvm_set_msr_common(vcpu, msr);
+               if (ret)
+                       break;
+
                svm->vmcb01.ptr->save.g_pat = data;
                if (is_guest_mode(vcpu))
                        nested_vmcb02_compute_g_pat(svm);
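The PAT write now funnels through kvm_set_msr_common(), which in this kernel
takes over validating the value and updating vcpu->arch.pat; the vendor code
only mirrors the accepted value into the VMCB. The resulting flow, as a
sketch (common-code internals are assumed, not shown here):

    /*
     * svm_set_msr(MSR_IA32_CR_PAT, data)
     *   -> kvm_set_msr_common(): validate data, set vcpu->arch.pat
     *   -> on success: svm->vmcb01.ptr->save.g_pat = data
     *   -> if in guest mode: recompute vmcb02's g_pat for L2
     */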
@@ -3418,8 +3430,6 @@ static int svm_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
        struct kvm_run *kvm_run = vcpu->run;
        u32 exit_code = svm->vmcb->control.exit_code;
 
-       trace_kvm_exit(vcpu, KVM_ISA_SVM);
-
        /* SEV-ES guests must use the CR write traps to track CR registers. */
        if (!sev_es_guest(vcpu->kvm)) {
                if (!svm_is_intercept(svm, INTERCEPT_CR0_WRITE))
@@ -3457,14 +3467,6 @@ static int svm_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
        return svm_invoke_exit_handler(vcpu, exit_code);
 }
 
-static void reload_tss(struct kvm_vcpu *vcpu)
-{
-       struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, vcpu->cpu);
-
-       sd->tss_desc->type = 9; /* available 32/64-bit TSS */
-       load_TR_desc();
-}
-
 static void pre_svm_run(struct kvm_vcpu *vcpu)
 {
        struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, vcpu->cpu);
@@ -4099,9 +4101,6 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
 
        svm_vcpu_enter_exit(vcpu, spec_ctrl_intercepted);
 
-       if (!sev_es_guest(vcpu->kvm))
-               reload_tss(vcpu);
-
        if (!static_cpu_has(X86_FEATURE_V_SPEC_CTRL))
                x86_spec_ctrl_restore_host(svm->virt_spec_ctrl);
 
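This hunk and the ones before it retire the post-#VMEXIT TSS reload along
with its bookkeeping (struct kvm_ldttss_desc, sd->tss_desc). Background for
why that code existed, summarized (not part of the patch):

    /*
     * LTR faults on a busy TSS descriptor: a loaded 32/64-bit TSS is
     * type 11 (busy), and LTR requires type 9 (available). Hence the
     * old reload_tss() rewrote the type to 9 before load_TR_desc().
     * With the reload gone, nothing in svm.c touches the GDT anymore,
     * which is what lets the earlier hunks drop the descriptor lookup
     * in svm_hardware_enable().
     */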
@@ -4156,6 +4155,8 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
                     SVM_EXIT_EXCP_BASE + MC_VECTOR))
                svm_handle_mce(vcpu);
 
+       trace_kvm_exit(vcpu, KVM_ISA_SVM);
+
        svm_complete_interrupts(vcpu);
 
        if (is_guest_mode(vcpu))
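Relocating trace_kvm_exit() from svm_handle_exit() to svm_vcpu_run() means
the tracepoint now fires for every exit, including fastpath exits that
re-enter the guest without ever reaching svm_handle_exit(). Approximate
post-exit ordering after this change (sketch):

    /*
     * svm_vcpu_run():
     *   svm_vcpu_enter_exit()             <- VMRUN / #VMEXIT
     *   ... restore host state, handle a machine-check exit ...
     *   trace_kvm_exit(vcpu, KVM_ISA_SVM) <- covers fastpath exits too
     *   svm_complete_interrupts()
     */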
@@ -5025,9 +5026,22 @@ static __init void svm_set_cpu_caps(void)
            boot_cpu_has(X86_FEATURE_AMD_SSBD))
                kvm_cpu_cap_set(X86_FEATURE_VIRT_SSBD);
 
-       /* AMD PMU PERFCTR_CORE CPUID */
-       if (enable_pmu && boot_cpu_has(X86_FEATURE_PERFCTR_CORE))
-               kvm_cpu_cap_set(X86_FEATURE_PERFCTR_CORE);
+       if (enable_pmu) {
+               /*
+                * Enumerate support for PERFCTR_CORE if and only if KVM has
+                * access to enough counters to virtualize "core" support,
+                * otherwise limit vPMU support to the legacy number of counters.
+                */
+               if (kvm_pmu_cap.num_counters_gp < AMD64_NUM_COUNTERS_CORE)
+                       kvm_pmu_cap.num_counters_gp = min(AMD64_NUM_COUNTERS,
+                                                         kvm_pmu_cap.num_counters_gp);
+               else
+                       kvm_cpu_cap_check_and_set(X86_FEATURE_PERFCTR_CORE);
+
+               if (kvm_pmu_cap.version != 2 ||
+                   !kvm_cpu_cap_has(X86_FEATURE_PERFCTR_CORE))
+                       kvm_cpu_cap_clear(X86_FEATURE_PERFMON_V2);
+       }
 
        /* CPUID 0x8000001F (SME/SEV features) */
        sev_set_cpu_caps();
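To make the clamping concrete: in <asm/perf_event.h>, AMD64_NUM_COUNTERS is
4 (the legacy counters) and AMD64_NUM_COUNTERS_CORE is 6. A standalone model
of the decision above (illustrative userspace C, not kernel code):

    #include <stdio.h>

    #define AMD64_NUM_COUNTERS      4   /* legacy */
    #define AMD64_NUM_COUNTERS_CORE 6   /* PERFCTR_CORE */

    /* Returns the GP counter count to advertise; *core_ok says whether
     * PERFCTR_CORE may be enumerated (still gated on the host CPU). */
    static int vpmu_gp_counters(int host_gp, int *core_ok)
    {
            if (host_gp < AMD64_NUM_COUNTERS_CORE) {
                    *core_ok = 0;
                    return host_gp < AMD64_NUM_COUNTERS ?
                           host_gp : AMD64_NUM_COUNTERS;
            }
            *core_ok = 1;
            return host_gp;
    }

    int main(void)
    {
            int n, core;

            n = vpmu_gp_counters(5, &core);   /* -> 4, core == 0 */
            printf("5 -> %d (core=%d)\n", n, core);

            n = vpmu_gp_counters(6, &core);   /* -> 6, core == 1 */
            printf("6 -> %d (core=%d)\n", n, core);
            return 0;
    }

PERFMON_V2 is then kept only when the PMU version is exactly 2 and
PERFCTR_CORE survived the check above.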
@@ -5219,6 +5233,13 @@ static struct kvm_x86_init_ops svm_init_ops __initdata = {
        .pmu_ops = &amd_pmu_ops,
 };
 
+static void __svm_exit(void)
+{
+       kvm_x86_vendor_exit();
+
+       cpu_emergency_unregister_virt_callback(svm_emergency_disable);
+}
+
 static int __init svm_init(void)
 {
        int r;
@@ -5232,6 +5253,8 @@ static int __init svm_init(void)
        if (r)
                return r;
 
+       cpu_emergency_register_virt_callback(svm_emergency_disable);
+
        /*
         * Common KVM initialization _must_ come last, after this, /dev/kvm is
         * exposed to userspace!
@@ -5244,14 +5267,14 @@ static int __init svm_init(void)
        return 0;
 
 err_kvm_init:
-       kvm_x86_vendor_exit();
+       __svm_exit();
        return r;
 }
 
 static void __exit svm_exit(void)
 {
        kvm_exit();
-       kvm_x86_vendor_exit();
+       __svm_exit();
 }
 
 module_init(svm_init)
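Net effect of the module-lifecycle hunks, as a summary sketch (ordering read
off this diff):

    /*
     * svm_init():
     *   kvm_x86_vendor_init()
     *   cpu_emergency_register_virt_callback(svm_emergency_disable)
     *   kvm_init()      <- only now is /dev/kvm exposed to userspace
     *   on kvm_init() failure -> __svm_exit() unwinds both steps
     *
     * svm_exit() and the error path share __svm_exit():
     *   kvm_x86_vendor_exit()
     *   cpu_emergency_unregister_virt_callback(svm_emergency_disable)
     *     (the emergency callback stays registered until the very end,
     *      so a crash during teardown can presumably still disable SVM)
     */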