From 8f014550dfb114cc7f42a517d20d2cf887a0b771 Mon Sep 17 00:00:00 2001
From: Vitaly Kuznetsov <vkuznets@redhat.com>
Date: Tue, 26 Jan 2021 14:48:14 +0100
Subject: [PATCH] KVM: x86: hyper-v: Make Hyper-V emulation enablement
 conditional

Hyper-V emulation is enabled in KVM unconditionally. This is bad at least
from a security standpoint, as it is an extra attack surface. Ideally,
there should be a per-VM capability explicitly enabled by the VMM, but
currently that is not the case and we can't mandate one without breaking
backwards compatibility. We can, however, check the guest-visible CPUIDs
and only enable Hyper-V emulation when the "Hv#1" interface is exposed in
HYPERV_CPUID_INTERFACE.

Note, VMMs are free to act in any sequence they like, e.g. they can try to
set MSRs first and CPUIDs later, so we still need to allow the host to
read/write Hyper-V specific MSRs unconditionally.

Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Message-Id: <20210126134816.1880136-14-vkuznets@redhat.com>
[Add selftest vcpu_set_hv_cpuid API to avoid breaking xen_vmcall_test. - Paolo]
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 arch/x86/include/asm/kvm_host.h                 |  1 +
 arch/x86/kvm/cpuid.c                            |  2 +
 arch/x86/kvm/hyperv.c                           | 27 +++++++++++--
 arch/x86/kvm/hyperv.h                           |  3 +-
 arch/x86/kvm/x86.c                              |  2 +-
 arch/x86/kvm/xen.c                              |  2 +-
 .../selftests/kvm/include/x86_64/processor.h    |  1 +
 .../selftests/kvm/lib/x86_64/processor.c        | 35 ++++++++++++++++
 .../testing/selftests/kvm/x86_64/evmcs_test.c   | 40 +------------------
 .../selftests/kvm/x86_64/xen_vmcall_test.c      |  2 +-
 10 files changed, 69 insertions(+), 46 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 95eb6e93a4b7..84499aad01a4 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -736,6 +736,7 @@ struct kvm_vcpu_arch {
 	/* used for guest single stepping over the given code position */
 	unsigned long singlestep_rip;
 
+	bool hyperv_enabled;
 	struct kvm_vcpu_hv *hyperv;
 	struct kvm_vcpu_xen xen;
 
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index f47bbd4b0837..c8f2592ccc99 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -179,6 +179,8 @@ static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
 	vcpu->arch.cr4_guest_rsvd_bits =
 	    __cr4_reserved_bits(guest_cpuid_has, vcpu);
 
+	kvm_hv_set_cpuid(vcpu);
+
 	/* Invoke the vendor callback only after the above state is updated. */
 	static_call(kvm_x86_vcpu_after_set_cpuid)(vcpu);
 }
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index 333bd200f186..11d16fe52324 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -37,6 +37,9 @@
 #include "trace.h"
 #include "irq.h"
 
+/* "Hv#1" signature */
+#define HYPERV_CPUID_SIGNATURE_EAX 0x31237648
+
 #define KVM_HV_MAX_SPARSE_VCPU_SET_BITS DIV_ROUND_UP(KVM_MAX_VCPUS, 64)
 
 static void stimer_mark_pending(struct kvm_vcpu_hv_stimer *stimer,
@@ -1473,6 +1476,9 @@ int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
 {
 	struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);
 
+	if (!host && !vcpu->arch.hyperv_enabled)
+		return 1;
+
 	if (kvm_hv_msr_partition_wide(msr)) {
 		int r;
 
@@ -1488,6 +1494,9 @@ int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host)
 {
 	struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);
 
+	if (!host && !vcpu->arch.hyperv_enabled)
+		return 1;
+
 	if (kvm_hv_msr_partition_wide(msr)) {
 		int r;
 
@@ -1701,9 +1710,20 @@ ret_success:
 	return HV_STATUS_SUCCESS;
 }
 
-bool kvm_hv_hypercall_enabled(struct kvm *kvm)
+void kvm_hv_set_cpuid(struct kvm_vcpu *vcpu)
+{
+	struct kvm_cpuid_entry2 *entry;
+
+	entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_INTERFACE, 0);
+	if (entry && entry->eax == HYPERV_CPUID_SIGNATURE_EAX)
+		vcpu->arch.hyperv_enabled = true;
+	else
+		vcpu->arch.hyperv_enabled = false;
+}
+
+bool kvm_hv_hypercall_enabled(struct kvm_vcpu *vcpu)
 {
-	return to_kvm_hv(kvm)->hv_guest_os_id != 0;
+	return vcpu->arch.hyperv_enabled && to_kvm_hv(vcpu->kvm)->hv_guest_os_id;
 }
 
 static void kvm_hv_hypercall_set_result(struct kvm_vcpu *vcpu, u64 result)
@@ -2036,8 +2056,7 @@ int kvm_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
 			break;
 
 		case HYPERV_CPUID_INTERFACE:
-			memcpy(signature, "Hv#1\0\0\0\0\0\0\0\0", 12);
-			ent->eax = signature[0];
+			ent->eax = HYPERV_CPUID_SIGNATURE_EAX;
 			break;
 
 		case HYPERV_CPUID_VERSION:
diff --git a/arch/x86/kvm/hyperv.h b/arch/x86/kvm/hyperv.h
index 4bc2ab0caa92..f28e7fb88765 100644
--- a/arch/x86/kvm/hyperv.h
+++ b/arch/x86/kvm/hyperv.h
@@ -92,7 +92,7 @@ static inline u32 kvm_hv_get_vpindex(struct kvm_vcpu *vcpu)
 int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host);
 int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host);
 
-bool kvm_hv_hypercall_enabled(struct kvm *kvm);
+bool kvm_hv_hypercall_enabled(struct kvm_vcpu *vcpu);
 int kvm_hv_hypercall(struct kvm_vcpu *vcpu);
 
 void kvm_hv_irq_routing_update(struct kvm *kvm);
@@ -141,6 +141,7 @@ void kvm_hv_setup_tsc_page(struct kvm *kvm,
 
 void kvm_hv_init_vm(struct kvm *kvm);
 void kvm_hv_destroy_vm(struct kvm *kvm);
+void kvm_hv_set_cpuid(struct kvm_vcpu *vcpu);
 int kvm_vm_ioctl_hv_eventfd(struct kvm *kvm, struct kvm_hyperv_eventfd *args);
 int kvm_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
 		     struct kvm_cpuid_entry2 __user *entries);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index a75d01523c4f..1e5c304d14ab 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -8162,7 +8162,7 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
 	if (kvm_xen_hypercall_enabled(vcpu->kvm))
 		return kvm_xen_hypercall(vcpu);
 
-	if (kvm_hv_hypercall_enabled(vcpu->kvm))
+	if (kvm_hv_hypercall_enabled(vcpu))
 		return kvm_hv_hypercall(vcpu);
 
 	nr = kvm_rax_read(vcpu);
diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c
index b414fed1576b..b08a688e56f9 100644
--- a/arch/x86/kvm/xen.c
+++ b/arch/x86/kvm/xen.c
@@ -369,7 +369,7 @@ int kvm_xen_hypercall(struct kvm_vcpu *vcpu)
 
 	/* Hyper-V hypercalls get bit 31 set in EAX */
 	if ((input & 0x80000000) &&
-	    kvm_hv_hypercall_enabled(vcpu->kvm))
+	    kvm_hv_hypercall_enabled(vcpu))
 		return kvm_hv_hypercall(vcpu);
 
 	longmode = is_64_bit_mode(vcpu);
diff --git a/tools/testing/selftests/kvm/include/x86_64/processor.h b/tools/testing/selftests/kvm/include/x86_64/processor.h
index 42292d676eff..0b30b4e15c38 100644
--- a/tools/testing/selftests/kvm/include/x86_64/processor.h
+++ b/tools/testing/selftests/kvm/include/x86_64/processor.h
@@ -407,6 +407,7 @@ uint64_t kvm_hypercall(uint64_t nr, uint64_t a0, uint64_t a1, uint64_t a2,
 		       uint64_t a3);
 
 struct kvm_cpuid2 *kvm_get_supported_hv_cpuid(void);
+void vcpu_set_hv_cpuid(struct kvm_vm *vm, uint32_t vcpuid);
 struct kvm_cpuid2 *vcpu_get_supported_hv_cpuid(struct kvm_vm *vm, uint32_t vcpuid);
 
 /*
diff --git a/tools/testing/selftests/kvm/lib/x86_64/processor.c b/tools/testing/selftests/kvm/lib/x86_64/processor.c
index 18483df2b331..de0c76177d02 100644
--- a/tools/testing/selftests/kvm/lib/x86_64/processor.c
+++ b/tools/testing/selftests/kvm/lib/x86_64/processor.c
@@ -1323,6 +1323,41 @@ struct kvm_cpuid2 *kvm_get_supported_hv_cpuid(void)
 	return cpuid;
 }
 
+void vcpu_set_hv_cpuid(struct kvm_vm *vm, uint32_t vcpuid)
+{
+	static struct kvm_cpuid2 *cpuid_full;
+	struct kvm_cpuid2 *cpuid_sys, *cpuid_hv;
+	int i, nent = 0;
+
+	if (!cpuid_full) {
+		cpuid_sys = kvm_get_supported_cpuid();
+		cpuid_hv = kvm_get_supported_hv_cpuid();
+
+		cpuid_full = malloc(sizeof(*cpuid_full) +
+				    (cpuid_sys->nent + cpuid_hv->nent) *
+				    sizeof(struct kvm_cpuid_entry2));
+		if (!cpuid_full) {
+			perror("malloc");
+			abort();
+		}
+
+		/* Need to skip KVM CPUID leaves 0x400000xx */
+		for (i = 0; i < cpuid_sys->nent; i++) {
+			if (cpuid_sys->entries[i].function >= 0x40000000 &&
+			    cpuid_sys->entries[i].function < 0x40000100)
+				continue;
+			cpuid_full->entries[nent] = cpuid_sys->entries[i];
+			nent++;
+		}
+
+		memcpy(&cpuid_full->entries[nent], cpuid_hv->entries,
+		       cpuid_hv->nent * sizeof(struct kvm_cpuid_entry2));
+		cpuid_full->nent = nent + cpuid_hv->nent;
+	}
+
+	vcpu_set_cpuid(vm, vcpuid, cpuid_full);
+}
+
 struct kvm_cpuid2 *vcpu_get_supported_hv_cpuid(struct kvm_vm *vm, uint32_t vcpuid)
 {
 	static struct kvm_cpuid2 *cpuid;
diff --git a/tools/testing/selftests/kvm/x86_64/evmcs_test.c b/tools/testing/selftests/kvm/x86_64/evmcs_test.c
index 39a3cb2bd103..ca22ee6d19cb 100644
--- a/tools/testing/selftests/kvm/x86_64/evmcs_test.c
+++ b/tools/testing/selftests/kvm/x86_64/evmcs_test.c
@@ -78,42 +78,6 @@ void guest_code(struct vmx_pages *vmx_pages)
 	GUEST_ASSERT(vmlaunch());
 }
 
-struct kvm_cpuid2 *guest_get_cpuid(void)
-{
-	static struct kvm_cpuid2 *cpuid_full;
-	struct kvm_cpuid2 *cpuid_sys, *cpuid_hv;
-	int i, nent = 0;
-
-	if (cpuid_full)
-		return cpuid_full;
-
-	cpuid_sys = kvm_get_supported_cpuid();
-	cpuid_hv = kvm_get_supported_hv_cpuid();
-
-	cpuid_full = malloc(sizeof(*cpuid_full) +
-			    (cpuid_sys->nent + cpuid_hv->nent) *
-			    sizeof(struct kvm_cpuid_entry2));
-	if (!cpuid_full) {
-		perror("malloc");
-		abort();
-	}
-
-	/* Need to skip KVM CPUID leaves 0x400000xx */
-	for (i = 0; i < cpuid_sys->nent; i++) {
-		if (cpuid_sys->entries[i].function >= 0x40000000 &&
-		    cpuid_sys->entries[i].function < 0x40000100)
-			continue;
-		cpuid_full->entries[nent] = cpuid_sys->entries[i];
-		nent++;
-	}
-
-	memcpy(&cpuid_full->entries[nent], cpuid_hv->entries,
-	       cpuid_hv->nent * sizeof(struct kvm_cpuid_entry2));
-	cpuid_full->nent = nent + cpuid_hv->nent;
-
-	return cpuid_full;
-}
-
 int main(int argc, char *argv[])
 {
 	vm_vaddr_t vmx_pages_gva = 0;
@@ -135,7 +99,7 @@ int main(int argc, char *argv[])
 		exit(KSFT_SKIP);
 	}
 
-	vcpu_set_cpuid(vm, VCPU_ID, guest_get_cpuid());
+	vcpu_set_hv_cpuid(vm, VCPU_ID);
 	vcpu_enable_evmcs(vm, VCPU_ID);
 
 	run = vcpu_state(vm, VCPU_ID);
@@ -179,7 +143,7 @@ int main(int argc, char *argv[])
 	/* Restore state in a new VM. */
 	kvm_vm_restart(vm, O_RDWR);
 	vm_vcpu_add(vm, VCPU_ID);
-	vcpu_set_cpuid(vm, VCPU_ID, guest_get_cpuid());
+	vcpu_set_hv_cpuid(vm, VCPU_ID);
 	vcpu_enable_evmcs(vm, VCPU_ID);
 	vcpu_load_state(vm, VCPU_ID, state);
 	run = vcpu_state(vm, VCPU_ID);
diff --git a/tools/testing/selftests/kvm/x86_64/xen_vmcall_test.c b/tools/testing/selftests/kvm/x86_64/xen_vmcall_test.c
index 6e7b069322fd..86653361c695 100644
--- a/tools/testing/selftests/kvm/x86_64/xen_vmcall_test.c
+++ b/tools/testing/selftests/kvm/x86_64/xen_vmcall_test.c
@@ -92,7 +92,7 @@ int main(int argc, char *argv[])
 	}
 
 	vm = vm_create_default(VCPU_ID, 0, (void *) guest_code);
-	vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
+	vcpu_set_hv_cpuid(vm, VCPU_ID);
 
 	struct kvm_xen_hvm_config hvmc = {
		.flags = KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL,
-- 
2.34.1
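
Editor's note (not part of the patch): with this change a VMM opts in to
KVM's Hyper-V emulation simply by exposing the "Hv#1" signature in
HYPERV_CPUID_INTERFACE (leaf 0x40000001); kvm_hv_set_cpuid() then sets
vcpu->arch.hyperv_enabled whenever the CPUID table is installed. Below is a
minimal, hypothetical userspace sketch of that opt-in via KVM_SET_CPUID2.
The helper name is invented for illustration, and a real VMM would pass its
complete CPUID table (as the selftest's vcpu_set_hv_cpuid() does), not just
this single leaf.

/*
 * Sketch only: advertise "Hv#1" so that kvm_hv_set_cpuid() flips
 * vcpu->arch.hyperv_enabled to true for this vCPU. Error handling and
 * the rest of the guest CPUID table are omitted for brevity.
 */
#include <linux/kvm.h>
#include <stdlib.h>
#include <sys/ioctl.h>

#define HYPERV_CPUID_INTERFACE		0x40000001
#define HYPERV_CPUID_SIGNATURE_EAX	0x31237648	/* "Hv#1" */

static int enable_hyperv_emulation(int vcpu_fd)
{
	struct kvm_cpuid2 *cpuid;
	int ret;

	/* Room for a single kvm_cpuid_entry2 after the header. */
	cpuid = calloc(1, sizeof(*cpuid) + sizeof(struct kvm_cpuid_entry2));
	if (!cpuid)
		return -1;

	cpuid->nent = 1;
	cpuid->entries[0].function = HYPERV_CPUID_INTERFACE;
	cpuid->entries[0].eax = HYPERV_CPUID_SIGNATURE_EAX;

	ret = ioctl(vcpu_fd, KVM_SET_CPUID2, cpuid);
	free(cpuid);
	return ret;
}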