// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2021, Red Hat, Inc.
 *
 * Tests for Hyper-V features enablement
 */
#include <asm/kvm_para.h>
#include <linux/kvm_para.h>
#include <stdint.h>

#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
#include "hyperv.h"

#define VCPU_ID 0
#define LINUX_OS_ID ((u64)0x8100 << 48)
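
/*
 * Note: most Hyper-V synthetic MSRs and hypercalls only become usable after
 * the guest writes a non-zero value to HV_X64_MSR_GUEST_OS_ID; LINUX_OS_ID
 * above mirrors the identifier a Linux guest normally reports there.
 */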

extern unsigned char rdmsr_start;
extern unsigned char rdmsr_end;

static u64 do_rdmsr(u32 idx)
{
	u32 lo, hi;

	asm volatile("rdmsr_start: rdmsr;"
		     "rdmsr_end:"
		     : "=a"(lo), "=c"(idx), "=d"(hi)
		     : "c"(idx));

	return (((u64) hi) << 32) | lo;
}

extern unsigned char wrmsr_start;
extern unsigned char wrmsr_end;

static void do_wrmsr(u32 idx, u64 val)
{
	u32 lo, hi;

	lo = val;
	hi = val >> 32;

	asm volatile("wrmsr_start: wrmsr;"
		     "wrmsr_end:"
		     : : "a"(lo), "c"(idx), "d"(hi));
}

static u64 nr_gp;
static inline u64 hypercall(u64 control, vm_vaddr_t input_address,
			    vm_vaddr_t output_address)
{
	u64 hv_status;

	asm volatile("mov %3, %%r8\n"
		     "vmcall"
		     : "=a" (hv_status),
		       "+c" (control), "+d" (input_address)
		     : "r" (output_address)
		     : "cc", "memory", "r8", "r9", "r10", "r11");

	return hv_status;
}
static void guest_gp_handler(struct ex_regs *regs)
{
	unsigned char *rip = (unsigned char *)regs->rip;
	bool r, w;

	r = rip == &rdmsr_start;
	w = rip == &wrmsr_start;
	GUEST_ASSERT(r || w);

	nr_gp++;

	if (r)
		regs->rip = (uint64_t)&rdmsr_end;
	else
		regs->rip = (uint64_t)&wrmsr_end;
}
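
/*
 * Guest side of the MSR test: for each stage the host fills the shared
 * msr_data page with an MSR index and access type, the guest performs the
 * access, uses the #GP count to check whether it was (dis)allowed and
 * reports the stage back with GUEST_SYNC().
 */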
static void guest_msr(struct msr_data *msr)
{
	int i = 0;

	while (msr->idx) {
		WRITE_ONCE(nr_gp, 0);
		if (!msr->write)
			do_rdmsr(msr->idx);
		else
			do_wrmsr(msr->idx, msr->write_val);

		if (msr->available)
			GUEST_ASSERT(READ_ONCE(nr_gp) == 0);
		else
			GUEST_ASSERT(READ_ONCE(nr_gp) == 1);

		GUEST_SYNC(i++);
	}

	GUEST_DONE();
}

static void guest_hcall(vm_vaddr_t pgs_gpa, struct hcall_data *hcall)
{
	int i = 0;

	wrmsr(HV_X64_MSR_GUEST_OS_ID, LINUX_OS_ID);
	wrmsr(HV_X64_MSR_HYPERCALL, pgs_gpa);

	while (hcall->control) {
		GUEST_ASSERT(hypercall(hcall->control, pgs_gpa,
				       pgs_gpa + 4096) == hcall->expect);
		GUEST_SYNC(i++);
	}

	GUEST_DONE();
}

static void hv_set_cpuid(struct kvm_vm *vm, struct kvm_cpuid2 *cpuid,
			 struct kvm_cpuid_entry2 *feat,
			 struct kvm_cpuid_entry2 *recomm,
			 struct kvm_cpuid_entry2 *dbg)
{
	TEST_ASSERT(set_cpuid(cpuid, feat),
		    "failed to set KVM_CPUID_FEATURES leaf");
	TEST_ASSERT(set_cpuid(cpuid, recomm),
		    "failed to set HYPERV_CPUID_ENLIGHTMENT_INFO leaf");
	TEST_ASSERT(set_cpuid(cpuid, dbg),
		    "failed to set HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES leaf");
	vcpu_set_cpuid(vm, VCPU_ID, cpuid);
}
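
/*
 * Host-side driver of the MSR stages: each stage tweaks the Hyper-V feature,
 * recommendation and debug CPUID leaves, re-applies them via hv_set_cpuid()
 * and, with KVM_CAP_HYPERV_ENFORCE_CPUID enabled, expects KVM to allow only
 * the MSR accesses those leaves advertise.
 */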
static void guest_test_msrs_access(struct kvm_vm *vm, struct msr_data *msr,
				   struct kvm_cpuid2 *best)
{
	struct kvm_run *run;
	struct ucall uc;
	int stage = 0, r;
	struct kvm_cpuid_entry2 feat = {
		.function = HYPERV_CPUID_FEATURES
	};
	struct kvm_cpuid_entry2 recomm = {
		.function = HYPERV_CPUID_ENLIGHTMENT_INFO
	};
	struct kvm_cpuid_entry2 dbg = {
		.function = HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES
	};
	struct kvm_enable_cap cap = {0};

	run = vcpu_state(vm, VCPU_ID);

	while (true) {
		/* Only available when Hyper-V identification is set */
		msr->idx = HV_X64_MSR_GUEST_OS_ID;
		msr->idx = HV_X64_MSR_HYPERCALL;
		feat.eax |= HV_MSR_HYPERCALL_AVAILABLE;
		/*
		 * HV_X64_MSR_GUEST_OS_ID has to be written first to make
		 * HV_X64_MSR_HYPERCALL available.
		 */
		msr->idx = HV_X64_MSR_GUEST_OS_ID;
		msr->write_val = LINUX_OS_ID;
		msr->idx = HV_X64_MSR_GUEST_OS_ID;
		msr->idx = HV_X64_MSR_HYPERCALL;
		msr->idx = HV_X64_MSR_VP_RUNTIME;
		feat.eax |= HV_MSR_VP_RUNTIME_AVAILABLE;
		msr->idx = HV_X64_MSR_TIME_REF_COUNT;
		feat.eax |= HV_MSR_TIME_REF_COUNT_AVAILABLE;
		msr->idx = HV_X64_MSR_VP_INDEX;
		feat.eax |= HV_MSR_VP_INDEX_AVAILABLE;
		msr->idx = HV_X64_MSR_RESET;
		feat.eax |= HV_MSR_RESET_AVAILABLE;
		msr->idx = HV_X64_MSR_REFERENCE_TSC;
		feat.eax |= HV_MSR_REFERENCE_TSC_AVAILABLE;
		msr->idx = HV_X64_MSR_EOM;
		/*
		 * Remains unavailable even with KVM_CAP_HYPERV_SYNIC2
		 * capability enabled and guest visible CPUID bit unset.
		 */
		cap.cap = KVM_CAP_HYPERV_SYNIC2;
		vcpu_enable_cap(vm, VCPU_ID, &cap);
		feat.eax |= HV_MSR_SYNIC_AVAILABLE;
		msr->idx = HV_X64_MSR_STIMER0_CONFIG;
		feat.eax |= HV_MSR_SYNTIMER_AVAILABLE;
		/* Direct mode test */
		msr->write_val = 1 << 12;
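		/*
		 * Bit 12 of HV_X64_MSR_STIMER0_CONFIG is the synthetic timer's
		 * direct-mode enable bit per the TLFS, so the write is only
		 * expected to succeed once HV_STIMER_DIRECT_MODE_AVAILABLE is
		 * exposed below.
		 */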
		feat.edx |= HV_STIMER_DIRECT_MODE_AVAILABLE;
		msr->idx = HV_X64_MSR_EOI;
		feat.eax |= HV_MSR_APIC_ACCESS_AVAILABLE;
		msr->idx = HV_X64_MSR_TSC_FREQUENCY;
		feat.eax |= HV_ACCESS_FREQUENCY_MSRS;
		msr->idx = HV_X64_MSR_REENLIGHTENMENT_CONTROL;
		feat.eax |= HV_ACCESS_REENLIGHTENMENT;
		/* Can only write '0' */
		msr->idx = HV_X64_MSR_TSC_EMULATION_STATUS;
		msr->idx = HV_X64_MSR_CRASH_P0;
		feat.edx |= HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE;
		msr->idx = HV_X64_MSR_SYNDBG_STATUS;
		feat.edx |= HV_FEATURE_DEBUG_MSRS_AVAILABLE;
		dbg.eax |= HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING;

		hv_set_cpuid(vm, best, &feat, &recomm, &dbg);

		if (msr->idx)
			pr_debug("Stage %d: testing msr: 0x%x for %s\n", stage,
				 msr->idx, msr->write ? "write" : "read");
		else
			pr_debug("Stage %d: finish\n", stage);

		r = _vcpu_run(vm, VCPU_ID);
		TEST_ASSERT(!r, "vcpu_run failed: %d\n", r);
		TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
			    "unexpected exit reason: %u (%s)",
			    run->exit_reason, exit_reason_str(run->exit_reason));

		switch (get_ucall(vm, VCPU_ID, &uc)) {
		case UCALL_SYNC:
			TEST_ASSERT(uc.args[1] == stage,
				    "Unexpected stage: %ld (%d expected)\n",
				    uc.args[1], stage);
			break;
		case UCALL_ABORT:
			TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0],
				  __FILE__, uc.args[1]);
			return;
		case UCALL_DONE:
			return;
		}

		stage++;
	}
}

static void guest_test_hcalls_access(struct kvm_vm *vm, struct hcall_data *hcall,
				     void *input, void *output, struct kvm_cpuid2 *best)
{
	struct kvm_run *run;
	struct ucall uc;
	int stage = 0, r;
	struct kvm_cpuid_entry2 feat = {
		.function = HYPERV_CPUID_FEATURES,
		.eax = HV_MSR_HYPERCALL_AVAILABLE
	};
	struct kvm_cpuid_entry2 recomm = {
		.function = HYPERV_CPUID_ENLIGHTMENT_INFO
	};
	struct kvm_cpuid_entry2 dbg = {
		.function = HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES
	};

	run = vcpu_state(vm, VCPU_ID);

	while (true) {
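		/*
		 * Stages below: with KVM_CAP_HYPERV_ENFORCE_CPUID enabled,
		 * each hypercall is first expected to be rejected (typically
		 * with HV_STATUS_ACCESS_DENIED) until the corresponding
		 * privilege or enlightenment bit is added to the guest CPUID.
		 */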
		hcall->control = 0xdeadbeef;
		hcall->expect = HV_STATUS_INVALID_HYPERCALL_CODE;
		hcall->control = HVCALL_POST_MESSAGE;
		hcall->expect = HV_STATUS_ACCESS_DENIED;
		feat.ebx |= HV_POST_MESSAGES;
		hcall->expect = HV_STATUS_INVALID_HYPERCALL_INPUT;
		hcall->control = HVCALL_SIGNAL_EVENT;
		hcall->expect = HV_STATUS_ACCESS_DENIED;
		feat.ebx |= HV_SIGNAL_EVENTS;
		hcall->expect = HV_STATUS_INVALID_HYPERCALL_INPUT;
		hcall->control = HVCALL_RESET_DEBUG_SESSION;
		hcall->expect = HV_STATUS_INVALID_HYPERCALL_CODE;
		dbg.eax |= HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING;
		hcall->expect = HV_STATUS_ACCESS_DENIED;
		feat.ebx |= HV_DEBUGGING;
		hcall->expect = HV_STATUS_OPERATION_DENIED;
		hcall->control = HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE;
		hcall->expect = HV_STATUS_ACCESS_DENIED;
		recomm.eax |= HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED;
		hcall->expect = HV_STATUS_SUCCESS;
		hcall->control = HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX;
		hcall->expect = HV_STATUS_ACCESS_DENIED;
		recomm.eax |= HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED;
		hcall->expect = HV_STATUS_SUCCESS;
		hcall->control = HVCALL_SEND_IPI;
		hcall->expect = HV_STATUS_ACCESS_DENIED;
		recomm.eax |= HV_X64_CLUSTER_IPI_RECOMMENDED;
		hcall->expect = HV_STATUS_INVALID_HYPERCALL_INPUT;
		/* Nothing in 'sparse banks' -> success */
		hcall->control = HVCALL_SEND_IPI_EX;
		hcall->expect = HV_STATUS_SUCCESS;
		hcall->control = HVCALL_NOTIFY_LONG_SPIN_WAIT;
		hcall->expect = HV_STATUS_ACCESS_DENIED;
		hcall->expect = HV_STATUS_SUCCESS;

		hv_set_cpuid(vm, best, &feat, &recomm, &dbg);

		if (hcall->control)
			pr_debug("Stage %d: testing hcall: 0x%lx\n", stage,
				 hcall->control);
		else
			pr_debug("Stage %d: finish\n", stage);

		r = _vcpu_run(vm, VCPU_ID);
		TEST_ASSERT(!r, "vcpu_run failed: %d\n", r);
		TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
			    "unexpected exit reason: %u (%s)",
			    run->exit_reason, exit_reason_str(run->exit_reason));

		switch (get_ucall(vm, VCPU_ID, &uc)) {
		case UCALL_SYNC:
			TEST_ASSERT(uc.args[1] == stage,
				    "Unexpected stage: %ld (%d expected)\n",
				    uc.args[1], stage);
			break;
		case UCALL_ABORT:
			TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0],
				  __FILE__, uc.args[1]);
			return;
		case UCALL_DONE:
			return;
		}

		stage++;
	}
}

int main(void)
{
	struct kvm_vm *vm;
	struct kvm_cpuid2 *best;
	vm_vaddr_t msr_gva, hcall_page, hcall_params;
	struct kvm_enable_cap cap = {
		.cap = KVM_CAP_HYPERV_ENFORCE_CPUID,
		.args = {1}
	};
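
	/*
	 * KVM_CAP_HYPERV_ENFORCE_CPUID (enabled with args[0] = 1) makes KVM
	 * reject Hyper-V MSR accesses and hypercalls that the guest-visible
	 * Hyper-V CPUID leaves do not advertise; that behaviour is what this
	 * test exercises.
	 */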

	/* Test MSRs */
	vm = vm_create_default(VCPU_ID, 0, guest_msr);

	msr_gva = vm_vaddr_alloc_page(vm);
	memset(addr_gva2hva(vm, msr_gva), 0x0, getpagesize());
	vcpu_args_set(vm, VCPU_ID, 1, msr_gva);
	vcpu_enable_cap(vm, VCPU_ID, &cap);

	vcpu_set_hv_cpuid(vm, VCPU_ID);

	best = kvm_get_supported_hv_cpuid();

	vm_init_descriptor_tables(vm);
	vcpu_init_descriptor_tables(vm, VCPU_ID);
	vm_handle_exception(vm, GP_VECTOR, guest_gp_handler);

	pr_info("Testing access to Hyper-V specific MSRs\n");
	guest_test_msrs_access(vm, addr_gva2hva(vm, msr_gva),
			       best);
	kvm_vm_free(vm);

	/* Test hypercalls */
	vm = vm_create_default(VCPU_ID, 0, guest_hcall);

	/* Hypercall input/output */
	hcall_page = vm_vaddr_alloc_pages(vm, 2);
	memset(addr_gva2hva(vm, hcall_page), 0x0, 2 * getpagesize());

	hcall_params = vm_vaddr_alloc_page(vm);
	memset(addr_gva2hva(vm, hcall_params), 0x0, getpagesize());

	vcpu_args_set(vm, VCPU_ID, 2, addr_gva2gpa(vm, hcall_page), hcall_params);
	vcpu_enable_cap(vm, VCPU_ID, &cap);

	vcpu_set_hv_cpuid(vm, VCPU_ID);

	best = kvm_get_supported_hv_cpuid();

	pr_info("Testing access to Hyper-V hypercalls\n");
	guest_test_hcalls_access(vm, addr_gva2hva(vm, hcall_params),
				 addr_gva2hva(vm, hcall_page),
				 addr_gva2hva(vm, hcall_page) + getpagesize(),
				 best);
	kvm_vm_free(vm);

	return 0;
}