KVM: selftests: Hardcode VMCALL/VMMCALL opcodes in "fix hypercall" test
author Sean Christopherson <seanjc@google.com>
Wed, 28 Sep 2022 23:36:49 +0000 (23:36 +0000)
committer Paolo Bonzini <pbonzini@redhat.com>
Fri, 30 Sep 2022 10:39:32 +0000 (06:39 -0400)
Hardcode the VMCALL/VMMCALL opcodes in dedicated arrays instead of
extracting the opcodes from inline asm, and patch in the "other" opcode
so as to preserve the original opcode, i.e. the opcode that the test
executes in the guest.
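
For reference, a standalone sanity check (purely illustrative, not part of
the selftest; the asm_* labels and the little program below do not exist
anywhere in the tree) can confirm that the hardcoded bytes match what the
assembler emits for VMCALL and VMMCALL:

  #include <assert.h>
  #include <stdint.h>
  #include <string.h>

  #define HYPERCALL_INSN_SIZE 3

  static const uint8_t vmx_vmcall[HYPERCALL_INSN_SIZE]  = { 0x0f, 0x01, 0xc1 };
  static const uint8_t svm_vmmcall[HYPERCALL_INSN_SIZE] = { 0x0f, 0x01, 0xd9 };

  /* Emit the real instructions into .rodata purely so their bytes can be read. */
  extern const uint8_t asm_vmcall[], asm_vmmcall[];
  __asm__(".pushsection .rodata\n\t"
          ".globl asm_vmcall\n\t"
          ".globl asm_vmmcall\n\t"
          "asm_vmcall:  vmcall\n\t"
          "asm_vmmcall: vmmcall\n\t"
          ".popsection");

  int main(void)
  {
          assert(!memcmp(asm_vmcall,  vmx_vmcall,  HYPERCALL_INSN_SIZE));
          assert(!memcmp(asm_vmmcall, svm_vmmcall, HYPERCALL_INSN_SIZE));
          return 0;
  }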

Preserving the original opcode (by not patching the source) will make it
easier to implement a check that KVM doesn't modify the opcode (currently
the test only verifies that a #UD occurred).
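
As an illustration only (this snippet is not part of the patch), with the
const arrays left pristine such a check could boil down to a memcmp()
against the executed buffer once do_sched_yield() returns, e.g. somewhere
in guest_main():

          /*
           * Hypothetical follow-on check, sketched against this patch's
           * identifiers: hypercall_insn is the 3-byte buffer that was
           * executed, and the const arrays still hold the unmodified
           * opcodes.  Expecting the native opcode assumes KVM patched the
           * instruction, i.e. that the fix-hypercall quirk is enabled.
           */
          GUEST_ASSERT(!memcmp(hypercall_insn, native_hypercall_insn,
                               HYPERCALL_INSN_SIZE));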

Use INT3 (0xcc) as the placeholder so that the guest will likely die a
horrible death if the test's patching goes awry.
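
A further, purely hypothetical hardening step (also not in this patch)
would be to install a #BP handler alongside the existing #UD handler so a
botched patch fails with an explicit assert instead of an unhandled INT3:

  static void guest_bp_handler(struct ex_regs *regs)
  {
          /* The INT3 placeholder was executed without being patched. */
          GUEST_ASSERT(0);
  }

  /* In main(), next to the existing UD_VECTOR registration: */
  vm_install_exception_handler(vm, BP_VECTOR, guest_bp_handler);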

As a bonus, patching from within the test dedups a decent chunk of code.

Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20220928233652.783504-5-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
tools/testing/selftests/kvm/x86_64/fix_hypercall_test.c

index 6864eb0d5d14eb4cce3a8410fe0aeba3913e496d..cebc84b263521d36cd9bf75cc56f7480f9693e58 100644
@@ -25,27 +25,16 @@ static void guest_ud_handler(struct ex_regs *regs)
        GUEST_DONE();
 }
 
-extern uint8_t svm_hypercall_insn[HYPERCALL_INSN_SIZE];
-static uint64_t svm_do_sched_yield(uint8_t apic_id)
-{
-       uint64_t ret;
-
-       asm volatile("svm_hypercall_insn:\n\t"
-                    "vmmcall\n\t"
-                    : "=a"(ret)
-                    : "a"((uint64_t)KVM_HC_SCHED_YIELD), "b"((uint64_t)apic_id)
-                    : "memory");
+static const uint8_t vmx_vmcall[HYPERCALL_INSN_SIZE]  = { 0x0f, 0x01, 0xc1 };
+static const uint8_t svm_vmmcall[HYPERCALL_INSN_SIZE] = { 0x0f, 0x01, 0xd9 };
 
-       return ret;
-}
-
-extern uint8_t vmx_hypercall_insn[HYPERCALL_INSN_SIZE];
-static uint64_t vmx_do_sched_yield(uint8_t apic_id)
+extern uint8_t hypercall_insn[HYPERCALL_INSN_SIZE];
+static uint64_t do_sched_yield(uint8_t apic_id)
 {
        uint64_t ret;
 
-       asm volatile("vmx_hypercall_insn:\n\t"
-                    "vmcall\n\t"
+       asm volatile("hypercall_insn:\n\t"
+                    ".byte 0xcc,0xcc,0xcc\n\t"
                     : "=a"(ret)
                     : "a"((uint64_t)KVM_HC_SCHED_YIELD), "b"((uint64_t)apic_id)
                     : "memory");
@@ -55,25 +44,25 @@ static uint64_t vmx_do_sched_yield(uint8_t apic_id)
 
 static void guest_main(void)
 {
-       uint8_t *native_hypercall_insn, *hypercall_insn;
-       uint8_t apic_id;
-
-       apic_id = GET_APIC_ID_FIELD(xapic_read_reg(APIC_ID));
+       const uint8_t *native_hypercall_insn;
+       const uint8_t *other_hypercall_insn;
 
        if (is_intel_cpu()) {
-               native_hypercall_insn = vmx_hypercall_insn;
-               hypercall_insn = svm_hypercall_insn;
-               svm_do_sched_yield(apic_id);
+               native_hypercall_insn = vmx_vmcall;
+               other_hypercall_insn  = svm_vmmcall;
        } else if (is_amd_cpu()) {
-               native_hypercall_insn = svm_hypercall_insn;
-               hypercall_insn = vmx_hypercall_insn;
-               vmx_do_sched_yield(apic_id);
+               native_hypercall_insn = svm_vmmcall;
+               other_hypercall_insn  = vmx_vmcall;
        } else {
                GUEST_ASSERT(0);
                /* unreachable */
                return;
        }
 
+       memcpy(hypercall_insn, other_hypercall_insn, HYPERCALL_INSN_SIZE);
+
+       do_sched_yield(GET_APIC_ID_FIELD(xapic_read_reg(APIC_ID)));
+
        /*
         * The hypercall didn't #UD (guest_ud_handler() signals "done" if a #UD
         * occurs).  Verify that a #UD is NOT expected and that KVM patched in