tools/testing/selftests/kvm/x86_64/state_test.c (from platform/kernel/linux-starfive.git)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * KVM_GET/SET_* tests
4  *
5  * Copyright (C) 2018, Red Hat, Inc.
6  *
7  * Tests for vCPU state save/restore, including nested guest state.
8  */
9 #define _GNU_SOURCE /* for program_invocation_short_name */
10 #include <fcntl.h>
11 #include <stdio.h>
12 #include <stdlib.h>
13 #include <string.h>
14 #include <sys/ioctl.h>
15
16 #include "test_util.h"
17
18 #include "kvm_util.h"
19 #include "processor.h"
20 #include "vmx.h"
21 #include "svm_util.h"
22
23 #define L2_GUEST_STACK_SIZE 256
24
/*
 * L2 guest body for the SVM variant of the test: reports sync points 4
 * and 6 to the host, exiting to L1 via VMMCALL after each so L1 can
 * interleave its own sync points (see svm_l1_guest_code).
 */
void svm_l2_guest_code(void)
{
	GUEST_SYNC(4);
	/* Exit to L1 */
	vmcall();
	GUEST_SYNC(6);
	/* Done, exit to L1 and never come back.  */
	vmcall();
}
34
35 static void svm_l1_guest_code(struct svm_test_data *svm)
36 {
37         unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
38         struct vmcb *vmcb = svm->vmcb;
39
40         GUEST_ASSERT(svm->vmcb_gpa);
41         /* Prepare for L2 execution. */
42         generic_svm_setup(svm, svm_l2_guest_code,
43                           &l2_guest_stack[L2_GUEST_STACK_SIZE]);
44
45         GUEST_SYNC(3);
46         run_guest(vmcb, svm->vmcb_gpa);
47         GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_VMMCALL);
48         GUEST_SYNC(5);
49         vmcb->save.rip += 3;
50         run_guest(vmcb, svm->vmcb_gpa);
51         GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_VMMCALL);
52         GUEST_SYNC(7);
53 }
54
/*
 * L2 guest body for the VMX variant.  After the first exit to L1, L1
 * installs a shadow VMCS (see vmx_l1_guest_code), so the vmread/vmwrite
 * instructions below execute in L2 without causing a VM-exit and act on
 * the shadow VMCS.  The GUEST_RIP values written here (0xc0ffee,
 * 0xc0fffee, 0xc0ffffee) are cross-checked by L1 and must also survive
 * the host's save/restore at each GUEST_SYNC.
 */
void vmx_l2_guest_code(void)
{
	GUEST_SYNC(6);

	/* Exit to L1 */
	vmcall();

	/* L1 has now set up a shadow VMCS for us.  */
	GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffee);
	GUEST_SYNC(10);
	/* The shadow-VMCS value must be intact after the host restore. */
	GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffee);
	GUEST_ASSERT(!vmwrite(GUEST_RIP, 0xc0fffee));
	GUEST_SYNC(11);
	GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0fffee);
	GUEST_ASSERT(!vmwrite(GUEST_RIP, 0xc0ffffee));
	GUEST_SYNC(12);

	/* Done, exit to L1 and never come back.  */
	vmcall();
}
75
/*
 * L1 guest body for the VMX variant: enters VMX operation, launches
 * vmx_l2_guest_code, then enables VMCS shadowing so L2 can vmread/
 * vmwrite the shadow VMCS directly.  The current-VMCS pointer, the
 * launched state, and shadow-VMCS contents are all asserted to survive
 * the host's save/restore performed at each GUEST_SYNC.
 */
static void vmx_l1_guest_code(struct vmx_pages *vmx_pages)
{
	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];

	GUEST_ASSERT(vmx_pages->vmcs_gpa);
	GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
	GUEST_SYNC(3);
	GUEST_ASSERT(load_vmcs(vmx_pages));
	GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);

	/* The current-VMCS pointer must be preserved across save/restore. */
	GUEST_SYNC(4);
	GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);

	prepare_vmcs(vmx_pages, vmx_l2_guest_code,
		     &l2_guest_stack[L2_GUEST_STACK_SIZE]);

	GUEST_SYNC(5);
	GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);
	GUEST_ASSERT(!vmlaunch());
	GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);

	/* Check that the launched state is preserved.  */
	GUEST_ASSERT(vmlaunch());

	GUEST_ASSERT(!vmresume());
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);

	/* Launched state must still be set after another save/restore. */
	GUEST_SYNC(7);
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);

	GUEST_ASSERT(!vmresume());
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);

	/* Step L2 past its 3-byte VMCALL before handing it the shadow VMCS. */
	vmwrite(GUEST_RIP, vmreadz(GUEST_RIP) + 3);

	/* Enable VMCS shadowing and point the link pointer at the shadow. */
	vmwrite(SECONDARY_VM_EXEC_CONTROL, SECONDARY_EXEC_SHADOW_VMCS);
	vmwrite(VMCS_LINK_POINTER, vmx_pages->shadow_vmcs_gpa);

	/* A shadow VMCS cannot be launched; VMLAUNCH must fail. */
	GUEST_ASSERT(!vmptrld(vmx_pages->shadow_vmcs_gpa));
	GUEST_ASSERT(vmlaunch());
	GUEST_SYNC(8);
	GUEST_ASSERT(vmlaunch());
	GUEST_ASSERT(vmresume());

	/* Seed the shadow GUEST_RIP that L2 will read via vmread. */
	vmwrite(GUEST_RIP, 0xc0ffee);
	GUEST_SYNC(9);
	GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffee);

	/* Back to the ordinary VMCS; resume L2 through sync points 10-12. */
	GUEST_ASSERT(!vmptrld(vmx_pages->vmcs_gpa));
	GUEST_ASSERT(!vmresume());
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);

	/* L2 last wrote 0xc0ffffee into the shadow GUEST_RIP. */
	GUEST_ASSERT(!vmptrld(vmx_pages->shadow_vmcs_gpa));
	GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffffee);
	GUEST_ASSERT(vmlaunch());
	GUEST_ASSERT(vmresume());
	GUEST_SYNC(13);
	GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffffee);
	GUEST_ASSERT(vmlaunch());
	GUEST_ASSERT(vmresume());
}
138
139 static void __attribute__((__flatten__)) guest_code(void *arg)
140 {
141         GUEST_SYNC(1);
142         GUEST_SYNC(2);
143
144         if (arg) {
145                 if (this_cpu_has(X86_FEATURE_SVM))
146                         svm_l1_guest_code(arg);
147                 else
148                         vmx_l1_guest_code(arg);
149         }
150
151         GUEST_DONE();
152 }
153
/*
 * Host side of the test: run the guest to each sync point, save the
 * full vCPU state, tear the VM down, rebuild it, restore the state, and
 * verify the general-purpose registers came back identical.
 */
int main(int argc, char *argv[])
{
	vm_vaddr_t nested_gva = 0;

	struct kvm_regs regs1, regs2;
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;
	struct kvm_x86_state *state;
	struct ucall uc;
	int stage;

	/* Create VM */
	vm = vm_create_with_one_vcpu(&vcpu, guest_code);

	vcpu_regs_get(vcpu, &regs1);

	/* Only exercise nested state if KVM and the CPU both support it. */
	if (kvm_has_cap(KVM_CAP_NESTED_STATE)) {
		if (kvm_cpu_has(X86_FEATURE_SVM))
			vcpu_alloc_svm(vm, &nested_gva);
		else if (kvm_cpu_has(X86_FEATURE_VMX))
			vcpu_alloc_vmx(vm, &nested_gva);
	}

	if (!nested_gva)
		pr_info("will skip nested state checks\n");

	/* nested_gva == 0 tells guest_code to skip the L1/L2 portion. */
	vcpu_args_set(vcpu, 1, nested_gva);

	for (stage = 1;; stage++) {
		vcpu_run(vcpu);
		TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);

		switch (get_ucall(vcpu, &uc)) {
		case UCALL_ABORT:
			REPORT_GUEST_ASSERT(uc);
			/* NOT REACHED */
		case UCALL_SYNC:
			break;
		case UCALL_DONE:
			goto done;
		default:
			TEST_FAIL("Unknown ucall %lu", uc.cmd);
		}

		/*
		 * UCALL_SYNC is handled here.  Per the selftest ucall
		 * convention, GUEST_SYNC puts "hello" in args[0] and the
		 * stage number in args[1]; stages must arrive in order.
		 */
		TEST_ASSERT(!strcmp((const char *)uc.args[0], "hello") &&
			    uc.args[1] == stage, "Stage %d: Unexpected register values vmexit, got %lx",
			    stage, (ulong)uc.args[1]);

		/* Snapshot full state and the GPRs as the reference copy. */
		state = vcpu_save_state(vcpu);
		memset(&regs1, 0, sizeof(regs1));
		vcpu_regs_get(vcpu, &regs1);

		kvm_vm_release(vm);

		/* Restore state in a new VM.  */
		vcpu = vm_recreate_with_one_vcpu(vm);
		vcpu_load_state(vcpu, state);
		kvm_x86_state_cleanup(state);

		/* GPRs after restore must match the pre-save snapshot. */
		memset(&regs2, 0, sizeof(regs2));
		vcpu_regs_get(vcpu, &regs2);
		TEST_ASSERT(!memcmp(&regs1, &regs2, sizeof(regs2)),
			    "Unexpected register values after vcpu_load_state; rdi: %lx rsi: %lx",
			    (ulong) regs2.rdi, (ulong) regs2.rsi);
	}

done:
	kvm_vm_free(vm);
}