// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <pthread.h>
#include <sched.h>
#include <errno.h>
#include <unistd.h>
#include <time.h>
#include <sys/mman.h>

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/atomic.h>

#include "kvm_util.h"
#include "test_util.h"
#include "guest_modes.h"
#include "processor.h"

static void guest_code(uint64_t start_gpa, uint64_t end_gpa, uint64_t stride)
{
	uint64_t gpa;

	for (gpa = start_gpa; gpa < end_gpa; gpa += stride)
		*((volatile uint64_t *)gpa) = gpa;

	GUEST_DONE();
}

struct vcpu_info {
	struct kvm_vm *vm;
	uint32_t id;
	uint64_t start_gpa;
	uint64_t end_gpa;
};

static int nr_vcpus;
static atomic_t rendezvous;
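
/*
 * The boss and vCPUs synchronize on a single atomic counter whose sign
 * encodes the current phase: the boss arms it to +/-(nr_vcpus + 1), each
 * vCPU "checks in" by stepping the count toward zero, and the boss releases
 * everyone into the next phase by flipping the sign once only its own "+1"
 * share remains.
 */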
static void rendezvous_with_boss(void)
{
	int orig = atomic_read(&rendezvous);

	if (orig > 0) {
		atomic_dec_and_test(&rendezvous);
		while (atomic_read(&rendezvous) > 0)
			cpu_relax();
	} else {
		atomic_inc(&rendezvous);
		while (atomic_read(&rendezvous) < 0)
			cpu_relax();
	}
}

static void run_vcpu(struct kvm_vm *vm, uint32_t vcpu_id)
{
	vcpu_run(vm, vcpu_id);
	ASSERT_EQ(get_ucall(vm, vcpu_id, NULL), UCALL_DONE);
}
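
/*
 * Worker flow: write every page in the vCPU's chunk of guest memory, restore
 * the pre-run register state (toggling CR0.WP on x86 to force an MMU context
 * reset), then write the entire chunk a second time.  Every step is bracketed
 * by a rendezvous with the boss.
 */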
static void *vcpu_worker(void *data)
{
	struct vcpu_info *vcpu = data;
	struct kvm_vm *vm = vcpu->vm;
	struct kvm_sregs sregs;
	struct kvm_regs regs;

	vcpu_args_set(vm, vcpu->id, 3, vcpu->start_gpa, vcpu->end_gpa,
		      vm_get_page_size(vm));

	/* Snapshot regs before the first run. */
	vcpu_regs_get(vm, vcpu->id, &regs);
	rendezvous_with_boss();

	run_vcpu(vm, vcpu->id);
	rendezvous_with_boss();
	vcpu_regs_set(vm, vcpu->id, &regs);
	vcpu_sregs_get(vm, vcpu->id, &sregs);
#ifdef __x86_64__
	/* Toggle CR0.WP to trigger an MMU context reset. */
	sregs.cr0 ^= X86_CR0_WP;
#endif
	vcpu_sregs_set(vm, vcpu->id, &sregs);
	rendezvous_with_boss();

	run_vcpu(vm, vcpu->id);
	rendezvous_with_boss();

	return NULL;
}
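
/*
 * Carve [start_gpa, end_gpa) into equal, page-aligned chunks, one per vCPU,
 * and spawn a worker thread for each.
 */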
static pthread_t *spawn_workers(struct kvm_vm *vm, uint64_t start_gpa,
				uint64_t end_gpa)
{
	struct vcpu_info *info;
	uint64_t gpa, nr_bytes;
	pthread_t *threads;
	int i;

	threads = malloc(nr_vcpus * sizeof(*threads));
	TEST_ASSERT(threads, "Failed to allocate vCPU threads");

	info = malloc(nr_vcpus * sizeof(*info));
	TEST_ASSERT(info, "Failed to allocate vCPU gpa ranges");

	nr_bytes = ((end_gpa - start_gpa) / nr_vcpus) &
		   ~((uint64_t)vm_get_page_size(vm) - 1);
	TEST_ASSERT(nr_bytes, "C'mon, no way you have %d CPUs", nr_vcpus);

	for (i = 0, gpa = start_gpa; i < nr_vcpus; i++, gpa += nr_bytes) {
		info[i].vm = vm;
		info[i].id = i;
		info[i].start_gpa = gpa;
		info[i].end_gpa = gpa + nr_bytes;
		pthread_create(&threads[i], NULL, vcpu_worker, &info[i]);
	}

	return threads;
}
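
/*
 * Boss side of the rendezvous: wait until only the boss's "+1" share is left
 * on the counter, timestamp the phase that just finished, then flip the
 * counter's sign to release the vCPUs into the next phase.
 */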
static void rendezvous_with_vcpus(struct timespec *time, const char *name)
{
	int i, rendezvoused;

	pr_info("Waiting for vCPUs to finish %s...\n", name);

	rendezvoused = atomic_read(&rendezvous);
	for (i = 0; abs(rendezvoused) != 1; i++) {
		usleep(100);
		if (!(i & 0x3f))
			pr_info("\r%d vCPUs haven't rendezvoused...",
				abs(rendezvoused) - 1);
		rendezvoused = atomic_read(&rendezvous);
	}

	clock_gettime(CLOCK_MONOTONIC, time);

	/* Release the vCPUs after getting the time of the previous action. */
	pr_info("\rAll vCPUs finished %s, releasing...\n", name);
	if (rendezvoused > 0)
		atomic_set(&rendezvous, -nr_vcpus - 1);
	else
		atomic_set(&rendezvous, nr_vcpus + 1);
}
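
/*
 * Default to 3/4 of the CPUs the task is allowed to run on, presumably so
 * the boss thread and the rest of the host aren't starved while nr_vcpus
 * worker threads spin on the rendezvous.
 */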
static void calc_default_nr_vcpus(void)
{
	cpu_set_t possible_mask;
	int r;

	r = sched_getaffinity(0, sizeof(possible_mask), &possible_mask);
	TEST_ASSERT(!r, "sched_getaffinity failed, errno = %d (%s)",
		    errno, strerror(errno));

	nr_vcpus = CPU_COUNT(&possible_mask) * 3/4;
	TEST_ASSERT(nr_vcpus > 0, "Uh, no CPUs?");
}

int main(int argc, char *argv[])
{
	/*
	 * Skip the first 4gb and slot0.  slot0 maps <1gb and is used to back
	 * the guest's code, stack, and page tables.  Because selftests creates
	 * an IRQCHIP, a.k.a. a local APIC, KVM creates an internal memslot
	 * just below the 4gb boundary.  This test could create memory at
	 * 1gb-3gb, but it's simpler to skip straight to 4gb.
	 */
	const uint64_t size_1gb = (1 << 30);
	const uint64_t start_gpa = (4ull * size_1gb);
	const int first_slot = 1;

	struct timespec time_start, time_run1, time_reset, time_run2;
	uint64_t max_gpa, gpa, slot_size, max_mem, i;
	int max_slots, slot, opt, fd;
	bool hugepages = false;
	pthread_t *threads;
	struct kvm_vm *vm;
	void *mem;

	/*
	 * Default to 2gb so that maxing out systems with MAXPHYADDR=46, which
	 * are quite common for x86, requires changing only max_mem (KVM allows
	 * 32k memslots, 32k * 2gb == ~64tb of guest memory).
	 */
	slot_size = 2 * size_1gb;

	max_slots = kvm_check_cap(KVM_CAP_NR_MEMSLOTS);
	TEST_ASSERT(max_slots > first_slot, "KVM is broken");

	/* All KVM MMUs should be able to survive a 128gb guest. */
	max_mem = 128 * size_1gb;

	calc_default_nr_vcpus();

	while ((opt = getopt(argc, argv, "c:h:m:s:H")) != -1) {
		switch (opt) {
		case 'c':
			nr_vcpus = atoi(optarg);
			TEST_ASSERT(nr_vcpus > 0, "number of vcpus must be >0");
			break;
		case 'm':
			max_mem = atoi(optarg) * size_1gb;
			TEST_ASSERT(max_mem > 0, "memory size must be >0");
			break;
		case 's':
			slot_size = atoi(optarg) * size_1gb;
			TEST_ASSERT(slot_size > 0, "slot size must be >0");
			break;
		case 'H':
			hugepages = true;
			break;
		case 'h':
		default:
			printf("usage: %s [-c nr_vcpus] [-m max_mem_in_gb] [-s slot_size_in_gb] [-H]\n", argv[0]);
			exit(1);
		}
	}
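
	/*
	 * Example invocation (values are arbitrary): 16 vCPUs writing to 64gb
	 * of guest memory carved into 1gb memslots, backed by hugepages:
	 *
	 *	./max_guest_memory_test -c 16 -m 64 -s 1 -H
	 */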
	vm = vm_create_default_with_vcpus(nr_vcpus, 0, 0, guest_code, NULL);

	max_gpa = vm_get_max_gfn(vm) << vm_get_page_shift(vm);
	TEST_ASSERT(max_gpa > (4 * slot_size), "MAXPHYADDR <4gb");

	fd = kvm_memfd_alloc(slot_size, hugepages);
	mem = mmap(NULL, slot_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	TEST_ASSERT(mem != MAP_FAILED, "mmap() failed");

	TEST_ASSERT(!madvise(mem, slot_size, MADV_NOHUGEPAGE), "madvise() failed");

	/* Pre-fault the memory to avoid taking mmap_sem on guest page faults. */
	for (i = 0; i < slot_size; i += vm_get_page_size(vm))
		((uint8_t *)mem)[i] = 0xaa;
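
	/*
	 * Create memslots until KVM runs out of slots, the max GPA is reached,
	 * or max_mem worth of memory has been added, whichever comes first.
	 * Every slot is backed by the same host mapping, so the host-side
	 * footprint stays at ~slot_size no matter how big the guest is.
	 */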
	for (slot = first_slot; slot < max_slots; slot++) {
		gpa = start_gpa + ((slot - first_slot) * slot_size);
		if (gpa + slot_size > max_gpa)
			break;

		if ((gpa - start_gpa) >= max_mem)
			break;

		vm_set_user_memory_region(vm, slot, 0, gpa, slot_size, mem);

#ifdef __x86_64__
		/* Identity map memory in the guest using 1gb pages. */
		for (i = 0; i < slot_size; i += size_1gb)
			__virt_pg_map(vm, gpa + i, gpa + i, X86_PAGE_SIZE_1G);
#else
		for (i = 0; i < slot_size; i += vm_get_page_size(vm))
			virt_pg_map(vm, gpa + i, gpa + i);
#endif
	}
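
	/* Arm the rendezvous counter; the extra "+1" is the boss's share. */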
	atomic_set(&rendezvous, nr_vcpus + 1);
	threads = spawn_workers(vm, start_gpa, gpa);

	pr_info("Running with %lugb of guest memory and %u vCPUs\n",
		(gpa - start_gpa) / size_1gb, nr_vcpus);
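
	/*
	 * Each rendezvous below pairs with one rendezvous_with_boss() call in
	 * vcpu_worker(): thread spawn, first write pass, register/MMU reset,
	 * and second write pass.
	 */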
	rendezvous_with_vcpus(&time_start, "spawning");
	rendezvous_with_vcpus(&time_run1, "run 1");
	rendezvous_with_vcpus(&time_reset, "reset");
	rendezvous_with_vcpus(&time_run2, "run 2");

	/* Compute deltas in reverse order so raw timestamps aren't clobbered. */
	time_run2 = timespec_sub(time_run2, time_reset);
	time_reset = timespec_sub(time_reset, time_run1);
	time_run1 = timespec_sub(time_run1, time_start);

	pr_info("run1 = %ld.%.9lds, reset = %ld.%.9lds, run2 = %ld.%.9lds\n",
		time_run1.tv_sec, time_run1.tv_nsec,
		time_reset.tv_sec, time_reset.tv_nsec,
		time_run2.tv_sec, time_run2.tv_nsec);

	/*
	 * Delete even numbered slots (arbitrary) and unmap the first half of
	 * the backing (also arbitrary) to verify KVM correctly drops all
	 * references to the removed regions.
	 */
	for (slot = (slot - 1) & ~1ull; slot >= first_slot; slot -= 2)
		vm_set_user_memory_region(vm, slot, 0, 0, 0, NULL);

	munmap(mem, slot_size / 2);

	/* Sanity check that the vCPUs actually ran. */
	for (i = 0; i < nr_vcpus; i++)
		pthread_join(threads[i], NULL);

	/*
	 * Deliberately exit without deleting the remaining memslots or closing
	 * kvm_fd to test cleanup via mmu_notifier.release.
	 */
}