KVM: selftests: Add a helper to check EPT/VPID capabilities
// SPDX-License-Identifier: GPL-2.0-only
/*
 * tools/testing/selftests/kvm/lib/x86_64/vmx.c
 *
 * Copyright (C) 2018, Google LLC.
 */

#include "test_util.h"
#include "kvm_util.h"
#include "../kvm_util_internal.h"
#include "processor.h"
#include "vmx.h"

#define PAGE_SHIFT_4K  12

#define KVM_EPT_PAGE_TABLE_MIN_PADDR 0x1c0000

bool enable_evmcs;

struct hv_enlightened_vmcs *current_evmcs;
struct hv_vp_assist_page *current_vp_assist;

struct eptPageTableEntry {
        uint64_t readable:1;
        uint64_t writable:1;
        uint64_t executable:1;
        uint64_t memory_type:3;
        uint64_t ignore_pat:1;
        uint64_t page_size:1;
        uint64_t accessed:1;
        uint64_t dirty:1;
        uint64_t ignored_11_10:2;
        uint64_t address:40;
        uint64_t ignored_62_52:11;
        uint64_t suppress_ve:1;
};

struct eptPageTablePointer {
        uint64_t memory_type:3;
        uint64_t page_walk_length:3;
        uint64_t ad_enabled:1;
        uint64_t reserved_11_07:5;
        uint64_t address:40;
        uint64_t reserved_63_52:12;
};
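
/*
 * Enable the KVM_CAP_HYPERV_ENLIGHTENED_VMCS capability for the given vCPU
 * and return the eVMCS version range KVM reports as supported (low byte:
 * minimum version, high byte: maximum version).
 */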
int vcpu_enable_evmcs(struct kvm_vm *vm, int vcpu_id)
{
        uint16_t evmcs_ver;

        struct kvm_enable_cap enable_evmcs_cap = {
                .cap = KVM_CAP_HYPERV_ENLIGHTENED_VMCS,
                .args[0] = (unsigned long)&evmcs_ver
        };

        vcpu_ioctl(vm, vcpu_id, KVM_ENABLE_CAP, &enable_evmcs_cap);

        /* KVM should return supported EVMCS version range */
        TEST_ASSERT(((evmcs_ver >> 8) >= (evmcs_ver & 0xff)) &&
                    (evmcs_ver & 0xff) > 0,
                    "Incorrect EVMCS version range: %x:%x\n",
                    evmcs_ver & 0xff, evmcs_ver >> 8);

        return evmcs_ver;
}

/*
 * Allocate memory regions for nested VMX tests.
 *
 * Input Args:
 *   vm - The VM to allocate guest-virtual addresses in.
 *
 * Output Args:
 *   p_vmx_gva - The guest virtual address for the struct vmx_pages.
 *
 * Return:
 *   Pointer to structure with the addresses of the VMX areas.
 */
struct vmx_pages *
vcpu_alloc_vmx(struct kvm_vm *vm, vm_vaddr_t *p_vmx_gva)
{
        vm_vaddr_t vmx_gva = vm_vaddr_alloc_page(vm);
        struct vmx_pages *vmx = addr_gva2hva(vm, vmx_gva);

        /* Setup of a region of guest memory for the vmxon region. */
        vmx->vmxon = (void *)vm_vaddr_alloc_page(vm);
        vmx->vmxon_hva = addr_gva2hva(vm, (uintptr_t)vmx->vmxon);
        vmx->vmxon_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vmxon);

        /* Setup of a region of guest memory for a vmcs. */
        vmx->vmcs = (void *)vm_vaddr_alloc_page(vm);
        vmx->vmcs_hva = addr_gva2hva(vm, (uintptr_t)vmx->vmcs);
        vmx->vmcs_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vmcs);

        /* Setup of a region of guest memory for the MSR bitmap. */
        vmx->msr = (void *)vm_vaddr_alloc_page(vm);
        vmx->msr_hva = addr_gva2hva(vm, (uintptr_t)vmx->msr);
        vmx->msr_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->msr);
        memset(vmx->msr_hva, 0, getpagesize());

        /* Setup of a region of guest memory for the shadow VMCS. */
        vmx->shadow_vmcs = (void *)vm_vaddr_alloc_page(vm);
        vmx->shadow_vmcs_hva = addr_gva2hva(vm, (uintptr_t)vmx->shadow_vmcs);
        vmx->shadow_vmcs_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->shadow_vmcs);

        /* Setup of a region of guest memory for the VMREAD and VMWRITE bitmaps. */
        vmx->vmread = (void *)vm_vaddr_alloc_page(vm);
        vmx->vmread_hva = addr_gva2hva(vm, (uintptr_t)vmx->vmread);
        vmx->vmread_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vmread);
        memset(vmx->vmread_hva, 0, getpagesize());

        vmx->vmwrite = (void *)vm_vaddr_alloc_page(vm);
        vmx->vmwrite_hva = addr_gva2hva(vm, (uintptr_t)vmx->vmwrite);
        vmx->vmwrite_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vmwrite);
        memset(vmx->vmwrite_hva, 0, getpagesize());

        /* Setup of a region of guest memory for the VP Assist page. */
        vmx->vp_assist = (void *)vm_vaddr_alloc_page(vm);
        vmx->vp_assist_hva = addr_gva2hva(vm, (uintptr_t)vmx->vp_assist);
        vmx->vp_assist_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vp_assist);

        /* Setup of a region of guest memory for the enlightened VMCS. */
        vmx->enlightened_vmcs = (void *)vm_vaddr_alloc_page(vm);
        vmx->enlightened_vmcs_hva =
                addr_gva2hva(vm, (uintptr_t)vmx->enlightened_vmcs);
        vmx->enlightened_vmcs_gpa =
                addr_gva2gpa(vm, (uintptr_t)vmx->enlightened_vmcs);

        *p_vmx_gva = vmx_gva;
        return vmx;
}

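/*
 * A typical nested test drives these helpers roughly as follows (a sketch,
 * not a definitive flow; VCPU_ID, l2_guest_code and l2_rsp are illustrative
 * names, not part of this file):
 *
 *   host:  vmx = vcpu_alloc_vmx(vm, &vmx_gva);
 *          vcpu_args_set(vm, VCPU_ID, 1, vmx_gva);
 *   L1:    GUEST_ASSERT(prepare_for_vmx_operation(vmx));
 *          GUEST_ASSERT(load_vmcs(vmx));
 *          prepare_vmcs(vmx, l2_guest_code, l2_rsp);
 *          GUEST_ASSERT(!vmlaunch());
 *
 * prepare_for_vmx_operation() brings the vCPU into VMX root operation: it
 * fixes up CR0/CR4 per the FIXED0/FIXED1 MSRs, sets the lock and
 * VMXON-outside-SMX bits in IA32_FEATURE_CONTROL if needed, and executes
 * VMXON.  Returns false if VMXON fails.
 */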
bool prepare_for_vmx_operation(struct vmx_pages *vmx)
{
        uint64_t feature_control;
        uint64_t required;
        unsigned long cr0;
        unsigned long cr4;

        /*
         * Ensure bits in CR0 and CR4 are valid in VMX operation:
         * - Bit X is 1 in _FIXED0: bit X is fixed to 1 in CRx.
         * - Bit X is 0 in _FIXED1: bit X is fixed to 0 in CRx.
         */
        __asm__ __volatile__("mov %%cr0, %0" : "=r"(cr0) : : "memory");
        cr0 &= rdmsr(MSR_IA32_VMX_CR0_FIXED1);
        cr0 |= rdmsr(MSR_IA32_VMX_CR0_FIXED0);
        __asm__ __volatile__("mov %0, %%cr0" : : "r"(cr0) : "memory");

        __asm__ __volatile__("mov %%cr4, %0" : "=r"(cr4) : : "memory");
        cr4 &= rdmsr(MSR_IA32_VMX_CR4_FIXED1);
        cr4 |= rdmsr(MSR_IA32_VMX_CR4_FIXED0);
        /* Enable VMX operation */
        cr4 |= X86_CR4_VMXE;
        __asm__ __volatile__("mov %0, %%cr4" : : "r"(cr4) : "memory");

        /*
         * Configure IA32_FEATURE_CONTROL MSR to allow VMXON:
         *  Bit 0: Lock bit. If clear, VMXON causes a #GP.
         *  Bit 2: Enables VMXON outside of SMX operation. If clear, VMXON
         *    outside of SMX causes a #GP.
         */
        required = FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX;
        required |= FEAT_CTL_LOCKED;
        feature_control = rdmsr(MSR_IA32_FEAT_CTL);
        if ((feature_control & required) != required)
                wrmsr(MSR_IA32_FEAT_CTL, feature_control | required);

        /* Enter VMX root operation. */
        *(uint32_t *)(vmx->vmxon) = vmcs_revision();
        if (vmxon(vmx->vmxon_gpa))
                return false;

        return true;
}

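/*
 * Make the vCPU's VMCS current: write the revision ID, then VMCLEAR and
 * VMPTRLD the ordinary VMCS (the shadow VMCS is initialized but not
 * loaded), or VMPTRLD the enlightened VMCS when eVMCS is in use.
 */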
bool load_vmcs(struct vmx_pages *vmx)
{
        if (!enable_evmcs) {
                /* Load a VMCS. */
                *(uint32_t *)(vmx->vmcs) = vmcs_revision();
                if (vmclear(vmx->vmcs_gpa))
                        return false;

                if (vmptrld(vmx->vmcs_gpa))
                        return false;

                /* Setup shadow VMCS, do not load it yet. */
                *(uint32_t *)(vmx->shadow_vmcs) =
                        vmcs_revision() | 0x80000000ul;
                if (vmclear(vmx->shadow_vmcs_gpa))
                        return false;
        } else {
                if (evmcs_vmptrld(vmx->enlightened_vmcs_gpa,
                                  vmx->enlightened_vmcs))
                        return false;
                current_evmcs->revision_id = EVMCS_VERSION;
        }

        return true;
}

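/* Returns true if the given EPT/VPID capability is supported by the CPU. */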
static bool ept_vpid_cap_supported(uint64_t mask)
{
        return rdmsr(MSR_IA32_VMX_EPT_VPID_CAP) & mask;
}

/*
 * Initialize the control fields to the most basic settings possible.
 */
static inline void init_vmcs_control_fields(struct vmx_pages *vmx)
{
        uint32_t sec_exec_ctl = 0;

        vmwrite(VIRTUAL_PROCESSOR_ID, 0);
        vmwrite(POSTED_INTR_NV, 0);

        vmwrite(PIN_BASED_VM_EXEC_CONTROL, rdmsr(MSR_IA32_VMX_TRUE_PINBASED_CTLS));

        if (vmx->eptp_gpa) {
                uint64_t ept_paddr;
                struct eptPageTablePointer eptp = {
                        .memory_type = VMX_BASIC_MEM_TYPE_WB,
                        .page_walk_length = 3, /* 4-level walk, encoded as length - 1 */
                        .ad_enabled = ept_vpid_cap_supported(VMX_EPT_VPID_CAP_AD_BITS),
                        .address = vmx->eptp_gpa >> PAGE_SHIFT_4K,
                };

                memcpy(&ept_paddr, &eptp, sizeof(ept_paddr));
                vmwrite(EPT_POINTER, ept_paddr);
                sec_exec_ctl |= SECONDARY_EXEC_ENABLE_EPT;
        }

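        /*
         * vmwrite() returns 0 on success.  If the secondary controls field
         * can be written, activate it via the primary controls; otherwise
         * require that no secondary controls (i.e. EPT) were requested.
         */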
        if (!vmwrite(SECONDARY_VM_EXEC_CONTROL, sec_exec_ctl))
                vmwrite(CPU_BASED_VM_EXEC_CONTROL,
                        rdmsr(MSR_IA32_VMX_TRUE_PROCBASED_CTLS) | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS);
        else {
                vmwrite(CPU_BASED_VM_EXEC_CONTROL, rdmsr(MSR_IA32_VMX_TRUE_PROCBASED_CTLS));
                GUEST_ASSERT(!sec_exec_ctl);
        }

        vmwrite(EXCEPTION_BITMAP, 0);
        vmwrite(PAGE_FAULT_ERROR_CODE_MASK, 0);
        vmwrite(PAGE_FAULT_ERROR_CODE_MATCH, -1); /* Never match */
        vmwrite(CR3_TARGET_COUNT, 0);
        vmwrite(VM_EXIT_CONTROLS, rdmsr(MSR_IA32_VMX_EXIT_CTLS) |
                VM_EXIT_HOST_ADDR_SPACE_SIZE);    /* 64-bit host */
        vmwrite(VM_EXIT_MSR_STORE_COUNT, 0);
        vmwrite(VM_EXIT_MSR_LOAD_COUNT, 0);
        vmwrite(VM_ENTRY_CONTROLS, rdmsr(MSR_IA32_VMX_ENTRY_CTLS) |
                VM_ENTRY_IA32E_MODE);             /* 64-bit guest */
        vmwrite(VM_ENTRY_MSR_LOAD_COUNT, 0);
        vmwrite(VM_ENTRY_INTR_INFO_FIELD, 0);
        vmwrite(TPR_THRESHOLD, 0);

        vmwrite(CR0_GUEST_HOST_MASK, 0);
        vmwrite(CR4_GUEST_HOST_MASK, 0);
        vmwrite(CR0_READ_SHADOW, get_cr0());
        vmwrite(CR4_READ_SHADOW, get_cr4());

        vmwrite(MSR_BITMAP, vmx->msr_gpa);
        vmwrite(VMREAD_BITMAP, vmx->vmread_gpa);
        vmwrite(VMWRITE_BITMAP, vmx->vmwrite_gpa);
}

/*
 * Initialize the host state fields based on the current host state, with
 * the exception of HOST_RSP and HOST_RIP, which should be set by vmlaunch
 * or vmresume.
 */
static inline void init_vmcs_host_state(void)
{
        uint32_t exit_controls = vmreadz(VM_EXIT_CONTROLS);

        vmwrite(HOST_ES_SELECTOR, get_es());
        vmwrite(HOST_CS_SELECTOR, get_cs());
        vmwrite(HOST_SS_SELECTOR, get_ss());
        vmwrite(HOST_DS_SELECTOR, get_ds());
        vmwrite(HOST_FS_SELECTOR, get_fs());
        vmwrite(HOST_GS_SELECTOR, get_gs());
        vmwrite(HOST_TR_SELECTOR, get_tr());

        if (exit_controls & VM_EXIT_LOAD_IA32_PAT)
                vmwrite(HOST_IA32_PAT, rdmsr(MSR_IA32_CR_PAT));
        if (exit_controls & VM_EXIT_LOAD_IA32_EFER)
                vmwrite(HOST_IA32_EFER, rdmsr(MSR_EFER));
        if (exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL)
                vmwrite(HOST_IA32_PERF_GLOBAL_CTRL,
                        rdmsr(MSR_CORE_PERF_GLOBAL_CTRL));

        vmwrite(HOST_IA32_SYSENTER_CS, rdmsr(MSR_IA32_SYSENTER_CS));

        vmwrite(HOST_CR0, get_cr0());
        vmwrite(HOST_CR3, get_cr3());
        vmwrite(HOST_CR4, get_cr4());
        vmwrite(HOST_FS_BASE, rdmsr(MSR_FS_BASE));
        vmwrite(HOST_GS_BASE, rdmsr(MSR_GS_BASE));
        vmwrite(HOST_TR_BASE,
                get_desc64_base((struct desc64 *)(get_gdt().address + get_tr())));
        vmwrite(HOST_GDTR_BASE, get_gdt().address);
        vmwrite(HOST_IDTR_BASE, get_idt().address);
        vmwrite(HOST_IA32_SYSENTER_ESP, rdmsr(MSR_IA32_SYSENTER_ESP));
        vmwrite(HOST_IA32_SYSENTER_EIP, rdmsr(MSR_IA32_SYSENTER_EIP));
}

/*
 * Initialize the guest state fields essentially as a clone of
 * the host state fields. Some host state fields have fixed
 * values, and we set the corresponding guest state fields accordingly.
 */
static inline void init_vmcs_guest_state(void *rip, void *rsp)
{
        vmwrite(GUEST_ES_SELECTOR, vmreadz(HOST_ES_SELECTOR));
        vmwrite(GUEST_CS_SELECTOR, vmreadz(HOST_CS_SELECTOR));
        vmwrite(GUEST_SS_SELECTOR, vmreadz(HOST_SS_SELECTOR));
        vmwrite(GUEST_DS_SELECTOR, vmreadz(HOST_DS_SELECTOR));
        vmwrite(GUEST_FS_SELECTOR, vmreadz(HOST_FS_SELECTOR));
        vmwrite(GUEST_GS_SELECTOR, vmreadz(HOST_GS_SELECTOR));
        vmwrite(GUEST_LDTR_SELECTOR, 0);
        vmwrite(GUEST_TR_SELECTOR, vmreadz(HOST_TR_SELECTOR));
        vmwrite(GUEST_INTR_STATUS, 0);
        vmwrite(GUEST_PML_INDEX, 0);

        vmwrite(VMCS_LINK_POINTER, -1ll);
        vmwrite(GUEST_IA32_DEBUGCTL, 0);
        vmwrite(GUEST_IA32_PAT, vmreadz(HOST_IA32_PAT));
        vmwrite(GUEST_IA32_EFER, vmreadz(HOST_IA32_EFER));
        vmwrite(GUEST_IA32_PERF_GLOBAL_CTRL,
                vmreadz(HOST_IA32_PERF_GLOBAL_CTRL));

        vmwrite(GUEST_ES_LIMIT, -1);
        vmwrite(GUEST_CS_LIMIT, -1);
        vmwrite(GUEST_SS_LIMIT, -1);
        vmwrite(GUEST_DS_LIMIT, -1);
        vmwrite(GUEST_FS_LIMIT, -1);
        vmwrite(GUEST_GS_LIMIT, -1);
        vmwrite(GUEST_LDTR_LIMIT, -1);
        vmwrite(GUEST_TR_LIMIT, 0x67);
        vmwrite(GUEST_GDTR_LIMIT, 0xffff);
        vmwrite(GUEST_IDTR_LIMIT, 0xffff);
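        /*
         * Access-rights values: 0xa09b = present 64-bit code segment
         * (L=1, G=1), 0xc093 = present read/write data segment (D/B=1,
         * G=1), 0x10000 = "unusable" for null selectors.
         */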
        vmwrite(GUEST_ES_AR_BYTES,
                vmreadz(GUEST_ES_SELECTOR) == 0 ? 0x10000 : 0xc093);
        vmwrite(GUEST_CS_AR_BYTES, 0xa09b);
        vmwrite(GUEST_SS_AR_BYTES, 0xc093);
        vmwrite(GUEST_DS_AR_BYTES,
                vmreadz(GUEST_DS_SELECTOR) == 0 ? 0x10000 : 0xc093);
        vmwrite(GUEST_FS_AR_BYTES,
                vmreadz(GUEST_FS_SELECTOR) == 0 ? 0x10000 : 0xc093);
        vmwrite(GUEST_GS_AR_BYTES,
                vmreadz(GUEST_GS_SELECTOR) == 0 ? 0x10000 : 0xc093);
        vmwrite(GUEST_LDTR_AR_BYTES, 0x10000);
        vmwrite(GUEST_TR_AR_BYTES, 0x8b);
        vmwrite(GUEST_INTERRUPTIBILITY_INFO, 0);
        vmwrite(GUEST_ACTIVITY_STATE, 0);
        vmwrite(GUEST_SYSENTER_CS, vmreadz(HOST_IA32_SYSENTER_CS));
        vmwrite(VMX_PREEMPTION_TIMER_VALUE, 0);

        vmwrite(GUEST_CR0, vmreadz(HOST_CR0));
        vmwrite(GUEST_CR3, vmreadz(HOST_CR3));
        vmwrite(GUEST_CR4, vmreadz(HOST_CR4));
        vmwrite(GUEST_ES_BASE, 0);
        vmwrite(GUEST_CS_BASE, 0);
        vmwrite(GUEST_SS_BASE, 0);
        vmwrite(GUEST_DS_BASE, 0);
        vmwrite(GUEST_FS_BASE, vmreadz(HOST_FS_BASE));
        vmwrite(GUEST_GS_BASE, vmreadz(HOST_GS_BASE));
        vmwrite(GUEST_LDTR_BASE, 0);
        vmwrite(GUEST_TR_BASE, vmreadz(HOST_TR_BASE));
        vmwrite(GUEST_GDTR_BASE, vmreadz(HOST_GDTR_BASE));
        vmwrite(GUEST_IDTR_BASE, vmreadz(HOST_IDTR_BASE));
        vmwrite(GUEST_DR7, 0x400);
        vmwrite(GUEST_RSP, (uint64_t)rsp);
        vmwrite(GUEST_RIP, (uint64_t)rip);
        vmwrite(GUEST_RFLAGS, 2);
        vmwrite(GUEST_PENDING_DBG_EXCEPTIONS, 0);
        vmwrite(GUEST_SYSENTER_ESP, vmreadz(HOST_IA32_SYSENTER_ESP));
        vmwrite(GUEST_SYSENTER_EIP, vmreadz(HOST_IA32_SYSENTER_EIP));
}

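/* Initialize the current VMCS's control, host state and guest state fields. */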
void prepare_vmcs(struct vmx_pages *vmx, void *guest_rip, void *guest_rsp)
{
        init_vmcs_control_fields(vmx);
        init_vmcs_host_state();
        init_vmcs_guest_state(guest_rip, guest_rsp);
}

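/* Returns true if CPUID.1:ECX reports VMX support (bit 5). */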
bool nested_vmx_supported(void)
{
        struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1);

        return entry->ecx & CPUID_VMX;
}

void nested_vmx_check_supported(void)
{
        if (!nested_vmx_supported()) {
                print_skip("nested VMX not enabled");
                exit(KSFT_SKIP);
        }
}

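/*
 * Fill in a not-yet-present EPT entry: either a final (possibly huge) page
 * mapping of paddr when current_level == target_level, or a link to a
 * newly allocated lower-level page table.
 */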
static void nested_create_pte(struct kvm_vm *vm,
                              struct eptPageTableEntry *pte,
                              uint64_t nested_paddr,
                              uint64_t paddr,
                              int current_level,
                              int target_level)
{
        if (!pte->readable) {
                pte->writable = true;
                pte->readable = true;
                pte->executable = true;
                pte->page_size = (current_level == target_level);
                if (pte->page_size)
                        pte->address = paddr >> vm->page_shift;
                else
                        pte->address = vm_alloc_page_table(vm) >> vm->page_shift;
        } else {
                /*
                 * Entry already present.  Assert that the caller doesn't want
                 * a hugepage at this level, and that there isn't a hugepage at
                 * this level.
                 */
                TEST_ASSERT(current_level != target_level,
                            "Cannot create hugepage at level: %u, nested_paddr: 0x%lx\n",
                            current_level, nested_paddr);
                TEST_ASSERT(!pte->page_size,
                            "Cannot create page table at level: %u, nested_paddr: 0x%lx\n",
                            current_level, nested_paddr);
        }
}

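/*
 * Walk the EPT from the root, creating intermediate tables as needed, and
 * install a mapping of size PG_LEVEL_SIZE(target_level) from nested_paddr
 * to paddr.
 */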
void __nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
                     uint64_t nested_paddr, uint64_t paddr, int target_level)
{
        const uint64_t page_size = PG_LEVEL_SIZE(target_level);
        struct eptPageTableEntry *pt = vmx->eptp_hva, *pte;
        uint16_t index;

        TEST_ASSERT(vm->mode == VM_MODE_PXXV48_4K, "Attempt to use "
                    "unknown or unsupported guest mode, mode: 0x%x", vm->mode);

        TEST_ASSERT((nested_paddr % page_size) == 0,
                    "Nested physical address not on page boundary,\n"
                    "  nested_paddr: 0x%lx page_size: 0x%lx",
                    nested_paddr, page_size);
        TEST_ASSERT((nested_paddr >> vm->page_shift) <= vm->max_gfn,
                    "Nested physical address beyond maximum supported,\n"
                    "  nested_paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
                    nested_paddr, vm->max_gfn, vm->page_size);
        TEST_ASSERT((paddr % page_size) == 0,
                    "Physical address not on page boundary,\n"
                    "  paddr: 0x%lx page_size: 0x%lx",
                    paddr, page_size);
        TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn,
                    "Physical address beyond maximum supported,\n"
                    "  paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
                    paddr, vm->max_gfn, vm->page_size);

        for (int level = PG_LEVEL_512G; level >= PG_LEVEL_4K; level--) {
                index = (nested_paddr >> PG_LEVEL_SHIFT(level)) & 0x1ffu;
                pte = &pt[index];

                nested_create_pte(vm, pte, nested_paddr, paddr, level, target_level);

                if (pte->page_size)
                        break;

                pt = addr_gpa2hva(vm, pte->address * vm->page_size);
        }

        /*
         * For now mark these as accessed and dirty because the only
         * testcase we have needs that.  Can be reconsidered later.
         */
        pte->accessed = true;
        pte->dirty = true;
}

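/* Map a single 4K page from nested_paddr to paddr. */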
void nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
                   uint64_t nested_paddr, uint64_t paddr)
{
        __nested_pg_map(vmx, vm, nested_paddr, paddr, PG_LEVEL_4K);
}

/*
 * Map a range of EPT guest physical addresses to the VM's physical addresses
 *
 * Input Args:
 *   vmx - The vmx_pages holding the EPT root
 *   vm - Virtual Machine
 *   nested_paddr - Nested guest physical address to map
 *   paddr - VM Physical Address
 *   size - The size of the range to map
 *   level - The level at which to map the range
 *
 * Output Args: None
 *
 * Return: None
 *
 * Within the VM given by vm, creates a nested guest translation for the
 * page range starting at nested_paddr to the page range starting at paddr.
 */
void __nested_map(struct vmx_pages *vmx, struct kvm_vm *vm,
                  uint64_t nested_paddr, uint64_t paddr, uint64_t size,
                  int level)
{
        size_t page_size = PG_LEVEL_SIZE(level);
        size_t npages = size / page_size;

        TEST_ASSERT(nested_paddr + size > nested_paddr, "Nested paddr overflow");
        TEST_ASSERT(paddr + size > paddr, "Paddr overflow");

        while (npages--) {
                __nested_pg_map(vmx, vm, nested_paddr, paddr, level);
                nested_paddr += page_size;
                paddr += page_size;
        }
}

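/* Map a range of 4K pages from nested_paddr to paddr. */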
void nested_map(struct vmx_pages *vmx, struct kvm_vm *vm,
                uint64_t nested_paddr, uint64_t paddr, uint64_t size)
{
        __nested_map(vmx, vm, nested_paddr, paddr, size, PG_LEVEL_4K);
}

/*
 * Prepare an identity extended page table that maps all the
 * physical pages in VM.
 */
void nested_map_memslot(struct vmx_pages *vmx, struct kvm_vm *vm,
                        uint32_t memslot)
{
        sparsebit_idx_t i, last;
        struct userspace_mem_region *region =
                memslot2region(vm, memslot);

        i = (region->region.guest_phys_addr >> vm->page_shift) - 1;
        last = i + (region->region.memory_size >> vm->page_shift);
        for (;;) {
                i = sparsebit_next_clear(region->unused_phy_pages, i);
                if (i > last)
                        break;

                nested_map(vmx, vm,
                           (uint64_t)i << vm->page_shift,
                           (uint64_t)i << vm->page_shift,
                           1 << vm->page_shift);
        }
}

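/*
 * Allocate the EPT root (PML4) page.  Note, the eptp_memslot argument is
 * currently unused.
 */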
void prepare_eptp(struct vmx_pages *vmx, struct kvm_vm *vm,
                  uint32_t eptp_memslot)
{
        vmx->eptp = (void *)vm_vaddr_alloc_page(vm);
        vmx->eptp_hva = addr_gva2hva(vm, (uintptr_t)vmx->eptp);
        vmx->eptp_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->eptp);
}

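/* Allocate the APIC-access page used with "virtualize APIC accesses". */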
void prepare_virtualize_apic_accesses(struct vmx_pages *vmx, struct kvm_vm *vm)
{
        vmx->apic_access = (void *)vm_vaddr_alloc_page(vm);
        vmx->apic_access_hva = addr_gva2hva(vm, (uintptr_t)vmx->apic_access);
        vmx->apic_access_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->apic_access);
}