// SPDX-License-Identifier: GPL-2.0-only
/*
 * tools/testing/selftests/kvm/lib/x86_64/vmx.c
 *
 * Copyright (C) 2018, Google LLC.
 */

#include "test_util.h"
#include "kvm_util.h"
#include "../kvm_util_internal.h"
#include "processor.h"
#include "vmx.h"

#define PAGE_SHIFT_4K  12

#define KVM_EPT_PAGE_TABLE_MIN_PADDR 0x1c0000

bool enable_evmcs;

struct hv_enlightened_vmcs *current_evmcs;
struct hv_vp_assist_page *current_vp_assist;

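/*
 * Hardware layouts of a 4-KiB EPT page table entry and of the EPT pointer
 * (EPTP), per the Intel SDM.  The tests build EPT structures by hand, so
 * the bitfields below mirror the architectural formats.
 */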
struct eptPageTableEntry {
        uint64_t readable:1;
        uint64_t writable:1;
        uint64_t executable:1;
        uint64_t memory_type:3;
        uint64_t ignore_pat:1;
        uint64_t page_size:1;
        uint64_t accessed:1;
        uint64_t dirty:1;
        uint64_t ignored_11_10:2;
        uint64_t address:40;
        uint64_t ignored_62_52:11;
        uint64_t suppress_ve:1;
};

struct eptPageTablePointer {
        uint64_t memory_type:3;
        uint64_t page_walk_length:3;
        uint64_t ad_enabled:1;
        uint64_t reserved_11_07:5;
        uint64_t address:40;
        uint64_t reserved_63_52:12;
};
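
/*
 * Enable the Hyper-V enlightened VMCS for vcpu_id and return the eVMCS
 * version range supported by KVM (low byte = minimum, high byte = maximum).
 */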
int vcpu_enable_evmcs(struct kvm_vm *vm, int vcpu_id)
{
        uint16_t evmcs_ver;

        struct kvm_enable_cap enable_evmcs_cap = {
                .cap = KVM_CAP_HYPERV_ENLIGHTENED_VMCS,
                .args[0] = (unsigned long)&evmcs_ver
        };

        vcpu_ioctl(vm, vcpu_id, KVM_ENABLE_CAP, &enable_evmcs_cap);

        /* KVM should return supported EVMCS version range */
        TEST_ASSERT(((evmcs_ver >> 8) >= (evmcs_ver & 0xff)) &&
                    (evmcs_ver & 0xff) > 0,
                    "Incorrect EVMCS version range: %x:%x\n",
                    evmcs_ver & 0xff, evmcs_ver >> 8);

        return evmcs_ver;
}

/*
 * Allocate memory regions for nested VMX tests.
 *
 * Input Args:
 *   vm - The VM to allocate guest-virtual addresses in.
 *
 * Output Args:
 *   p_vmx_gva - The guest virtual address for the struct vmx_pages.
 *
 * Return:
 *   Pointer to structure with the addresses of the VMX areas.
 */
struct vmx_pages *
vcpu_alloc_vmx(struct kvm_vm *vm, vm_vaddr_t *p_vmx_gva)
{
        vm_vaddr_t vmx_gva = vm_vaddr_alloc_page(vm);
        struct vmx_pages *vmx = addr_gva2hva(vm, vmx_gva);

        /* Setup of a region of guest memory for the vmxon region. */
        vmx->vmxon = (void *)vm_vaddr_alloc_page(vm);
        vmx->vmxon_hva = addr_gva2hva(vm, (uintptr_t)vmx->vmxon);
        vmx->vmxon_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vmxon);

        /* Setup of a region of guest memory for a vmcs. */
        vmx->vmcs = (void *)vm_vaddr_alloc_page(vm);
        vmx->vmcs_hva = addr_gva2hva(vm, (uintptr_t)vmx->vmcs);
        vmx->vmcs_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vmcs);

        /* Setup of a region of guest memory for the MSR bitmap. */
        vmx->msr = (void *)vm_vaddr_alloc_page(vm);
        vmx->msr_hva = addr_gva2hva(vm, (uintptr_t)vmx->msr);
        vmx->msr_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->msr);
        memset(vmx->msr_hva, 0, getpagesize());

        /* Setup of a region of guest memory for the shadow VMCS. */
        vmx->shadow_vmcs = (void *)vm_vaddr_alloc_page(vm);
        vmx->shadow_vmcs_hva = addr_gva2hva(vm, (uintptr_t)vmx->shadow_vmcs);
        vmx->shadow_vmcs_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->shadow_vmcs);

        /* Setup of a region of guest memory for the VMREAD and VMWRITE bitmaps. */
        vmx->vmread = (void *)vm_vaddr_alloc_page(vm);
        vmx->vmread_hva = addr_gva2hva(vm, (uintptr_t)vmx->vmread);
        vmx->vmread_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vmread);
        memset(vmx->vmread_hva, 0, getpagesize());

        vmx->vmwrite = (void *)vm_vaddr_alloc_page(vm);
        vmx->vmwrite_hva = addr_gva2hva(vm, (uintptr_t)vmx->vmwrite);
        vmx->vmwrite_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vmwrite);
        memset(vmx->vmwrite_hva, 0, getpagesize());

        /* Setup of a region of guest memory for the VP Assist page. */
        vmx->vp_assist = (void *)vm_vaddr_alloc_page(vm);
        vmx->vp_assist_hva = addr_gva2hva(vm, (uintptr_t)vmx->vp_assist);
        vmx->vp_assist_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vp_assist);

        /* Setup of a region of guest memory for the enlightened VMCS. */
        vmx->enlightened_vmcs = (void *)vm_vaddr_alloc_page(vm);
        vmx->enlightened_vmcs_hva =
                addr_gva2hva(vm, (uintptr_t)vmx->enlightened_vmcs);
        vmx->enlightened_vmcs_gpa =
                addr_gva2gpa(vm, (uintptr_t)vmx->enlightened_vmcs);

        *p_vmx_gva = vmx_gva;
        return vmx;
}

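/*
 * Configure CR0/CR4 and IA32_FEATURE_CONTROL as required for VMXON, then
 * enter VMX root operation.  Returns false if VMXON fails.
 */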
bool prepare_for_vmx_operation(struct vmx_pages *vmx)
{
        uint64_t feature_control;
        uint64_t required;
        unsigned long cr0;
        unsigned long cr4;

        /*
         * Ensure bits in CR0 and CR4 are valid in VMX operation:
         * - Bit X is 1 in _FIXED0: bit X is fixed to 1 in CRx.
         * - Bit X is 0 in _FIXED1: bit X is fixed to 0 in CRx.
         */
        __asm__ __volatile__("mov %%cr0, %0" : "=r"(cr0) : : "memory");
        cr0 &= rdmsr(MSR_IA32_VMX_CR0_FIXED1);
        cr0 |= rdmsr(MSR_IA32_VMX_CR0_FIXED0);
        __asm__ __volatile__("mov %0, %%cr0" : : "r"(cr0) : "memory");

        __asm__ __volatile__("mov %%cr4, %0" : "=r"(cr4) : : "memory");
        cr4 &= rdmsr(MSR_IA32_VMX_CR4_FIXED1);
        cr4 |= rdmsr(MSR_IA32_VMX_CR4_FIXED0);
        /* Enable VMX operation */
        cr4 |= X86_CR4_VMXE;
        __asm__ __volatile__("mov %0, %%cr4" : : "r"(cr4) : "memory");

        /*
         * Configure IA32_FEATURE_CONTROL MSR to allow VMXON:
         *  Bit 0: Lock bit. If clear, VMXON causes a #GP.
         *  Bit 2: Enables VMXON outside of SMX operation. If clear, VMXON
         *    outside of SMX causes a #GP.
         */
        required = FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX;
        required |= FEAT_CTL_LOCKED;
        feature_control = rdmsr(MSR_IA32_FEAT_CTL);
        if ((feature_control & required) != required)
                wrmsr(MSR_IA32_FEAT_CTL, feature_control | required);

        /* Enter VMX root operation. */
        *(uint32_t *)(vmx->vmxon) = vmcs_revision();
        if (vmxon(vmx->vmxon_gpa))
                return false;

        return true;
}

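/*
 * Make this vCPU's VMCS current via VMCLEAR+VMPTRLD (and prepare, but do
 * not load, the shadow VMCS), or load the enlightened VMCS when eVMCS is
 * in use.  Returns false on failure.
 */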
bool load_vmcs(struct vmx_pages *vmx)
{
        if (!enable_evmcs) {
                /* Load a VMCS. */
                *(uint32_t *)(vmx->vmcs) = vmcs_revision();
                if (vmclear(vmx->vmcs_gpa))
                        return false;

                if (vmptrld(vmx->vmcs_gpa))
                        return false;

                /* Setup shadow VMCS, do not load it yet. */
                *(uint32_t *)(vmx->shadow_vmcs) =
                        vmcs_revision() | 0x80000000ul;
                if (vmclear(vmx->shadow_vmcs_gpa))
                        return false;
        } else {
                if (evmcs_vmptrld(vmx->enlightened_vmcs_gpa,
                                  vmx->enlightened_vmcs))
                        return false;
                current_evmcs->revision_id = EVMCS_VERSION;
        }

        return true;
}

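/* Check a capability bit in the IA32_VMX_EPT_VPID_CAP MSR. */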
static bool ept_vpid_cap_supported(uint64_t mask)
{
        return rdmsr(MSR_IA32_VMX_EPT_VPID_CAP) & mask;
}

bool ept_1g_pages_supported(void)
{
        return ept_vpid_cap_supported(VMX_EPT_VPID_CAP_1G_PAGES);
}

/*
 * Initialize the control fields to the most basic settings possible.
 */
static inline void init_vmcs_control_fields(struct vmx_pages *vmx)
{
        uint32_t sec_exec_ctl = 0;

        vmwrite(VIRTUAL_PROCESSOR_ID, 0);
        vmwrite(POSTED_INTR_NV, 0);

        vmwrite(PIN_BASED_VM_EXEC_CONTROL, rdmsr(MSR_IA32_VMX_TRUE_PINBASED_CTLS));

        if (vmx->eptp_gpa) {
                uint64_t ept_paddr;
                struct eptPageTablePointer eptp = {
                        .memory_type = VMX_BASIC_MEM_TYPE_WB,
                        .page_walk_length = 3, /* EPTP encodes (walk length - 1), i.e. 4-level */
                        .ad_enabled = ept_vpid_cap_supported(VMX_EPT_VPID_CAP_AD_BITS),
                        .address = vmx->eptp_gpa >> PAGE_SHIFT_4K,
                };

                memcpy(&ept_paddr, &eptp, sizeof(ept_paddr));
                vmwrite(EPT_POINTER, ept_paddr);
                sec_exec_ctl |= SECONDARY_EXEC_ENABLE_EPT;
        }

        if (!vmwrite(SECONDARY_VM_EXEC_CONTROL, sec_exec_ctl)) {
                vmwrite(CPU_BASED_VM_EXEC_CONTROL,
                        rdmsr(MSR_IA32_VMX_TRUE_PROCBASED_CTLS) | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS);
        } else {
                vmwrite(CPU_BASED_VM_EXEC_CONTROL, rdmsr(MSR_IA32_VMX_TRUE_PROCBASED_CTLS));
                GUEST_ASSERT(!sec_exec_ctl);
        }

        vmwrite(EXCEPTION_BITMAP, 0);
        vmwrite(PAGE_FAULT_ERROR_CODE_MASK, 0);
        vmwrite(PAGE_FAULT_ERROR_CODE_MATCH, -1); /* Never match */
        vmwrite(CR3_TARGET_COUNT, 0);
        vmwrite(VM_EXIT_CONTROLS, rdmsr(MSR_IA32_VMX_EXIT_CTLS) |
                VM_EXIT_HOST_ADDR_SPACE_SIZE);    /* 64-bit host */
        vmwrite(VM_EXIT_MSR_STORE_COUNT, 0);
        vmwrite(VM_EXIT_MSR_LOAD_COUNT, 0);
        vmwrite(VM_ENTRY_CONTROLS, rdmsr(MSR_IA32_VMX_ENTRY_CTLS) |
                VM_ENTRY_IA32E_MODE);             /* 64-bit guest */
        vmwrite(VM_ENTRY_MSR_LOAD_COUNT, 0);
        vmwrite(VM_ENTRY_INTR_INFO_FIELD, 0);
        vmwrite(TPR_THRESHOLD, 0);

        vmwrite(CR0_GUEST_HOST_MASK, 0);
        vmwrite(CR4_GUEST_HOST_MASK, 0);
        vmwrite(CR0_READ_SHADOW, get_cr0());
        vmwrite(CR4_READ_SHADOW, get_cr4());

        vmwrite(MSR_BITMAP, vmx->msr_gpa);
        vmwrite(VMREAD_BITMAP, vmx->vmread_gpa);
        vmwrite(VMWRITE_BITMAP, vmx->vmwrite_gpa);
}

/*
 * Initialize the host state fields based on the current host state, with
 * the exception of HOST_RSP and HOST_RIP, which should be set by vmlaunch
 * or vmresume.
 */
static inline void init_vmcs_host_state(void)
{
        uint32_t exit_controls = vmreadz(VM_EXIT_CONTROLS);

        vmwrite(HOST_ES_SELECTOR, get_es());
        vmwrite(HOST_CS_SELECTOR, get_cs());
        vmwrite(HOST_SS_SELECTOR, get_ss());
        vmwrite(HOST_DS_SELECTOR, get_ds());
        vmwrite(HOST_FS_SELECTOR, get_fs());
        vmwrite(HOST_GS_SELECTOR, get_gs());
        vmwrite(HOST_TR_SELECTOR, get_tr());

        if (exit_controls & VM_EXIT_LOAD_IA32_PAT)
                vmwrite(HOST_IA32_PAT, rdmsr(MSR_IA32_CR_PAT));
        if (exit_controls & VM_EXIT_LOAD_IA32_EFER)
                vmwrite(HOST_IA32_EFER, rdmsr(MSR_EFER));
        if (exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL)
                vmwrite(HOST_IA32_PERF_GLOBAL_CTRL,
                        rdmsr(MSR_CORE_PERF_GLOBAL_CTRL));

        vmwrite(HOST_IA32_SYSENTER_CS, rdmsr(MSR_IA32_SYSENTER_CS));

        vmwrite(HOST_CR0, get_cr0());
        vmwrite(HOST_CR3, get_cr3());
        vmwrite(HOST_CR4, get_cr4());
        vmwrite(HOST_FS_BASE, rdmsr(MSR_FS_BASE));
        vmwrite(HOST_GS_BASE, rdmsr(MSR_GS_BASE));
        vmwrite(HOST_TR_BASE,
                get_desc64_base((struct desc64 *)(get_gdt().address + get_tr())));
        vmwrite(HOST_GDTR_BASE, get_gdt().address);
        vmwrite(HOST_IDTR_BASE, get_idt().address);
        vmwrite(HOST_IA32_SYSENTER_ESP, rdmsr(MSR_IA32_SYSENTER_ESP));
        vmwrite(HOST_IA32_SYSENTER_EIP, rdmsr(MSR_IA32_SYSENTER_EIP));
}

/*
 * Initialize the guest state fields essentially as a clone of
 * the host state fields. Some host state fields have fixed
 * values, and we set the corresponding guest state fields accordingly.
 */
static inline void init_vmcs_guest_state(void *rip, void *rsp)
{
        vmwrite(GUEST_ES_SELECTOR, vmreadz(HOST_ES_SELECTOR));
        vmwrite(GUEST_CS_SELECTOR, vmreadz(HOST_CS_SELECTOR));
        vmwrite(GUEST_SS_SELECTOR, vmreadz(HOST_SS_SELECTOR));
        vmwrite(GUEST_DS_SELECTOR, vmreadz(HOST_DS_SELECTOR));
        vmwrite(GUEST_FS_SELECTOR, vmreadz(HOST_FS_SELECTOR));
        vmwrite(GUEST_GS_SELECTOR, vmreadz(HOST_GS_SELECTOR));
        vmwrite(GUEST_LDTR_SELECTOR, 0);
        vmwrite(GUEST_TR_SELECTOR, vmreadz(HOST_TR_SELECTOR));
        vmwrite(GUEST_INTR_STATUS, 0);
        vmwrite(GUEST_PML_INDEX, 0);

        vmwrite(VMCS_LINK_POINTER, -1ll);
        vmwrite(GUEST_IA32_DEBUGCTL, 0);
        vmwrite(GUEST_IA32_PAT, vmreadz(HOST_IA32_PAT));
        vmwrite(GUEST_IA32_EFER, vmreadz(HOST_IA32_EFER));
        vmwrite(GUEST_IA32_PERF_GLOBAL_CTRL,
                vmreadz(HOST_IA32_PERF_GLOBAL_CTRL));

        vmwrite(GUEST_ES_LIMIT, -1);
        vmwrite(GUEST_CS_LIMIT, -1);
        vmwrite(GUEST_SS_LIMIT, -1);
        vmwrite(GUEST_DS_LIMIT, -1);
        vmwrite(GUEST_FS_LIMIT, -1);
        vmwrite(GUEST_GS_LIMIT, -1);
        vmwrite(GUEST_LDTR_LIMIT, -1);
        vmwrite(GUEST_TR_LIMIT, 0x67);
        vmwrite(GUEST_GDTR_LIMIT, 0xffff);
        vmwrite(GUEST_IDTR_LIMIT, 0xffff);
        vmwrite(GUEST_ES_AR_BYTES,
                vmreadz(GUEST_ES_SELECTOR) == 0 ? 0x10000 : 0xc093);
        vmwrite(GUEST_CS_AR_BYTES, 0xa09b);
        vmwrite(GUEST_SS_AR_BYTES, 0xc093);
        vmwrite(GUEST_DS_AR_BYTES,
                vmreadz(GUEST_DS_SELECTOR) == 0 ? 0x10000 : 0xc093);
        vmwrite(GUEST_FS_AR_BYTES,
                vmreadz(GUEST_FS_SELECTOR) == 0 ? 0x10000 : 0xc093);
        vmwrite(GUEST_GS_AR_BYTES,
                vmreadz(GUEST_GS_SELECTOR) == 0 ? 0x10000 : 0xc093);
        vmwrite(GUEST_LDTR_AR_BYTES, 0x10000);
        vmwrite(GUEST_TR_AR_BYTES, 0x8b);
        vmwrite(GUEST_INTERRUPTIBILITY_INFO, 0);
        vmwrite(GUEST_ACTIVITY_STATE, 0);
        vmwrite(GUEST_SYSENTER_CS, vmreadz(HOST_IA32_SYSENTER_CS));
        vmwrite(VMX_PREEMPTION_TIMER_VALUE, 0);

        vmwrite(GUEST_CR0, vmreadz(HOST_CR0));
        vmwrite(GUEST_CR3, vmreadz(HOST_CR3));
        vmwrite(GUEST_CR4, vmreadz(HOST_CR4));
        vmwrite(GUEST_ES_BASE, 0);
        vmwrite(GUEST_CS_BASE, 0);
        vmwrite(GUEST_SS_BASE, 0);
        vmwrite(GUEST_DS_BASE, 0);
        vmwrite(GUEST_FS_BASE, vmreadz(HOST_FS_BASE));
        vmwrite(GUEST_GS_BASE, vmreadz(HOST_GS_BASE));
        vmwrite(GUEST_LDTR_BASE, 0);
        vmwrite(GUEST_TR_BASE, vmreadz(HOST_TR_BASE));
        vmwrite(GUEST_GDTR_BASE, vmreadz(HOST_GDTR_BASE));
        vmwrite(GUEST_IDTR_BASE, vmreadz(HOST_IDTR_BASE));
        vmwrite(GUEST_DR7, 0x400);
        vmwrite(GUEST_RSP, (uint64_t)rsp);
        vmwrite(GUEST_RIP, (uint64_t)rip);
        vmwrite(GUEST_RFLAGS, 2);
        vmwrite(GUEST_PENDING_DBG_EXCEPTIONS, 0);
        vmwrite(GUEST_SYSENTER_ESP, vmreadz(HOST_IA32_SYSENTER_ESP));
        vmwrite(GUEST_SYSENTER_EIP, vmreadz(HOST_IA32_SYSENTER_EIP));
}

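/*
 * Fully initialize a VMCS (controls, host state, and guest state) for an
 * L2 guest that will start at guest_rip with its stack at guest_rsp.
 *
 * Typical guest-side flow in these selftests (a sketch; l2_guest_code and
 * l2_stack are illustrative names):
 *
 *	GUEST_ASSERT(prepare_for_vmx_operation(vmx));
 *	GUEST_ASSERT(load_vmcs(vmx));
 *	prepare_vmcs(vmx, l2_guest_code, &l2_stack[L2_STACK_SIZE]);
 *	GUEST_ASSERT(!vmlaunch());
 */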
void prepare_vmcs(struct vmx_pages *vmx, void *guest_rip, void *guest_rsp)
{
        init_vmcs_control_fields(vmx);
        init_vmcs_host_state();
        init_vmcs_guest_state(guest_rip, guest_rsp);
}

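/* Check the VMX bit (CPUID.1:ECX[5]) in KVM's supported CPUID. */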
bool nested_vmx_supported(void)
{
        struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1);

        return entry->ecx & CPUID_VMX;
}

void nested_vmx_check_supported(void)
{
        if (!nested_vmx_supported()) {
                print_skip("nested VMX not enabled");
                exit(KSFT_SKIP);
        }
}

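/*
 * Install a single EPT entry at the given level, allocating a lower-level
 * page table when the walk has not yet reached target_level.
 */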
static void nested_create_pte(struct kvm_vm *vm,
                              struct eptPageTableEntry *pte,
                              uint64_t nested_paddr,
                              uint64_t paddr,
                              int current_level,
                              int target_level)
{
        if (!pte->readable) {
                pte->writable = true;
                pte->readable = true;
                pte->executable = true;
                pte->page_size = (current_level == target_level);
                if (pte->page_size)
                        pte->address = paddr >> vm->page_shift;
                else
                        pte->address = vm_alloc_page_table(vm) >> vm->page_shift;
        } else {
                /*
                 * Entry already present.  Assert that the caller doesn't want
                 * a hugepage at this level, and that there isn't a hugepage at
                 * this level.
                 */
                TEST_ASSERT(current_level != target_level,
                            "Cannot create hugepage at level: %u, nested_paddr: 0x%lx\n",
                            current_level, nested_paddr);
                TEST_ASSERT(!pte->page_size,
                            "Cannot create page table at level: %u, nested_paddr: 0x%lx\n",
                            current_level, nested_paddr);
        }
}

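/*
 * Map one page of size PG_LEVEL_SIZE(target_level) of nested guest physical
 * memory at nested_paddr to the VM physical address paddr, building
 * intermediate EPT tables as needed.
 */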
void __nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
                     uint64_t nested_paddr, uint64_t paddr, int target_level)
{
        const uint64_t page_size = PG_LEVEL_SIZE(target_level);
        struct eptPageTableEntry *pt = vmx->eptp_hva, *pte;
        uint16_t index;

        TEST_ASSERT(vm->mode == VM_MODE_PXXV48_4K, "Attempt to use "
                    "unknown or unsupported guest mode, mode: 0x%x", vm->mode);

        TEST_ASSERT((nested_paddr >> 48) == 0,
                    "Nested physical address 0x%lx requires 5-level paging",
                    nested_paddr);
        TEST_ASSERT((nested_paddr % page_size) == 0,
                    "Nested physical address not on page boundary,\n"
                    "  nested_paddr: 0x%lx page_size: 0x%lx",
                    nested_paddr, page_size);
        TEST_ASSERT((nested_paddr >> vm->page_shift) <= vm->max_gfn,
                    "Nested physical address beyond maximum supported,\n"
                    "  nested_paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
                    nested_paddr, vm->max_gfn, vm->page_size);
        TEST_ASSERT((paddr % page_size) == 0,
                    "Physical address not on page boundary,\n"
                    "  paddr: 0x%lx page_size: 0x%lx",
                    paddr, page_size);
        TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn,
                    "Physical address beyond maximum supported,\n"
                    "  paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
                    paddr, vm->max_gfn, vm->page_size);

        for (int level = PG_LEVEL_512G; level >= PG_LEVEL_4K; level--) {
                index = (nested_paddr >> PG_LEVEL_SHIFT(level)) & 0x1ffu;
                pte = &pt[index];

                nested_create_pte(vm, pte, nested_paddr, paddr, level, target_level);

                if (pte->page_size)
                        break;

                pt = addr_gpa2hva(vm, pte->address * vm->page_size);
        }

        /*
         * For now mark these as accessed and dirty because the only
         * testcase we have needs that.  Can be reconsidered later.
         */
        pte->accessed = true;
        pte->dirty = true;
}

void nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
                   uint64_t nested_paddr, uint64_t paddr)
{
        __nested_pg_map(vmx, vm, nested_paddr, paddr, PG_LEVEL_4K);
}

/*
 * Map a range of EPT guest physical addresses to the VM's physical address
 * space.
 *
 * Input Args:
 *   vm - Virtual Machine
 *   nested_paddr - Nested guest physical address to map
 *   paddr - VM Physical Address
 *   size - The size of the range to map
 *   level - The level at which to map the range
 *
 * Output Args: None
 *
 * Return: None
 *
 * Within the VM given by vm, creates a nested guest translation for the
 * page range starting at nested_paddr to the page range starting at paddr.
 */
void __nested_map(struct vmx_pages *vmx, struct kvm_vm *vm,
                  uint64_t nested_paddr, uint64_t paddr, uint64_t size,
                  int level)
{
        size_t page_size = PG_LEVEL_SIZE(level);
        size_t npages = size / page_size;

        TEST_ASSERT(nested_paddr + size > nested_paddr, "Nested paddr overflow");
        TEST_ASSERT(paddr + size > paddr, "Paddr overflow");

        while (npages--) {
                __nested_pg_map(vmx, vm, nested_paddr, paddr, level);
                nested_paddr += page_size;
                paddr += page_size;
        }
}

void nested_map(struct vmx_pages *vmx, struct kvm_vm *vm,
                uint64_t nested_paddr, uint64_t paddr, uint64_t size)
{
        __nested_map(vmx, vm, nested_paddr, paddr, size, PG_LEVEL_4K);
}

/*
 * Prepare an identity extended page table that maps all of the physical
 * pages in the VM.
 */
void nested_map_memslot(struct vmx_pages *vmx, struct kvm_vm *vm,
                        uint32_t memslot)
{
        sparsebit_idx_t i, last;
        struct userspace_mem_region *region =
                memslot2region(vm, memslot);

        i = (region->region.guest_phys_addr >> vm->page_shift) - 1;
        last = i + (region->region.memory_size >> vm->page_shift);
        for (;;) {
                i = sparsebit_next_clear(region->unused_phy_pages, i);
                if (i > last)
                        break;

                nested_map(vmx, vm,
                           (uint64_t)i << vm->page_shift,
                           (uint64_t)i << vm->page_shift,
                           1 << vm->page_shift);
        }
}

557
558 /* Identity map a region with 1GiB Pages. */
559 void nested_identity_map_1g(struct vmx_pages *vmx, struct kvm_vm *vm,
560                             uint64_t addr, uint64_t size)
561 {
562         __nested_map(vmx, vm, addr, addr, size, PG_LEVEL_1G);
563 }
564
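/*
 * Allocate the root page of the extended page tables (the page that
 * EPT_POINTER will reference).  The eptp_memslot argument is currently
 * unused.
 */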
void prepare_eptp(struct vmx_pages *vmx, struct kvm_vm *vm,
                  uint32_t eptp_memslot)
{
        vmx->eptp = (void *)vm_vaddr_alloc_page(vm);
        vmx->eptp_hva = addr_gva2hva(vm, (uintptr_t)vmx->eptp);
        vmx->eptp_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->eptp);
}

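/* Allocate the backing page for the "virtualize APIC accesses" control. */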
void prepare_virtualize_apic_accesses(struct vmx_pages *vmx, struct kvm_vm *vm)
{
        vmx->apic_access = (void *)vm_vaddr_alloc_page(vm);
        vmx->apic_access_hva = addr_gva2hva(vm, (uintptr_t)vmx->apic_access);
        vmx->apic_access_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->apic_access);
}