KVM: arm64: Rework CPTR_EL2 programming for HVHE configuration
platform/kernel/linux-starfive.git: arch/arm64/kvm/hyp/nvhe/pkvm.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2021 Google LLC
 * Author: Fuad Tabba <tabba@google.com>
 */

#include <linux/kvm_host.h>
#include <linux/mm.h>
#include <nvhe/fixed_config.h>
#include <nvhe/mem_protect.h>
#include <nvhe/memory.h>
#include <nvhe/pkvm.h>
#include <nvhe/trap_handler.h>

/* Used by icache_is_vpipt(). */
unsigned long __icache_flags;

/* Used by kvm_get_vttbr(). */
unsigned int kvm_arm_vmid_bits;

/*
 * Set trap register values based on features in ID_AA64PFR0.
 */
static void pvm_init_traps_aa64pfr0(struct kvm_vcpu *vcpu)
{
        const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64PFR0_EL1);
        u64 hcr_set = HCR_RW;
        u64 hcr_clear = 0;
        u64 cptr_set = 0;
        u64 cptr_clear = 0;

        /* Protected KVM does not support AArch32 guests. */
        BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL0),
                PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) != ID_AA64PFR0_EL1_ELx_64BIT_ONLY);
        BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL1),
                PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) != ID_AA64PFR0_EL1_ELx_64BIT_ONLY);

        /*
         * Linux guests assume support for floating-point and Advanced SIMD. Do
         * not change the trapping behavior for these from the KVM default.
         */
        BUILD_BUG_ON(!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_FP),
                                PVM_ID_AA64PFR0_ALLOW));
        BUILD_BUG_ON(!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AdvSIMD),
                                PVM_ID_AA64PFR0_ALLOW));

        /* Trap RAS unless all current versions are supported */
        if (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_RAS), feature_ids) <
            ID_AA64PFR0_EL1_RAS_V1P1) {
                hcr_set |= HCR_TERR | HCR_TEA;
                hcr_clear |= HCR_FIEN;
        }

        /* Trap AMU */
        if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AMU), feature_ids)) {
                hcr_clear |= HCR_AMVOFFEN;
                cptr_set |= CPTR_EL2_TAM;
        }

        /* Trap SVE */
        if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_SVE), feature_ids)) {
                if (has_hvhe())
                        cptr_clear |= CPACR_EL1_ZEN_EL0EN | CPACR_EL1_ZEN_EL1EN;
                else
                        cptr_set |= CPTR_EL2_TZ;
        }

        vcpu->arch.hcr_el2 |= hcr_set;
        vcpu->arch.hcr_el2 &= ~hcr_clear;
        vcpu->arch.cptr_el2 |= cptr_set;
        vcpu->arch.cptr_el2 &= ~cptr_clear;
}

/*
 * Set trap register values based on features in ID_AA64PFR1.
 */
static void pvm_init_traps_aa64pfr1(struct kvm_vcpu *vcpu)
{
        const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64PFR1_EL1);
        u64 hcr_set = 0;
        u64 hcr_clear = 0;

        /* Memory Tagging: Trap and Treat as Untagged if not supported. */
        if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE), feature_ids)) {
                hcr_set |= HCR_TID5;
                hcr_clear |= HCR_DCT | HCR_ATA;
        }

        vcpu->arch.hcr_el2 |= hcr_set;
        vcpu->arch.hcr_el2 &= ~hcr_clear;
}

/*
 * Set trap register values based on features in ID_AA64DFR0.
 */
static void pvm_init_traps_aa64dfr0(struct kvm_vcpu *vcpu)
{
        const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64DFR0_EL1);
        u64 mdcr_set = 0;
        u64 mdcr_clear = 0;
        u64 cptr_set = 0;

        /* Trap/constrain PMU */
        if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), feature_ids)) {
                mdcr_set |= MDCR_EL2_TPM | MDCR_EL2_TPMCR;
                mdcr_clear |= MDCR_EL2_HPME | MDCR_EL2_MTPME |
                              MDCR_EL2_HPMN_MASK;
        }

        /* Trap Debug */
        if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DebugVer), feature_ids))
                mdcr_set |= MDCR_EL2_TDRA | MDCR_EL2_TDA | MDCR_EL2_TDE;

        /* Trap OS Double Lock */
        if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DoubleLock), feature_ids))
                mdcr_set |= MDCR_EL2_TDOSA;

        /* Trap SPE */
        if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMSVer), feature_ids)) {
                mdcr_set |= MDCR_EL2_TPMS;
                mdcr_clear |= MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT;
        }

        /* Trap Trace Filter */
        if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_TraceFilt), feature_ids))
                mdcr_set |= MDCR_EL2_TTRF;

        /* Trap Trace */
        if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_TraceVer), feature_ids)) {
                if (has_hvhe())
                        cptr_set |= CPACR_EL1_TTA;
                else
                        cptr_set |= CPTR_EL2_TTA;
        }

        vcpu->arch.mdcr_el2 |= mdcr_set;
        vcpu->arch.mdcr_el2 &= ~mdcr_clear;
        vcpu->arch.cptr_el2 |= cptr_set;
}

/*
 * Set trap register values based on features in ID_AA64MMFR0.
 */
static void pvm_init_traps_aa64mmfr0(struct kvm_vcpu *vcpu)
{
        const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64MMFR0_EL1);
        u64 mdcr_set = 0;

        /* Trap Debug Communications Channel registers */
        if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_FGT), feature_ids))
                mdcr_set |= MDCR_EL2_TDCC;

        vcpu->arch.mdcr_el2 |= mdcr_set;
}

/*
 * Set trap register values based on features in ID_AA64MMFR1.
 */
static void pvm_init_traps_aa64mmfr1(struct kvm_vcpu *vcpu)
{
        const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64MMFR1_EL1);
        u64 hcr_set = 0;

        /* Trap LOR */
        if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR1_EL1_LO), feature_ids))
                hcr_set |= HCR_TLOR;

        vcpu->arch.hcr_el2 |= hcr_set;
}

/*
 * Set baseline trap register values.
 */
static void pvm_init_trap_regs(struct kvm_vcpu *vcpu)
{
        const u64 hcr_trap_feat_regs = HCR_TID3;
        const u64 hcr_trap_impdef = HCR_TACR | HCR_TIDCP | HCR_TID1;

        /*
         * Always trap:
         * - Feature id registers: to control features exposed to guests
         * - Implementation-defined features
         */
        vcpu->arch.hcr_el2 |= hcr_trap_feat_regs | hcr_trap_impdef;

        /* Clear res0 and set res1 bits to trap potential new features. */
        vcpu->arch.hcr_el2 &= ~(HCR_RES0);
        vcpu->arch.mdcr_el2 &= ~(MDCR_EL2_RES0);
        if (!has_hvhe()) {
                vcpu->arch.cptr_el2 |= CPTR_NVHE_EL2_RES1;
                vcpu->arch.cptr_el2 &= ~(CPTR_NVHE_EL2_RES0);
        }
}

/*
 * Initialize trap register values for protected VMs.
 */
void __pkvm_vcpu_init_traps(struct kvm_vcpu *vcpu)
{
        pvm_init_trap_regs(vcpu);
        pvm_init_traps_aa64pfr0(vcpu);
        pvm_init_traps_aa64pfr1(vcpu);
        pvm_init_traps_aa64dfr0(vcpu);
        pvm_init_traps_aa64mmfr0(vcpu);
        pvm_init_traps_aa64mmfr1(vcpu);
}

/*
 * Start the VM table handle at the offset defined instead of at 0.
 * Mainly for sanity checking and debugging.
 */
#define HANDLE_OFFSET 0x1000

static unsigned int vm_handle_to_idx(pkvm_handle_t handle)
{
        return handle - HANDLE_OFFSET;
}

static pkvm_handle_t idx_to_vm_handle(unsigned int idx)
{
        return idx + HANDLE_OFFSET;
}

/*
 * Spinlock for protecting state related to the VM table. Protects writes
 * to 'vm_table' as well as the per-VM reference counts manipulated under
 * it by pkvm_load_hyp_vcpu() and pkvm_put_hyp_vcpu().
 */
static DEFINE_HYP_SPINLOCK(vm_table_lock);

/*
 * The table of VM entries for protected VMs in hyp.
 * Allocated at hyp initialization and setup.
 */
static struct pkvm_hyp_vm **vm_table;

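/*
 * Register the VM table allocated at hyp initialization. Must only be
 * called once; installing a second table is a bug (hence the WARN_ON).
 */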
void pkvm_hyp_vm_table_init(void *tbl)
{
        WARN_ON(vm_table);
        vm_table = tbl;
}

/*
 * Return the hyp vm structure corresponding to the handle.
 */
static struct pkvm_hyp_vm *get_vm_by_handle(pkvm_handle_t handle)
{
        unsigned int idx = vm_handle_to_idx(handle);

        if (unlikely(idx >= KVM_MAX_PVMS))
                return NULL;

        return vm_table[idx];
}

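/*
 * Look up the hyp vCPU corresponding to 'handle' and 'vcpu_idx'.
 *
 * On success, takes a reference on the owning VM so that it cannot be torn
 * down while the vCPU is in use; the caller must drop it with
 * pkvm_put_hyp_vcpu(). Returns NULL if the handle or index is invalid.
 */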
struct pkvm_hyp_vcpu *pkvm_load_hyp_vcpu(pkvm_handle_t handle,
                                         unsigned int vcpu_idx)
{
        struct pkvm_hyp_vcpu *hyp_vcpu = NULL;
        struct pkvm_hyp_vm *hyp_vm;

        hyp_spin_lock(&vm_table_lock);
        hyp_vm = get_vm_by_handle(handle);
        if (!hyp_vm || hyp_vm->nr_vcpus <= vcpu_idx)
                goto unlock;

        hyp_vcpu = hyp_vm->vcpus[vcpu_idx];
        hyp_page_ref_inc(hyp_virt_to_page(hyp_vm));
unlock:
        hyp_spin_unlock(&vm_table_lock);
        return hyp_vcpu;
}

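/* Drop the VM reference taken by pkvm_load_hyp_vcpu(). */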
void pkvm_put_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
{
        struct pkvm_hyp_vm *hyp_vm = pkvm_hyp_vcpu_to_hyp_vm(hyp_vcpu);

        hyp_spin_lock(&vm_table_lock);
        hyp_page_ref_dec(hyp_virt_to_page(hyp_vm));
        hyp_spin_unlock(&vm_table_lock);
}

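/* Release the pin on the host's vcpu structure taken at vCPU init time. */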
static void unpin_host_vcpu(struct kvm_vcpu *host_vcpu)
{
        if (host_vcpu)
                hyp_unpin_shared_mem(host_vcpu, host_vcpu + 1);
}

static void unpin_host_vcpus(struct pkvm_hyp_vcpu *hyp_vcpus[],
                             unsigned int nr_vcpus)
{
        int i;

        for (i = 0; i < nr_vcpus; i++)
                unpin_host_vcpu(hyp_vcpus[i]->host_vcpu);
}

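/*
 * Initialise the hyp copy of 'struct kvm': record the host's kvm pointer
 * and vCPU count, and inherit the stage-2 VTCR from the host MMU.
 */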
static void init_pkvm_hyp_vm(struct kvm *host_kvm, struct pkvm_hyp_vm *hyp_vm,
                             unsigned int nr_vcpus)
{
        hyp_vm->host_kvm = host_kvm;
        hyp_vm->kvm.created_vcpus = nr_vcpus;
        hyp_vm->kvm.arch.vtcr = host_mmu.arch.vtcr;
}

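/*
 * Initialise the hyp vCPU and bind it to its host counterpart. Pins the
 * host vcpu structure so the host cannot unshare it while it is in use,
 * and fails if the host-provided index does not match 'vcpu_idx'.
 */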
static int init_pkvm_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu,
                              struct pkvm_hyp_vm *hyp_vm,
                              struct kvm_vcpu *host_vcpu,
                              unsigned int vcpu_idx)
{
        int ret = 0;

        if (hyp_pin_shared_mem(host_vcpu, host_vcpu + 1))
                return -EBUSY;

        if (host_vcpu->vcpu_idx != vcpu_idx) {
                ret = -EINVAL;
                goto done;
        }

        hyp_vcpu->host_vcpu = host_vcpu;

        hyp_vcpu->vcpu.kvm = &hyp_vm->kvm;
        hyp_vcpu->vcpu.vcpu_id = READ_ONCE(host_vcpu->vcpu_id);
        hyp_vcpu->vcpu.vcpu_idx = vcpu_idx;

        hyp_vcpu->vcpu.arch.hw_mmu = &hyp_vm->kvm.arch.mmu;
        hyp_vcpu->vcpu.arch.cflags = READ_ONCE(host_vcpu->arch.cflags);
done:
        if (ret)
                unpin_host_vcpu(host_vcpu);
        return ret;
}

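/* Return the index of a free slot in the VM table, or -ENOMEM if it is full. */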
static int find_free_vm_table_entry(struct kvm *host_kvm)
{
        int i;

        for (i = 0; i < KVM_MAX_PVMS; ++i) {
                if (!vm_table[i])
                        return i;
        }

        return -ENOMEM;
}

/*
 * Allocate a VM table entry and insert a pointer to the new vm.
 *
 * Return a unique handle to the protected VM on success,
 * negative error code on failure.
 */
static pkvm_handle_t insert_vm_table_entry(struct kvm *host_kvm,
                                           struct pkvm_hyp_vm *hyp_vm)
{
        struct kvm_s2_mmu *mmu = &hyp_vm->kvm.arch.mmu;
        int idx;

        hyp_assert_lock_held(&vm_table_lock);

        /*
         * Initializing protected state might have failed, yet a malicious
         * host could trigger this function. Thus, ensure that 'vm_table'
         * exists.
         */
        if (unlikely(!vm_table))
                return -EINVAL;

        idx = find_free_vm_table_entry(host_kvm);
        if (idx < 0)
                return idx;

        hyp_vm->kvm.arch.pkvm.handle = idx_to_vm_handle(idx);

        /* VMID 0 is reserved for the host */
        atomic64_set(&mmu->vmid.id, idx + 1);

        mmu->arch = &hyp_vm->kvm.arch;
        mmu->pgt = &hyp_vm->pgt;

        vm_table[idx] = hyp_vm;
        return hyp_vm->kvm.arch.pkvm.handle;
}

/*
 * Deallocate and remove the VM table entry corresponding to the handle.
 */
static void remove_vm_table_entry(pkvm_handle_t handle)
{
        hyp_assert_lock_held(&vm_table_lock);
        vm_table[vm_handle_to_idx(handle)] = NULL;
}

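/*
 * Size of the hyp VM structure, including the trailing array of pointers
 * to its hyp vCPUs.
 */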
static size_t pkvm_get_hyp_vm_size(unsigned int nr_vcpus)
{
        return size_add(sizeof(struct pkvm_hyp_vm),
                size_mul(sizeof(struct pkvm_hyp_vcpu *), nr_vcpus));
}

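/*
 * Take ownership of page-aligned memory donated by the host and return its
 * hyp VA, or NULL on failure. map_donated_memory() additionally zeroes the
 * donation before use.
 */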
static void *map_donated_memory_noclear(unsigned long host_va, size_t size)
{
        void *va = (void *)kern_hyp_va(host_va);

        if (!PAGE_ALIGNED(va))
                return NULL;

        if (__pkvm_host_donate_hyp(hyp_virt_to_pfn(va),
                                   PAGE_ALIGN(size) >> PAGE_SHIFT))
                return NULL;

        return va;
}

static void *map_donated_memory(unsigned long host_va, size_t size)
{
        void *va = map_donated_memory_noclear(host_va, size);

        if (va)
                memset(va, 0, size);

        return va;
}

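/*
 * Return donated memory to the host. unmap_donated_memory() scrubs the
 * contents first; the _noclear variant is for memory the caller has
 * already wiped or deliberately wants to preserve.
 */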
static void __unmap_donated_memory(void *va, size_t size)
{
        WARN_ON(__pkvm_hyp_donate_host(hyp_virt_to_pfn(va),
                                       PAGE_ALIGN(size) >> PAGE_SHIFT));
}

static void unmap_donated_memory(void *va, size_t size)
{
        if (!va)
                return;

        memset(va, 0, size);
        __unmap_donated_memory(va, size);
}

static void unmap_donated_memory_noclear(void *va, size_t size)
{
        if (!va)
                return;

        __unmap_donated_memory(va, size);
}

/*
 * Initialize the hypervisor copy of the protected VM state using the
 * memory donated by the host.
 *
 * Unmaps the donated memory from the host at stage 2.
 *
 * host_kvm: A pointer to the host's struct kvm.
 * vm_hva: The host va of the area being donated for the VM state.
 *         Must be page aligned.
 * pgd_hva: The host va of the area being donated for the stage-2 PGD for
 *          the VM. Must be page aligned. Its size is implied by the VM's
 *          VTCR.
 *
 * Return a unique handle to the protected VM on success,
 * negative error code on failure.
 */
int __pkvm_init_vm(struct kvm *host_kvm, unsigned long vm_hva,
                   unsigned long pgd_hva)
{
        struct pkvm_hyp_vm *hyp_vm = NULL;
        size_t vm_size, pgd_size;
        unsigned int nr_vcpus;
        void *pgd = NULL;
        int ret;

        ret = hyp_pin_shared_mem(host_kvm, host_kvm + 1);
        if (ret)
                return ret;

        nr_vcpus = READ_ONCE(host_kvm->created_vcpus);
        if (nr_vcpus < 1) {
                ret = -EINVAL;
                goto err_unpin_kvm;
        }

        vm_size = pkvm_get_hyp_vm_size(nr_vcpus);
        pgd_size = kvm_pgtable_stage2_pgd_size(host_mmu.arch.vtcr);

        ret = -ENOMEM;

        hyp_vm = map_donated_memory(vm_hva, vm_size);
        if (!hyp_vm)
                goto err_remove_mappings;

        pgd = map_donated_memory_noclear(pgd_hva, pgd_size);
        if (!pgd)
                goto err_remove_mappings;

        init_pkvm_hyp_vm(host_kvm, hyp_vm, nr_vcpus);

        hyp_spin_lock(&vm_table_lock);
        ret = insert_vm_table_entry(host_kvm, hyp_vm);
        if (ret < 0)
                goto err_unlock;

        ret = kvm_guest_prepare_stage2(hyp_vm, pgd);
        if (ret)
                goto err_remove_vm_table_entry;
        hyp_spin_unlock(&vm_table_lock);

        return hyp_vm->kvm.arch.pkvm.handle;

err_remove_vm_table_entry:
        remove_vm_table_entry(hyp_vm->kvm.arch.pkvm.handle);
err_unlock:
        hyp_spin_unlock(&vm_table_lock);
err_remove_mappings:
        unmap_donated_memory(hyp_vm, vm_size);
        unmap_donated_memory(pgd, pgd_size);
err_unpin_kvm:
        hyp_unpin_shared_mem(host_kvm, host_kvm + 1);
        return ret;
}

/*
 * Initialize the hypervisor copy of the protected vCPU state using the
 * memory donated by the host.
 *
 * handle: The handle for the protected vm.
 * host_vcpu: A pointer to the corresponding host vcpu.
 * vcpu_hva: The host va of the area being donated for the vcpu state.
 *           Must be page aligned. The size of the area must be equal to
 *           the page-aligned size of 'struct pkvm_hyp_vcpu'.
 * Return 0 on success, negative error code on failure.
 */
int __pkvm_init_vcpu(pkvm_handle_t handle, struct kvm_vcpu *host_vcpu,
                     unsigned long vcpu_hva)
{
        struct pkvm_hyp_vcpu *hyp_vcpu;
        struct pkvm_hyp_vm *hyp_vm;
        unsigned int idx;
        int ret;

        hyp_vcpu = map_donated_memory(vcpu_hva, sizeof(*hyp_vcpu));
        if (!hyp_vcpu)
                return -ENOMEM;

        hyp_spin_lock(&vm_table_lock);

        hyp_vm = get_vm_by_handle(handle);
        if (!hyp_vm) {
                ret = -ENOENT;
                goto unlock;
        }

        idx = hyp_vm->nr_vcpus;
        if (idx >= hyp_vm->kvm.created_vcpus) {
                ret = -EINVAL;
                goto unlock;
        }

        ret = init_pkvm_hyp_vcpu(hyp_vcpu, hyp_vm, host_vcpu, idx);
        if (ret)
                goto unlock;

        hyp_vm->vcpus[idx] = hyp_vcpu;
        hyp_vm->nr_vcpus++;
unlock:
        hyp_spin_unlock(&vm_table_lock);

        if (ret)
                unmap_donated_memory(hyp_vcpu, sizeof(*hyp_vcpu));

        return ret;
}

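/*
 * Scrub a donated allocation and hand its pages back to the host: zero the
 * page-aligned region, push each page onto the given memcache so the host
 * can reclaim it, then return ownership without clearing again.
 */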
static void
teardown_donated_memory(struct kvm_hyp_memcache *mc, void *addr, size_t size)
{
        size = PAGE_ALIGN(size);
        memset(addr, 0, size);

        for (void *start = addr; start < addr + size; start += PAGE_SIZE)
                push_hyp_memcache(mc, start, hyp_virt_to_phys);

        unmap_donated_memory_noclear(addr, size);
}

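/*
 * Tear down the hyp state of a protected VM: flush its TLB entries so the
 * VMID can be reused, reclaim the guest's pages, unpin the host vcpus, and
 * return all donated metadata to the host via the teardown memcache.
 * Fails with -EBUSY if a vCPU is still loaded (VM reference count held).
 */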
int __pkvm_teardown_vm(pkvm_handle_t handle)
{
        struct kvm_hyp_memcache *mc;
        struct pkvm_hyp_vm *hyp_vm;
        struct kvm *host_kvm;
        unsigned int idx;
        size_t vm_size;
        int err;

        hyp_spin_lock(&vm_table_lock);
        hyp_vm = get_vm_by_handle(handle);
        if (!hyp_vm) {
                err = -ENOENT;
                goto err_unlock;
        }

        if (WARN_ON(hyp_page_count(hyp_vm))) {
                err = -EBUSY;
                goto err_unlock;
        }

        host_kvm = hyp_vm->host_kvm;

        /* Ensure the VMID is clean before it can be reallocated */
        __kvm_tlb_flush_vmid(&hyp_vm->kvm.arch.mmu);
        remove_vm_table_entry(handle);
        hyp_spin_unlock(&vm_table_lock);

        /* Reclaim guest pages (including page-table pages) */
        mc = &host_kvm->arch.pkvm.teardown_mc;
        reclaim_guest_pages(hyp_vm, mc);
        unpin_host_vcpus(hyp_vm->vcpus, hyp_vm->nr_vcpus);

        /* Push the metadata pages to the teardown memcache */
        for (idx = 0; idx < hyp_vm->nr_vcpus; ++idx) {
                struct pkvm_hyp_vcpu *hyp_vcpu = hyp_vm->vcpus[idx];

                teardown_donated_memory(mc, hyp_vcpu, sizeof(*hyp_vcpu));
        }

        vm_size = pkvm_get_hyp_vm_size(hyp_vm->kvm.created_vcpus);
        teardown_donated_memory(mc, hyp_vm, vm_size);
        hyp_unpin_shared_mem(host_kvm, host_kvm + 1);
        return 0;

err_unlock:
        hyp_spin_unlock(&vm_table_lock);
        return err;
}