// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2021 Google LLC
 * Author: Fuad Tabba <tabba@google.com>
 */
#include <linux/kvm_host.h>
#include <linux/mm.h>
#include <nvhe/fixed_config.h>
#include <nvhe/mem_protect.h>
#include <nvhe/memory.h>
#include <nvhe/pkvm.h>
#include <nvhe/trap_handler.h>
/* Used by icache_is_vpipt(). */
unsigned long __icache_flags;
/* Used by kvm_get_vttbr(). */
unsigned int kvm_arm_vmid_bits;
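/*
 * Each of the pvm_init_traps_aa64*() helpers below follows the same
 * pattern: read the sanitized view of one ID register for the protected
 * vCPU, accumulate the trap bits to set and clear for every feature the
 * VM is not allowed to use, then apply them to the vCPU's HCR_EL2,
 * MDCR_EL2 and CPTR_EL2 shadow values in one go.
 */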
/*
 * Set trap register values based on features in ID_AA64PFR0.
 */
static void pvm_init_traps_aa64pfr0(struct kvm_vcpu *vcpu)
{
	const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64PFR0_EL1);
	u64 hcr_set = HCR_RW;
	u64 hcr_clear = 0;
	u64 cptr_set = 0;

	/* Protected KVM does not support AArch32 guests. */
	BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL0),
		PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) != ID_AA64PFR0_EL1_ELx_64BIT_ONLY);
	BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL1),
		PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) != ID_AA64PFR0_EL1_ELx_64BIT_ONLY);

	/*
	 * Linux guests assume support for floating-point and Advanced SIMD. Do
	 * not change the trapping behavior for these from the KVM default.
	 */
	BUILD_BUG_ON(!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_FP),
				PVM_ID_AA64PFR0_ALLOW));
	BUILD_BUG_ON(!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AdvSIMD),
				PVM_ID_AA64PFR0_ALLOW));

	/* Trap RAS unless all current versions are supported */
	if (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_RAS), feature_ids) <
	    ID_AA64PFR0_EL1_RAS_V1P1) {
		hcr_set |= HCR_TERR | HCR_TEA;
		hcr_clear |= HCR_FIEN;
	}

	/* Trap AMU */
	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AMU), feature_ids)) {
		hcr_clear |= HCR_AMVOFFEN;
		cptr_set |= CPTR_EL2_TAM;
	}

	/* Trap SVE */
	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_SVE), feature_ids))
		cptr_set |= CPTR_EL2_TZ;

	vcpu->arch.hcr_el2 |= hcr_set;
	vcpu->arch.hcr_el2 &= ~hcr_clear;
	vcpu->arch.cptr_el2 |= cptr_set;
}
/*
 * Set trap register values based on features in ID_AA64PFR1.
 */
static void pvm_init_traps_aa64pfr1(struct kvm_vcpu *vcpu)
{
	const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64PFR1_EL1);
	u64 hcr_set = 0;
	u64 hcr_clear = 0;

	/* Memory Tagging: Trap and Treat as Untagged if not supported. */
	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE), feature_ids)) {
		hcr_set |= HCR_TID5;
		hcr_clear |= HCR_DCT | HCR_ATA;
	}

	vcpu->arch.hcr_el2 |= hcr_set;
	vcpu->arch.hcr_el2 &= ~hcr_clear;
}
/*
 * Set trap register values based on features in ID_AA64DFR0.
 */
static void pvm_init_traps_aa64dfr0(struct kvm_vcpu *vcpu)
{
	const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64DFR0_EL1);
	u64 mdcr_set = 0;
	u64 mdcr_clear = 0;
	u64 cptr_set = 0;

	/* Trap/constrain PMU */
	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), feature_ids)) {
		mdcr_set |= MDCR_EL2_TPM | MDCR_EL2_TPMCR;
		mdcr_clear |= MDCR_EL2_HPME | MDCR_EL2_MTPME |
			      MDCR_EL2_HPMN_MASK;
	}

	/* Trap Debug */
	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DebugVer), feature_ids))
		mdcr_set |= MDCR_EL2_TDRA | MDCR_EL2_TDA | MDCR_EL2_TDE;

	/* Trap OS Double Lock */
	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DoubleLock), feature_ids))
		mdcr_set |= MDCR_EL2_TDOSA;

	/* Trap SPE */
	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMSVer), feature_ids)) {
		mdcr_set |= MDCR_EL2_TPMS;
		mdcr_clear |= MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT;
	}

	/* Trap Trace Filter */
	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_TraceFilt), feature_ids))
		mdcr_set |= MDCR_EL2_TTRF;

	/* Trap Trace */
	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_TraceVer), feature_ids))
		cptr_set |= CPTR_EL2_TTA;

	vcpu->arch.mdcr_el2 |= mdcr_set;
	vcpu->arch.mdcr_el2 &= ~mdcr_clear;
	vcpu->arch.cptr_el2 |= cptr_set;
}
/*
 * Set trap register values based on features in ID_AA64MMFR0.
 */
static void pvm_init_traps_aa64mmfr0(struct kvm_vcpu *vcpu)
{
	const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64MMFR0_EL1);
	u64 mdcr_set = 0;

	/* Trap Debug Communications Channel registers */
	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_FGT), feature_ids))
		mdcr_set |= MDCR_EL2_TDCC;

	vcpu->arch.mdcr_el2 |= mdcr_set;
}
/*
 * Set trap register values based on features in ID_AA64MMFR1.
 */
static void pvm_init_traps_aa64mmfr1(struct kvm_vcpu *vcpu)
{
	const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64MMFR1_EL1);
	u64 hcr_set = 0;

	/* Memory (Load/Store Ordering) */
	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR1_EL1_LO), feature_ids))
		hcr_set |= HCR_TLOR;

	vcpu->arch.hcr_el2 |= hcr_set;
}
/*
 * Set baseline trap register values.
 */
static void pvm_init_trap_regs(struct kvm_vcpu *vcpu)
{
	const u64 hcr_trap_feat_regs = HCR_TID3;
	const u64 hcr_trap_impdef = HCR_TACR | HCR_TIDCP | HCR_TID1;

	/*
	 * Always trap:
	 * - Feature id registers: to control features exposed to guests
	 * - Implementation-defined features
	 */
	vcpu->arch.hcr_el2 |= hcr_trap_feat_regs | hcr_trap_impdef;

	/* Clear res0 and set res1 bits to trap potential new features. */
	vcpu->arch.hcr_el2 &= ~(HCR_RES0);
	vcpu->arch.mdcr_el2 &= ~(MDCR_EL2_RES0);
	vcpu->arch.cptr_el2 |= CPTR_NVHE_EL2_RES1;
	vcpu->arch.cptr_el2 &= ~(CPTR_NVHE_EL2_RES0);
}
/*
 * Initialize trap register values for protected VMs.
 */
void __pkvm_vcpu_init_traps(struct kvm_vcpu *vcpu)
{
	pvm_init_trap_regs(vcpu);
	pvm_init_traps_aa64pfr0(vcpu);
	pvm_init_traps_aa64pfr1(vcpu);
	pvm_init_traps_aa64dfr0(vcpu);
	pvm_init_traps_aa64mmfr0(vcpu);
	pvm_init_traps_aa64mmfr1(vcpu);
}
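/*
 * Note: this runs at EL2. The host is expected to reach it through the
 * pKVM hypercall interface (e.g. a kvm_call_hyp_nvhe(__pkvm_vcpu_init_traps,
 * ...) call from the host-side pKVM code); the exact call site lives
 * outside this file.
 */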
/*
 * Start the VM table handle at the offset defined instead of at 0.
 * Mainly for sanity checking and debugging.
 */
#define HANDLE_OFFSET 0x1000
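/*
 * For example, the first allocated VM gets table index 0 and therefore
 * handle 0x1000. A bogus handle below HANDLE_OFFSET wraps around to a
 * huge index and is rejected by the bounds check in get_vm_by_handle().
 */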
static unsigned int vm_handle_to_idx(pkvm_handle_t handle)
{
	return handle - HANDLE_OFFSET;
}

static pkvm_handle_t idx_to_vm_handle(unsigned int idx)
{
	return idx + HANDLE_OFFSET;
}
/*
 * Spinlock for protecting state related to the VM table. Protects writes
 * to 'vm_table' and 'nr_table_entries', as well as reads and writes to
 * 'last_hyp_vcpu_lookup'.
 */
static DEFINE_HYP_SPINLOCK(vm_table_lock);
/*
 * The table of VM entries for protected VMs in hyp.
 * Allocated at hyp initialization and setup.
 */
static struct pkvm_hyp_vm **vm_table;

void pkvm_hyp_vm_table_init(void *tbl)
{
	WARN_ON(vm_table);
	vm_table = tbl;
}
/*
 * Return the hyp vm structure corresponding to the handle.
 */
static struct pkvm_hyp_vm *get_vm_by_handle(pkvm_handle_t handle)
{
	unsigned int idx = vm_handle_to_idx(handle);

	if (unlikely(idx >= KVM_MAX_PVMS))
		return NULL;

	return vm_table[idx];
}
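/*
 * Loading a vCPU takes a reference on the page backing its pkvm_hyp_vm,
 * which keeps __pkvm_teardown_vm() at bay (it WARNs and bails out if the
 * page count is non-zero) until the matching pkvm_put_hyp_vcpu() call.
 */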
struct pkvm_hyp_vcpu *pkvm_load_hyp_vcpu(pkvm_handle_t handle,
					 unsigned int vcpu_idx)
{
	struct pkvm_hyp_vcpu *hyp_vcpu = NULL;
	struct pkvm_hyp_vm *hyp_vm;

	hyp_spin_lock(&vm_table_lock);
	hyp_vm = get_vm_by_handle(handle);
	if (!hyp_vm || hyp_vm->nr_vcpus <= vcpu_idx)
		goto unlock;

	hyp_vcpu = hyp_vm->vcpus[vcpu_idx];
	hyp_page_ref_inc(hyp_virt_to_page(hyp_vm));
unlock:
	hyp_spin_unlock(&vm_table_lock);
	return hyp_vcpu;
}
void pkvm_put_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
{
	struct pkvm_hyp_vm *hyp_vm = pkvm_hyp_vcpu_to_hyp_vm(hyp_vcpu);

	hyp_spin_lock(&vm_table_lock);
	hyp_page_ref_dec(hyp_virt_to_page(hyp_vm));
	hyp_spin_unlock(&vm_table_lock);
}
static void unpin_host_vcpu(struct kvm_vcpu *host_vcpu)
{
	if (host_vcpu)
		hyp_unpin_shared_mem(host_vcpu, host_vcpu + 1);
}

static void unpin_host_vcpus(struct pkvm_hyp_vcpu *hyp_vcpus[],
			     unsigned int nr_vcpus)
{
	int i;

	for (i = 0; i < nr_vcpus; i++)
		unpin_host_vcpu(hyp_vcpus[i]->host_vcpu);
}
static void init_pkvm_hyp_vm(struct kvm *host_kvm, struct pkvm_hyp_vm *hyp_vm,
			     unsigned int nr_vcpus)
{
	hyp_vm->host_kvm = host_kvm;
	hyp_vm->kvm.created_vcpus = nr_vcpus;
	hyp_vm->kvm.arch.vtcr = host_mmu.arch.vtcr;
}
static int init_pkvm_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu,
			      struct pkvm_hyp_vm *hyp_vm,
			      struct kvm_vcpu *host_vcpu,
			      unsigned int vcpu_idx)
{
	int ret = 0;

	if (hyp_pin_shared_mem(host_vcpu, host_vcpu + 1))
		return -EBUSY;

	if (host_vcpu->vcpu_idx != vcpu_idx) {
		ret = -EINVAL;
		goto done;
	}

	hyp_vcpu->host_vcpu = host_vcpu;

	hyp_vcpu->vcpu.kvm = &hyp_vm->kvm;
	hyp_vcpu->vcpu.vcpu_id = READ_ONCE(host_vcpu->vcpu_id);
	hyp_vcpu->vcpu.vcpu_idx = vcpu_idx;

	hyp_vcpu->vcpu.arch.hw_mmu = &hyp_vm->kvm.arch.mmu;
	hyp_vcpu->vcpu.arch.cflags = READ_ONCE(host_vcpu->arch.cflags);
done:
	if (ret)
		unpin_host_vcpu(host_vcpu);
	return ret;
}
static int find_free_vm_table_entry(struct kvm *host_kvm)
{
	int i;

	for (i = 0; i < KVM_MAX_PVMS; ++i) {
		if (!vm_table[i])
			return i;
	}

	return -ENOMEM;
}
/*
 * Allocate a VM table entry and insert a pointer to the new vm.
 *
 * Return a unique handle to the protected VM on success,
 * negative error code on failure.
 */
static pkvm_handle_t insert_vm_table_entry(struct kvm *host_kvm,
					   struct pkvm_hyp_vm *hyp_vm)
{
	struct kvm_s2_mmu *mmu = &hyp_vm->kvm.arch.mmu;
	int idx;

	hyp_assert_lock_held(&vm_table_lock);

	/*
	 * Initializing protected state might have failed, yet a malicious
	 * host could trigger this function. Thus, ensure that 'vm_table'
	 * exists.
	 */
	if (unlikely(!vm_table))
		return -EINVAL;

	idx = find_free_vm_table_entry(host_kvm);
	if (idx < 0)
		return idx;

	hyp_vm->kvm.arch.pkvm.handle = idx_to_vm_handle(idx);

	/* VMID 0 is reserved for the host */
	atomic64_set(&mmu->vmid.id, idx + 1);

	mmu->arch = &hyp_vm->kvm.arch;
	mmu->pgt = &hyp_vm->pgt;

	vm_table[idx] = hyp_vm;
	return hyp_vm->kvm.arch.pkvm.handle;
}
/*
 * Deallocate and remove the VM table entry corresponding to the handle.
 */
static void remove_vm_table_entry(pkvm_handle_t handle)
{
	hyp_assert_lock_held(&vm_table_lock);
	vm_table[vm_handle_to_idx(handle)] = NULL;
}
static size_t pkvm_get_hyp_vm_size(unsigned int nr_vcpus)
{
	return size_add(sizeof(struct pkvm_hyp_vm),
		size_mul(sizeof(struct pkvm_hyp_vcpu *), nr_vcpus));
}
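/*
 * The "donated memory" helpers below implement the hyp side of pKVM's
 * donation protocol: the host hands whole pages over to the hypervisor,
 * which unmaps them from the host's stage-2 so the host can no longer
 * access them, and maps them into the hyp address space. The reverse
 * helpers hand the (optionally zeroed) pages back to the host.
 */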
static void *map_donated_memory_noclear(unsigned long host_va, size_t size)
{
	void *va = (void *)kern_hyp_va(host_va);

	if (!PAGE_ALIGNED(va))
		return NULL;

	if (__pkvm_host_donate_hyp(hyp_virt_to_pfn(va),
				   PAGE_ALIGN(size) >> PAGE_SHIFT))
		return NULL;

	return va;
}
static void *map_donated_memory(unsigned long host_va, size_t size)
{
	void *va = map_donated_memory_noclear(host_va, size);

	if (va)
		memset(va, 0, size);

	return va;
}
static void __unmap_donated_memory(void *va, size_t size)
{
	WARN_ON(__pkvm_hyp_donate_host(hyp_virt_to_pfn(va),
				       PAGE_ALIGN(size) >> PAGE_SHIFT));
}
static void unmap_donated_memory(void *va, size_t size)
{
	if (!va)
		return;

	memset(va, 0, size);
	__unmap_donated_memory(va, size);
}

static void unmap_donated_memory_noclear(void *va, size_t size)
{
	if (!va)
		return;

	__unmap_donated_memory(va, size);
}
/*
 * Initialize the hypervisor copy of the protected VM state using the
 * memory donated by the host.
 *
 * Unmaps the donated memory from the host at stage 2.
 *
 * host_kvm: A pointer to the host's struct kvm.
 * vm_hva: The host va of the area being donated for the VM state.
 *	   Must be page aligned.
 * pgd_hva: The host va of the area being donated for the stage-2 PGD for
 *	    the VM. Must be page aligned. Its size is implied by the VM's
 *	    VTCR.
 *
 * Return a unique handle to the protected VM on success,
 * negative error code on failure.
 */
int __pkvm_init_vm(struct kvm *host_kvm, unsigned long vm_hva,
		   unsigned long pgd_hva)
{
	struct pkvm_hyp_vm *hyp_vm = NULL;
	size_t vm_size, pgd_size;
	unsigned int nr_vcpus;
	void *pgd = NULL;
	int ret;

	ret = hyp_pin_shared_mem(host_kvm, host_kvm + 1);
	if (ret)
		return ret;

	nr_vcpus = READ_ONCE(host_kvm->created_vcpus);
	if (nr_vcpus < 1) {
		ret = -EINVAL;
		goto err_unpin_kvm;
	}

	vm_size = pkvm_get_hyp_vm_size(nr_vcpus);
	pgd_size = kvm_pgtable_stage2_pgd_size(host_mmu.arch.vtcr);

	ret = -ENOMEM;

	hyp_vm = map_donated_memory(vm_hva, vm_size);
	if (!hyp_vm)
		goto err_remove_mappings;

	pgd = map_donated_memory_noclear(pgd_hva, pgd_size);
	if (!pgd)
		goto err_remove_mappings;

	init_pkvm_hyp_vm(host_kvm, hyp_vm, nr_vcpus);

	hyp_spin_lock(&vm_table_lock);
	ret = insert_vm_table_entry(host_kvm, hyp_vm);
	if (ret < 0)
		goto err_unlock;

	ret = kvm_guest_prepare_stage2(hyp_vm, pgd);
	if (ret)
		goto err_remove_vm_table_entry;
	hyp_spin_unlock(&vm_table_lock);

	return hyp_vm->kvm.arch.pkvm.handle;

err_remove_vm_table_entry:
	remove_vm_table_entry(hyp_vm->kvm.arch.pkvm.handle);
err_unlock:
	hyp_spin_unlock(&vm_table_lock);
err_remove_mappings:
	unmap_donated_memory(hyp_vm, vm_size);
	unmap_donated_memory(pgd, pgd_size);
err_unpin_kvm:
	hyp_unpin_shared_mem(host_kvm, host_kvm + 1);
	return ret;
}
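/*
 * Illustrative host-side call sequence (a sketch; the real code lives in
 * the host's pKVM support, not in this file): the host allocates the VM
 * and PGD areas, then donates them via the HVC interface, e.g.
 *
 *	handle = kvm_call_hyp_nvhe(__pkvm_init_vm, host_kvm, vm_va, pgd_va);
 *
 * and stores the returned handle for later __pkvm_init_vcpu() and
 * __pkvm_teardown_vm() calls.
 */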
/*
 * Initialize the hypervisor copy of the protected vCPU state using the
 * memory donated by the host.
 *
 * handle: The handle for the protected vm.
 * host_vcpu: A pointer to the corresponding host vcpu.
 * vcpu_hva: The host va of the area being donated for the vcpu state.
 *	     Must be page aligned. The size of the area must be equal to
 *	     the page-aligned size of 'struct pkvm_hyp_vcpu'.
 * Return 0 on success, negative error code on failure.
 */
int __pkvm_init_vcpu(pkvm_handle_t handle, struct kvm_vcpu *host_vcpu,
		     unsigned long vcpu_hva)
{
	struct pkvm_hyp_vcpu *hyp_vcpu;
	struct pkvm_hyp_vm *hyp_vm;
	unsigned int idx;
	int ret;

	hyp_vcpu = map_donated_memory(vcpu_hva, sizeof(*hyp_vcpu));
	if (!hyp_vcpu)
		return -ENOMEM;

	hyp_spin_lock(&vm_table_lock);

	hyp_vm = get_vm_by_handle(handle);
	if (!hyp_vm) {
		ret = -ENOENT;
		goto unlock;
	}

	idx = hyp_vm->nr_vcpus;
	if (idx >= hyp_vm->kvm.created_vcpus) {
		ret = -EINVAL;
		goto unlock;
	}

	ret = init_pkvm_hyp_vcpu(hyp_vcpu, hyp_vm, host_vcpu, idx);
	if (ret)
		goto unlock;

	hyp_vm->vcpus[idx] = hyp_vcpu;
	hyp_vm->nr_vcpus++;
unlock:
	hyp_spin_unlock(&vm_table_lock);

	if (ret)
		unmap_donated_memory(hyp_vcpu, sizeof(*hyp_vcpu));

	return ret;
}
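/*
 * When tearing down, donated memory is scrubbed before it is handed back:
 * the pages are zeroed and queued on the host's teardown memcache, so the
 * host never sees stale hypervisor-owned state.
 */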
static void
teardown_donated_memory(struct kvm_hyp_memcache *mc, void *addr, size_t size)
{
	size = PAGE_ALIGN(size);
	memset(addr, 0, size);

	for (void *start = addr; start < addr + size; start += PAGE_SIZE)
		push_hyp_memcache(mc, start, hyp_virt_to_phys);

	unmap_donated_memory_noclear(addr, size);
}
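/*
 * Host-side counterpart (a sketch, not part of this file): after
 * kvm_call_hyp_nvhe(__pkvm_teardown_vm, handle) succeeds, the host is
 * expected to drain host_kvm->arch.pkvm.teardown_mc to reclaim the pages
 * pushed below.
 */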
int __pkvm_teardown_vm(pkvm_handle_t handle)
{
	struct kvm_hyp_memcache *mc;
	struct pkvm_hyp_vm *hyp_vm;
	struct kvm *host_kvm;
	unsigned int idx;
	size_t vm_size;
	int err;

	hyp_spin_lock(&vm_table_lock);
	hyp_vm = get_vm_by_handle(handle);
	if (!hyp_vm) {
		err = -ENOENT;
		goto err_unlock;
	}

	if (WARN_ON(hyp_page_count(hyp_vm))) {
		err = -EBUSY;
		goto err_unlock;
	}

	host_kvm = hyp_vm->host_kvm;

	/* Ensure the VMID is clean before it can be reallocated */
	__kvm_tlb_flush_vmid(&hyp_vm->kvm.arch.mmu);
	remove_vm_table_entry(handle);
	hyp_spin_unlock(&vm_table_lock);

	/* Reclaim guest pages (including page-table pages) */
	mc = &host_kvm->arch.pkvm.teardown_mc;
	reclaim_guest_pages(hyp_vm, mc);
	unpin_host_vcpus(hyp_vm->vcpus, hyp_vm->nr_vcpus);

	/* Push the metadata pages to the teardown memcache */
	for (idx = 0; idx < hyp_vm->nr_vcpus; ++idx) {
		struct pkvm_hyp_vcpu *hyp_vcpu = hyp_vm->vcpus[idx];

		teardown_donated_memory(mc, hyp_vcpu, sizeof(*hyp_vcpu));
	}

	vm_size = pkvm_get_hyp_vm_size(hyp_vm->kvm.created_vcpus);
	teardown_donated_memory(mc, hyp_vm, vm_size);
	hyp_unpin_shared_mem(host_kvm, host_kvm + 1);
	return 0;

err_unlock:
	hyp_spin_unlock(&vm_table_lock);
	return err;
}