// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 Google LLC
 * Author: Quentin Perret <qperret@google.com>
 */

#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_pgtable.h>
#include <asm/kvm_pkvm.h>
#include <asm/stage2_pgtable.h>

#include <hyp/fault.h>

#include <nvhe/gfp.h>
#include <nvhe/memory.h>
#include <nvhe/mem_protect.h>
#include <nvhe/mm.h>

#define KVM_HOST_S2_FLAGS (KVM_PGTABLE_S2_NOFWB | KVM_PGTABLE_S2_IDMAP)

extern unsigned long hyp_nr_cpus;
struct host_kvm host_kvm;

static struct hyp_pool host_s2_pool;

const u8 pkvm_hyp_id = 1;

static void host_lock_component(void)
{
        hyp_spin_lock(&host_kvm.lock);
}

static void host_unlock_component(void)
{
        hyp_spin_unlock(&host_kvm.lock);
}

static void hyp_lock_component(void)
{
        hyp_spin_lock(&pkvm_pgd_lock);
}

static void hyp_unlock_component(void)
{
        hyp_spin_unlock(&pkvm_pgd_lock);
}

static void *host_s2_zalloc_pages_exact(size_t size)
{
        void *addr = hyp_alloc_pages(&host_s2_pool, get_order(size));

        hyp_split_page(hyp_virt_to_page(addr));

        /*
         * The size of concatenated PGDs is always a power of two of PAGE_SIZE,
         * so there should be no need to free any of the tail pages to make the
         * allocation exact.
         */
        WARN_ON(size != (PAGE_SIZE << get_order(size)));

        return addr;
}

static void *host_s2_zalloc_page(void *pool)
{
        return hyp_alloc_pages(pool, 0);
}

static void host_s2_get_page(void *addr)
{
        hyp_get_page(&host_s2_pool, addr);
}

static void host_s2_put_page(void *addr)
{
        hyp_put_page(&host_s2_pool, addr);
}

static int prepare_s2_pool(void *pgt_pool_base)
{
        unsigned long nr_pages, pfn;
        int ret;

        pfn = hyp_virt_to_pfn(pgt_pool_base);
        nr_pages = host_s2_pgtable_pages();
        ret = hyp_pool_init(&host_s2_pool, pfn, nr_pages, 0);
        if (ret)
                return ret;

        host_kvm.mm_ops = (struct kvm_pgtable_mm_ops) {
                .zalloc_pages_exact = host_s2_zalloc_pages_exact,
                .zalloc_page = host_s2_zalloc_page,
                .phys_to_virt = hyp_phys_to_virt,
                .virt_to_phys = hyp_virt_to_phys,
                .page_count = hyp_page_count,
                .get_page = host_s2_get_page,
                .put_page = host_s2_put_page,
        };

        return 0;
}

static void prepare_host_vtcr(void)
{
        u32 parange, phys_shift;

        /* The host stage 2 is id-mapped, so use parange for T0SZ */
        parange = kvm_get_parange(id_aa64mmfr0_el1_sys_val);
        phys_shift = id_aa64mmfr0_parange_to_phys_shift(parange);

        host_kvm.arch.vtcr = kvm_get_vtcr(id_aa64mmfr0_el1_sys_val,
                                          id_aa64mmfr1_el1_sys_val, phys_shift);
}

static bool host_stage2_force_pte_cb(u64 addr, u64 end, enum kvm_pgtable_prot prot);

int kvm_host_prepare_stage2(void *pgt_pool_base)
{
        struct kvm_s2_mmu *mmu = &host_kvm.arch.mmu;
        int ret;

        prepare_host_vtcr();
        hyp_spin_lock_init(&host_kvm.lock);
        mmu->arch = &host_kvm.arch;

        ret = prepare_s2_pool(pgt_pool_base);
        if (ret)
                return ret;

        ret = __kvm_pgtable_stage2_init(&host_kvm.pgt, mmu,
                                        &host_kvm.mm_ops, KVM_HOST_S2_FLAGS,
                                        host_stage2_force_pte_cb);
        if (ret)
                return ret;

        mmu->pgd_phys = __hyp_pa(host_kvm.pgt.pgd);
        mmu->pgt = &host_kvm.pgt;
        atomic64_set(&mmu->vmid.id, 0);

        return 0;
}

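/*
 * Enable the host stage-2 translation on the calling CPU. The check against
 * HCR_VM makes a repeated call for a CPU whose stage-2 is already enabled
 * fail with -EPERM.
 */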
int __pkvm_prot_finalize(void)
{
        struct kvm_s2_mmu *mmu = &host_kvm.arch.mmu;
        struct kvm_nvhe_init_params *params = this_cpu_ptr(&kvm_init_params);

        if (params->hcr_el2 & HCR_VM)
                return -EPERM;

        params->vttbr = kvm_get_vttbr(mmu);
        params->vtcr = host_kvm.arch.vtcr;
        params->hcr_el2 |= HCR_VM;
        kvm_flush_dcache_to_poc(params, sizeof(*params));

        write_sysreg(params->hcr_el2, hcr_el2);
        __load_stage2(&host_kvm.arch.mmu, &host_kvm.arch);

        /*
         * Make sure to have an ISB before the TLB maintenance below but only
         * when __load_stage2() doesn't include one already.
         */
        asm(ALTERNATIVE("isb", "nop", ARM64_WORKAROUND_SPECULATIVE_AT));

        /* Invalidate stale HCR bits that may be cached in TLBs */
        __tlbi(vmalls12e1);
        dsb(nsh);
        isb();

        return 0;
}

static int host_stage2_unmap_dev_all(void)
{
        struct kvm_pgtable *pgt = &host_kvm.pgt;
        struct memblock_region *reg;
        u64 addr = 0;
        int i, ret;

        /* Unmap all non-memory regions to recycle the pages */
        for (i = 0; i < hyp_memblock_nr; i++, addr = reg->base + reg->size) {
                reg = &hyp_memory[i];
                ret = kvm_pgtable_stage2_unmap(pgt, addr, reg->base - addr);
                if (ret)
                        return ret;
        }
        return kvm_pgtable_stage2_unmap(pgt, addr, BIT(pgt->ia_bits) - addr);
}

struct kvm_mem_range {
        u64 start;
        u64 end;
};

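/*
 * Look up @addr in the sorted hyp_memory[] memblock array. Returns true and
 * sets @range to the enclosing memblock region when @addr is memory.
 * Otherwise returns false and sets @range to the hole around @addr that
 * overlaps no memblock region.
 */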
static bool find_mem_range(phys_addr_t addr, struct kvm_mem_range *range)
{
        int cur, left = 0, right = hyp_memblock_nr;
        struct memblock_region *reg;
        phys_addr_t end;

        range->start = 0;
        range->end = ULONG_MAX;

        /* The list of memblock regions is sorted, binary search it */
        while (left < right) {
                cur = (left + right) >> 1;
                reg = &hyp_memory[cur];
                end = reg->base + reg->size;
                if (addr < reg->base) {
                        right = cur;
                        range->end = reg->base;
                } else if (addr >= end) {
                        left = cur + 1;
                        range->start = end;
                } else {
                        range->start = reg->base;
                        range->end = end;
                        return true;
                }
        }

        return false;
}

bool addr_is_memory(phys_addr_t phys)
{
        struct kvm_mem_range range;

        return find_mem_range(phys, &range);
}

static bool is_in_mem_range(u64 addr, struct kvm_mem_range *range)
{
        return range->start <= addr && addr < range->end;
}

static bool range_is_memory(u64 start, u64 end)
{
        struct kvm_mem_range r;

        if (!find_mem_range(start, &r))
                return false;

        return is_in_mem_range(end - 1, &r);
}

static inline int __host_stage2_idmap(u64 start, u64 end,
                                      enum kvm_pgtable_prot prot)
{
        return kvm_pgtable_stage2_map(&host_kvm.pgt, start, end - start, start,
                                      prot, &host_s2_pool);
}

/*
 * The pool has been provided with enough pages to cover all of memory with
 * page granularity, but it is difficult to know how much of the MMIO range
 * we will need to cover upfront, so we may need to 'recycle' the pages if we
 * run out.
 */
#define host_stage2_try(fn, ...)                                        \
        ({                                                              \
                int __ret;                                              \
                hyp_assert_lock_held(&host_kvm.lock);                   \
                __ret = fn(__VA_ARGS__);                                \
                if (__ret == -ENOMEM) {                                 \
                        __ret = host_stage2_unmap_dev_all();            \
                        if (!__ret)                                     \
                                __ret = fn(__VA_ARGS__);                \
                }                                                       \
                __ret;                                                  \
         })

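/*
 * Callers pass the mapping function and its arguments, e.g. (see
 * host_stage2_idmap_locked() below):
 *
 *      host_stage2_try(__host_stage2_idmap, addr, addr + size, prot);
 *
 * If the first attempt fails with -ENOMEM, the MMIO mappings are torn down
 * to replenish the pool and the call is retried once.
 */
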
static inline bool range_included(struct kvm_mem_range *child,
                                  struct kvm_mem_range *parent)
{
        return parent->start <= child->start && child->end <= parent->end;
}

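/*
 * Adjust @range to the largest granule that can map @addr, supports a block
 * mapping and still fits within the memory/MMIO hole described by @range.
 * Returns -EAGAIN if the fault address is already covered by a valid PTE,
 * and -EPERM if it carries an ownership annotation for another component.
 */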
static int host_stage2_adjust_range(u64 addr, struct kvm_mem_range *range)
{
        struct kvm_mem_range cur;
        kvm_pte_t pte;
        u32 level;
        int ret;

        hyp_assert_lock_held(&host_kvm.lock);
        ret = kvm_pgtable_get_leaf(&host_kvm.pgt, addr, &pte, &level);
        if (ret)
                return ret;

        if (kvm_pte_valid(pte))
                return -EAGAIN;

        if (pte)
                return -EPERM;

        do {
                u64 granule = kvm_granule_size(level);
                cur.start = ALIGN_DOWN(addr, granule);
                cur.end = cur.start + granule;
                level++;
        } while ((level < KVM_PGTABLE_MAX_LEVELS) &&
                        !(kvm_level_supports_block_mapping(level) &&
                          range_included(&cur, range)));

        *range = cur;

        return 0;
}

int host_stage2_idmap_locked(phys_addr_t addr, u64 size,
                             enum kvm_pgtable_prot prot)
{
        hyp_assert_lock_held(&host_kvm.lock);

        return host_stage2_try(__host_stage2_idmap, addr, addr + size, prot);
}

int host_stage2_set_owner_locked(phys_addr_t addr, u64 size, u8 owner_id)
{
        hyp_assert_lock_held(&host_kvm.lock);

        return host_stage2_try(kvm_pgtable_stage2_set_owner, &host_kvm.pgt,
                               addr, size, &host_s2_pool, owner_id);
}

static bool host_stage2_force_pte_cb(u64 addr, u64 end, enum kvm_pgtable_prot prot)
{
        /*
         * Block mappings must be used with care in the host stage-2 as a
         * kvm_pgtable_stage2_map() operation targeting a page in the range of
         * an existing block will delete the block under the assumption that
         * mappings in the rest of the block range can always be rebuilt lazily.
         * That assumption is correct for the host stage-2 with RWX mappings
         * targeting memory or RW mappings targeting MMIO ranges (see
         * host_stage2_idmap() below which implements some of the host memory
         * abort logic). However, this is not safe for any other mappings where
         * the host stage-2 page-table is in fact the only place where this
         * state is stored. In all those cases, it is safer to use page-level
         * mappings, hence avoiding the loss of state caused by the side
         * effects of kvm_pgtable_stage2_map().
         */
        if (range_is_memory(addr, end))
                return prot != PKVM_HOST_MEM_PROT;
        else
                return prot != PKVM_HOST_MMIO_PROT;
}

static int host_stage2_idmap(u64 addr)
{
        struct kvm_mem_range range;
        bool is_memory = find_mem_range(addr, &range);
        enum kvm_pgtable_prot prot;
        int ret;

        prot = is_memory ? PKVM_HOST_MEM_PROT : PKVM_HOST_MMIO_PROT;

        host_lock_component();
        ret = host_stage2_adjust_range(addr, &range);
        if (ret)
                goto unlock;

        ret = host_stage2_idmap_locked(range.start, range.end - range.start, prot);
unlock:
        host_unlock_component();

        return ret;
}

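/*
 * Host stage-2 fault handler: lazily extend the identity map to cover the
 * faulting address. -EAGAIN (the PTE is already valid, e.g. because another
 * CPU handled the same fault first) is tolerated; any other failure is fatal.
 */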
void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt)
{
        struct kvm_vcpu_fault_info fault;
        u64 esr, addr;
        int ret = 0;

        esr = read_sysreg_el2(SYS_ESR);
        BUG_ON(!__get_fault_info(esr, &fault));

        addr = (fault.hpfar_el2 & HPFAR_MASK) << 8;
        ret = host_stage2_idmap(addr);
        BUG_ON(ret && ret != -EAGAIN);
}

/* This corresponds to locking order */
enum pkvm_component_id {
        PKVM_ID_HOST,
        PKVM_ID_HYP,
};

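/*
 * I.e. whenever both locks are needed, the host component lock must be taken
 * before the hyp one, as done in __pkvm_host_share_hyp() and
 * __pkvm_host_unshare_hyp() below.
 */
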
struct pkvm_mem_transition {
        u64 nr_pages;

        struct {
                enum pkvm_component_id id;
                /* Address in the initiator's address space */
                u64 addr;

                union {
                        struct {
                                /* Address in the completer's address space */
                                u64 completer_addr;
                        } host;
                };
        } initiator;

        struct {
                enum pkvm_component_id id;
        } completer;
};

struct pkvm_mem_share {
        const struct pkvm_mem_transition tx;
        const enum kvm_pgtable_prot completer_prot;
};

struct check_walk_data {
        enum pkvm_page_state desired;
        enum pkvm_page_state (*get_page_state)(kvm_pte_t pte);
};

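/*
 * Page-table walker callback: fail the walk if a valid leaf PTE points
 * outside of memory, or if any leaf PTE is not in the desired
 * pkvm_page_state.
 */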
static int __check_page_state_visitor(u64 addr, u64 end, u32 level,
                                      kvm_pte_t *ptep,
                                      enum kvm_pgtable_walk_flags flag,
                                      void * const arg)
{
        struct check_walk_data *d = arg;
        kvm_pte_t pte = *ptep;

        if (kvm_pte_valid(pte) && !addr_is_memory(kvm_pte_to_phys(pte)))
                return -EINVAL;

        return d->get_page_state(pte) == d->desired ? 0 : -EPERM;
}

static int check_page_state_range(struct kvm_pgtable *pgt, u64 addr, u64 size,
                                  struct check_walk_data *data)
{
        struct kvm_pgtable_walker walker = {
                .cb = __check_page_state_visitor,
                .arg = data,
                .flags = KVM_PGTABLE_WALK_LEAF,
        };

        return kvm_pgtable_walk(pgt, addr, size, &walker);
}

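/*
 * The pkvm_page_state of a page is encoded in the software bits of its PTE
 * (see pkvm_mkstate()/pkvm_getstate()). In the host stage-2, an invalid but
 * non-zero PTE is an ownership annotation left by
 * host_stage2_set_owner_locked(), i.e. the page is not present for the host.
 */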
static enum pkvm_page_state host_get_page_state(kvm_pte_t pte)
{
        if (!kvm_pte_valid(pte) && pte)
                return PKVM_NOPAGE;

        return pkvm_getstate(kvm_pgtable_stage2_pte_prot(pte));
}

static int __host_check_page_state_range(u64 addr, u64 size,
                                         enum pkvm_page_state state)
{
        struct check_walk_data d = {
                .desired = state,
                .get_page_state = host_get_page_state,
        };

        hyp_assert_lock_held(&host_kvm.lock);
        return check_page_state_range(&host_kvm.pgt, addr, size, &d);
}

static int __host_set_page_state_range(u64 addr, u64 size,
                                       enum pkvm_page_state state)
{
        enum kvm_pgtable_prot prot = pkvm_mkstate(PKVM_HOST_MEM_PROT, state);

        return host_stage2_idmap_locked(addr, size, prot);
}

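/*
 * Host side of a transition: the host_request_*() helpers only check that
 * the pages are in the expected state, while the host_initiate_*() helpers
 * update the state in the host stage-2. All of them return the completer's
 * address for the range via @completer_addr.
 */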
static int host_request_owned_transition(u64 *completer_addr,
                                         const struct pkvm_mem_transition *tx)
{
        u64 size = tx->nr_pages * PAGE_SIZE;
        u64 addr = tx->initiator.addr;

        *completer_addr = tx->initiator.host.completer_addr;
        return __host_check_page_state_range(addr, size, PKVM_PAGE_OWNED);
}

static int host_request_unshare(u64 *completer_addr,
                                const struct pkvm_mem_transition *tx)
{
        u64 size = tx->nr_pages * PAGE_SIZE;
        u64 addr = tx->initiator.addr;

        *completer_addr = tx->initiator.host.completer_addr;
        return __host_check_page_state_range(addr, size, PKVM_PAGE_SHARED_OWNED);
}

static int host_initiate_share(u64 *completer_addr,
                               const struct pkvm_mem_transition *tx)
{
        u64 size = tx->nr_pages * PAGE_SIZE;
        u64 addr = tx->initiator.addr;

        *completer_addr = tx->initiator.host.completer_addr;
        return __host_set_page_state_range(addr, size, PKVM_PAGE_SHARED_OWNED);
}

static int host_initiate_unshare(u64 *completer_addr,
                                 const struct pkvm_mem_transition *tx)
{
        u64 size = tx->nr_pages * PAGE_SIZE;
        u64 addr = tx->initiator.addr;

        *completer_addr = tx->initiator.host.completer_addr;
        return __host_set_page_state_range(addr, size, PKVM_PAGE_OWNED);
}

static enum pkvm_page_state hyp_get_page_state(kvm_pte_t pte)
{
        if (!kvm_pte_valid(pte))
                return PKVM_NOPAGE;

        return pkvm_getstate(kvm_pgtable_hyp_pte_prot(pte));
}

static int __hyp_check_page_state_range(u64 addr, u64 size,
                                        enum pkvm_page_state state)
{
        struct check_walk_data d = {
                .desired = state,
                .get_page_state = hyp_get_page_state,
        };

        hyp_assert_lock_held(&pkvm_pgd_lock);
        return check_page_state_range(&pkvm_pgtable, addr, size, &d);
}

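/*
 * The hyp page-table check is elided when the host is the initiator, unless
 * CONFIG_NVHE_EL2_DEBUG is enabled to force the full verification.
 */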
static bool __hyp_ack_skip_pgtable_check(const struct pkvm_mem_transition *tx)
{
        return !(IS_ENABLED(CONFIG_NVHE_EL2_DEBUG) ||
                 tx->initiator.id != PKVM_ID_HOST);
}

static int hyp_ack_share(u64 addr, const struct pkvm_mem_transition *tx,
                         enum kvm_pgtable_prot perms)
{
        u64 size = tx->nr_pages * PAGE_SIZE;

        if (perms != PAGE_HYP)
                return -EPERM;

        if (__hyp_ack_skip_pgtable_check(tx))
                return 0;

        return __hyp_check_page_state_range(addr, size, PKVM_NOPAGE);
}

static int hyp_ack_unshare(u64 addr, const struct pkvm_mem_transition *tx)
{
        u64 size = tx->nr_pages * PAGE_SIZE;

        if (__hyp_ack_skip_pgtable_check(tx))
                return 0;

        return __hyp_check_page_state_range(addr, size,
                                            PKVM_PAGE_SHARED_BORROWED);
}

static int hyp_complete_share(u64 addr, const struct pkvm_mem_transition *tx,
                              enum kvm_pgtable_prot perms)
{
        void *start = (void *)addr, *end = start + (tx->nr_pages * PAGE_SIZE);
        enum kvm_pgtable_prot prot;

        prot = pkvm_mkstate(perms, PKVM_PAGE_SHARED_BORROWED);
        return pkvm_create_mappings_locked(start, end, prot);
}

static int hyp_complete_unshare(u64 addr, const struct pkvm_mem_transition *tx)
{
        u64 size = tx->nr_pages * PAGE_SIZE;
        int ret = kvm_pgtable_hyp_unmap(&pkvm_pgtable, addr, size);

        return (ret != size) ? -EFAULT : 0;
}

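/*
 * A transition is performed in two phases: check_share() verifies that both
 * the initiator and the completer are in the expected page state without
 * modifying anything, and only then does __do_share() update the page-tables.
 * Completion is not expected to fail after a successful check, hence the
 * WARN_ON() in do_share().
 */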
static int check_share(struct pkvm_mem_share *share)
{
        const struct pkvm_mem_transition *tx = &share->tx;
        u64 completer_addr;
        int ret;

        switch (tx->initiator.id) {
        case PKVM_ID_HOST:
                ret = host_request_owned_transition(&completer_addr, tx);
                break;
        default:
                ret = -EINVAL;
        }

        if (ret)
                return ret;

        switch (tx->completer.id) {
        case PKVM_ID_HYP:
                ret = hyp_ack_share(completer_addr, tx, share->completer_prot);
                break;
        default:
                ret = -EINVAL;
        }

        return ret;
}

static int __do_share(struct pkvm_mem_share *share)
{
        const struct pkvm_mem_transition *tx = &share->tx;
        u64 completer_addr;
        int ret;

        switch (tx->initiator.id) {
        case PKVM_ID_HOST:
                ret = host_initiate_share(&completer_addr, tx);
                break;
        default:
                ret = -EINVAL;
        }

        if (ret)
                return ret;

        switch (tx->completer.id) {
        case PKVM_ID_HYP:
                ret = hyp_complete_share(completer_addr, tx, share->completer_prot);
                break;
        default:
                ret = -EINVAL;
        }

        return ret;
}

/*
 * do_share():
 *
 * The page owner grants access to another component with a given set
 * of permissions.
 *
 * Initiator: OWNED	=> SHARED_OWNED
 * Completer: NOPAGE	=> SHARED_BORROWED
 */
static int do_share(struct pkvm_mem_share *share)
{
        int ret;

        ret = check_share(share);
        if (ret)
                return ret;

        return WARN_ON(__do_share(share));
}

static int check_unshare(struct pkvm_mem_share *share)
{
        const struct pkvm_mem_transition *tx = &share->tx;
        u64 completer_addr;
        int ret;

        switch (tx->initiator.id) {
        case PKVM_ID_HOST:
                ret = host_request_unshare(&completer_addr, tx);
                break;
        default:
                ret = -EINVAL;
        }

        if (ret)
                return ret;

        switch (tx->completer.id) {
        case PKVM_ID_HYP:
                ret = hyp_ack_unshare(completer_addr, tx);
                break;
        default:
                ret = -EINVAL;
        }

        return ret;
}

static int __do_unshare(struct pkvm_mem_share *share)
{
        const struct pkvm_mem_transition *tx = &share->tx;
        u64 completer_addr;
        int ret;

        switch (tx->initiator.id) {
        case PKVM_ID_HOST:
                ret = host_initiate_unshare(&completer_addr, tx);
                break;
        default:
                ret = -EINVAL;
        }

        if (ret)
                return ret;

        switch (tx->completer.id) {
        case PKVM_ID_HYP:
                ret = hyp_complete_unshare(completer_addr, tx);
                break;
        default:
                ret = -EINVAL;
        }

        return ret;
}

/*
 * do_unshare():
 *
 * The page owner revokes access from another component for a range of
 * pages which were previously shared using do_share().
 *
 * Initiator: SHARED_OWNED	=> OWNED
 * Completer: SHARED_BORROWED	=> NOPAGE
 */
static int do_unshare(struct pkvm_mem_share *share)
{
        int ret;

        ret = check_unshare(share);
        if (ret)
                return ret;

        return WARN_ON(__do_unshare(share));
}

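/*
 * Share a single host-owned page with the hypervisor: the page is mapped
 * into the hyp VA space (derived with __hyp_va()) with PAGE_HYP permissions.
 * Both component locks are held across the transition so that the check and
 * the page-table updates are atomic with respect to other transitions.
 */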
int __pkvm_host_share_hyp(u64 pfn)
{
        int ret;
        u64 host_addr = hyp_pfn_to_phys(pfn);
        u64 hyp_addr = (u64)__hyp_va(host_addr);
        struct pkvm_mem_share share = {
                .tx = {
                        .nr_pages = 1,
                        .initiator = {
                                .id = PKVM_ID_HOST,
                                .addr = host_addr,
                                .host = {
                                        .completer_addr = hyp_addr,
                                },
                        },
                        .completer = {
                                .id = PKVM_ID_HYP,
                        },
                },
                .completer_prot = PAGE_HYP,
        };

        host_lock_component();
        hyp_lock_component();

        ret = do_share(&share);

        hyp_unlock_component();
        host_unlock_component();

        return ret;
}

int __pkvm_host_unshare_hyp(u64 pfn)
{
        int ret;
        u64 host_addr = hyp_pfn_to_phys(pfn);
        u64 hyp_addr = (u64)__hyp_va(host_addr);
        struct pkvm_mem_share share = {
                .tx = {
                        .nr_pages = 1,
                        .initiator = {
                                .id = PKVM_ID_HOST,
                                .addr = host_addr,
                                .host = {
                                        .completer_addr = hyp_addr,
                                },
                        },
                        .completer = {
                                .id = PKVM_ID_HYP,
                        },
                },
                .completer_prot = PAGE_HYP,
        };

        host_lock_component();
        hyp_lock_component();

        ret = do_unshare(&share);

        hyp_unlock_component();
        host_unlock_component();

        return ret;
}