// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 Google LLC
 * Author: Quentin Perret <qperret@google.com>
 */

#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_pgtable.h>
#include <asm/kvm_pkvm.h>
#include <asm/stage2_pgtable.h>

#include <hyp/fault.h>

#include <nvhe/gfp.h>
#include <nvhe/memory.h>
#include <nvhe/mem_protect.h>
#include <nvhe/mm.h>

#define KVM_HOST_S2_FLAGS (KVM_PGTABLE_S2_NOFWB | KVM_PGTABLE_S2_IDMAP)
struct host_mmu host_mmu;

static struct hyp_pool host_s2_pool;
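/*
 * The pkvm_hyp_vm whose stage-2 page-table is currently being manipulated on
 * this CPU. It is set under the guest lock so that the guest_s2_* mm_ops
 * callbacks below can reach the right per-VM page allocator without taking an
 * explicit argument.
 */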
static DEFINE_PER_CPU(struct pkvm_hyp_vm *, __current_vm);
#define current_vm (*this_cpu_ptr(&__current_vm))
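/*
 * Locking helpers for the three components (host, hyp and guest). Callers in
 * this file that need more than one of these locks take the host lock first,
 * then the hyp or guest lock.
 */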
static void guest_lock_component(struct pkvm_hyp_vm *vm)
{
	hyp_spin_lock(&vm->lock);
	current_vm = vm;
}

static void guest_unlock_component(struct pkvm_hyp_vm *vm)
{
	current_vm = NULL;
	hyp_spin_unlock(&vm->lock);
}

static void host_lock_component(void)
{
	hyp_spin_lock(&host_mmu.lock);
}

static void host_unlock_component(void)
{
	hyp_spin_unlock(&host_mmu.lock);
}

static void hyp_lock_component(void)
{
	hyp_spin_lock(&pkvm_pgd_lock);
}

static void hyp_unlock_component(void)
{
	hyp_spin_unlock(&pkvm_pgd_lock);
}
static void *host_s2_zalloc_pages_exact(size_t size)
{
	void *addr = hyp_alloc_pages(&host_s2_pool, get_order(size));

	hyp_split_page(hyp_virt_to_page(addr));

	/*
	 * The size of concatenated PGDs is always a power of two of PAGE_SIZE,
	 * so there should be no need to free any of the tail pages to make the
	 * allocation exact.
	 */
	WARN_ON(size != (PAGE_SIZE << get_order(size)));

	return addr;
}

static void *host_s2_zalloc_page(void *pool)
{
	return hyp_alloc_pages(pool, 0);
}

static void host_s2_get_page(void *addr)
{
	hyp_get_page(&host_s2_pool, addr);
}

static void host_s2_put_page(void *addr)
{
	hyp_put_page(&host_s2_pool, addr);
}

static void host_s2_free_removed_table(void *addr, u32 level)
{
	kvm_pgtable_stage2_free_removed(&host_mmu.mm_ops, addr, level);
}
static int prepare_s2_pool(void *pgt_pool_base)
{
	unsigned long nr_pages, pfn;
	int ret;

	pfn = hyp_virt_to_pfn(pgt_pool_base);
	nr_pages = host_s2_pgtable_pages();
	ret = hyp_pool_init(&host_s2_pool, pfn, nr_pages, 0);
	if (ret)
		return ret;

	host_mmu.mm_ops = (struct kvm_pgtable_mm_ops) {
		.zalloc_pages_exact = host_s2_zalloc_pages_exact,
		.zalloc_page = host_s2_zalloc_page,
		.free_removed_table = host_s2_free_removed_table,
		.phys_to_virt = hyp_phys_to_virt,
		.virt_to_phys = hyp_virt_to_phys,
		.page_count = hyp_page_count,
		.get_page = host_s2_get_page,
		.put_page = host_s2_put_page,
	};

	return 0;
}
static void prepare_host_vtcr(void)
{
	u32 parange, phys_shift;

	/* The host stage 2 is id-mapped, so use parange for T0SZ */
	parange = kvm_get_parange(id_aa64mmfr0_el1_sys_val);
	phys_shift = id_aa64mmfr0_parange_to_phys_shift(parange);

	host_mmu.arch.vtcr = kvm_get_vtcr(id_aa64mmfr0_el1_sys_val,
					  id_aa64mmfr1_el1_sys_val, phys_shift);
}

static bool host_stage2_force_pte_cb(u64 addr, u64 end, enum kvm_pgtable_prot prot);
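/*
 * Set up the host's stage-2 state: initialise the page-table pool donated by
 * the host, create the (initially empty) identity-mapping page-table for it,
 * and compute the VTCR/MMU parameters that __pkvm_prot_finalize() will load
 * on each CPU later on.
 */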
int kvm_host_prepare_stage2(void *pgt_pool_base)
{
	struct kvm_s2_mmu *mmu = &host_mmu.arch.mmu;
	int ret;

	prepare_host_vtcr();
	hyp_spin_lock_init(&host_mmu.lock);
	mmu->arch = &host_mmu.arch;

	ret = prepare_s2_pool(pgt_pool_base);
	if (ret)
		return ret;

	ret = __kvm_pgtable_stage2_init(&host_mmu.pgt, mmu,
					&host_mmu.mm_ops, KVM_HOST_S2_FLAGS,
					host_stage2_force_pte_cb);
	if (ret)
		return ret;

	mmu->pgd_phys = __hyp_pa(host_mmu.pgt.pgd);
	mmu->pgt = &host_mmu.pgt;
	atomic64_set(&mmu->vmid.id, 0);

	return 0;
}
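/*
 * Protected-guest stage-2 mappings are always installed at page granularity
 * (the callback below unconditionally forces PTE mappings), presumably so
 * that pages can be shared, donated and reclaimed individually without ever
 * having to split a block mapping.
 */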
static bool guest_stage2_force_pte_cb(u64 addr, u64 end,
				      enum kvm_pgtable_prot prot)
{
	return true;
}
static void *guest_s2_zalloc_pages_exact(size_t size)
{
	void *addr = hyp_alloc_pages(&current_vm->pool, get_order(size));

	WARN_ON(size != (PAGE_SIZE << get_order(size)));
	hyp_split_page(hyp_virt_to_page(addr));

	return addr;
}

static void guest_s2_free_pages_exact(void *addr, unsigned long size)
{
	u8 order = get_order(size);
	unsigned int i;

	for (i = 0; i < (1 << order); i++)
		hyp_put_page(&current_vm->pool, addr + (i * PAGE_SIZE));
}
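/*
 * Page-table pages for guests come from the per-VM pool first; if that is
 * exhausted, fall back to the memcache topped up by the host, in which case
 * both the page and its hyp_page metadata need to be zeroed here.
 */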
static void *guest_s2_zalloc_page(void *mc)
{
	struct hyp_page *p;
	void *addr;

	addr = hyp_alloc_pages(&current_vm->pool, 0);
	if (addr)
		return addr;

	addr = pop_hyp_memcache(mc, hyp_phys_to_virt);
	if (!addr)
		return addr;

	memset(addr, 0, PAGE_SIZE);
	p = hyp_virt_to_page(addr);
	memset(p, 0, sizeof(*p));
	p->refcount = 1;

	return addr;
}

static void guest_s2_get_page(void *addr)
{
	hyp_get_page(&current_vm->pool, addr);
}

static void guest_s2_put_page(void *addr)
{
	hyp_put_page(&current_vm->pool, addr);
}

static void clean_dcache_guest_page(void *va, size_t size)
{
	__clean_dcache_guest_page(hyp_fixmap_map(__hyp_pa(va)), size);
	hyp_fixmap_unmap();
}

static void invalidate_icache_guest_page(void *va, size_t size)
{
	__invalidate_icache_guest_page(hyp_fixmap_map(__hyp_pa(va)), size);
	hyp_fixmap_unmap();
}
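/*
 * Prepare a protected guest's stage-2 page-table: the pgd pages donated by
 * the host seed the per-VM allocator backing the mm_ops installed below.
 */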
int kvm_guest_prepare_stage2(struct pkvm_hyp_vm *vm, void *pgd)
{
	struct kvm_s2_mmu *mmu = &vm->kvm.arch.mmu;
	unsigned long nr_pages;
	int ret;

	nr_pages = kvm_pgtable_stage2_pgd_size(vm->kvm.arch.vtcr) >> PAGE_SHIFT;
	ret = hyp_pool_init(&vm->pool, hyp_virt_to_pfn(pgd), nr_pages, 0);
	if (ret)
		return ret;

	hyp_spin_lock_init(&vm->lock);
	vm->mm_ops = (struct kvm_pgtable_mm_ops) {
		.zalloc_pages_exact	= guest_s2_zalloc_pages_exact,
		.free_pages_exact	= guest_s2_free_pages_exact,
		.zalloc_page		= guest_s2_zalloc_page,
		.phys_to_virt		= hyp_phys_to_virt,
		.virt_to_phys		= hyp_virt_to_phys,
		.page_count		= hyp_page_count,
		.get_page		= guest_s2_get_page,
		.put_page		= guest_s2_put_page,
		.dcache_clean_inval_poc	= clean_dcache_guest_page,
		.icache_inval_pou	= invalidate_icache_guest_page,
	};

	guest_lock_component(vm);
	ret = __kvm_pgtable_stage2_init(mmu->pgt, mmu, &vm->mm_ops, 0,
					guest_stage2_force_pte_cb);
	guest_unlock_component(vm);
	if (ret)
		return ret;

	vm->kvm.arch.mmu.pgd_phys = __hyp_pa(vm->pgt.pgd);

	return 0;
}
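/*
 * Tear down a guest's stage-2 and hand every page-table page back to the
 * host: the pages are drained from the per-VM pool into the memcache and
 * donated back one at a time.
 */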
void reclaim_guest_pages(struct pkvm_hyp_vm *vm, struct kvm_hyp_memcache *mc)
{
	void *addr;

	/* Dump all pgtable pages in the hyp_pool */
	guest_lock_component(vm);
	kvm_pgtable_stage2_destroy(&vm->pgt);
	vm->kvm.arch.mmu.pgd_phys = 0ULL;
	guest_unlock_component(vm);

	/* Drain the hyp_pool into the memcache */
	addr = hyp_alloc_pages(&vm->pool, 0);
	while (addr) {
		memset(hyp_virt_to_page(addr), 0, sizeof(struct hyp_page));
		push_hyp_memcache(mc, addr, hyp_virt_to_phys);
		WARN_ON(__pkvm_hyp_donate_host(hyp_virt_to_pfn(addr), 1));
		addr = hyp_alloc_pages(&vm->pool, 0);
	}
}
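/*
 * Enable the host stage-2 on the calling CPU. Fails with -EPERM if it has
 * already been enabled, i.e. if HCR_EL2.VM is already set for this CPU.
 */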
int __pkvm_prot_finalize(void)
{
	struct kvm_s2_mmu *mmu = &host_mmu.arch.mmu;
	struct kvm_nvhe_init_params *params = this_cpu_ptr(&kvm_init_params);

	if (params->hcr_el2 & HCR_VM)
		return -EPERM;

	params->vttbr = kvm_get_vttbr(mmu);
	params->vtcr = host_mmu.arch.vtcr;
	params->hcr_el2 |= HCR_VM;

	/*
	 * The CMO below not only cleans the updated params to the
	 * PoC, but also provides the DSB that ensures ongoing
	 * page-table walks that have started before we trapped to EL2
	 * have completed.
	 */
	kvm_flush_dcache_to_poc(params, sizeof(*params));

	write_sysreg(params->hcr_el2, hcr_el2);
	__load_stage2(&host_mmu.arch.mmu, &host_mmu.arch);

	/*
	 * Make sure to have an ISB before the TLB maintenance below but only
	 * when __load_stage2() doesn't include one already.
	 */
	asm(ALTERNATIVE("isb", "nop", ARM64_WORKAROUND_SPECULATIVE_AT));

	/* Invalidate stale HCR bits that may be cached in TLBs */
	__tlbi(vmalls12e1);
	dsb(nsh);
	isb();

	return 0;
}
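/*
 * Recycle host stage-2 pages by unmapping every MMIO (non-memblock) range;
 * this is the fallback used by host_stage2_try() below when a map operation
 * runs out of page-table pages.
 */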
static int host_stage2_unmap_dev_all(void)
{
	struct kvm_pgtable *pgt = &host_mmu.pgt;
	struct memblock_region *reg;
	u64 addr = 0;
	int i, ret;

	/* Unmap all non-memory regions to recycle the pages */
	for (i = 0; i < hyp_memblock_nr; i++, addr = reg->base + reg->size) {
		reg = &hyp_memory[i];
		ret = kvm_pgtable_stage2_unmap(pgt, addr, reg->base - addr);
		if (ret)
			return ret;
	}
	return kvm_pgtable_stage2_unmap(pgt, addr, BIT(pgt->ia_bits) - addr);
}
struct kvm_mem_range {
	u64 start;
	u64 end;
};
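/*
 * Binary-search the sorted hyp_memory array for @addr. On return, @range
 * covers either the matching memblock region or the gap between the two
 * neighbouring regions, and the region itself (or NULL) is returned.
 */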
static struct memblock_region *find_mem_range(phys_addr_t addr, struct kvm_mem_range *range)
{
	int cur, left = 0, right = hyp_memblock_nr;
	struct memblock_region *reg;
	u64 end;

	range->start = 0;
	range->end = ULONG_MAX;

	/* The list of memblock regions is sorted, binary search it */
	while (left < right) {
		cur = (left + right) >> 1;
		reg = &hyp_memory[cur];
		end = reg->base + reg->size;
		if (addr < reg->base) {
			right = cur;
			range->end = reg->base;
		} else if (addr >= end) {
			left = cur + 1;
			range->start = end;
		} else {
			range->start = reg->base;
			range->end = end;
			return reg;
		}
	}

	return NULL;
}
bool addr_is_memory(phys_addr_t phys)
{
	struct kvm_mem_range range;

	return !!find_mem_range(phys, &range);
}

static bool addr_is_allowed_memory(phys_addr_t phys)
{
	struct memblock_region *reg;
	struct kvm_mem_range range;

	reg = find_mem_range(phys, &range);

	return reg && !(reg->flags & MEMBLOCK_NOMAP);
}

static bool is_in_mem_range(u64 addr, struct kvm_mem_range *range)
{
	return range->start <= addr && addr < range->end;
}

static bool range_is_memory(u64 start, u64 end)
{
	struct kvm_mem_range r;

	if (!find_mem_range(start, &r))
		return false;

	return is_in_mem_range(end - 1, &r);
}
static inline int __host_stage2_idmap(u64 start, u64 end,
				      enum kvm_pgtable_prot prot)
{
	return kvm_pgtable_stage2_map(&host_mmu.pgt, start, end - start, start,
				      prot, &host_s2_pool, 0);
}
/*
 * The pool has been provided with enough pages to cover all of memory with
 * page granularity, but it is difficult to know how much of the MMIO range
 * we will need to cover upfront, so we may need to 'recycle' the pages if we
 * run out.
 */
#define host_stage2_try(fn, ...)				\
	({							\
		int __ret;					\
		hyp_assert_lock_held(&host_mmu.lock);		\
		__ret = fn(__VA_ARGS__);			\
		if (__ret == -ENOMEM) {				\
			__ret = host_stage2_unmap_dev_all();	\
			if (!__ret)				\
				__ret = fn(__VA_ARGS__);	\
		}						\
		__ret;						\
	 })
static inline bool range_included(struct kvm_mem_range *child,
				  struct kvm_mem_range *parent)
{
	return parent->start <= child->start && child->end <= parent->end;
}
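/*
 * Shrink @range around @addr to the largest granule that can be block-mapped
 * while still fitting within @range, provided the address is neither already
 * mapped (-EAGAIN) nor annotated as owned by another component (-EPERM).
 */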
static int host_stage2_adjust_range(u64 addr, struct kvm_mem_range *range)
{
	struct kvm_mem_range cur;
	kvm_pte_t pte;
	u32 level;
	int ret;

	hyp_assert_lock_held(&host_mmu.lock);
	ret = kvm_pgtable_get_leaf(&host_mmu.pgt, addr, &pte, &level);
	if (ret)
		return ret;

	if (kvm_pte_valid(pte))
		return -EAGAIN;

	if (pte)
		return -EPERM;

	do {
		u64 granule = kvm_granule_size(level);
		cur.start = ALIGN_DOWN(addr, granule);
		cur.end = cur.start + granule;
		level++;
	} while ((level < KVM_PGTABLE_MAX_LEVELS) &&
			!(kvm_level_supports_block_mapping(level) &&
			  range_included(&cur, range)));

	*range = cur;

	return 0;
}
int host_stage2_idmap_locked(phys_addr_t addr, u64 size,
			     enum kvm_pgtable_prot prot)
{
	return host_stage2_try(__host_stage2_idmap, addr, addr + size, prot);
}

int host_stage2_set_owner_locked(phys_addr_t addr, u64 size, u8 owner_id)
{
	return host_stage2_try(kvm_pgtable_stage2_set_owner, &host_mmu.pgt,
			       addr, size, &host_s2_pool, owner_id);
}
static bool host_stage2_force_pte_cb(u64 addr, u64 end, enum kvm_pgtable_prot prot)
{
	/*
	 * Block mappings must be used with care in the host stage-2 as a
	 * kvm_pgtable_stage2_map() operation targeting a page in the range of
	 * an existing block will delete the block under the assumption that
	 * mappings in the rest of the block range can always be rebuilt lazily.
	 * That assumption is correct for the host stage-2 with RWX mappings
	 * targeting memory or RW mappings targeting MMIO ranges (see
	 * host_stage2_idmap() below which implements some of the host memory
	 * abort logic). However, this is not safe for any other mappings where
	 * the host stage-2 page-table is in fact the only place where this
	 * state is stored. In all those cases, it is safer to use page-level
	 * mappings, hence avoiding to lose the state because of side-effects in
	 * kvm_pgtable_stage2_map().
	 */
	if (range_is_memory(addr, end))
		return prot != PKVM_HOST_MEM_PROT;

	return prot != PKVM_HOST_MMIO_PROT;
}
static int host_stage2_idmap(u64 addr)
{
	struct kvm_mem_range range;
	bool is_memory = !!find_mem_range(addr, &range);
	enum kvm_pgtable_prot prot;
	int ret;

	prot = is_memory ? PKVM_HOST_MEM_PROT : PKVM_HOST_MMIO_PROT;

	host_lock_component();
	ret = host_stage2_adjust_range(addr, &range);
	if (ret)
		goto unlock;

	ret = host_stage2_idmap_locked(range.start, range.end - range.start, prot);
unlock:
	host_unlock_component();

	return ret;
}
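/*
 * Host stage-2 fault handler: lazily extend the identity mapping to cover the
 * faulting address. Anything other than success or a benign -EAGAIN race is
 * fatal.
 */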
void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt)
{
	struct kvm_vcpu_fault_info fault;
	u64 esr, addr;
	int ret = 0;

	esr = read_sysreg_el2(SYS_ESR);
	BUG_ON(!__get_fault_info(esr, &fault));

	addr = (fault.hpfar_el2 & HPFAR_MASK) << 8;
	ret = host_stage2_idmap(addr);
	BUG_ON(ret && ret != -EAGAIN);
}
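/*
 * Memory ownership transitions (share, unshare, donate) are described by a
 * pkvm_mem_transition: an initiator component, a completer component and a
 * number of pages, with the address expressed in each side's address space.
 */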
struct pkvm_mem_transition {
	u64				nr_pages;

	struct {
		enum pkvm_component_id	id;
		/* Address in the initiator's address space */
		u64			addr;

		union {
			struct {
				/* Address in the completer's address space */
				u64	completer_addr;
			} host;
			struct {
				u64	completer_addr;
			} hyp;
		};
	} initiator;

	struct {
		enum pkvm_component_id	id;
	} completer;
};

struct pkvm_mem_share {
	const struct pkvm_mem_transition	tx;
	const enum kvm_pgtable_prot		completer_prot;
};

struct pkvm_mem_donation {
	const struct pkvm_mem_transition	tx;
};
struct check_walk_data {
	enum pkvm_page_state	desired;
	enum pkvm_page_state	(*get_page_state)(kvm_pte_t pte);
};

static int __check_page_state_visitor(const struct kvm_pgtable_visit_ctx *ctx,
				      enum kvm_pgtable_walk_flags visit)
{
	struct check_walk_data *d = ctx->arg;

	if (kvm_pte_valid(ctx->old) && !addr_is_allowed_memory(kvm_pte_to_phys(ctx->old)))
		return -EINVAL;

	return d->get_page_state(ctx->old) == d->desired ? 0 : -EPERM;
}

static int check_page_state_range(struct kvm_pgtable *pgt, u64 addr, u64 size,
				  struct check_walk_data *data)
{
	struct kvm_pgtable_walker walker = {
		.cb	= __check_page_state_visitor,
		.arg	= data,
		.flags	= KVM_PGTABLE_WALK_LEAF,
	};

	return kvm_pgtable_walk(pgt, addr, size, &walker);
}
static enum pkvm_page_state host_get_page_state(kvm_pte_t pte)
{
	if (!kvm_pte_valid(pte) && pte)
		return PKVM_NOPAGE;

	return pkvm_getstate(kvm_pgtable_stage2_pte_prot(pte));
}

static int __host_check_page_state_range(u64 addr, u64 size,
					 enum pkvm_page_state state)
{
	struct check_walk_data d = {
		.desired	= state,
		.get_page_state	= host_get_page_state,
	};

	hyp_assert_lock_held(&host_mmu.lock);
	return check_page_state_range(&host_mmu.pgt, addr, size, &d);
}

static int __host_set_page_state_range(u64 addr, u64 size,
				       enum pkvm_page_state state)
{
	enum kvm_pgtable_prot prot = pkvm_mkstate(PKVM_HOST_MEM_PROT, state);

	return host_stage2_idmap_locked(addr, size, prot);
}
static int host_request_owned_transition(u64 *completer_addr,
					 const struct pkvm_mem_transition *tx)
{
	u64 size = tx->nr_pages * PAGE_SIZE;
	u64 addr = tx->initiator.addr;

	*completer_addr = tx->initiator.host.completer_addr;
	return __host_check_page_state_range(addr, size, PKVM_PAGE_OWNED);
}

static int host_request_unshare(u64 *completer_addr,
				const struct pkvm_mem_transition *tx)
{
	u64 size = tx->nr_pages * PAGE_SIZE;
	u64 addr = tx->initiator.addr;

	*completer_addr = tx->initiator.host.completer_addr;
	return __host_check_page_state_range(addr, size, PKVM_PAGE_SHARED_OWNED);
}

static int host_initiate_share(u64 *completer_addr,
			       const struct pkvm_mem_transition *tx)
{
	u64 size = tx->nr_pages * PAGE_SIZE;
	u64 addr = tx->initiator.addr;

	*completer_addr = tx->initiator.host.completer_addr;
	return __host_set_page_state_range(addr, size, PKVM_PAGE_SHARED_OWNED);
}

static int host_initiate_unshare(u64 *completer_addr,
				 const struct pkvm_mem_transition *tx)
{
	u64 size = tx->nr_pages * PAGE_SIZE;
	u64 addr = tx->initiator.addr;

	*completer_addr = tx->initiator.host.completer_addr;
	return __host_set_page_state_range(addr, size, PKVM_PAGE_OWNED);
}

static int host_initiate_donation(u64 *completer_addr,
				  const struct pkvm_mem_transition *tx)
{
	u8 owner_id = tx->completer.id;
	u64 size = tx->nr_pages * PAGE_SIZE;

	*completer_addr = tx->initiator.host.completer_addr;
	return host_stage2_set_owner_locked(tx->initiator.addr, size, owner_id);
}

static bool __host_ack_skip_pgtable_check(const struct pkvm_mem_transition *tx)
{
	return !(IS_ENABLED(CONFIG_NVHE_EL2_DEBUG) ||
		 tx->initiator.id != PKVM_ID_HYP);
}

static int __host_ack_transition(u64 addr, const struct pkvm_mem_transition *tx,
				 enum pkvm_page_state state)
{
	u64 size = tx->nr_pages * PAGE_SIZE;

	if (__host_ack_skip_pgtable_check(tx))
		return 0;

	return __host_check_page_state_range(addr, size, state);
}

static int host_ack_donation(u64 addr, const struct pkvm_mem_transition *tx)
{
	return __host_ack_transition(addr, tx, PKVM_NOPAGE);
}

static int host_complete_donation(u64 addr, const struct pkvm_mem_transition *tx)
{
	u64 size = tx->nr_pages * PAGE_SIZE;
	u8 host_id = tx->completer.id;

	return host_stage2_set_owner_locked(addr, size, host_id);
}
static enum pkvm_page_state hyp_get_page_state(kvm_pte_t pte)
{
	if (!kvm_pte_valid(pte))
		return PKVM_NOPAGE;

	return pkvm_getstate(kvm_pgtable_hyp_pte_prot(pte));
}

static int __hyp_check_page_state_range(u64 addr, u64 size,
					enum pkvm_page_state state)
{
	struct check_walk_data d = {
		.desired	= state,
		.get_page_state	= hyp_get_page_state,
	};

	hyp_assert_lock_held(&pkvm_pgd_lock);
	return check_page_state_range(&pkvm_pgtable, addr, size, &d);
}
static int hyp_request_donation(u64 *completer_addr,
				const struct pkvm_mem_transition *tx)
{
	u64 size = tx->nr_pages * PAGE_SIZE;
	u64 addr = tx->initiator.addr;

	*completer_addr = tx->initiator.hyp.completer_addr;
	return __hyp_check_page_state_range(addr, size, PKVM_PAGE_OWNED);
}

static int hyp_initiate_donation(u64 *completer_addr,
				 const struct pkvm_mem_transition *tx)
{
	u64 size = tx->nr_pages * PAGE_SIZE;
	int ret;

	*completer_addr = tx->initiator.hyp.completer_addr;
	ret = kvm_pgtable_hyp_unmap(&pkvm_pgtable, tx->initiator.addr, size);
	return (ret != size) ? -EFAULT : 0;
}
static bool __hyp_ack_skip_pgtable_check(const struct pkvm_mem_transition *tx)
{
	return !(IS_ENABLED(CONFIG_NVHE_EL2_DEBUG) ||
		 tx->initiator.id != PKVM_ID_HOST);
}

static int hyp_ack_share(u64 addr, const struct pkvm_mem_transition *tx,
			 enum kvm_pgtable_prot perms)
{
	u64 size = tx->nr_pages * PAGE_SIZE;

	if (perms != PAGE_HYP)
		return -EPERM;

	if (__hyp_ack_skip_pgtable_check(tx))
		return 0;

	return __hyp_check_page_state_range(addr, size, PKVM_NOPAGE);
}

static int hyp_ack_unshare(u64 addr, const struct pkvm_mem_transition *tx)
{
	u64 size = tx->nr_pages * PAGE_SIZE;

	if (tx->initiator.id == PKVM_ID_HOST && hyp_page_count((void *)addr))
		return -EBUSY;

	if (__hyp_ack_skip_pgtable_check(tx))
		return 0;

	return __hyp_check_page_state_range(addr, size,
					    PKVM_PAGE_SHARED_BORROWED);
}

static int hyp_ack_donation(u64 addr, const struct pkvm_mem_transition *tx)
{
	u64 size = tx->nr_pages * PAGE_SIZE;

	if (__hyp_ack_skip_pgtable_check(tx))
		return 0;

	return __hyp_check_page_state_range(addr, size, PKVM_NOPAGE);
}

static int hyp_complete_share(u64 addr, const struct pkvm_mem_transition *tx,
			      enum kvm_pgtable_prot perms)
{
	void *start = (void *)addr, *end = start + (tx->nr_pages * PAGE_SIZE);
	enum kvm_pgtable_prot prot;

	prot = pkvm_mkstate(perms, PKVM_PAGE_SHARED_BORROWED);
	return pkvm_create_mappings_locked(start, end, prot);
}

static int hyp_complete_unshare(u64 addr, const struct pkvm_mem_transition *tx)
{
	u64 size = tx->nr_pages * PAGE_SIZE;
	int ret = kvm_pgtable_hyp_unmap(&pkvm_pgtable, addr, size);

	return (ret != size) ? -EFAULT : 0;
}

static int hyp_complete_donation(u64 addr,
				 const struct pkvm_mem_transition *tx)
{
	void *start = (void *)addr, *end = start + (tx->nr_pages * PAGE_SIZE);
	enum kvm_pgtable_prot prot = pkvm_mkstate(PAGE_HYP, PKVM_PAGE_OWNED);

	return pkvm_create_mappings_locked(start, end, prot);
}
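/*
 * Each transition is split into a check phase, which validates the page state
 * on both the initiator and the completer side without modifying anything,
 * and a commit phase which is not expected to fail once the check has
 * succeeded (hence the WARN_ON() in do_share()/do_unshare()/do_donate()).
 */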
static int check_share(struct pkvm_mem_share *share)
{
	const struct pkvm_mem_transition *tx = &share->tx;
	u64 completer_addr;
	int ret;

	switch (tx->initiator.id) {
	case PKVM_ID_HOST:
		ret = host_request_owned_transition(&completer_addr, tx);
		break;
	default:
		ret = -EINVAL;
	}

	if (ret)
		return ret;

	switch (tx->completer.id) {
	case PKVM_ID_HYP:
		ret = hyp_ack_share(completer_addr, tx, share->completer_prot);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
static int __do_share(struct pkvm_mem_share *share)
{
	const struct pkvm_mem_transition *tx = &share->tx;
	u64 completer_addr;
	int ret;

	switch (tx->initiator.id) {
	case PKVM_ID_HOST:
		ret = host_initiate_share(&completer_addr, tx);
		break;
	default:
		ret = -EINVAL;
	}

	if (ret)
		return ret;

	switch (tx->completer.id) {
	case PKVM_ID_HYP:
		ret = hyp_complete_share(completer_addr, tx, share->completer_prot);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
/*
 * do_share():
 *
 * The page owner grants access to another component with a given set
 * of permissions.
 *
 * Initiator: OWNED	=> SHARED_OWNED
 * Completer: NOPAGE	=> SHARED_BORROWED
 */
static int do_share(struct pkvm_mem_share *share)
{
	int ret;

	ret = check_share(share);
	if (ret)
		return ret;

	return WARN_ON(__do_share(share));
}
static int check_unshare(struct pkvm_mem_share *share)
{
	const struct pkvm_mem_transition *tx = &share->tx;
	u64 completer_addr;
	int ret;

	switch (tx->initiator.id) {
	case PKVM_ID_HOST:
		ret = host_request_unshare(&completer_addr, tx);
		break;
	default:
		ret = -EINVAL;
	}

	if (ret)
		return ret;

	switch (tx->completer.id) {
	case PKVM_ID_HYP:
		ret = hyp_ack_unshare(completer_addr, tx);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
static int __do_unshare(struct pkvm_mem_share *share)
{
	const struct pkvm_mem_transition *tx = &share->tx;
	u64 completer_addr;
	int ret;

	switch (tx->initiator.id) {
	case PKVM_ID_HOST:
		ret = host_initiate_unshare(&completer_addr, tx);
		break;
	default:
		ret = -EINVAL;
	}

	if (ret)
		return ret;

	switch (tx->completer.id) {
	case PKVM_ID_HYP:
		ret = hyp_complete_unshare(completer_addr, tx);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
/*
 * do_unshare():
 *
 * The page owner revokes access from another component for a range of
 * pages which were previously shared using do_share().
 *
 * Initiator: SHARED_OWNED	=> OWNED
 * Completer: SHARED_BORROWED	=> NOPAGE
 */
static int do_unshare(struct pkvm_mem_share *share)
{
	int ret;

	ret = check_unshare(share);
	if (ret)
		return ret;

	return WARN_ON(__do_unshare(share));
}
static int check_donation(struct pkvm_mem_donation *donation)
{
	const struct pkvm_mem_transition *tx = &donation->tx;
	u64 completer_addr;
	int ret;

	switch (tx->initiator.id) {
	case PKVM_ID_HOST:
		ret = host_request_owned_transition(&completer_addr, tx);
		break;
	case PKVM_ID_HYP:
		ret = hyp_request_donation(&completer_addr, tx);
		break;
	default:
		ret = -EINVAL;
	}

	if (ret)
		return ret;

	switch (tx->completer.id) {
	case PKVM_ID_HOST:
		ret = host_ack_donation(completer_addr, tx);
		break;
	case PKVM_ID_HYP:
		ret = hyp_ack_donation(completer_addr, tx);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
static int __do_donate(struct pkvm_mem_donation *donation)
{
	const struct pkvm_mem_transition *tx = &donation->tx;
	u64 completer_addr;
	int ret;

	switch (tx->initiator.id) {
	case PKVM_ID_HOST:
		ret = host_initiate_donation(&completer_addr, tx);
		break;
	case PKVM_ID_HYP:
		ret = hyp_initiate_donation(&completer_addr, tx);
		break;
	default:
		ret = -EINVAL;
	}

	if (ret)
		return ret;

	switch (tx->completer.id) {
	case PKVM_ID_HOST:
		ret = host_complete_donation(completer_addr, tx);
		break;
	case PKVM_ID_HYP:
		ret = hyp_complete_donation(completer_addr, tx);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
/*
 * do_donate():
 *
 * The page owner transfers ownership to another component, losing access
 * as a consequence.
 *
 * Initiator: OWNED	=> NOPAGE
 * Completer: NOPAGE	=> OWNED
 */
static int do_donate(struct pkvm_mem_donation *donation)
{
	int ret;

	ret = check_donation(donation);
	if (ret)
		return ret;

	return WARN_ON(__do_donate(donation));
}
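/*
 * Share a host-owned page with the hypervisor at its hyp VA alias: the page
 * becomes SHARED_OWNED in the host stage-2 and SHARED_BORROWED in the hyp
 * page-table.
 */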
int __pkvm_host_share_hyp(u64 pfn)
{
	int ret;
	u64 host_addr = hyp_pfn_to_phys(pfn);
	u64 hyp_addr = (u64)__hyp_va(host_addr);
	struct pkvm_mem_share share = {
		.tx	= {
			.nr_pages	= 1,
			.initiator	= {
				.id	= PKVM_ID_HOST,
				.addr	= host_addr,
				.host	= {
					.completer_addr = hyp_addr,
				},
			},
			.completer	= {
				.id	= PKVM_ID_HYP,
			},
		},
		.completer_prot	= PAGE_HYP,
	};

	host_lock_component();
	hyp_lock_component();

	ret = do_share(&share);

	hyp_unlock_component();
	host_unlock_component();

	return ret;
}
int __pkvm_host_unshare_hyp(u64 pfn)
{
	int ret;
	u64 host_addr = hyp_pfn_to_phys(pfn);
	u64 hyp_addr = (u64)__hyp_va(host_addr);
	struct pkvm_mem_share share = {
		.tx	= {
			.nr_pages	= 1,
			.initiator	= {
				.id	= PKVM_ID_HOST,
				.addr	= host_addr,
				.host	= {
					.completer_addr = hyp_addr,
				},
			},
			.completer	= {
				.id	= PKVM_ID_HYP,
			},
		},
		.completer_prot	= PAGE_HYP,
	};

	host_lock_component();
	hyp_lock_component();

	ret = do_unshare(&share);

	hyp_unlock_component();
	host_unlock_component();

	return ret;
}
int __pkvm_host_donate_hyp(u64 pfn, u64 nr_pages)
{
	int ret;
	u64 host_addr = hyp_pfn_to_phys(pfn);
	u64 hyp_addr = (u64)__hyp_va(host_addr);
	struct pkvm_mem_donation donation = {
		.tx	= {
			.nr_pages	= nr_pages,
			.initiator	= {
				.id	= PKVM_ID_HOST,
				.addr	= host_addr,
				.host	= {
					.completer_addr = hyp_addr,
				},
			},
			.completer	= {
				.id	= PKVM_ID_HYP,
			},
		},
	};

	host_lock_component();
	hyp_lock_component();

	ret = do_donate(&donation);

	hyp_unlock_component();
	host_unlock_component();

	return ret;
}
int __pkvm_hyp_donate_host(u64 pfn, u64 nr_pages)
{
	int ret;
	u64 host_addr = hyp_pfn_to_phys(pfn);
	u64 hyp_addr = (u64)__hyp_va(host_addr);
	struct pkvm_mem_donation donation = {
		.tx	= {
			.nr_pages	= nr_pages,
			.initiator	= {
				.id	= PKVM_ID_HYP,
				.addr	= hyp_addr,
				.hyp	= {
					.completer_addr = host_addr,
				},
			},
			.completer	= {
				.id	= PKVM_ID_HOST,
			},
		},
	};

	host_lock_component();
	hyp_lock_component();

	ret = do_donate(&donation);

	hyp_unlock_component();
	host_unlock_component();

	return ret;
}
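/*
 * Pin a range of pages that the host has shared with the hypervisor by
 * raising their refcount: this prevents the host from unsharing them (see the
 * hyp_page_count() check in hyp_ack_unshare()) until hyp_unpin_shared_mem()
 * drops the reference again.
 */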
int hyp_pin_shared_mem(void *from, void *to)
{
	u64 cur, start = ALIGN_DOWN((u64)from, PAGE_SIZE);
	u64 end = PAGE_ALIGN((u64)to);
	u64 size = end - start;
	int ret;

	host_lock_component();
	hyp_lock_component();

	ret = __host_check_page_state_range(__hyp_pa(start), size,
					    PKVM_PAGE_SHARED_OWNED);
	if (ret)
		goto unlock;

	ret = __hyp_check_page_state_range(start, size,
					   PKVM_PAGE_SHARED_BORROWED);
	if (ret)
		goto unlock;

	for (cur = start; cur < end; cur += PAGE_SIZE)
		hyp_page_ref_inc(hyp_virt_to_page(cur));

unlock:
	hyp_unlock_component();
	host_unlock_component();

	return ret;
}
void hyp_unpin_shared_mem(void *from, void *to)
{
	u64 cur, start = ALIGN_DOWN((u64)from, PAGE_SIZE);
	u64 end = PAGE_ALIGN((u64)to);

	host_lock_component();
	hyp_lock_component();

	for (cur = start; cur < end; cur += PAGE_SIZE)
		hyp_page_ref_dec(hyp_virt_to_page(cur));

	hyp_unlock_component();
	host_unlock_component();
}