// SPDX-License-Identifier: GPL-2.0-only
/*
 * Stand-alone page-table allocator for hyp stage-1 and guest stage-2.
 * No bombay mix was harmed in the writing of this file.
 *
 * Copyright (C) 2020 Google LLC
 * Author: Will Deacon <will@kernel.org>
 */

#include <linux/bitfield.h>
#include <asm/kvm_pgtable.h>
#include <asm/stage2_pgtable.h>

#define KVM_PTE_TYPE			BIT(1)
#define KVM_PTE_TYPE_BLOCK		0
#define KVM_PTE_TYPE_PAGE		1
#define KVM_PTE_TYPE_TABLE		1

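/*
 * Bit 1 of a valid descriptor encodes its type: at the last level it
 * distinguishes page (1) from reserved (0) entries, while at higher levels
 * it distinguishes table (1) from block (0) entries. "Page" and "table"
 * therefore share an encoding and are told apart by the walk level.
 */
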
#define KVM_PTE_LEAF_ATTR_LO		GENMASK(11, 2)

#define KVM_PTE_LEAF_ATTR_LO_S1_ATTRIDX	GENMASK(4, 2)
#define KVM_PTE_LEAF_ATTR_LO_S1_AP	GENMASK(7, 6)
#define KVM_PTE_LEAF_ATTR_LO_S1_AP_RO	3
#define KVM_PTE_LEAF_ATTR_LO_S1_AP_RW	1
#define KVM_PTE_LEAF_ATTR_LO_S1_SH	GENMASK(9, 8)
#define KVM_PTE_LEAF_ATTR_LO_S1_SH_IS	3
#define KVM_PTE_LEAF_ATTR_LO_S1_AF	BIT(10)

#define KVM_PTE_LEAF_ATTR_LO_S2_MEMATTR	GENMASK(5, 2)
#define KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R	BIT(6)
#define KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W	BIT(7)
#define KVM_PTE_LEAF_ATTR_LO_S2_SH	GENMASK(9, 8)
#define KVM_PTE_LEAF_ATTR_LO_S2_SH_IS	3
#define KVM_PTE_LEAF_ATTR_LO_S2_AF	BIT(10)

#define KVM_PTE_LEAF_ATTR_HI		GENMASK(63, 51)

#define KVM_PTE_LEAF_ATTR_HI_SW		GENMASK(58, 55)

#define KVM_PTE_LEAF_ATTR_HI_S1_XN	BIT(54)

#define KVM_PTE_LEAF_ATTR_HI_S2_XN	BIT(54)

#define KVM_PTE_LEAF_ATTR_S2_PERMS	(KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R | \
					 KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W | \
					 KVM_PTE_LEAF_ATTR_HI_S2_XN)

#define KVM_INVALID_PTE_OWNER_MASK	GENMASK(9, 2)
#define KVM_MAX_OWNER_ID		1

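/*
 * Invalid (i.e. not valid to the hardware walker) PTEs can still carry
 * software state: bits [9:2] record the ID of the entity that owns the page,
 * so that ownership annotations are refcounted like any other entry. Owner
 * ID 0 is the page-table owner itself, hence only IDs up to
 * KVM_MAX_OWNER_ID need to be encoded.
 */
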
/*
 * Used to indicate a pte for which a 'break-before-make' sequence is in
 * progress.
 */
#define KVM_INVALID_PTE_LOCKED		BIT(10)

struct kvm_pgtable_walk_data {
	struct kvm_pgtable_walker	*walker;

	u64				start;
	u64				addr;
	u64				end;
};

static bool kvm_phys_is_valid(u64 phys)
{
	return phys < BIT(id_aa64mmfr0_parange_to_phys_shift(ID_AA64MMFR0_EL1_PARANGE_MAX));
}

static bool kvm_block_mapping_supported(const struct kvm_pgtable_visit_ctx *ctx, u64 phys)
{
	u64 granule = kvm_granule_size(ctx->level);

	if (!kvm_level_supports_block_mapping(ctx->level))
		return false;

	if (granule > (ctx->end - ctx->addr))
		return false;

	if (kvm_phys_is_valid(phys) && !IS_ALIGNED(phys, granule))
		return false;

	return IS_ALIGNED(ctx->addr, granule);
}

static u32 kvm_pgtable_idx(struct kvm_pgtable_walk_data *data, u32 level)
{
	u64 shift = kvm_granule_shift(level);
	u64 mask = BIT(PAGE_SHIFT - 3) - 1;

	return (data->addr >> shift) & mask;
}

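/*
 * Each table holds BIT(PAGE_SHIFT - 3) eight-byte entries (i.e. PTRS_PER_PTE,
 * 512 with a 4K granule), so the mask above selects the index within a
 * single table page.
 */
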
static u32 kvm_pgd_page_idx(struct kvm_pgtable *pgt, u64 addr)
{
	u64 shift = kvm_granule_shift(pgt->start_level - 1); /* May underflow */
	u64 mask = BIT(pgt->ia_bits) - 1;

	return (addr & mask) >> shift;
}

static u32 kvm_pgd_pages(u32 ia_bits, u32 start_level)
{
	struct kvm_pgtable pgt = {
		.ia_bits	= ia_bits,
		.start_level	= start_level,
	};

	return kvm_pgd_page_idx(&pgt, -1ULL) + 1;
}

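/*
 * A stage-2 walk may start from several physically contiguous ("concatenated")
 * tables rather than a single page. For example, with a 4K granule, a 40-bit
 * IPA space and a start level of 1, the top-level index covers IPA bit 39, so
 * kvm_pgd_pages() returns 2 and the PGD spans two concatenated pages.
 */
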
static bool kvm_pte_table(kvm_pte_t pte, u32 level)
{
	if (level == KVM_PGTABLE_MAX_LEVELS - 1)
		return false;

	if (!kvm_pte_valid(pte))
		return false;

	return FIELD_GET(KVM_PTE_TYPE, pte) == KVM_PTE_TYPE_TABLE;
}

static kvm_pte_t *kvm_pte_follow(kvm_pte_t pte, struct kvm_pgtable_mm_ops *mm_ops)
{
	return mm_ops->phys_to_virt(kvm_pte_to_phys(pte));
}

static void kvm_clear_pte(kvm_pte_t *ptep)
{
	WRITE_ONCE(*ptep, 0);
}

static kvm_pte_t kvm_init_table_pte(kvm_pte_t *childp, struct kvm_pgtable_mm_ops *mm_ops)
{
	kvm_pte_t pte = kvm_phys_to_pte(mm_ops->virt_to_phys(childp));

	pte |= FIELD_PREP(KVM_PTE_TYPE, KVM_PTE_TYPE_TABLE);
	pte |= KVM_PTE_VALID;
	return pte;
}

static kvm_pte_t kvm_init_valid_leaf_pte(u64 pa, kvm_pte_t attr, u32 level)
{
	kvm_pte_t pte = kvm_phys_to_pte(pa);
	u64 type = (level == KVM_PGTABLE_MAX_LEVELS - 1) ? KVM_PTE_TYPE_PAGE :
							   KVM_PTE_TYPE_BLOCK;

	pte |= attr & (KVM_PTE_LEAF_ATTR_LO | KVM_PTE_LEAF_ATTR_HI);
	pte |= FIELD_PREP(KVM_PTE_TYPE, type);
	pte |= KVM_PTE_VALID;

	return pte;
}

static kvm_pte_t kvm_init_invalid_leaf_owner(u8 owner_id)
{
	return FIELD_PREP(KVM_INVALID_PTE_OWNER_MASK, owner_id);
}

static int kvm_pgtable_visitor_cb(struct kvm_pgtable_walk_data *data,
				  const struct kvm_pgtable_visit_ctx *ctx,
				  enum kvm_pgtable_walk_flags visit)
{
	struct kvm_pgtable_walker *walker = data->walker;

	/* Ensure the appropriate lock is held (e.g. RCU lock for stage-2 MMU) */
	WARN_ON_ONCE(kvm_pgtable_walk_shared(ctx) && !kvm_pgtable_walk_lock_held());
	return walker->cb(ctx, visit);
}

static bool kvm_pgtable_walk_continue(const struct kvm_pgtable_walker *walker,
				      int r)
{
	/*
	 * Visitor callbacks return EAGAIN when the conditions that led to a
	 * fault are no longer reflected in the page tables due to a race to
	 * update a PTE. In the context of a fault handler this is interpreted
	 * as a signal to retry guest execution.
	 *
	 * Ignore the return code altogether for walkers outside a fault handler
	 * (e.g. write protecting a range of memory) and chug along with the
	 * page table walk.
	 */
	if (r == -EAGAIN)
		return !(walker->flags & KVM_PGTABLE_WALK_HANDLE_FAULT);

	return !r;
}

static int __kvm_pgtable_walk(struct kvm_pgtable_walk_data *data,
			      struct kvm_pgtable_mm_ops *mm_ops, kvm_pteref_t pgtable, u32 level);

static inline int __kvm_pgtable_visit(struct kvm_pgtable_walk_data *data,
				      struct kvm_pgtable_mm_ops *mm_ops,
				      kvm_pteref_t pteref, u32 level)
{
	enum kvm_pgtable_walk_flags flags = data->walker->flags;
	kvm_pte_t *ptep = kvm_dereference_pteref(data->walker, pteref);
	struct kvm_pgtable_visit_ctx ctx = {
		.ptep	= ptep,
		.old	= READ_ONCE(*ptep),
		.arg	= data->walker->arg,
		.mm_ops	= mm_ops,
		.start	= data->start,
		.addr	= data->addr,
		.end	= data->end,
		.level	= level,
		.flags	= flags,
	};
	int ret = 0;
	kvm_pteref_t childp;
	bool table = kvm_pte_table(ctx.old, level);

	if (table && (ctx.flags & KVM_PGTABLE_WALK_TABLE_PRE))
		ret = kvm_pgtable_visitor_cb(data, &ctx, KVM_PGTABLE_WALK_TABLE_PRE);

	if (!table && (ctx.flags & KVM_PGTABLE_WALK_LEAF)) {
		ret = kvm_pgtable_visitor_cb(data, &ctx, KVM_PGTABLE_WALK_LEAF);
		ctx.old = READ_ONCE(*ptep);
		table = kvm_pte_table(ctx.old, level);
	}

	if (!kvm_pgtable_walk_continue(data->walker, ret))
		goto out;

	if (!table) {
		data->addr = ALIGN_DOWN(data->addr, kvm_granule_size(level));
		data->addr += kvm_granule_size(level);
		goto out;
	}

	childp = (kvm_pteref_t)kvm_pte_follow(ctx.old, mm_ops);
	ret = __kvm_pgtable_walk(data, mm_ops, childp, level + 1);
	if (!kvm_pgtable_walk_continue(data->walker, ret))
		goto out;

	if (ctx.flags & KVM_PGTABLE_WALK_TABLE_POST)
		ret = kvm_pgtable_visitor_cb(data, &ctx, KVM_PGTABLE_WALK_TABLE_POST);

out:
	if (kvm_pgtable_walk_continue(data->walker, ret))
		return 0;

	return ret;
}

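/*
 * A single visit therefore runs the TABLE_PRE callback for table entries on
 * the way down, the LEAF callback for non-table entries (re-reading the PTE
 * in case the callback installed a table), recurses into child tables, and
 * finally runs the TABLE_POST callback on the way back up.
 */
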
static int __kvm_pgtable_walk(struct kvm_pgtable_walk_data *data,
			      struct kvm_pgtable_mm_ops *mm_ops, kvm_pteref_t pgtable, u32 level)
{
	u32 idx;
	int ret = 0;

	if (WARN_ON_ONCE(level >= KVM_PGTABLE_MAX_LEVELS))
		return -EINVAL;

	for (idx = kvm_pgtable_idx(data, level); idx < PTRS_PER_PTE; ++idx) {
		kvm_pteref_t pteref = &pgtable[idx];

		if (data->addr >= data->end)
			break;

		ret = __kvm_pgtable_visit(data, mm_ops, pteref, level);
		if (ret)
			break;
	}

	return ret;
}

static int _kvm_pgtable_walk(struct kvm_pgtable *pgt, struct kvm_pgtable_walk_data *data)
{
	u32 idx;
	int ret = 0;
	u64 limit = BIT(pgt->ia_bits);

	if (data->addr > limit || data->end > limit)
		return -ERANGE;

	if (!pgt->pgd)
		return -EINVAL;

	for (idx = kvm_pgd_page_idx(pgt, data->addr); data->addr < data->end; ++idx) {
		kvm_pteref_t pteref = &pgt->pgd[idx * PTRS_PER_PTE];

		ret = __kvm_pgtable_walk(data, pgt->mm_ops, pteref, pgt->start_level);
		if (ret)
			break;
	}

	return ret;
}

int kvm_pgtable_walk(struct kvm_pgtable *pgt, u64 addr, u64 size,
		     struct kvm_pgtable_walker *walker)
{
	struct kvm_pgtable_walk_data walk_data = {
		.start	= ALIGN_DOWN(addr, PAGE_SIZE),
		.addr	= ALIGN_DOWN(addr, PAGE_SIZE),
		.end	= PAGE_ALIGN(walk_data.addr + size),
		.walker	= walker,
	};
	int r;

	r = kvm_pgtable_walk_begin(walker);
	if (r)
		return r;

	r = _kvm_pgtable_walk(pgt, &walk_data);
	kvm_pgtable_walk_end(walker);

	return r;
}

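/*
 * Walkers are driven entirely by callbacks. A minimal, illustrative user
 * looks like the sketch below, where dump_walker() is a hypothetical
 * callback and not part of this file:
 *
 *	static int dump_walker(const struct kvm_pgtable_visit_ctx *ctx,
 *			       enum kvm_pgtable_walk_flags visit)
 *	{
 *		if (kvm_pte_valid(ctx->old))
 *			pr_info("%llx: %llx\n", ctx->addr, ctx->old);
 *		return 0;
 *	}
 *
 *	struct kvm_pgtable_walker walker = {
 *		.cb	= dump_walker,
 *		.flags	= KVM_PGTABLE_WALK_LEAF,
 *	};
 *
 *	kvm_pgtable_walk(pgt, addr, size, &walker);
 *
 * kvm_pgtable_get_leaf() below is the simplest in-tree example of this
 * pattern.
 */
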
struct leaf_walk_data {
	kvm_pte_t	pte;
	u32		level;
};

static int leaf_walker(const struct kvm_pgtable_visit_ctx *ctx,
		       enum kvm_pgtable_walk_flags visit)
{
	struct leaf_walk_data *data = ctx->arg;

	data->pte   = ctx->old;
	data->level = ctx->level;

	return 0;
}

int kvm_pgtable_get_leaf(struct kvm_pgtable *pgt, u64 addr,
			 kvm_pte_t *ptep, u32 *level)
{
	struct leaf_walk_data data;
	struct kvm_pgtable_walker walker = {
		.cb	= leaf_walker,
		.flags	= KVM_PGTABLE_WALK_LEAF,
		.arg	= &data,
	};
	int ret;

	ret = kvm_pgtable_walk(pgt, ALIGN_DOWN(addr, PAGE_SIZE),
			       PAGE_SIZE, &walker);
	if (!ret) {
		if (ptep)
			*ptep  = data.pte;
		if (level)
			*level = data.level;
	}

	return ret;
}

struct hyp_map_data {
	u64		phys;
	kvm_pte_t	attr;
};

static int hyp_set_prot_attr(enum kvm_pgtable_prot prot, kvm_pte_t *ptep)
{
	bool device = prot & KVM_PGTABLE_PROT_DEVICE;
	u32 mtype = device ? MT_DEVICE_nGnRE : MT_NORMAL;
	kvm_pte_t attr = FIELD_PREP(KVM_PTE_LEAF_ATTR_LO_S1_ATTRIDX, mtype);
	u32 sh = KVM_PTE_LEAF_ATTR_LO_S1_SH_IS;
	u32 ap = (prot & KVM_PGTABLE_PROT_W) ? KVM_PTE_LEAF_ATTR_LO_S1_AP_RW :
					       KVM_PTE_LEAF_ATTR_LO_S1_AP_RO;

	if (!(prot & KVM_PGTABLE_PROT_R))
		return -EINVAL;

	if (prot & KVM_PGTABLE_PROT_X) {
		if (prot & KVM_PGTABLE_PROT_W)
			return -EINVAL;

		if (device)
			return -EINVAL;
	} else {
		attr |= KVM_PTE_LEAF_ATTR_HI_S1_XN;
	}

	attr |= FIELD_PREP(KVM_PTE_LEAF_ATTR_LO_S1_AP, ap);
	attr |= FIELD_PREP(KVM_PTE_LEAF_ATTR_LO_S1_SH, sh);
	attr |= KVM_PTE_LEAF_ATTR_LO_S1_AF;
	attr |= prot & KVM_PTE_LEAF_ATTR_HI_SW;
	*ptep = attr;

	return 0;
}

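/*
 * Note that executable and writable are mutually exclusive here, and that
 * device memory can never be executable: EL2 mappings follow a strict W^X
 * policy. A typical RW mapping of normal memory ends up with AP=RW, inner
 * shareable, the access flag set and XN set.
 */
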
enum kvm_pgtable_prot kvm_pgtable_hyp_pte_prot(kvm_pte_t pte)
{
	enum kvm_pgtable_prot prot = pte & KVM_PTE_LEAF_ATTR_HI_SW;
	u32 ap;

	if (!kvm_pte_valid(pte))
		return prot;

	if (!(pte & KVM_PTE_LEAF_ATTR_HI_S1_XN))
		prot |= KVM_PGTABLE_PROT_X;

	ap = FIELD_GET(KVM_PTE_LEAF_ATTR_LO_S1_AP, pte);
	if (ap == KVM_PTE_LEAF_ATTR_LO_S1_AP_RO)
		prot |= KVM_PGTABLE_PROT_R;
	else if (ap == KVM_PTE_LEAF_ATTR_LO_S1_AP_RW)
		prot |= KVM_PGTABLE_PROT_RW;

	return prot;
}

static bool hyp_map_walker_try_leaf(const struct kvm_pgtable_visit_ctx *ctx,
				    struct hyp_map_data *data)
{
	u64 phys = data->phys + (ctx->addr - ctx->start);
	kvm_pte_t new;

	if (!kvm_block_mapping_supported(ctx, phys))
		return false;

	new = kvm_init_valid_leaf_pte(phys, data->attr, ctx->level);
	if (ctx->old == new)
		return true;
	if (!kvm_pte_valid(ctx->old))
		ctx->mm_ops->get_page(ctx->ptep);
	else if (WARN_ON((ctx->old ^ new) & ~KVM_PTE_LEAF_ATTR_HI_SW))
		return false;

	smp_store_release(ctx->ptep, new);
	return true;
}

static int hyp_map_walker(const struct kvm_pgtable_visit_ctx *ctx,
			  enum kvm_pgtable_walk_flags visit)
{
	kvm_pte_t *childp, new;
	struct hyp_map_data *data = ctx->arg;
	struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;

	if (hyp_map_walker_try_leaf(ctx, data))
		return 0;

	if (WARN_ON(ctx->level == KVM_PGTABLE_MAX_LEVELS - 1))
		return -EINVAL;

	childp = (kvm_pte_t *)mm_ops->zalloc_page(NULL);
	if (!childp)
		return -ENOMEM;

	new = kvm_init_table_pte(childp, mm_ops);
	mm_ops->get_page(ctx->ptep);
	smp_store_release(ctx->ptep, new);

	return 0;
}

int kvm_pgtable_hyp_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys,
			enum kvm_pgtable_prot prot)
{
	int ret;
	struct hyp_map_data map_data = {
		.phys	= ALIGN_DOWN(phys, PAGE_SIZE),
	};
	struct kvm_pgtable_walker walker = {
		.cb	= hyp_map_walker,
		.flags	= KVM_PGTABLE_WALK_LEAF,
		.arg	= &map_data,
	};

	ret = hyp_set_prot_attr(prot, &map_data.attr);
	if (ret)
		return ret;

	ret = kvm_pgtable_walk(pgt, addr, size, &walker);
	dsb(ishst);
	isb();
	return ret;
}

static int hyp_unmap_walker(const struct kvm_pgtable_visit_ctx *ctx,
			    enum kvm_pgtable_walk_flags visit)
{
	kvm_pte_t *childp = NULL;
	u64 granule = kvm_granule_size(ctx->level);
	u64 *unmapped = ctx->arg;
	struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;

	if (!kvm_pte_valid(ctx->old))
		return -EINVAL;

	if (kvm_pte_table(ctx->old, ctx->level)) {
		childp = kvm_pte_follow(ctx->old, mm_ops);

		if (mm_ops->page_count(childp) != 1)
			return 0;

		kvm_clear_pte(ctx->ptep);
		dsb(ishst);
		__tlbi_level(vae2is, __TLBI_VADDR(ctx->addr, 0), ctx->level);
	} else {
		if (ctx->end - ctx->addr < granule)
			return -EINVAL;

		kvm_clear_pte(ctx->ptep);
		dsb(ishst);
		__tlbi_level(vale2is, __TLBI_VADDR(ctx->addr, 0), ctx->level);
		*unmapped += granule;
	}

	dsb(ish);
	isb();
	mm_ops->put_page(ctx->ptep);

	if (childp)
		mm_ops->put_page(childp);

	return 0;
}

u64 kvm_pgtable_hyp_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size)
{
	u64 unmapped = 0;
	struct kvm_pgtable_walker walker = {
		.cb	= hyp_unmap_walker,
		.arg	= &unmapped,
		.flags	= KVM_PGTABLE_WALK_LEAF | KVM_PGTABLE_WALK_TABLE_POST,
	};

	if (!pgt->mm_ops->page_count)
		return 0;

	kvm_pgtable_walk(pgt, addr, size, &walker);
	return unmapped;
}

int kvm_pgtable_hyp_init(struct kvm_pgtable *pgt, u32 va_bits,
			 struct kvm_pgtable_mm_ops *mm_ops)
{
	u64 levels = ARM64_HW_PGTABLE_LEVELS(va_bits);

	pgt->pgd = (kvm_pteref_t)mm_ops->zalloc_page(NULL);
	if (!pgt->pgd)
		return -ENOMEM;

	pgt->ia_bits		= va_bits;
	pgt->start_level	= KVM_PGTABLE_MAX_LEVELS - levels;
	pgt->mm_ops		= mm_ops;
	pgt->mmu		= NULL;
	pgt->force_pte_cb	= NULL;

	return 0;
}

static int hyp_free_walker(const struct kvm_pgtable_visit_ctx *ctx,
			   enum kvm_pgtable_walk_flags visit)
{
	struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;

	if (!kvm_pte_valid(ctx->old))
		return 0;

	mm_ops->put_page(ctx->ptep);

	if (kvm_pte_table(ctx->old, ctx->level))
		mm_ops->put_page(kvm_pte_follow(ctx->old, mm_ops));

	return 0;
}

void kvm_pgtable_hyp_destroy(struct kvm_pgtable *pgt)
{
	struct kvm_pgtable_walker walker = {
		.cb	= hyp_free_walker,
		.flags	= KVM_PGTABLE_WALK_LEAF | KVM_PGTABLE_WALK_TABLE_POST,
	};

	WARN_ON(kvm_pgtable_walk(pgt, 0, BIT(pgt->ia_bits), &walker));
	pgt->mm_ops->put_page(kvm_dereference_pteref(&walker, pgt->pgd));
	pgt->pgd = NULL;
}

struct stage2_map_data {
	u64				phys;
	kvm_pte_t			attr;
	u8				owner_id;

	struct kvm_s2_mmu		*mmu;
	void				*memcache;

	/* Force mappings to page granularity */
	bool				force_pte;
};

u64 kvm_get_vtcr(u64 mmfr0, u64 mmfr1, u32 phys_shift)
{
	u64 vtcr = VTCR_EL2_FLAGS;
	u8 lvls;

	vtcr |= kvm_get_parange(mmfr0) << VTCR_EL2_PS_SHIFT;
	vtcr |= VTCR_EL2_T0SZ(phys_shift);
	/*
	 * Use a minimum 2 level page table to prevent splitting
	 * host PMD huge pages at stage2.
	 */
	lvls = stage2_pgtable_levels(phys_shift);
	if (lvls < 2)
		lvls = 2;
	vtcr |= VTCR_EL2_LVLS_TO_SL0(lvls);

#ifdef CONFIG_ARM64_HW_AFDBM
	/*
	 * Enable the Hardware Access Flag management, unconditionally
	 * on all CPUs. The feature is RES0 on CPUs without the support
	 * and must be ignored by the CPUs.
	 */
	vtcr |= VTCR_EL2_HA;
#endif /* CONFIG_ARM64_HW_AFDBM */

	/* Set the vmid bits */
	vtcr |= (get_vmid_bits(mmfr1) == 16) ?
		VTCR_EL2_VS_16BIT :
		VTCR_EL2_VS_8BIT;

	return vtcr;
}

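/*
 * The resulting VTCR_EL2 value therefore encodes the physical address range
 * (PS), the IPA size (T0SZ), the starting level (SL0), hardware access-flag
 * management (HA) where available, and the VMID width (VS).
 */
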
static bool stage2_has_fwb(struct kvm_pgtable *pgt)
{
	if (!cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
		return false;

	return !(pgt->flags & KVM_PGTABLE_S2_NOFWB);
}

#define KVM_S2_MEMATTR(pgt, attr) PAGE_S2_MEMATTR(attr, stage2_has_fwb(pgt))

static int stage2_set_prot_attr(struct kvm_pgtable *pgt, enum kvm_pgtable_prot prot,
				kvm_pte_t *ptep)
{
	bool device = prot & KVM_PGTABLE_PROT_DEVICE;
	kvm_pte_t attr = device ? KVM_S2_MEMATTR(pgt, DEVICE_nGnRE) :
			    KVM_S2_MEMATTR(pgt, NORMAL);
	u32 sh = KVM_PTE_LEAF_ATTR_LO_S2_SH_IS;

	if (!(prot & KVM_PGTABLE_PROT_X))
		attr |= KVM_PTE_LEAF_ATTR_HI_S2_XN;
	else if (device)
		return -EINVAL;

	if (prot & KVM_PGTABLE_PROT_R)
		attr |= KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R;

	if (prot & KVM_PGTABLE_PROT_W)
		attr |= KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W;

	attr |= FIELD_PREP(KVM_PTE_LEAF_ATTR_LO_S2_SH, sh);
	attr |= KVM_PTE_LEAF_ATTR_LO_S2_AF;
	attr |= prot & KVM_PTE_LEAF_ATTR_HI_SW;
	*ptep = attr;

	return 0;
}

enum kvm_pgtable_prot kvm_pgtable_stage2_pte_prot(kvm_pte_t pte)
{
	enum kvm_pgtable_prot prot = pte & KVM_PTE_LEAF_ATTR_HI_SW;

	if (!kvm_pte_valid(pte))
		return prot;

	if (pte & KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R)
		prot |= KVM_PGTABLE_PROT_R;
	if (pte & KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W)
		prot |= KVM_PGTABLE_PROT_W;
	if (!(pte & KVM_PTE_LEAF_ATTR_HI_S2_XN))
		prot |= KVM_PGTABLE_PROT_X;

	return prot;
}

static bool stage2_pte_needs_update(kvm_pte_t old, kvm_pte_t new)
{
	if (!kvm_pte_valid(old) || !kvm_pte_valid(new))
		return true;

	return ((old ^ new) & (~KVM_PTE_LEAF_ATTR_S2_PERMS));
}

static bool stage2_pte_is_counted(kvm_pte_t pte)
{
	/*
	 * The refcount tracks valid entries as well as invalid entries if they
	 * encode ownership of a page to another entity than the page-table
	 * owner, whose id is 0.
	 */
	return !!pte;
}

static bool stage2_pte_is_locked(kvm_pte_t pte)
{
	return !kvm_pte_valid(pte) && (pte & KVM_INVALID_PTE_LOCKED);
}

static bool stage2_try_set_pte(const struct kvm_pgtable_visit_ctx *ctx, kvm_pte_t new)
{
	if (!kvm_pgtable_walk_shared(ctx)) {
		WRITE_ONCE(*ctx->ptep, new);
		return true;
	}

	return cmpxchg(ctx->ptep, ctx->old, new) == ctx->old;
}

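/*
 * Exclusive walkers can update the PTE with a plain store, but walkers that
 * run concurrently (KVM_PGTABLE_WALK_SHARED) must cmpxchg() against the value
 * they read earlier, so a racing update simply makes the update fail and the
 * caller retry.
 */
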
/**
 * stage2_try_break_pte() - Invalidates a pte according to the
 *			    'break-before-make' requirements of the
 *			    architecture.
 *
 * @ctx: context of the visited pte.
 * @mmu: stage-2 mmu
 *
 * Returns: true if the pte was successfully broken.
 *
 * If the removed pte was valid, performs the necessary serialization and TLB
 * invalidation for the old value. For counted ptes, drops the reference count
 * on the containing table page.
 */
static bool stage2_try_break_pte(const struct kvm_pgtable_visit_ctx *ctx,
				 struct kvm_s2_mmu *mmu)
{
	struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;

	if (stage2_pte_is_locked(ctx->old)) {
		/*
		 * Should never occur if this walker has exclusive access to the
		 * page tables.
		 */
		WARN_ON(!kvm_pgtable_walk_shared(ctx));
		return false;
	}

	if (!stage2_try_set_pte(ctx, KVM_INVALID_PTE_LOCKED))
		return false;

	/*
	 * Perform the appropriate TLB invalidation based on the evicted pte
	 * value (if any).
	 */
	if (kvm_pte_table(ctx->old, ctx->level))
		kvm_call_hyp(__kvm_tlb_flush_vmid, mmu);
	else if (kvm_pte_valid(ctx->old))
		kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu, ctx->addr, ctx->level);

	if (stage2_pte_is_counted(ctx->old))
		mm_ops->put_page(ctx->ptep);

	return true;
}

static void stage2_make_pte(const struct kvm_pgtable_visit_ctx *ctx, kvm_pte_t new)
{
	struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;

	WARN_ON(!stage2_pte_is_locked(*ctx->ptep));

	if (stage2_pte_is_counted(new))
		mm_ops->get_page(ctx->ptep);

	smp_store_release(ctx->ptep, new);
}

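/*
 * stage2_try_break_pte() and stage2_make_pte() bracket a break-before-make
 * sequence: the entry is first replaced with the KVM_INVALID_PTE_LOCKED
 * marker and the TLB is invalidated, after which the new entry is published
 * with a release store.
 */
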
static void stage2_put_pte(const struct kvm_pgtable_visit_ctx *ctx, struct kvm_s2_mmu *mmu,
			   struct kvm_pgtable_mm_ops *mm_ops)
{
	/*
	 * Clear the existing PTE, and perform break-before-make with
	 * TLB maintenance if it was valid.
	 */
	if (kvm_pte_valid(ctx->old)) {
		kvm_clear_pte(ctx->ptep);
		kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu, ctx->addr, ctx->level);
	}

	mm_ops->put_page(ctx->ptep);
}

static bool stage2_pte_cacheable(struct kvm_pgtable *pgt, kvm_pte_t pte)
{
	u64 memattr = pte & KVM_PTE_LEAF_ATTR_LO_S2_MEMATTR;
	return memattr == KVM_S2_MEMATTR(pgt, NORMAL);
}

static bool stage2_pte_executable(kvm_pte_t pte)
{
	return !(pte & KVM_PTE_LEAF_ATTR_HI_S2_XN);
}

static u64 stage2_map_walker_phys_addr(const struct kvm_pgtable_visit_ctx *ctx,
				       const struct stage2_map_data *data)
{
	u64 phys = data->phys;

	/*
	 * Stage-2 walks to update ownership data are communicated to the map
	 * walker using an invalid PA. Avoid offsetting an already invalid PA,
	 * which could overflow and make the address valid again.
	 */
	if (!kvm_phys_is_valid(phys))
		return phys;

	/*
	 * Otherwise, work out the correct PA based on how far the walk has
	 * gotten.
	 */
	return phys + (ctx->addr - ctx->start);
}

static bool stage2_leaf_mapping_allowed(const struct kvm_pgtable_visit_ctx *ctx,
					struct stage2_map_data *data)
{
	u64 phys = stage2_map_walker_phys_addr(ctx, data);

	if (data->force_pte && (ctx->level < (KVM_PGTABLE_MAX_LEVELS - 1)))
		return false;

	return kvm_block_mapping_supported(ctx, phys);
}

static int stage2_map_walker_try_leaf(const struct kvm_pgtable_visit_ctx *ctx,
				      struct stage2_map_data *data)
{
	kvm_pte_t new;
	u64 phys = stage2_map_walker_phys_addr(ctx, data);
	u64 granule = kvm_granule_size(ctx->level);
	struct kvm_pgtable *pgt = data->mmu->pgt;
	struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;

	if (!stage2_leaf_mapping_allowed(ctx, data))
		return -E2BIG;

	if (kvm_phys_is_valid(phys))
		new = kvm_init_valid_leaf_pte(phys, data->attr, ctx->level);
	else
		new = kvm_init_invalid_leaf_owner(data->owner_id);

	/*
	 * Skip updating the PTE if we are trying to recreate the exact
	 * same mapping or only change the access permissions. Instead,
	 * the vCPU will exit one more time from guest if still needed
	 * and then go through the path of relaxing permissions.
	 */
	if (!stage2_pte_needs_update(ctx->old, new))
		return -EAGAIN;

	if (!stage2_try_break_pte(ctx, data->mmu))
		return -EAGAIN;

	/* Perform CMOs before installation of the guest stage-2 PTE */
	if (mm_ops->dcache_clean_inval_poc && stage2_pte_cacheable(pgt, new))
		mm_ops->dcache_clean_inval_poc(kvm_pte_follow(new, mm_ops),
					       granule);

	if (mm_ops->icache_inval_pou && stage2_pte_executable(new))
		mm_ops->icache_inval_pou(kvm_pte_follow(new, mm_ops), granule);

	stage2_make_pte(ctx, new);

	return 0;
}

static int stage2_map_walk_table_pre(const struct kvm_pgtable_visit_ctx *ctx,
				     struct stage2_map_data *data)
{
	struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
	kvm_pte_t *childp = kvm_pte_follow(ctx->old, mm_ops);
	int ret;

	if (!stage2_leaf_mapping_allowed(ctx, data))
		return 0;

	ret = stage2_map_walker_try_leaf(ctx, data);
	if (ret)
		return ret;

	mm_ops->free_removed_table(childp, ctx->level);
	return 0;
}

static int stage2_map_walk_leaf(const struct kvm_pgtable_visit_ctx *ctx,
				struct stage2_map_data *data)
{
	struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
	kvm_pte_t *childp, new;
	int ret;

	ret = stage2_map_walker_try_leaf(ctx, data);
	if (ret != -E2BIG)
		return ret;

	if (WARN_ON(ctx->level == KVM_PGTABLE_MAX_LEVELS - 1))
		return -EINVAL;

	if (!data->memcache)
		return -ENOMEM;

	childp = mm_ops->zalloc_page(data->memcache);
	if (!childp)
		return -ENOMEM;

	if (!stage2_try_break_pte(ctx, data->mmu)) {
		mm_ops->put_page(childp);
		return -EAGAIN;
	}

	/*
	 * If we've run into an existing block mapping then replace it with
	 * a table. Accesses beyond 'end' that fall within the new table
	 * will be mapped lazily.
	 */
	new = kvm_init_table_pte(childp, mm_ops);
	stage2_make_pte(ctx, new);

	return 0;
}

/*
 * The TABLE_PRE callback runs for table entries on the way down, looking
 * for table entries which we could conceivably replace with a block entry
 * for this mapping. If it finds one it replaces the entry and calls
 * kvm_pgtable_mm_ops::free_removed_table() to tear down the detached table.
 *
 * Otherwise, the LEAF callback performs the mapping at the existing leaves
 * instead of creating new ones.
 */
static int stage2_map_walker(const struct kvm_pgtable_visit_ctx *ctx,
			     enum kvm_pgtable_walk_flags visit)
{
	struct stage2_map_data *data = ctx->arg;

	switch (visit) {
	case KVM_PGTABLE_WALK_TABLE_PRE:
		return stage2_map_walk_table_pre(ctx, data);
	case KVM_PGTABLE_WALK_LEAF:
		return stage2_map_walk_leaf(ctx, data);
	default:
		return -EINVAL;
	}
}

int kvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size,
			   u64 phys, enum kvm_pgtable_prot prot,
			   void *mc, enum kvm_pgtable_walk_flags flags)
{
	int ret;
	struct stage2_map_data map_data = {
		.phys		= ALIGN_DOWN(phys, PAGE_SIZE),
		.mmu		= pgt->mmu,
		.memcache	= mc,
		.force_pte	= pgt->force_pte_cb && pgt->force_pte_cb(addr, addr + size, prot),
	};
	struct kvm_pgtable_walker walker = {
		.cb		= stage2_map_walker,
		.flags		= flags |
				  KVM_PGTABLE_WALK_TABLE_PRE |
				  KVM_PGTABLE_WALK_LEAF,
		.arg		= &map_data,
	};

	if (WARN_ON((pgt->flags & KVM_PGTABLE_S2_IDMAP) && (addr != phys)))
		return -EINVAL;

	ret = stage2_set_prot_attr(pgt, prot, &map_data.attr);
	if (ret)
		return ret;

	ret = kvm_pgtable_walk(pgt, addr, size, &walker);
	dsb(ishst);
	return ret;
}

int kvm_pgtable_stage2_set_owner(struct kvm_pgtable *pgt, u64 addr, u64 size,
				 void *mc, u8 owner_id)
{
	int ret;
	struct stage2_map_data map_data = {
		.phys		= KVM_PHYS_INVALID,
		.mmu		= pgt->mmu,
		.memcache	= mc,
		.owner_id	= owner_id,
		.force_pte	= true,
	};
	struct kvm_pgtable_walker walker = {
		.cb		= stage2_map_walker,
		.flags		= KVM_PGTABLE_WALK_TABLE_PRE |
				  KVM_PGTABLE_WALK_LEAF,
		.arg		= &map_data,
	};

	if (owner_id > KVM_MAX_OWNER_ID)
		return -EINVAL;

	ret = kvm_pgtable_walk(pgt, addr, size, &walker);
	return ret;
}

static int stage2_unmap_walker(const struct kvm_pgtable_visit_ctx *ctx,
			       enum kvm_pgtable_walk_flags visit)
{
	struct kvm_pgtable *pgt = ctx->arg;
	struct kvm_s2_mmu *mmu = pgt->mmu;
	struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
	kvm_pte_t *childp = NULL;
	bool need_flush = false;

	if (!kvm_pte_valid(ctx->old)) {
		if (stage2_pte_is_counted(ctx->old)) {
			kvm_clear_pte(ctx->ptep);
			mm_ops->put_page(ctx->ptep);
		}
		return 0;
	}

	if (kvm_pte_table(ctx->old, ctx->level)) {
		childp = kvm_pte_follow(ctx->old, mm_ops);

		if (mm_ops->page_count(childp) != 1)
			return 0;
	} else if (stage2_pte_cacheable(pgt, ctx->old)) {
		need_flush = !stage2_has_fwb(pgt);
	}

	/*
	 * This is similar to the map() path in that we unmap the entire
	 * block entry and rely on the remaining portions being faulted
	 * back lazily.
	 */
	stage2_put_pte(ctx, mmu, mm_ops);

	if (need_flush && mm_ops->dcache_clean_inval_poc)
		mm_ops->dcache_clean_inval_poc(kvm_pte_follow(ctx->old, mm_ops),
					       kvm_granule_size(ctx->level));

	if (childp)
		mm_ops->put_page(childp);

	return 0;
}

int kvm_pgtable_stage2_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size)
{
	struct kvm_pgtable_walker walker = {
		.cb	= stage2_unmap_walker,
		.arg	= pgt,
		.flags	= KVM_PGTABLE_WALK_LEAF | KVM_PGTABLE_WALK_TABLE_POST,
	};

	return kvm_pgtable_walk(pgt, addr, size, &walker);
}

struct stage2_attr_data {
	kvm_pte_t			attr_set;
	kvm_pte_t			attr_clr;
	kvm_pte_t			pte;
	u32				level;
};

static int stage2_attr_walker(const struct kvm_pgtable_visit_ctx *ctx,
			      enum kvm_pgtable_walk_flags visit)
{
	kvm_pte_t pte = ctx->old;
	struct stage2_attr_data *data = ctx->arg;
	struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;

	if (!kvm_pte_valid(ctx->old))
		return -EAGAIN;

	data->level = ctx->level;
	data->pte = pte;
	pte &= ~data->attr_clr;
	pte |= data->attr_set;

	/*
	 * We may race with the CPU trying to set the access flag here,
	 * but worst-case the access flag update gets lost and will be
	 * set on the next access instead.
	 */
	if (data->pte != pte) {
		/*
		 * Invalidate instruction cache before updating the guest
		 * stage-2 PTE if we are going to add executable permission.
		 */
		if (mm_ops->icache_inval_pou &&
		    stage2_pte_executable(pte) && !stage2_pte_executable(ctx->old))
			mm_ops->icache_inval_pou(kvm_pte_follow(pte, mm_ops),
						 kvm_granule_size(ctx->level));

		if (!stage2_try_set_pte(ctx, pte))
			return -EAGAIN;
	}

	return 0;
}

static int stage2_update_leaf_attrs(struct kvm_pgtable *pgt, u64 addr,
				    u64 size, kvm_pte_t attr_set,
				    kvm_pte_t attr_clr, kvm_pte_t *orig_pte,
				    u32 *level, enum kvm_pgtable_walk_flags flags)
{
	int ret;
	kvm_pte_t attr_mask = KVM_PTE_LEAF_ATTR_LO | KVM_PTE_LEAF_ATTR_HI;
	struct stage2_attr_data data = {
		.attr_set	= attr_set & attr_mask,
		.attr_clr	= attr_clr & attr_mask,
	};
	struct kvm_pgtable_walker walker = {
		.cb		= stage2_attr_walker,
		.arg		= &data,
		.flags		= flags | KVM_PGTABLE_WALK_LEAF,
	};

	ret = kvm_pgtable_walk(pgt, addr, size, &walker);
	if (ret)
		return ret;

	if (orig_pte)
		*orig_pte = data.pte;

	if (level)
		*level = data.level;

	return 0;
}

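/*
 * The helpers below are thin wrappers around stage2_update_leaf_attrs():
 * write-protection clears S2AP_W over a range, while mkyoung/mkold/is_young
 * set, clear or test the access flag on a single address.
 */
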
int kvm_pgtable_stage2_wrprotect(struct kvm_pgtable *pgt, u64 addr, u64 size)
{
	return stage2_update_leaf_attrs(pgt, addr, size, 0,
					KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W,
					NULL, NULL, 0);
}

kvm_pte_t kvm_pgtable_stage2_mkyoung(struct kvm_pgtable *pgt, u64 addr)
{
	kvm_pte_t pte = 0;
	int ret;

	ret = stage2_update_leaf_attrs(pgt, addr, 1, KVM_PTE_LEAF_ATTR_LO_S2_AF, 0,
				       &pte, NULL,
				       KVM_PGTABLE_WALK_HANDLE_FAULT |
				       KVM_PGTABLE_WALK_SHARED);
	if (!ret)
		dsb(ishst);

	return pte;
}

kvm_pte_t kvm_pgtable_stage2_mkold(struct kvm_pgtable *pgt, u64 addr)
{
	kvm_pte_t pte = 0;
	stage2_update_leaf_attrs(pgt, addr, 1, 0, KVM_PTE_LEAF_ATTR_LO_S2_AF,
				 &pte, NULL, 0);
	/*
	 * "But where's the TLBI?!", you scream.
	 * "Over in the core code", I sigh.
	 *
	 * See the '->clear_flush_young()' callback on the KVM mmu notifier.
	 */
	return pte;
}

bool kvm_pgtable_stage2_is_young(struct kvm_pgtable *pgt, u64 addr)
{
	kvm_pte_t pte = 0;
	stage2_update_leaf_attrs(pgt, addr, 1, 0, 0, &pte, NULL, 0);
	return pte & KVM_PTE_LEAF_ATTR_LO_S2_AF;
}

int kvm_pgtable_stage2_relax_perms(struct kvm_pgtable *pgt, u64 addr,
				   enum kvm_pgtable_prot prot)
{
	int ret;
	u32 level;
	kvm_pte_t set = 0, clr = 0;

	if (prot & KVM_PTE_LEAF_ATTR_HI_SW)
		return -EINVAL;

	if (prot & KVM_PGTABLE_PROT_R)
		set |= KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R;

	if (prot & KVM_PGTABLE_PROT_W)
		set |= KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W;

	if (prot & KVM_PGTABLE_PROT_X)
		clr |= KVM_PTE_LEAF_ATTR_HI_S2_XN;

	ret = stage2_update_leaf_attrs(pgt, addr, 1, set, clr, NULL, &level,
				       KVM_PGTABLE_WALK_HANDLE_FAULT |
				       KVM_PGTABLE_WALK_SHARED);
	if (!ret)
		kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, pgt->mmu, addr, level);
	return ret;
}

static int stage2_flush_walker(const struct kvm_pgtable_visit_ctx *ctx,
			       enum kvm_pgtable_walk_flags visit)
{
	struct kvm_pgtable *pgt = ctx->arg;
	struct kvm_pgtable_mm_ops *mm_ops = pgt->mm_ops;

	if (!kvm_pte_valid(ctx->old) || !stage2_pte_cacheable(pgt, ctx->old))
		return 0;

	if (mm_ops->dcache_clean_inval_poc)
		mm_ops->dcache_clean_inval_poc(kvm_pte_follow(ctx->old, mm_ops),
					       kvm_granule_size(ctx->level));
	return 0;
}

int kvm_pgtable_stage2_flush(struct kvm_pgtable *pgt, u64 addr, u64 size)
{
	struct kvm_pgtable_walker walker = {
		.cb	= stage2_flush_walker,
		.flags	= KVM_PGTABLE_WALK_LEAF,
		.arg	= pgt,
	};

	if (stage2_has_fwb(pgt))
		return 0;

	return kvm_pgtable_walk(pgt, addr, size, &walker);
}

int __kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu,
			      struct kvm_pgtable_mm_ops *mm_ops,
			      enum kvm_pgtable_stage2_flags flags,
			      kvm_pgtable_force_pte_cb_t force_pte_cb)
{
	size_t pgd_sz;
	u64 vtcr = mmu->arch->vtcr;
	u32 ia_bits = VTCR_EL2_IPA(vtcr);
	u32 sl0 = FIELD_GET(VTCR_EL2_SL0_MASK, vtcr);
	u32 start_level = VTCR_EL2_TGRAN_SL0_BASE - sl0;

	pgd_sz = kvm_pgd_pages(ia_bits, start_level) * PAGE_SIZE;
	pgt->pgd = (kvm_pteref_t)mm_ops->zalloc_pages_exact(pgd_sz);
	if (!pgt->pgd)
		return -ENOMEM;

	pgt->ia_bits		= ia_bits;
	pgt->start_level	= start_level;
	pgt->mm_ops		= mm_ops;
	pgt->mmu		= mmu;
	pgt->flags		= flags;
	pgt->force_pte_cb	= force_pte_cb;

	/* Ensure zeroed PGD pages are visible to the hardware walker */
	dsb(ishst);
	return 0;
}

size_t kvm_pgtable_stage2_pgd_size(u64 vtcr)
{
	u32 ia_bits = VTCR_EL2_IPA(vtcr);
	u32 sl0 = FIELD_GET(VTCR_EL2_SL0_MASK, vtcr);
	u32 start_level = VTCR_EL2_TGRAN_SL0_BASE - sl0;

	return kvm_pgd_pages(ia_bits, start_level) * PAGE_SIZE;
}

static int stage2_free_walker(const struct kvm_pgtable_visit_ctx *ctx,
			      enum kvm_pgtable_walk_flags visit)
{
	struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;

	if (!stage2_pte_is_counted(ctx->old))
		return 0;

	mm_ops->put_page(ctx->ptep);

	if (kvm_pte_table(ctx->old, ctx->level))
		mm_ops->put_page(kvm_pte_follow(ctx->old, mm_ops));

	return 0;
}

void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt)
{
	size_t pgd_sz;
	struct kvm_pgtable_walker walker = {
		.cb	= stage2_free_walker,
		.flags	= KVM_PGTABLE_WALK_LEAF |
			  KVM_PGTABLE_WALK_TABLE_POST,
	};

	WARN_ON(kvm_pgtable_walk(pgt, 0, BIT(pgt->ia_bits), &walker));
	pgd_sz = kvm_pgd_pages(pgt->ia_bits, pgt->start_level) * PAGE_SIZE;
	pgt->mm_ops->free_pages_exact(kvm_dereference_pteref(&walker, pgt->pgd), pgd_sz);
	pgt->pgd = NULL;
}

void kvm_pgtable_stage2_free_removed(struct kvm_pgtable_mm_ops *mm_ops, void *pgtable, u32 level)
{
	kvm_pteref_t ptep = (kvm_pteref_t)pgtable;
	struct kvm_pgtable_walker walker = {
		.cb	= stage2_free_walker,
		.flags	= KVM_PGTABLE_WALK_LEAF |
			  KVM_PGTABLE_WALK_TABLE_POST,
	};
	struct kvm_pgtable_walk_data data = {
		.walker	= &walker,

		/*
		 * At this point the IPA really doesn't matter, as the page
		 * table being traversed has already been removed from the stage
		 * 2. Set an appropriate range to cover the entire page table.
		 */
		.addr	= 0,
		.end	= kvm_granule_size(level),
	};

	WARN_ON(__kvm_pgtable_walk(&data, mm_ops, ptep, level + 1));
}