KVM: arm64: Add a helper to tear down unlinked stage-2 subtrees
author Oliver Upton <oliver.upton@linux.dev>
Mon, 7 Nov 2022 21:56:35 +0000 (21:56 +0000)
committer Marc Zyngier <maz@kernel.org>
Thu, 10 Nov 2022 14:43:46 +0000 (14:43 +0000)
A subsequent change to KVM will move the teardown of an unlinked
stage-2 subtree out of the critical path of the break-before-make
sequence.

Introduce a new helper for tearing down unlinked stage-2 subtrees.
Leverage the existing stage-2 free walkers to do so, with a deep call
into __kvm_pgtable_walk() as the subtree is no longer reachable from the
root.

Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20221107215644.1895162-6-oliver.upton@linux.dev
arch/arm64/include/asm/kvm_pgtable.h
arch/arm64/kvm/hyp/pgtable.c

index a752793..93b1fee 100644 (file)
@@ -334,6 +334,17 @@ int __kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu,
 void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt);
 
 /**
+ * kvm_pgtable_stage2_free_removed() - Free a removed stage-2 paging structure.
+ * @mm_ops:    Memory management callbacks.
+ * @pgtable:   Unlinked stage-2 paging structure to be freed.
+ * @level:     Level of the stage-2 paging structure to be freed.
+ *
+ * The page-table is assumed to be unreachable by any hardware walkers prior to
+ * freeing and therefore no TLB invalidation is performed.
+ */
+void kvm_pgtable_stage2_free_removed(struct kvm_pgtable_mm_ops *mm_ops, void *pgtable, u32 level);
+
+/**
  * kvm_pgtable_stage2_map() - Install a mapping in a guest stage-2 page-table.
  * @pgt:       Page-table structure initialised by kvm_pgtable_stage2_init*().
  * @addr:      Intermediate physical address at which to place the mapping.
index 93989b7..363a5cc 100644 (file)
@@ -1203,3 +1203,26 @@ void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt)
        pgt->mm_ops->free_pages_exact(pgt->pgd, pgd_sz);
        pgt->pgd = NULL;
 }
+
+void kvm_pgtable_stage2_free_removed(struct kvm_pgtable_mm_ops *mm_ops, void *pgtable, u32 level)
+{
+       kvm_pte_t *ptep = (kvm_pte_t *)pgtable;
+       struct kvm_pgtable_walker walker = {
+               .cb     = stage2_free_walker,
+               .flags  = KVM_PGTABLE_WALK_LEAF |
+                         KVM_PGTABLE_WALK_TABLE_POST,
+       };
+       struct kvm_pgtable_walk_data data = {
+               .walker = &walker,
+
+               /*
+                * At this point the IPA really doesn't matter, as the page
+                * table being traversed has already been removed from the stage
+                * 2. Set an appropriate range to cover the entire page table.
+                */
+               .addr   = 0,
+               .end    = kvm_granule_size(level),
+       };
+
+       WARN_ON(__kvm_pgtable_walk(&data, mm_ops, ptep, level));
+}
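
A minimal caller-side sketch (not part of this patch, and using a hypothetical wrapper name, stage2_reclaim_unlinked_table()) of how the new helper might be handed a subtree once the break-before-make sequence has already detached it from the live stage-2 tables:

	/*
	 * Hypothetical illustration only: by the time this runs, the table
	 * PTE pointing at @childp has been cleared and the TLBs invalidated
	 * as part of break-before-make, so the subtree is unreachable by the
	 * hardware walker and can be reclaimed off the critical path.
	 */
	static void stage2_reclaim_unlinked_table(struct kvm_pgtable *pgt,
						  kvm_pte_t *childp, u32 level)
	{
		/* No TLB invalidation here; the helper assumes none is needed. */
		kvm_pgtable_stage2_free_removed(pgt->mm_ops, childp, level);
	}

The key point the sketch illustrates is that kvm_pgtable_stage2_free_removed() only needs the mm_ops, the detached table page and its level; it deliberately takes no struct kvm_pgtable or IPA range, since the subtree is no longer reachable from the root.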