KVM: arm64: Add support for stage-2 cache flushing in generic page-table
authorQuentin Perret <qperret@google.com>
Fri, 11 Sep 2020 13:25:22 +0000 (14:25 +0100)
committerMarc Zyngier <maz@kernel.org>
Fri, 11 Sep 2020 14:51:14 +0000 (15:51 +0100)
Add support for cache flushing a range of the stage-2 address space to
the generic page-table code, in the form of a new kvm_pgtable_stage2_flush()
helper that cleans and invalidates the data cache for all cacheable leaf
mappings in the range, and does nothing when FWB is available.
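
For illustration, a minimal caller sketch (hypothetical, not part of this
patch): an MMU-side helper could flush a guest IPA range by handing it
straight to the new API. The helper name flush_guest_range() and the
mmu->pgt field are assumptions made for the example only:

  /*
   * Illustrative only: flush [addr, addr + size) of a guest's stage-2,
   * assuming mmu->pgt points at a struct kvm_pgtable that was set up
   * with kvm_pgtable_stage2_init() and is kept alive by the caller
   * (e.g. under the MMU lock).
   */
  static int flush_guest_range(struct kvm_s2_mmu *mmu, u64 addr, u64 size)
  {
          return kvm_pgtable_stage2_flush(mmu->pgt, addr, size);
  }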

Signed-off-by: Quentin Perret <qperret@google.com>
Signed-off-by: Will Deacon <will@kernel.org>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Reviewed-by: Gavin Shan <gshan@redhat.com>
Cc: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20200911132529.19844-15-will@kernel.org
arch/arm64/include/asm/kvm_pgtable.h
arch/arm64/kvm/hyp/pgtable.c

index 5ae6006a6098575e9ea2a67a8702eefc08c66cc5..77c027456c61408204dbecbdaae0c5dccf4d676a 100644 (file)
@@ -248,6 +248,21 @@ kvm_pte_t kvm_pgtable_stage2_mkold(struct kvm_pgtable *pgt, u64 addr);
  */
 bool kvm_pgtable_stage2_is_young(struct kvm_pgtable *pgt, u64 addr);
 
+/**
+ * kvm_pgtable_stage2_flush() - Clean and invalidate data cache to Point
+ *                             of Coherency for guest stage-2 address
+ *                             range.
+ * @pgt:       Page-table structure initialised by kvm_pgtable_stage2_init().
+ * @addr:      Intermediate physical address from which to flush.
+ * @size:      Size of the range.
+ *
+ * The offset of @addr within a page is ignored and @size is rounded up to
+ * the next page boundary.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int kvm_pgtable_stage2_flush(struct kvm_pgtable *pgt, u64 addr, u64 size);
+
 /**
  * kvm_pgtable_walk() - Walk a page-table.
  * @pgt:       Page-table structure initialised by kvm_pgtable_*_init().
index 480b95030f542102638795d025eaabb426d6e193..d382756a527c367e99c26d4015408ed50dc5a20b 100644 (file)
@@ -782,6 +782,32 @@ bool kvm_pgtable_stage2_is_young(struct kvm_pgtable *pgt, u64 addr)
        return pte & KVM_PTE_LEAF_ATTR_LO_S2_AF;
 }
 
+static int stage2_flush_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
+                              enum kvm_pgtable_walk_flags flag,
+                              void * const arg)
+{
+       kvm_pte_t pte = *ptep;
+
+       if (!kvm_pte_valid(pte) || !stage2_pte_cacheable(pte))
+               return 0;
+
+       stage2_flush_dcache(kvm_pte_follow(pte), kvm_granule_size(level));
+       return 0;
+}
+
+int kvm_pgtable_stage2_flush(struct kvm_pgtable *pgt, u64 addr, u64 size)
+{
+       struct kvm_pgtable_walker walker = {
+               .cb     = stage2_flush_walker,
+               .flags  = KVM_PGTABLE_WALK_LEAF,
+       };
+
+       if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
+               return 0;
+
+       return kvm_pgtable_walk(pgt, addr, size, &walker);
+}
+
 int kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm *kvm)
 {
        size_t pgd_sz;
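
For context, the walker above relies on two static helpers introduced earlier
in this series. A rough sketch of their expected behaviour follows; this is a
simplified reconstruction based on the surrounding arm64 code, not a verbatim
copy of the series:

  /*
   * Sketch: a leaf PTE is considered cacheable if its stage-2 memory
   * attributes select Normal memory; flushing then cleans and
   * invalidates the mapped granule to the Point of Coherency, unless
   * FWB makes the cache maintenance unnecessary.
   */
  static bool stage2_pte_cacheable(kvm_pte_t pte)
  {
          u64 memattr = pte & KVM_PTE_LEAF_ATTR_LO_S2_MEMATTR;

          return memattr == PAGE_S2_MEMATTR(NORMAL);
  }

  static void stage2_flush_dcache(void *addr, u64 size)
  {
          if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
                  return;

          __flush_dcache_area(addr, size);
  }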