powerpc/kvm/book3s: Add helper to walk partition scoped linux page table.
author     Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
           Tue, 5 May 2020 07:17:16 +0000 (12:47 +0530)
committer  Michael Ellerman <mpe@ellerman.id.au>
           Tue, 5 May 2020 11:20:15 +0000 (21:20 +1000)
The locking rules for walking a partition scoped table are different from those for a
process scoped table. Hence add a helper for the secondary (partition scoped) linux page
table walk, and also add a check that we are holding the right lock.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20200505071729.54912-10-aneesh.kumar@linux.ibm.com
arch/powerpc/include/asm/kvm_book3s_64.h
arch/powerpc/kvm/book3s_64_mmu_radix.c
arch/powerpc/kvm/book3s_hv_nested.c

arch/powerpc/include/asm/kvm_book3s_64.h
index 04b2b92..2c26359 100644
@@ -14,6 +14,7 @@
 #include <asm/book3s/64/mmu-hash.h>
 #include <asm/cpu_has_feature.h>
 #include <asm/ppc-opcode.h>
+#include <asm/pte-walk.h>
 
 #ifdef CONFIG_PPC_PSERIES
 static inline bool kvmhv_on_pseries(void)
@@ -634,6 +635,18 @@ extern void kvmhv_remove_nest_rmap_range(struct kvm *kvm,
                                unsigned long gpa, unsigned long hpa,
                                unsigned long nbytes);
 
+static inline pte_t *find_kvm_secondary_pte(struct kvm *kvm, unsigned long ea,
+                                           unsigned *hshift)
+{
+       pte_t *pte;
+
+       VM_WARN(!spin_is_locked(&kvm->mmu_lock),
+               "%s called with kvm mmu_lock not held \n", __func__);
+       pte = __find_linux_pte(kvm->arch.pgtable, ea, NULL, hshift);
+
+       return pte;
+}
+
 #endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
 
 #endif /* __ASM_KVM_BOOK3S_64_H__ */
arch/powerpc/kvm/book3s_64_mmu_radix.c
index aa12cd4..c92d413 100644
@@ -981,11 +981,11 @@ int kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
                return 0;
        }
 
-       ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
+       ptep = find_kvm_secondary_pte(kvm, gpa, &shift);
        if (ptep && pte_present(*ptep))
                kvmppc_unmap_pte(kvm, ptep, gpa, shift, memslot,
                                 kvm->arch.lpid);
-       return 0;                               
+       return 0;
 }
 
 /* Called with kvm->mmu_lock held */
@@ -1001,7 +1001,7 @@ int kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
        if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
                return ref;
 
-       ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
+       ptep = find_kvm_secondary_pte(kvm, gpa, &shift);
        if (ptep && pte_present(*ptep) && pte_young(*ptep)) {
                old = kvmppc_radix_update_pte(kvm, ptep, _PAGE_ACCESSED, 0,
                                              gpa, shift);
@@ -1028,7 +1028,7 @@ int kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
        if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
                return ref;
 
-       ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
+       ptep = find_kvm_secondary_pte(kvm, gpa, &shift);
        if (ptep && pte_present(*ptep) && pte_young(*ptep))
                ref = 1;
        return ref;
@@ -1048,7 +1048,7 @@ static int kvm_radix_test_clear_dirty(struct kvm *kvm,
        if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
                return ret;
 
-       ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
+       ptep = find_kvm_secondary_pte(kvm, gpa, &shift);
        if (ptep && pte_present(*ptep) && pte_dirty(*ptep)) {
                ret = 1;
                if (shift)
@@ -1109,7 +1109,7 @@ void kvmppc_radix_flush_memslot(struct kvm *kvm,
        gpa = memslot->base_gfn << PAGE_SHIFT;
        spin_lock(&kvm->mmu_lock);
        for (n = memslot->npages; n; --n) {
-               ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
+               ptep = find_kvm_secondary_pte(kvm, gpa, &shift);
                if (ptep && pte_present(*ptep))
                        kvmppc_unmap_pte(kvm, ptep, gpa, shift, memslot,
                                         kvm->arch.lpid);
arch/powerpc/kvm/book3s_hv_nested.c
index dc97e5b..7f1fc5d 100644
@@ -1362,7 +1362,7 @@ static long int __kvmhv_nested_page_fault(struct kvm_run *run,
        /* See if can find translation in our partition scoped tables for L1 */
        pte = __pte(0);
        spin_lock(&kvm->mmu_lock);
-       pte_p = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
+       pte_p = find_kvm_secondary_pte(kvm, gpa, &shift);
        if (!shift)
                shift = PAGE_SHIFT;
        if (pte_p)