powerpc/kvm/book3s: Use find_kvm_host_pte in h_enter
authorAneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Tue, 5 May 2020 07:17:21 +0000 (12:47 +0530)
committerMichael Ellerman <mpe@ellerman.id.au>
Tue, 5 May 2020 11:20:15 +0000 (21:20 +1000)
Since kvmppc_do_h_enter can be called in real mode, use the low-level
arch_spin_lock, which is safe to be called in real mode.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20200505071729.54912-15-aneesh.kumar@linux.ibm.com
arch/powerpc/kvm/book3s_64_mmu_hv.c
arch/powerpc/kvm/book3s_hv_rm_mmu.c

index 8f9cd4c79044256bc00a05207abf536a6b63d130..18aed9775a3c6cde9f0283ca99dd191dde72313b 100644 (file)
@@ -281,11 +281,10 @@ static long kvmppc_virtmode_do_h_enter(struct kvm *kvm, unsigned long flags,
 {
        long ret;
 
-       /* Protect linux PTE lookup from page table destruction */
-       rcu_read_lock_sched();  /* this disables preemption too */
+       preempt_disable();
        ret = kvmppc_do_h_enter(kvm, flags, pte_index, pteh, ptel,
                                kvm->mm->pgd, false, pte_idx_ret);
-       rcu_read_unlock_sched();
+       preempt_enable();
        if (ret == H_TOO_HARD) {
                /* this can't happen */
                pr_err("KVM: Oops, kvmppc_h_enter returned too hard!\n");
index 03f8347de48be47fe6cb72c5f735d28b94fe0b5e..83e987fecf975ff763088ca3bdd47bfc5b73ceda 100644 (file)
@@ -210,7 +210,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
        pte_t *ptep;
        unsigned int writing;
        unsigned long mmu_seq;
-       unsigned long rcbits, irq_flags = 0;
+       unsigned long rcbits;
 
        if (kvm_is_radix(kvm))
                return H_FUNCTION;
@@ -248,17 +248,9 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
 
        /* Translate to host virtual address */
        hva = __gfn_to_hva_memslot(memslot, gfn);
-       /*
-        * If we had a page table table change after lookup, we would
-        * retry via mmu_notifier_retry.
-        */
-       if (!realmode)
-               local_irq_save(irq_flags);
-       /*
-        * If called in real mode we have MSR_EE = 0. Otherwise
-        * we disable irq above.
-        */
-       ptep = __find_linux_pte(pgdir, hva, NULL, &hpage_shift);
+
+       arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock);
+       ptep = find_kvm_host_pte(kvm, mmu_seq, hva, &hpage_shift);
        if (ptep) {
                pte_t pte;
                unsigned int host_pte_size;
@@ -272,8 +264,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
                 * to <= host page size, if host is using hugepage
                 */
                if (host_pte_size < psize) {
-                       if (!realmode)
-                               local_irq_restore(flags);
+                       arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);
                        return H_PARAMETER;
                }
                pte = kvmppc_read_update_linux_pte(ptep, writing);
@@ -287,8 +278,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
                        pa |= gpa & ~PAGE_MASK;
                }
        }
-       if (!realmode)
-               local_irq_restore(irq_flags);
+       arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);
 
        ptel &= HPTE_R_KEY | HPTE_R_PP0 | (psize-1);
        ptel |= pa;