KVM: PPC: Book3S HV: POWER10 enable HAIL when running radix guests
author: Nicholas Piggin <npiggin@gmail.com>
Tue, 23 Nov 2021 09:51:46 +0000 (19:51 +1000)
committer: Michael Ellerman <mpe@ellerman.id.au>
Wed, 24 Nov 2021 10:08:57 +0000 (21:08 +1100)
HV interrupts may be taken with the MMU enabled when radix guests are
running. Enable LPCR[HAIL] on ISA v3.1 processors for radix guests.
Make this depend on the host LPCR[HAIL] being enabled. Currently that is
always enabled, but having this test means any issue that might require
LPCR[HAIL] to be disabled in the host will not have to be duplicated in
KVM.

This optimisation takes 1380 cycles off a NULL hcall entry+exit micro
benchmark on a POWER10.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Reviewed-by: Fabiano Rosas <farosas@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20211123095231.1036501-9-npiggin@gmail.com
arch/powerpc/kvm/book3s_hv.c

index 1b556db..a683ee5 100644 (file)
@@ -5073,6 +5073,8 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
  */
 int kvmppc_switch_mmu_to_hpt(struct kvm *kvm)
 {
+       unsigned long lpcr, lpcr_mask;
+
        if (nesting_enabled(kvm))
                kvmhv_release_all_nested(kvm);
        kvmppc_rmap_reset(kvm);
@@ -5082,8 +5084,13 @@ int kvmppc_switch_mmu_to_hpt(struct kvm *kvm)
        kvm->arch.radix = 0;
        spin_unlock(&kvm->mmu_lock);
        kvmppc_free_radix(kvm);
-       kvmppc_update_lpcr(kvm, LPCR_VPM1,
-                          LPCR_VPM1 | LPCR_UPRT | LPCR_GTSE | LPCR_HR);
+
+       lpcr = LPCR_VPM1;
+       lpcr_mask = LPCR_VPM1 | LPCR_UPRT | LPCR_GTSE | LPCR_HR;
+       if (cpu_has_feature(CPU_FTR_ARCH_31))
+               lpcr_mask |= LPCR_HAIL;
+       kvmppc_update_lpcr(kvm, lpcr, lpcr_mask);
+
        return 0;
 }
 
@@ -5093,6 +5100,7 @@ int kvmppc_switch_mmu_to_hpt(struct kvm *kvm)
  */
 int kvmppc_switch_mmu_to_radix(struct kvm *kvm)
 {
+       unsigned long lpcr, lpcr_mask;
        int err;
 
        err = kvmppc_init_vm_radix(kvm);
@@ -5104,8 +5112,17 @@ int kvmppc_switch_mmu_to_radix(struct kvm *kvm)
        kvm->arch.radix = 1;
        spin_unlock(&kvm->mmu_lock);
        kvmppc_free_hpt(&kvm->arch.hpt);
-       kvmppc_update_lpcr(kvm, LPCR_UPRT | LPCR_GTSE | LPCR_HR,
-                          LPCR_VPM1 | LPCR_UPRT | LPCR_GTSE | LPCR_HR);
+
+       lpcr = LPCR_UPRT | LPCR_GTSE | LPCR_HR;
+       lpcr_mask = LPCR_VPM1 | LPCR_UPRT | LPCR_GTSE | LPCR_HR;
+       if (cpu_has_feature(CPU_FTR_ARCH_31)) {
+               lpcr_mask |= LPCR_HAIL;
+               if (cpu_has_feature(CPU_FTR_HVMODE) &&
+                               (kvm->arch.host_lpcr & LPCR_HAIL))
+                       lpcr |= LPCR_HAIL;
+       }
+       kvmppc_update_lpcr(kvm, lpcr, lpcr_mask);
+
        return 0;
 }
 
@@ -5269,6 +5286,10 @@ static int kvmppc_core_init_vm_hv(struct kvm *kvm)
                kvm->arch.mmu_ready = 1;
                lpcr &= ~LPCR_VPM1;
                lpcr |= LPCR_UPRT | LPCR_GTSE | LPCR_HR;
+               if (cpu_has_feature(CPU_FTR_HVMODE) &&
+                   cpu_has_feature(CPU_FTR_ARCH_31) &&
+                   (kvm->arch.host_lpcr & LPCR_HAIL))
+                       lpcr |= LPCR_HAIL;
                ret = kvmppc_init_vm_radix(kvm);
                if (ret) {
                        kvmppc_free_lpid(kvm->arch.lpid);