KVM: PPC: Book3S HV P9: Switch PMU to guest as late as possible
author Nicholas Piggin <npiggin@gmail.com>
Tue, 23 Nov 2021 09:52:11 +0000 (19:52 +1000)
committer Michael Ellerman <mpe@ellerman.id.au>
Wed, 24 Nov 2021 10:09:00 +0000 (21:09 +1100)
This moves the PMU switch to guest as late as possible in entry, and the
switch back to host as early as possible at exit. This gives the host as
much perf coverage of the KVM entry/exit code as possible.

This is slightly suboptimal from an SPR scheduling point of view when the
PMU is enabled, but when perf is disabled there is no real difference.
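
With this change, the nested entry path is ordered roughly as follows (a
simplified sketch of the code in the diff below, eliding the unrelated SPR
and register handling):

	save_p9_host_os_sprs(&host_os_sprs);
	...
	switch_pmu_to_guest(vcpu, &host_os_sprs);
	trap = plpar_hcall_norets(H_ENTER_NESTED, __pa(&hvregs),
				  __pa(&vcpu->arch.regs));
	kvmhv_restore_hv_return_state(vcpu, &hvregs);
	switch_pmu_to_host(vcpu, &host_os_sprs);
	...
	restore_p9_host_os_sprs(vcpu, &host_os_sprs);

The host PMU now only stops counting for the H_ENTER_NESTED window (and,
on the non-nested path, for kvmppc_p9_enter_guest()), rather than for the
whole SPR save/restore sequence.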

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20211123095231.1036501-34-npiggin@gmail.com
arch/powerpc/kvm/book3s_hv.c
arch/powerpc/kvm/book3s_hv_p9_entry.c

diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 40bee0d..c14467c 100644
@@ -3833,8 +3833,6 @@ static int kvmhv_vcpu_entry_p9_nested(struct kvm_vcpu *vcpu, u64 time_limit, uns
        s64 dec;
        int trap;
 
-       switch_pmu_to_guest(vcpu, &host_os_sprs);
-
        save_p9_host_os_sprs(&host_os_sprs);
 
        /*
@@ -3897,9 +3895,11 @@ static int kvmhv_vcpu_entry_p9_nested(struct kvm_vcpu *vcpu, u64 time_limit, uns
 
        mtspr(SPRN_DAR, vcpu->arch.shregs.dar);
        mtspr(SPRN_DSISR, vcpu->arch.shregs.dsisr);
+       switch_pmu_to_guest(vcpu, &host_os_sprs);
        trap = plpar_hcall_norets(H_ENTER_NESTED, __pa(&hvregs),
                                  __pa(&vcpu->arch.regs));
        kvmhv_restore_hv_return_state(vcpu, &hvregs);
+       switch_pmu_to_host(vcpu, &host_os_sprs);
        vcpu->arch.shregs.msr = vcpu->arch.regs.msr;
        vcpu->arch.shregs.dar = mfspr(SPRN_DAR);
        vcpu->arch.shregs.dsisr = mfspr(SPRN_DSISR);
@@ -3918,8 +3918,6 @@ static int kvmhv_vcpu_entry_p9_nested(struct kvm_vcpu *vcpu, u64 time_limit, uns
 
        restore_p9_host_os_sprs(vcpu, &host_os_sprs);
 
-       switch_pmu_to_host(vcpu, &host_os_sprs);
-
        return trap;
 }
 
diff --git a/arch/powerpc/kvm/book3s_hv_p9_entry.c b/arch/powerpc/kvm/book3s_hv_p9_entry.c
index 6bef509..619bbcd 100644
@@ -601,8 +601,6 @@ int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpc
        local_paca->kvm_hstate.host_purr = mfspr(SPRN_PURR);
        local_paca->kvm_hstate.host_spurr = mfspr(SPRN_SPURR);
 
-       switch_pmu_to_guest(vcpu, &host_os_sprs);
-
        save_p9_host_os_sprs(&host_os_sprs);
 
        /*
@@ -744,7 +742,9 @@ tm_return_to_guest:
 
        accumulate_time(vcpu, &vcpu->arch.guest_time);
 
+       switch_pmu_to_guest(vcpu, &host_os_sprs);
        kvmppc_p9_enter_guest(vcpu);
+       switch_pmu_to_host(vcpu, &host_os_sprs);
 
        accumulate_time(vcpu, &vcpu->arch.rm_intr);
 
@@ -955,8 +955,6 @@ tm_return_to_guest:
                asm volatile(PPC_CP_ABORT);
 
 out:
-       switch_pmu_to_host(vcpu, &host_os_sprs);
-
        end_timing(vcpu);
 
        return trap;