KVM: PPC: Book3S HV: Introduce low level MSR accessor
author Jordan Niethe <jniethe5@gmail.com>
Thu, 14 Sep 2023 03:05:56 +0000 (13:05 +1000)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 25 Jan 2024 23:35:13 +0000 (15:35 -0800)
[ Upstream commit 6de2e837babb411cfb3cdb570581c3a65576ddaf ]

kvmppc_get_msr() and kvmppc_set_msr_fast() serve as accessors for the
MSR. However, because the MSR is kept in the shared regs, they include a
conditional check for kvmppc_shared_big_endian() and an endian conversion.
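
For reference, those shared-regs accessors expand to roughly the
following shape (an approximate sketch of the pattern, not the exact
macro-generated code):

    static inline u64 kvmppc_get_msr(struct kvm_vcpu *vcpu)
    {
            /* shared regs may be LE or BE depending on the guest */
            if (kvmppc_shared_big_endian(vcpu))
                    return be64_to_cpu(vcpu->arch.shared->msr);
            else
                    return le64_to_cpu(vcpu->arch.shared->msr);
    }

    static inline void kvmppc_set_msr_fast(struct kvm_vcpu *vcpu, u64 val)
    {
            if (kvmppc_shared_big_endian(vcpu))
                    vcpu->arch.shared->msr = cpu_to_be64(val);
            else
                    vcpu->arch.shared->msr = cpu_to_le64(val);
    }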

Within the Book3S HV specific code there are direct reads and writes of
shregs::msr. In preparation for Nested APIv2, these accesses need to be
replaced with accessor functions so that their behavior can be extended.
However, using the kvmppc_get_msr() and kvmppc_set_msr_fast() functions
is undesirable because it would introduce a conditional branch and an
endian conversion that are not currently present.

kvmppc_set_msr_hv() already exists; it is used for the
kvmppc_ops::set_msr callback.

Introduce low level accessors __kvmppc_{s,g}et_msr_hv() that simply
get and set shregs::msr. These will be extended for Nested APIv2 support.
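
The conversions in the diff below are mechanical; for example, the
read-modify-write in kvmppc_cede() becomes:

    /* before: direct access to the shared regs field */
    vcpu->arch.shregs.msr |= MSR_EE;

    /* after: the same update via the new low level accessors */
    __kvmppc_set_msr_hv(vcpu, __kvmppc_get_msr_hv(vcpu) | MSR_EE);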

Signed-off-by: Jordan Niethe <jniethe5@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://msgid.link/20230914030600.16993-8-jniethe5@gmail.com
Stable-dep-of: ecd10702baae ("KVM: PPC: Book3S HV: Handle pending exceptions on guest entry with MSR_EE")
Signed-off-by: Sasha Levin <sashal@kernel.org>
arch/powerpc/kvm/book3s_64_mmu_hv.c
arch/powerpc/kvm/book3s_hv.c
arch/powerpc/kvm/book3s_hv.h
arch/powerpc/kvm/book3s_hv_builtin.c

diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index efd0ebf..fdfc2a6 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -28,6 +28,7 @@
 #include <asm/pte-walk.h>
 
 #include "book3s.h"
+#include "book3s_hv.h"
 #include "trace_hv.h"
 
 //#define DEBUG_RESIZE_HPT     1
@@ -347,7 +348,7 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
        unsigned long v, orig_v, gr;
        __be64 *hptep;
        long int index;
-       int virtmode = vcpu->arch.shregs.msr & (data ? MSR_DR : MSR_IR);
+       int virtmode = __kvmppc_get_msr_hv(vcpu) & (data ? MSR_DR : MSR_IR);
 
        if (kvm_is_radix(vcpu->kvm))
                return kvmppc_mmu_radix_xlate(vcpu, eaddr, gpte, data, iswrite);
@@ -385,7 +386,7 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
 
        /* Get PP bits and key for permission check */
        pp = gr & (HPTE_R_PP0 | HPTE_R_PP);
-       key = (vcpu->arch.shregs.msr & MSR_PR) ? SLB_VSID_KP : SLB_VSID_KS;
+       key = (__kvmppc_get_msr_hv(vcpu) & MSR_PR) ? SLB_VSID_KP : SLB_VSID_KS;
        key &= slb_v;
 
        /* Calculate permissions */
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 7fa7fce..11bc5aa 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -1370,7 +1370,7 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
  */
 static void kvmppc_cede(struct kvm_vcpu *vcpu)
 {
-       vcpu->arch.shregs.msr |= MSR_EE;
+       __kvmppc_set_msr_hv(vcpu, __kvmppc_get_msr_hv(vcpu) | MSR_EE);
        vcpu->arch.ceded = 1;
        smp_mb();
        if (vcpu->arch.prodded) {
@@ -1585,7 +1585,7 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
         * That can happen due to a bug, or due to a machine check
         * occurring at just the wrong time.
         */
-       if (vcpu->arch.shregs.msr & MSR_HV) {
+       if (__kvmppc_get_msr_hv(vcpu) & MSR_HV) {
                printk(KERN_EMERG "KVM trap in HV mode!\n");
                printk(KERN_EMERG "trap=0x%x | pc=0x%lx | msr=0x%llx\n",
                        vcpu->arch.trap, kvmppc_get_pc(vcpu),
@@ -1636,7 +1636,7 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
                 * so that it knows that the machine check occurred.
                 */
                if (!vcpu->kvm->arch.fwnmi_enabled) {
-                       ulong flags = (vcpu->arch.shregs.msr & 0x083c0000) |
+                       ulong flags = (__kvmppc_get_msr_hv(vcpu) & 0x083c0000) |
                                        (kvmppc_get_msr(vcpu) & SRR1_PREFIXED);
                        kvmppc_core_queue_machine_check(vcpu, flags);
                        r = RESUME_GUEST;
@@ -1666,7 +1666,7 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
                 * as a result of a hypervisor emulation interrupt
                 * (e40) getting turned into a 700 by BML RTAS.
                 */
-               flags = (vcpu->arch.shregs.msr & 0x1f0000ull) |
+               flags = (__kvmppc_get_msr_hv(vcpu) & 0x1f0000ull) |
                        (kvmppc_get_msr(vcpu) & SRR1_PREFIXED);
                kvmppc_core_queue_program(vcpu, flags);
                r = RESUME_GUEST;
@@ -1676,7 +1676,7 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
        {
                int i;
 
-               if (unlikely(vcpu->arch.shregs.msr & MSR_PR)) {
+               if (unlikely(__kvmppc_get_msr_hv(vcpu) & MSR_PR)) {
                        /*
                         * Guest userspace executed sc 1. This can only be
                         * reached by the P9 path because the old path
@@ -1754,7 +1754,7 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
                        break;
                }
 
-               if (!(vcpu->arch.shregs.msr & MSR_DR))
+               if (!(__kvmppc_get_msr_hv(vcpu) & MSR_DR))
                        vsid = vcpu->kvm->arch.vrma_slb_v;
                else
                        vsid = vcpu->arch.fault_gpa;
@@ -1778,7 +1778,7 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
                long err;
 
                vcpu->arch.fault_dar = kvmppc_get_pc(vcpu);
-               vcpu->arch.fault_dsisr = vcpu->arch.shregs.msr &
+               vcpu->arch.fault_dsisr = __kvmppc_get_msr_hv(vcpu) &
                        DSISR_SRR1_MATCH_64S;
                if (kvm_is_radix(vcpu->kvm) || !cpu_has_feature(CPU_FTR_ARCH_300)) {
                        /*
@@ -1787,7 +1787,7 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
                         * hash fault handling below is v3 only (it uses ASDR
                         * via fault_gpa).
                         */
-                       if (vcpu->arch.shregs.msr & HSRR1_HISI_WRITE)
+                       if (__kvmppc_get_msr_hv(vcpu) & HSRR1_HISI_WRITE)
                                vcpu->arch.fault_dsisr |= DSISR_ISSTORE;
                        r = RESUME_PAGE_FAULT;
                        break;
@@ -1801,7 +1801,7 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
                        break;
                }
 
-               if (!(vcpu->arch.shregs.msr & MSR_IR))
+               if (!(__kvmppc_get_msr_hv(vcpu) & MSR_IR))
                        vsid = vcpu->kvm->arch.vrma_slb_v;
                else
                        vsid = vcpu->arch.fault_gpa;
@@ -1891,7 +1891,7 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
                kvmppc_dump_regs(vcpu);
                printk(KERN_EMERG "trap=0x%x | pc=0x%lx | msr=0x%llx\n",
                        vcpu->arch.trap, kvmppc_get_pc(vcpu),
-                       vcpu->arch.shregs.msr);
+                       __kvmppc_get_msr_hv(vcpu));
                run->hw.hardware_exit_reason = vcpu->arch.trap;
                r = RESUME_HOST;
                break;
@@ -1915,11 +1915,11 @@ static int kvmppc_handle_nested_exit(struct kvm_vcpu *vcpu)
         * That can happen due to a bug, or due to a machine check
         * occurring at just the wrong time.
         */
-       if (vcpu->arch.shregs.msr & MSR_HV) {
+       if (__kvmppc_get_msr_hv(vcpu) & MSR_HV) {
                pr_emerg("KVM trap in HV mode while nested!\n");
                pr_emerg("trap=0x%x | pc=0x%lx | msr=0x%llx\n",
                         vcpu->arch.trap, kvmppc_get_pc(vcpu),
-                        vcpu->arch.shregs.msr);
+                        __kvmppc_get_msr_hv(vcpu));
                kvmppc_dump_regs(vcpu);
                return RESUME_HOST;
        }
@@ -1976,7 +1976,7 @@ static int kvmppc_handle_nested_exit(struct kvm_vcpu *vcpu)
                vcpu->arch.fault_dar = kvmppc_get_pc(vcpu);
                vcpu->arch.fault_dsisr = kvmppc_get_msr(vcpu) &
                                         DSISR_SRR1_MATCH_64S;
-               if (vcpu->arch.shregs.msr & HSRR1_HISI_WRITE)
+               if (__kvmppc_get_msr_hv(vcpu) & HSRR1_HISI_WRITE)
                        vcpu->arch.fault_dsisr |= DSISR_ISSTORE;
                srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
                r = kvmhv_nested_page_fault(vcpu);
@@ -2935,7 +2935,7 @@ static int kvmppc_core_vcpu_create_hv(struct kvm_vcpu *vcpu)
        spin_lock_init(&vcpu->arch.vpa_update_lock);
        spin_lock_init(&vcpu->arch.tbacct_lock);
        vcpu->arch.busy_preempt = TB_NIL;
-       vcpu->arch.shregs.msr = MSR_ME;
+       __kvmppc_set_msr_hv(vcpu, MSR_ME);
        vcpu->arch.intr_msr = MSR_SF | MSR_ME;
 
        /*
@@ -4184,7 +4184,7 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
                __this_cpu_write(cpu_in_guest, NULL);
 
                if (trap == BOOK3S_INTERRUPT_SYSCALL &&
-                   !(vcpu->arch.shregs.msr & MSR_PR)) {
+                   !(__kvmppc_get_msr_hv(vcpu) & MSR_PR)) {
                        unsigned long req = kvmppc_get_gpr(vcpu, 3);
 
                        /*
@@ -4663,7 +4663,7 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
 
        if (!nested) {
                kvmppc_core_prepare_to_enter(vcpu);
-               if (vcpu->arch.shregs.msr & MSR_EE) {
+               if (__kvmppc_get_msr_hv(vcpu) & MSR_EE) {
                        if (xive_interrupt_pending(vcpu))
                                kvmppc_inject_interrupt_hv(vcpu,
                                                BOOK3S_INTERRUPT_EXTERNAL, 0);
@@ -4876,7 +4876,7 @@ static int kvmppc_vcpu_run_hv(struct kvm_vcpu *vcpu)
                if (run->exit_reason == KVM_EXIT_PAPR_HCALL) {
                        accumulate_time(vcpu, &vcpu->arch.hcall);
 
-                       if (WARN_ON_ONCE(vcpu->arch.shregs.msr & MSR_PR)) {
+                       if (WARN_ON_ONCE(__kvmppc_get_msr_hv(vcpu) & MSR_PR)) {
                                /*
                                 * These should have been caught reflected
                                 * into the guest by now. Final sanity check:
diff --git a/arch/powerpc/kvm/book3s_hv.h b/arch/powerpc/kvm/book3s_hv.h
index acd9a7a..9524176 100644
--- a/arch/powerpc/kvm/book3s_hv.h
+++ b/arch/powerpc/kvm/book3s_hv.h
@@ -51,6 +51,16 @@ void accumulate_time(struct kvm_vcpu *vcpu, struct kvmhv_tb_accumulator *next);
 #define end_timing(vcpu) do {} while (0)
 #endif
 
+static inline void __kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 val)
+{
+       vcpu->arch.shregs.msr = val;
+}
+
+static inline u64 __kvmppc_get_msr_hv(struct kvm_vcpu *vcpu)
+{
+       return vcpu->arch.shregs.msr;
+}
+
 #define KVMPPC_BOOK3S_HV_VCPU_ACCESSOR_SET(reg, size)                  \
 static inline void kvmppc_set_##reg ##_hv(struct kvm_vcpu *vcpu, u##size val)  \
 {                                                                      \
diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
index 0f5b021..663f522 100644
--- a/arch/powerpc/kvm/book3s_hv_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_builtin.c
@@ -32,6 +32,7 @@
 
 #include "book3s_xics.h"
 #include "book3s_xive.h"
+#include "book3s_hv.h"
 
 /*
  * Hash page table alignment on newer cpus(CPU_FTR_ARCH_206)
@@ -510,7 +511,7 @@ void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr)
         */
        if ((msr & MSR_TS_MASK) == MSR_TS_MASK)
                msr &= ~MSR_TS_MASK;
-       vcpu->arch.shregs.msr = msr;
+       __kvmppc_set_msr_hv(vcpu, msr);
        kvmppc_end_cede(vcpu);
 }
 EXPORT_SYMBOL_GPL(kvmppc_set_msr_hv);
@@ -548,7 +549,7 @@ static void inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags)
        kvmppc_set_srr0(vcpu, pc);
        kvmppc_set_srr1(vcpu, (msr & SRR1_MSR_BITS) | srr1_flags);
        kvmppc_set_pc(vcpu, new_pc);
-       vcpu->arch.shregs.msr = new_msr;
+       __kvmppc_set_msr_hv(vcpu, new_msr);
 }
 
 void kvmppc_inject_interrupt_hv(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags)