KVM: PPC: Book3S HV: Use accessors for VCPU registers
authorJordan Niethe <jniethe5@gmail.com>
Thu, 14 Sep 2023 03:05:55 +0000 (13:05 +1000)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 25 Jan 2024 23:35:13 +0000 (15:35 -0800)
[ Upstream commit ebc88ea7a6ad0ea349df9c765357d3aa4e662aa9 ]

Introduce accessor generator macros for Book3S HV VCPU registers. Use
the accessor functions to replace direct accesses to these registers.

This will be important later for Nested APIv2 support which requires
additional functionality for accessing and modifying VCPU state.

Signed-off-by: Jordan Niethe <jniethe5@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://msgid.link/20230914030600.16993-7-jniethe5@gmail.com
Stable-dep-of: ecd10702baae ("KVM: PPC: Book3S HV: Handle pending exceptions on guest entry with MSR_EE")
Signed-off-by: Sasha Levin <sashal@kernel.org>
arch/powerpc/kvm/book3s_64_mmu_radix.c
arch/powerpc/kvm/book3s_hv.c
arch/powerpc/kvm/book3s_hv.h

index 5727078..10aacbf 100644 (file)
@@ -15,6 +15,7 @@
 
 #include <asm/kvm_ppc.h>
 #include <asm/kvm_book3s.h>
+#include "book3s_hv.h"
 #include <asm/page.h>
 #include <asm/mmu.h>
 #include <asm/pgalloc.h>
@@ -294,9 +295,9 @@ int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
        } else {
                if (!(pte & _PAGE_PRIVILEGED)) {
                        /* Check AMR/IAMR to see if strict mode is in force */
-                       if (vcpu->arch.amr & (1ul << 62))
+                       if (kvmppc_get_amr_hv(vcpu) & (1ul << 62))
                                gpte->may_read = 0;
-                       if (vcpu->arch.amr & (1ul << 63))
+                       if (kvmppc_get_amr_hv(vcpu) & (1ul << 63))
                                gpte->may_write = 0;
                        if (vcpu->arch.iamr & (1ul << 62))
                                gpte->may_execute = 0;
index 130bafd..7fa7fce 100644 (file)
@@ -868,7 +868,7 @@ static int kvmppc_h_set_mode(struct kvm_vcpu *vcpu, unsigned long mflags,
                /* Guests can't breakpoint the hypervisor */
                if ((value1 & CIABR_PRIV) == CIABR_PRIV_HYPER)
                        return H_P3;
-               vcpu->arch.ciabr  = value1;
+               kvmppc_set_ciabr_hv(vcpu, value1);
                return H_SUCCESS;
        case H_SET_MODE_RESOURCE_SET_DAWR0:
                if (!kvmppc_power8_compatible(vcpu))
@@ -879,8 +879,8 @@ static int kvmppc_h_set_mode(struct kvm_vcpu *vcpu, unsigned long mflags,
                        return H_UNSUPPORTED_FLAG_START;
                if (value2 & DABRX_HYP)
                        return H_P4;
-               vcpu->arch.dawr0  = value1;
-               vcpu->arch.dawrx0 = value2;
+               kvmppc_set_dawr0_hv(vcpu, value1);
+               kvmppc_set_dawrx0_hv(vcpu, value2);
                return H_SUCCESS;
        case H_SET_MODE_RESOURCE_SET_DAWR1:
                if (!kvmppc_power8_compatible(vcpu))
@@ -895,8 +895,8 @@ static int kvmppc_h_set_mode(struct kvm_vcpu *vcpu, unsigned long mflags,
                        return H_UNSUPPORTED_FLAG_START;
                if (value2 & DABRX_HYP)
                        return H_P4;
-               vcpu->arch.dawr1  = value1;
-               vcpu->arch.dawrx1 = value2;
+               kvmppc_set_dawr1_hv(vcpu, value1);
+               kvmppc_set_dawrx1_hv(vcpu, value2);
                return H_SUCCESS;
        case H_SET_MODE_RESOURCE_ADDR_TRANS_MODE:
                /*
@@ -1544,7 +1544,7 @@ static int kvmppc_pmu_unavailable(struct kvm_vcpu *vcpu)
        if (!(vcpu->arch.hfscr_permitted & HFSCR_PM))
                return EMULATE_FAIL;
 
-       vcpu->arch.hfscr |= HFSCR_PM;
+       kvmppc_set_hfscr_hv(vcpu, kvmppc_get_hfscr_hv(vcpu) | HFSCR_PM);
 
        return RESUME_GUEST;
 }
@@ -1554,7 +1554,7 @@ static int kvmppc_ebb_unavailable(struct kvm_vcpu *vcpu)
        if (!(vcpu->arch.hfscr_permitted & HFSCR_EBB))
                return EMULATE_FAIL;
 
-       vcpu->arch.hfscr |= HFSCR_EBB;
+       kvmppc_set_hfscr_hv(vcpu, kvmppc_get_hfscr_hv(vcpu) | HFSCR_EBB);
 
        return RESUME_GUEST;
 }
@@ -1564,7 +1564,7 @@ static int kvmppc_tm_unavailable(struct kvm_vcpu *vcpu)
        if (!(vcpu->arch.hfscr_permitted & HFSCR_TM))
                return EMULATE_FAIL;
 
-       vcpu->arch.hfscr |= HFSCR_TM;
+       kvmppc_set_hfscr_hv(vcpu, kvmppc_get_hfscr_hv(vcpu) | HFSCR_TM);
 
        return RESUME_GUEST;
 }
@@ -1863,7 +1863,7 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
         * Otherwise, we just generate a program interrupt to the guest.
         */
        case BOOK3S_INTERRUPT_H_FAC_UNAVAIL: {
-               u64 cause = vcpu->arch.hfscr >> 56;
+               u64 cause = kvmppc_get_hfscr_hv(vcpu) >> 56;
 
                r = EMULATE_FAIL;
                if (cpu_has_feature(CPU_FTR_ARCH_300)) {
@@ -2207,64 +2207,64 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
                *val = get_reg_val(id, vcpu->arch.dabrx);
                break;
        case KVM_REG_PPC_DSCR:
-               *val = get_reg_val(id, vcpu->arch.dscr);
+               *val = get_reg_val(id, kvmppc_get_dscr_hv(vcpu));
                break;
        case KVM_REG_PPC_PURR:
-               *val = get_reg_val(id, vcpu->arch.purr);
+               *val = get_reg_val(id, kvmppc_get_purr_hv(vcpu));
                break;
        case KVM_REG_PPC_SPURR:
-               *val = get_reg_val(id, vcpu->arch.spurr);
+               *val = get_reg_val(id, kvmppc_get_spurr_hv(vcpu));
                break;
        case KVM_REG_PPC_AMR:
-               *val = get_reg_val(id, vcpu->arch.amr);
+               *val = get_reg_val(id, kvmppc_get_amr_hv(vcpu));
                break;
        case KVM_REG_PPC_UAMOR:
-               *val = get_reg_val(id, vcpu->arch.uamor);
+               *val = get_reg_val(id, kvmppc_get_uamor_hv(vcpu));
                break;
        case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCR1:
                i = id - KVM_REG_PPC_MMCR0;
-               *val = get_reg_val(id, vcpu->arch.mmcr[i]);
+               *val = get_reg_val(id, kvmppc_get_mmcr_hv(vcpu, i));
                break;
        case KVM_REG_PPC_MMCR2:
-               *val = get_reg_val(id, vcpu->arch.mmcr[2]);
+               *val = get_reg_val(id, kvmppc_get_mmcr_hv(vcpu, 2));
                break;
        case KVM_REG_PPC_MMCRA:
-               *val = get_reg_val(id, vcpu->arch.mmcra);
+               *val = get_reg_val(id, kvmppc_get_mmcra_hv(vcpu));
                break;
        case KVM_REG_PPC_MMCRS:
                *val = get_reg_val(id, vcpu->arch.mmcrs);
                break;
        case KVM_REG_PPC_MMCR3:
-               *val = get_reg_val(id, vcpu->arch.mmcr[3]);
+               *val = get_reg_val(id, kvmppc_get_mmcr_hv(vcpu, 3));
                break;
        case KVM_REG_PPC_PMC1 ... KVM_REG_PPC_PMC8:
                i = id - KVM_REG_PPC_PMC1;
-               *val = get_reg_val(id, vcpu->arch.pmc[i]);
+               *val = get_reg_val(id, kvmppc_get_pmc_hv(vcpu, i));
                break;
        case KVM_REG_PPC_SPMC1 ... KVM_REG_PPC_SPMC2:
                i = id - KVM_REG_PPC_SPMC1;
                *val = get_reg_val(id, vcpu->arch.spmc[i]);
                break;
        case KVM_REG_PPC_SIAR:
-               *val = get_reg_val(id, vcpu->arch.siar);
+               *val = get_reg_val(id, kvmppc_get_siar_hv(vcpu));
                break;
        case KVM_REG_PPC_SDAR:
-               *val = get_reg_val(id, vcpu->arch.sdar);
+               *val = get_reg_val(id, kvmppc_get_sdar_hv(vcpu));
                break;
        case KVM_REG_PPC_SIER:
-               *val = get_reg_val(id, vcpu->arch.sier[0]);
+               *val = get_reg_val(id, kvmppc_get_sier_hv(vcpu, 0));
                break;
        case KVM_REG_PPC_SIER2:
-               *val = get_reg_val(id, vcpu->arch.sier[1]);
+               *val = get_reg_val(id, kvmppc_get_sier_hv(vcpu, 1));
                break;
        case KVM_REG_PPC_SIER3:
-               *val = get_reg_val(id, vcpu->arch.sier[2]);
+               *val = get_reg_val(id, kvmppc_get_sier_hv(vcpu, 2));
                break;
        case KVM_REG_PPC_IAMR:
-               *val = get_reg_val(id, vcpu->arch.iamr);
+               *val = get_reg_val(id, kvmppc_get_iamr_hv(vcpu));
                break;
        case KVM_REG_PPC_PSPB:
-               *val = get_reg_val(id, vcpu->arch.pspb);
+               *val = get_reg_val(id, kvmppc_get_pspb_hv(vcpu));
                break;
        case KVM_REG_PPC_DPDES:
                /*
@@ -2282,19 +2282,19 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
                *val = get_reg_val(id, vcpu->arch.vcore->vtb);
                break;
        case KVM_REG_PPC_DAWR:
-               *val = get_reg_val(id, vcpu->arch.dawr0);
+               *val = get_reg_val(id, kvmppc_get_dawr0_hv(vcpu));
                break;
        case KVM_REG_PPC_DAWRX:
-               *val = get_reg_val(id, vcpu->arch.dawrx0);
+               *val = get_reg_val(id, kvmppc_get_dawrx0_hv(vcpu));
                break;
        case KVM_REG_PPC_DAWR1:
-               *val = get_reg_val(id, vcpu->arch.dawr1);
+               *val = get_reg_val(id, kvmppc_get_dawr1_hv(vcpu));
                break;
        case KVM_REG_PPC_DAWRX1:
-               *val = get_reg_val(id, vcpu->arch.dawrx1);
+               *val = get_reg_val(id, kvmppc_get_dawrx1_hv(vcpu));
                break;
        case KVM_REG_PPC_CIABR:
-               *val = get_reg_val(id, vcpu->arch.ciabr);
+               *val = get_reg_val(id, kvmppc_get_ciabr_hv(vcpu));
                break;
        case KVM_REG_PPC_CSIGR:
                *val = get_reg_val(id, vcpu->arch.csigr);
@@ -2312,7 +2312,7 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
                *val = get_reg_val(id, vcpu->arch.acop);
                break;
        case KVM_REG_PPC_WORT:
-               *val = get_reg_val(id, vcpu->arch.wort);
+               *val = get_reg_val(id, kvmppc_get_wort_hv(vcpu));
                break;
        case KVM_REG_PPC_TIDR:
                *val = get_reg_val(id, vcpu->arch.tid);
@@ -2345,7 +2345,7 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
                *val = get_reg_val(id, vcpu->arch.vcore->lpcr);
                break;
        case KVM_REG_PPC_PPR:
-               *val = get_reg_val(id, vcpu->arch.ppr);
+               *val = get_reg_val(id, kvmppc_get_ppr_hv(vcpu));
                break;
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
        case KVM_REG_PPC_TFHAR:
@@ -2425,6 +2425,9 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
        case KVM_REG_PPC_PTCR:
                *val = get_reg_val(id, vcpu->kvm->arch.l1_ptcr);
                break;
+       case KVM_REG_PPC_FSCR:
+               *val = get_reg_val(id, kvmppc_get_fscr_hv(vcpu));
+               break;
        default:
                r = -EINVAL;
                break;
@@ -2453,29 +2456,29 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
                vcpu->arch.dabrx = set_reg_val(id, *val) & ~DABRX_HYP;
                break;
        case KVM_REG_PPC_DSCR:
-               vcpu->arch.dscr = set_reg_val(id, *val);
+               kvmppc_set_dscr_hv(vcpu, set_reg_val(id, *val));
                break;
        case KVM_REG_PPC_PURR:
-               vcpu->arch.purr = set_reg_val(id, *val);
+               kvmppc_set_purr_hv(vcpu, set_reg_val(id, *val));
                break;
        case KVM_REG_PPC_SPURR:
-               vcpu->arch.spurr = set_reg_val(id, *val);
+               kvmppc_set_spurr_hv(vcpu, set_reg_val(id, *val));
                break;
        case KVM_REG_PPC_AMR:
-               vcpu->arch.amr = set_reg_val(id, *val);
+               kvmppc_set_amr_hv(vcpu, set_reg_val(id, *val));
                break;
        case KVM_REG_PPC_UAMOR:
-               vcpu->arch.uamor = set_reg_val(id, *val);
+               kvmppc_set_uamor_hv(vcpu, set_reg_val(id, *val));
                break;
        case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCR1:
                i = id - KVM_REG_PPC_MMCR0;
-               vcpu->arch.mmcr[i] = set_reg_val(id, *val);
+               kvmppc_set_mmcr_hv(vcpu, i, set_reg_val(id, *val));
                break;
        case KVM_REG_PPC_MMCR2:
-               vcpu->arch.mmcr[2] = set_reg_val(id, *val);
+               kvmppc_set_mmcr_hv(vcpu, 2, set_reg_val(id, *val));
                break;
        case KVM_REG_PPC_MMCRA:
-               vcpu->arch.mmcra = set_reg_val(id, *val);
+               kvmppc_set_mmcra_hv(vcpu, set_reg_val(id, *val));
                break;
        case KVM_REG_PPC_MMCRS:
                vcpu->arch.mmcrs = set_reg_val(id, *val);
@@ -2485,32 +2488,32 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
                break;
        case KVM_REG_PPC_PMC1 ... KVM_REG_PPC_PMC8:
                i = id - KVM_REG_PPC_PMC1;
-               vcpu->arch.pmc[i] = set_reg_val(id, *val);
+               kvmppc_set_pmc_hv(vcpu, i, set_reg_val(id, *val));
                break;
        case KVM_REG_PPC_SPMC1 ... KVM_REG_PPC_SPMC2:
                i = id - KVM_REG_PPC_SPMC1;
                vcpu->arch.spmc[i] = set_reg_val(id, *val);
                break;
        case KVM_REG_PPC_SIAR:
-               vcpu->arch.siar = set_reg_val(id, *val);
+               kvmppc_set_siar_hv(vcpu, set_reg_val(id, *val));
                break;
        case KVM_REG_PPC_SDAR:
-               vcpu->arch.sdar = set_reg_val(id, *val);
+               kvmppc_set_sdar_hv(vcpu, set_reg_val(id, *val));
                break;
        case KVM_REG_PPC_SIER:
-               vcpu->arch.sier[0] = set_reg_val(id, *val);
+               kvmppc_set_sier_hv(vcpu, 0, set_reg_val(id, *val));
                break;
        case KVM_REG_PPC_SIER2:
-               vcpu->arch.sier[1] = set_reg_val(id, *val);
+               kvmppc_set_sier_hv(vcpu, 1, set_reg_val(id, *val));
                break;
        case KVM_REG_PPC_SIER3:
-               vcpu->arch.sier[2] = set_reg_val(id, *val);
+               kvmppc_set_sier_hv(vcpu, 2, set_reg_val(id, *val));
                break;
        case KVM_REG_PPC_IAMR:
-               vcpu->arch.iamr = set_reg_val(id, *val);
+               kvmppc_set_iamr_hv(vcpu, set_reg_val(id, *val));
                break;
        case KVM_REG_PPC_PSPB:
-               vcpu->arch.pspb = set_reg_val(id, *val);
+               kvmppc_set_pspb_hv(vcpu, set_reg_val(id, *val));
                break;
        case KVM_REG_PPC_DPDES:
                if (cpu_has_feature(CPU_FTR_ARCH_300))
@@ -2522,22 +2525,22 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
                vcpu->arch.vcore->vtb = set_reg_val(id, *val);
                break;
        case KVM_REG_PPC_DAWR:
-               vcpu->arch.dawr0 = set_reg_val(id, *val);
+               kvmppc_set_dawr0_hv(vcpu, set_reg_val(id, *val));
                break;
        case KVM_REG_PPC_DAWRX:
-               vcpu->arch.dawrx0 = set_reg_val(id, *val) & ~DAWRX_HYP;
+               kvmppc_set_dawrx0_hv(vcpu, set_reg_val(id, *val) & ~DAWRX_HYP);
                break;
        case KVM_REG_PPC_DAWR1:
-               vcpu->arch.dawr1 = set_reg_val(id, *val);
+               kvmppc_set_dawr1_hv(vcpu, set_reg_val(id, *val));
                break;
        case KVM_REG_PPC_DAWRX1:
-               vcpu->arch.dawrx1 = set_reg_val(id, *val) & ~DAWRX_HYP;
+               kvmppc_set_dawrx1_hv(vcpu, set_reg_val(id, *val) & ~DAWRX_HYP);
                break;
        case KVM_REG_PPC_CIABR:
-               vcpu->arch.ciabr = set_reg_val(id, *val);
+               kvmppc_set_ciabr_hv(vcpu, set_reg_val(id, *val));
                /* Don't allow setting breakpoints in hypervisor code */
-               if ((vcpu->arch.ciabr & CIABR_PRIV) == CIABR_PRIV_HYPER)
-                       vcpu->arch.ciabr &= ~CIABR_PRIV;        /* disable */
+               if ((kvmppc_get_ciabr_hv(vcpu) & CIABR_PRIV) == CIABR_PRIV_HYPER)
+                       kvmppc_set_ciabr_hv(vcpu, kvmppc_get_ciabr_hv(vcpu) & ~CIABR_PRIV);
                break;
        case KVM_REG_PPC_CSIGR:
                vcpu->arch.csigr = set_reg_val(id, *val);
@@ -2555,7 +2558,7 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
                vcpu->arch.acop = set_reg_val(id, *val);
                break;
        case KVM_REG_PPC_WORT:
-               vcpu->arch.wort = set_reg_val(id, *val);
+               kvmppc_set_wort_hv(vcpu, set_reg_val(id, *val));
                break;
        case KVM_REG_PPC_TIDR:
                vcpu->arch.tid = set_reg_val(id, *val);
@@ -2615,7 +2618,7 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
                kvmppc_set_lpcr(vcpu, set_reg_val(id, *val), false);
                break;
        case KVM_REG_PPC_PPR:
-               vcpu->arch.ppr = set_reg_val(id, *val);
+               kvmppc_set_ppr_hv(vcpu, set_reg_val(id, *val));
                break;
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
        case KVM_REG_PPC_TFHAR:
@@ -2699,6 +2702,9 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
        case KVM_REG_PPC_PTCR:
                vcpu->kvm->arch.l1_ptcr = set_reg_val(id, *val);
                break;
+       case KVM_REG_PPC_FSCR:
+               kvmppc_set_fscr_hv(vcpu, set_reg_val(id, *val));
+               break;
        default:
                r = -EINVAL;
                break;
@@ -2916,13 +2922,14 @@ static int kvmppc_core_vcpu_create_hv(struct kvm_vcpu *vcpu)
        vcpu->arch.shared_big_endian = false;
 #endif
 #endif
-       vcpu->arch.mmcr[0] = MMCR0_FC;
+       kvmppc_set_mmcr_hv(vcpu, 0, MMCR0_FC);
+
        if (cpu_has_feature(CPU_FTR_ARCH_31)) {
-               vcpu->arch.mmcr[0] |= MMCR0_PMCCEXT;
-               vcpu->arch.mmcra = MMCRA_BHRB_DISABLE;
+               kvmppc_set_mmcr_hv(vcpu, 0, kvmppc_get_mmcr_hv(vcpu, 0) | MMCR0_PMCCEXT);
+               kvmppc_set_mmcra_hv(vcpu, MMCRA_BHRB_DISABLE);
        }
 
-       vcpu->arch.ctrl = CTRL_RUNLATCH;
+       kvmppc_set_ctrl_hv(vcpu, CTRL_RUNLATCH);
        /* default to host PVR, since we can't spoof it */
        kvmppc_set_pvr_hv(vcpu, mfspr(SPRN_PVR));
        spin_lock_init(&vcpu->arch.vpa_update_lock);
@@ -2938,29 +2945,30 @@ static int kvmppc_core_vcpu_create_hv(struct kvm_vcpu *vcpu)
         * don't set the HFSCR_MSGP bit, and that causes those instructions
         * to trap and then we emulate them.
         */
-       vcpu->arch.hfscr = HFSCR_TAR | HFSCR_EBB | HFSCR_PM | HFSCR_BHRB |
-               HFSCR_DSCR | HFSCR_VECVSX | HFSCR_FP;
+       kvmppc_set_hfscr_hv(vcpu, HFSCR_TAR | HFSCR_EBB | HFSCR_PM | HFSCR_BHRB |
+                           HFSCR_DSCR | HFSCR_VECVSX | HFSCR_FP);
 
        /* On POWER10 and later, allow prefixed instructions */
        if (cpu_has_feature(CPU_FTR_ARCH_31))
-               vcpu->arch.hfscr |= HFSCR_PREFIX;
+               kvmppc_set_hfscr_hv(vcpu, kvmppc_get_hfscr_hv(vcpu) | HFSCR_PREFIX);
 
        if (cpu_has_feature(CPU_FTR_HVMODE)) {
-               vcpu->arch.hfscr &= mfspr(SPRN_HFSCR);
+               kvmppc_set_hfscr_hv(vcpu, kvmppc_get_hfscr_hv(vcpu) & mfspr(SPRN_HFSCR));
+
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
                if (cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
-                       vcpu->arch.hfscr |= HFSCR_TM;
+                       kvmppc_set_hfscr_hv(vcpu, kvmppc_get_hfscr_hv(vcpu) | HFSCR_TM);
 #endif
        }
        if (cpu_has_feature(CPU_FTR_TM_COMP))
                vcpu->arch.hfscr |= HFSCR_TM;
 
-       vcpu->arch.hfscr_permitted = vcpu->arch.hfscr;
+       vcpu->arch.hfscr_permitted = kvmppc_get_hfscr_hv(vcpu);
 
        /*
         * PM, EBB, TM are demand-faulted so start with it clear.
         */
-       vcpu->arch.hfscr &= ~(HFSCR_PM | HFSCR_EBB | HFSCR_TM);
+       kvmppc_set_hfscr_hv(vcpu, kvmppc_get_hfscr_hv(vcpu) & ~(HFSCR_PM | HFSCR_EBB | HFSCR_TM));
 
        kvmppc_mmu_book3s_hv_init(vcpu);
 
@@ -4844,7 +4852,7 @@ static int kvmppc_vcpu_run_hv(struct kvm_vcpu *vcpu)
                msr |= MSR_VSX;
        if ((cpu_has_feature(CPU_FTR_TM) ||
            cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST)) &&
-                       (vcpu->arch.hfscr & HFSCR_TM))
+                       (kvmppc_get_hfscr_hv(vcpu) & HFSCR_TM))
                msr |= MSR_TM;
        msr = msr_check_and_set(msr);
 
index 2f2e59d..acd9a7a 100644 (file)
@@ -50,3 +50,61 @@ void accumulate_time(struct kvm_vcpu *vcpu, struct kvmhv_tb_accumulator *next);
 #define start_timing(vcpu, next) do {} while (0)
 #define end_timing(vcpu) do {} while (0)
 #endif
+
+#define KVMPPC_BOOK3S_HV_VCPU_ACCESSOR_SET(reg, size)                  \
+static inline void kvmppc_set_##reg ##_hv(struct kvm_vcpu *vcpu, u##size val)  \
+{                                                                      \
+       vcpu->arch.reg = val;                                           \
+}
+
+#define KVMPPC_BOOK3S_HV_VCPU_ACCESSOR_GET(reg, size)                  \
+static inline u##size kvmppc_get_##reg ##_hv(struct kvm_vcpu *vcpu)    \
+{                                                                      \
+       return vcpu->arch.reg;                                          \
+}
+
+#define KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(reg, size)                      \
+       KVMPPC_BOOK3S_HV_VCPU_ACCESSOR_SET(reg, size)                   \
+       KVMPPC_BOOK3S_HV_VCPU_ACCESSOR_GET(reg, size)                   \
+
+#define KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR_SET(reg, size)            \
+static inline void kvmppc_set_##reg ##_hv(struct kvm_vcpu *vcpu, int i, u##size val)   \
+{                                                                      \
+       vcpu->arch.reg[i] = val;                                        \
+}
+
+#define KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR_GET(reg, size)            \
+static inline u##size kvmppc_get_##reg ##_hv(struct kvm_vcpu *vcpu, int i)     \
+{                                                                      \
+       return vcpu->arch.reg[i];                                       \
+}
+
+#define KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR(reg, size)                        \
+       KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR_SET(reg, size)             \
+       KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR_GET(reg, size)             \
+
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(mmcra, 64)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(hfscr, 64)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(fscr, 64)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(dscr, 64)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(purr, 64)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(spurr, 64)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(amr, 64)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(uamor, 64)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(siar, 64)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(sdar, 64)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(iamr, 64)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(dawr0, 64)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(dawr1, 64)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(dawrx0, 64)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(dawrx1, 64)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(ciabr, 64)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(wort, 64)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(ppr, 64)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(ctrl, 64)
+
+KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR(mmcr, 64)
+KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR(sier, 64)
+KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR(pmc, 32)
+
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(pspb, 32)