KVM: PPC: Book3S HV: Cleanup updates for kvm vcpu MMCR
authorAthira Rajeev <atrajeev@linux.vnet.ibm.com>
Fri, 17 Jul 2020 14:38:14 +0000 (10:38 -0400)
committerMichael Ellerman <mpe@ellerman.id.au>
Wed, 22 Jul 2020 11:56:01 +0000 (21:56 +1000)
Currently `kvm_vcpu_arch` stores all Monitor Mode Control registers
in a flat array in order: mmcr0, mmcr1, mmcra, mmcr2, mmcrs
Split this to give mmcra and mmcrs their own entries in vcpu and
use a flat array for mmcr0 to mmcr2. This patch implements this
cleanup to make the code easier to read.

Signed-off-by: Athira Rajeev <atrajeev@linux.vnet.ibm.com>
[mpe: Fix MMCRA/MMCR2 uapi breakage as noted by paulus]
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/1594996707-3727-3-git-send-email-atrajeev@linux.vnet.ibm.com
arch/powerpc/include/asm/kvm_host.h
arch/powerpc/kernel/asm-offsets.c
arch/powerpc/kvm/book3s_hv.c
arch/powerpc/kvm/book3s_hv_rmhandlers.S

index 7e2d061..80b0005 100644 (file)
@@ -637,7 +637,9 @@ struct kvm_vcpu_arch {
        u32 ccr1;
        u32 dbsr;
 
-       u64 mmcr[5];
+       u64 mmcr[3];    /* MMCR0, MMCR1, MMCR2 */
+       u64 mmcra;
+       u64 mmcrs;
        u32 pmc[8];
        u32 spmc[2];
        u64 siar;
index 6657dc6..6fa4853 100644 (file)
@@ -559,6 +559,8 @@ int main(void)
        OFFSET(VCPU_IRQ_PENDING, kvm_vcpu, arch.irq_pending);
        OFFSET(VCPU_DBELL_REQ, kvm_vcpu, arch.doorbell_request);
        OFFSET(VCPU_MMCR, kvm_vcpu, arch.mmcr);
+       OFFSET(VCPU_MMCRA, kvm_vcpu, arch.mmcra);
+       OFFSET(VCPU_MMCRS, kvm_vcpu, arch.mmcrs);
        OFFSET(VCPU_PMC, kvm_vcpu, arch.pmc);
        OFFSET(VCPU_SPMC, kvm_vcpu, arch.spmc);
        OFFSET(VCPU_SIAR, kvm_vcpu, arch.siar);
index 6bf6664..b10bb40 100644 (file)
@@ -1679,10 +1679,19 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
        case KVM_REG_PPC_UAMOR:
                *val = get_reg_val(id, vcpu->arch.uamor);
                break;
-       case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCRS:
+       case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCR1:
                i = id - KVM_REG_PPC_MMCR0;
                *val = get_reg_val(id, vcpu->arch.mmcr[i]);
                break;
+       case KVM_REG_PPC_MMCR2:
+               *val = get_reg_val(id, vcpu->arch.mmcr[2]);
+               break;
+       case KVM_REG_PPC_MMCRA:
+               *val = get_reg_val(id, vcpu->arch.mmcra);
+               break;
+       case KVM_REG_PPC_MMCRS:
+               *val = get_reg_val(id, vcpu->arch.mmcrs);
+               break;
        case KVM_REG_PPC_PMC1 ... KVM_REG_PPC_PMC8:
                i = id - KVM_REG_PPC_PMC1;
                *val = get_reg_val(id, vcpu->arch.pmc[i]);
@@ -1900,10 +1909,19 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
        case KVM_REG_PPC_UAMOR:
                vcpu->arch.uamor = set_reg_val(id, *val);
                break;
-       case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCRS:
+       case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCR1:
                i = id - KVM_REG_PPC_MMCR0;
                vcpu->arch.mmcr[i] = set_reg_val(id, *val);
                break;
+       case KVM_REG_PPC_MMCR2:
+               vcpu->arch.mmcr[2] = set_reg_val(id, *val);
+               break;
+       case KVM_REG_PPC_MMCRA:
+               vcpu->arch.mmcra = set_reg_val(id, *val);
+               break;
+       case KVM_REG_PPC_MMCRS:
+               vcpu->arch.mmcrs = set_reg_val(id, *val);
+               break;
        case KVM_REG_PPC_PMC1 ... KVM_REG_PPC_PMC8:
                i = id - KVM_REG_PPC_PMC1;
                vcpu->arch.pmc[i] = set_reg_val(id, *val);
index 7194389..702eaa2 100644 (file)
@@ -3428,7 +3428,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
        mtspr   SPRN_PMC6, r9
        ld      r3, VCPU_MMCR(r4)
        ld      r5, VCPU_MMCR + 8(r4)
-       ld      r6, VCPU_MMCR + 16(r4)
+       ld      r6, VCPU_MMCRA(r4)
        ld      r7, VCPU_SIAR(r4)
        ld      r8, VCPU_SDAR(r4)
        mtspr   SPRN_MMCR1, r5
@@ -3436,14 +3436,14 @@ END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
        mtspr   SPRN_SIAR, r7
        mtspr   SPRN_SDAR, r8
 BEGIN_FTR_SECTION
-       ld      r5, VCPU_MMCR + 24(r4)
+       ld      r5, VCPU_MMCR + 16(r4)
        ld      r6, VCPU_SIER(r4)
        mtspr   SPRN_MMCR2, r5
        mtspr   SPRN_SIER, r6
 BEGIN_FTR_SECTION_NESTED(96)
        lwz     r7, VCPU_PMC + 24(r4)
        lwz     r8, VCPU_PMC + 28(r4)
-       ld      r9, VCPU_MMCR + 32(r4)
+       ld      r9, VCPU_MMCRS(r4)
        mtspr   SPRN_SPMC1, r7
        mtspr   SPRN_SPMC2, r8
        mtspr   SPRN_MMCRS, r9
@@ -3551,9 +3551,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
        mfspr   r8, SPRN_SDAR
        std     r4, VCPU_MMCR(r9)
        std     r5, VCPU_MMCR + 8(r9)
-       std     r6, VCPU_MMCR + 16(r9)
+       std     r6, VCPU_MMCRA(r9)
 BEGIN_FTR_SECTION
-       std     r10, VCPU_MMCR + 24(r9)
+       std     r10, VCPU_MMCR + 16(r9)
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
        std     r7, VCPU_SIAR(r9)
        std     r8, VCPU_SDAR(r9)
@@ -3578,7 +3578,7 @@ BEGIN_FTR_SECTION_NESTED(96)
        mfspr   r8, SPRN_MMCRS
        stw     r6, VCPU_PMC + 24(r9)
        stw     r7, VCPU_PMC + 28(r9)
-       std     r8, VCPU_MMCR + 32(r9)
+       std     r8, VCPU_MMCRS(r9)
        lis     r4, 0x8000
        mtspr   SPRN_MMCRS, r4
 END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96)