KVM: arm64: PMU: Allow ID_DFR0_EL1.PerfMon to be set from userspace
author: Marc Zyngier <maz@kernel.org>
Sun, 13 Nov 2022 16:38:28 +0000 (16:38 +0000)
committer: Marc Zyngier <maz@kernel.org>
Sat, 19 Nov 2022 12:56:39 +0000 (12:56 +0000)
Allow userspace to write ID_DFR0_EL1, on the condition that only
the PerfMon field can be altered and be something that is compatible
with what was computed for the AArch64 view of the guest.

Reviewed-by: Reiji Watanabe <reijiw@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20221113163832.3154370-13-maz@kernel.org
arch/arm64/kvm/sys_regs.c

index 4958525..b8ac587 100644 (file)
@@ -1070,6 +1070,19 @@ static u8 vcpu_pmuver(const struct kvm_vcpu *vcpu)
        return vcpu->kvm->arch.dfr0_pmuver.unimp;
 }
 
+/*
+ * Map an AArch32 ID_DFR0_EL1.PerfMon value to the AArch64
+ * ID_AA64DFR0_EL1.PMUVer encoding (the inverse of pmuver_to_perfmon()).
+ * Only the 8.0 and IMP_DEF encodings differ between the two views;
+ * every other value (NI, ARMv8.1+) is numerically identical.
+ */
+static u8 perfmon_to_pmuver(u8 perfmon)
+{
+       switch (perfmon) {
+       case ID_DFR0_PERFMON_8_0:
+               return ID_AA64DFR0_EL1_PMUVer_IMP;
+       case ID_DFR0_PERFMON_IMP_DEF:
+               return ID_AA64DFR0_EL1_PMUVer_IMP_DEF;
+       default:
+               /* Anything ARMv8.1+ and NI have the same value. For now. */
+               return perfmon;
+       }
+}
+
 static u8 pmuver_to_perfmon(u8 pmuver)
 {
        switch (pmuver) {
@@ -1281,6 +1294,46 @@ static int set_id_aa64dfr0_el1(struct kvm_vcpu *vcpu,
        return 0;
 }
 
+/*
+ * Userspace write handler for the AArch32 ID_DFR0_EL1 ID register.
+ *
+ * Only the PerfMon field may differ from the register's current guest
+ * view, and only within what the host supports on the AArch64 side
+ * (everything is emulated through the AArch64 PMU code). Returns 0 on
+ * success, -EINVAL on any disallowed value or field change.
+ */
+static int set_id_dfr0_el1(struct kvm_vcpu *vcpu,
+                          const struct sys_reg_desc *rd,
+                          u64 val)
+{
+       u8 perfmon, host_perfmon;
+       bool valid_pmu;
+
+       /* Host PMU ceiling, expressed in the AArch32 PerfMon encoding */
+       host_perfmon = pmuver_to_perfmon(kvm_arm_pmu_get_pmuver_limit());
+
+       /*
+        * Allow DFR0_EL1.PerfMon to be set from userspace as long as
+        * it doesn't promise more than what the HW gives us on the
+        * AArch64 side (as everything is emulated with that), and
+        * that this is a PMUv3.
+        */
+       perfmon = FIELD_GET(ARM64_FEATURE_MASK(ID_DFR0_PERFMON), val);
+       /* Reject values above the host limit, or below PMUv3 (except 0/IMP_DEF) */
+       if ((perfmon != ID_DFR0_PERFMON_IMP_DEF && perfmon > host_perfmon) ||
+           (perfmon != 0 && perfmon < ID_DFR0_PERFMON_8_0))
+               return -EINVAL;
+
+       /* An architected (non-zero, non-IMP_DEF) PerfMon implies a real PMUv3 */
+       valid_pmu = (perfmon != 0 && perfmon != ID_DFR0_PERFMON_IMP_DEF);
+
+       /* Make sure view register and PMU support do match */
+       if (kvm_vcpu_has_pmu(vcpu) != valid_pmu)
+               return -EINVAL;
+
+       /* We can only differ with PerfMon, and anything else is an error */
+       val ^= read_id_reg(vcpu, rd);
+       val &= ~ARM64_FEATURE_MASK(ID_DFR0_PERFMON);
+       if (val)
+               return -EINVAL;
+
+       /* Record the (converted) PMU version in the matching per-VM slot */
+       if (valid_pmu)
+               vcpu->kvm->arch.dfr0_pmuver.imp = perfmon_to_pmuver(perfmon);
+       else
+               vcpu->kvm->arch.dfr0_pmuver.unimp = perfmon_to_pmuver(perfmon);
+
+       return 0;
+}
+
 /*
  * cpufeature ID register user accessors
  *
@@ -1502,7 +1555,9 @@ static const struct sys_reg_desc sys_reg_descs[] = {
        /* CRm=1 */
        AA32_ID_SANITISED(ID_PFR0_EL1),
        AA32_ID_SANITISED(ID_PFR1_EL1),
-       AA32_ID_SANITISED(ID_DFR0_EL1),
+       { SYS_DESC(SYS_ID_DFR0_EL1), .access = access_id_reg,
+         .get_user = get_id_reg, .set_user = set_id_dfr0_el1,
+         .visibility = aa32_id_visibility, },
        ID_HIDDEN(ID_AFR0_EL1),
        AA32_ID_SANITISED(ID_MMFR0_EL1),
        AA32_ID_SANITISED(ID_MMFR1_EL1),