KVM: arm/arm64: Remove pmc->bitmask
author Andrew Murray <andrew.murray@arm.com>
Mon, 17 Jun 2019 19:01:04 +0000 (20:01 +0100)
committer Marc Zyngier <marc.zyngier@arm.com>
Fri, 5 Jul 2019 12:56:18 +0000 (13:56 +0100)
We currently use pmc->bitmask to determine the width of the pmc. However,
it is superfluous: the pmc index already tells us whether the pmc is a
cycle counter or an event counter, and the architecture clearly defines
the widths of these counters.

Let's remove the bitmask to simplify the code.
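
For reference, the rule the patch encodes is: event counters (0..30) are
32 bits wide, and the cycle counter (index 31) is 64 bits wide whenever
the guest has set PMCR_EL0.LC. A minimal standalone sketch of that rule
(not part of the patch; the constants mirror the kernel's
ARMV8_PMU_CYCLE_IDX and ARMV8_PMU_PMCR_LC definitions):

	#include <stdbool.h>
	#include <stdint.h>

	#define ARMV8_PMU_CYCLE_IDX	31		/* the cycle counter is PMC 31 */
	#define ARMV8_PMU_PMCR_LC	(1U << 6)	/* PMCR_EL0.LC: 64-bit cycle counter */

	/* Counter width follows purely from the index and PMCR_EL0.LC. */
	static bool counter_is_64bit(uint64_t select_idx, uint64_t pmcr)
	{
		return select_idx == ARMV8_PMU_CYCLE_IDX &&
		       (pmcr & ARMV8_PMU_PMCR_LC);
	}

	/* Truncate a raw perf counter value to its architected width. */
	static uint64_t counter_value(uint64_t raw, uint64_t select_idx,
				      uint64_t pmcr)
	{
		return counter_is_64bit(select_idx, pmcr) ? raw : (uint32_t)raw;
	}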

Signed-off-by: Andrew Murray <andrew.murray@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
 include/kvm/arm_pmu.h |  1 -
 virt/kvm/arm/pmu.c    | 30 ++++++++++++++++++++----------
 2 files changed, 20 insertions(+), 11 deletions(-)

diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h
index 45e5205..48a15d4 100644
@@ -17,7 +17,6 @@
 struct kvm_pmc {
        u8 idx; /* index into the pmu->pmc array */
        struct perf_event *perf_event;
-       u64 bitmask;
 };
 
 struct kvm_pmu {
diff --git a/virt/kvm/arm/pmu.c b/virt/kvm/arm/pmu.c
index f77643f..24c6cf8 100644
 #include <kvm/arm_vgic.h>
 
 static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx);
+
+/**
+ * kvm_pmu_idx_is_64bit - determine if select_idx is a 64bit counter
+ * @vcpu: The vcpu pointer
+ * @select_idx: The counter index
+ */
+static bool kvm_pmu_idx_is_64bit(struct kvm_vcpu *vcpu, u64 select_idx)
+{
+       return (select_idx == ARMV8_PMU_CYCLE_IDX &&
+               __vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_LC);
+}
+
 /**
  * kvm_pmu_get_counter_value - get PMU counter value
  * @vcpu: The vcpu pointer
@@ -36,7 +48,10 @@ u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx)
                counter += perf_event_read_value(pmc->perf_event, &enabled,
                                                 &running);
 
-       return counter & pmc->bitmask;
+       if (!kvm_pmu_idx_is_64bit(vcpu, select_idx))
+               counter = lower_32_bits(counter);
+
+       return counter;
 }
 
 /**
@@ -102,7 +117,6 @@ void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu)
        for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
                kvm_pmu_stop_counter(vcpu, &pmu->pmc[i]);
                pmu->pmc[i].idx = i;
-               pmu->pmc[i].bitmask = 0xffffffffUL;
        }
 }
 
@@ -337,8 +351,6 @@ void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val)
  */
 void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
 {
-       struct kvm_pmu *pmu = &vcpu->arch.pmu;
-       struct kvm_pmc *pmc;
        u64 mask;
        int i;
 
@@ -357,11 +369,6 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
                for (i = 0; i < ARMV8_PMU_CYCLE_IDX; i++)
                        kvm_pmu_set_counter_value(vcpu, i, 0);
        }
-
-       if (val & ARMV8_PMU_PMCR_LC) {
-               pmc = &pmu->pmc[ARMV8_PMU_CYCLE_IDX];
-               pmc->bitmask = 0xffffffffffffffffUL;
-       }
 }
 
 static bool kvm_pmu_counter_is_enabled(struct kvm_vcpu *vcpu, u64 select_idx)
@@ -409,7 +416,10 @@ static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx)
 
        counter = kvm_pmu_get_counter_value(vcpu, select_idx);
        /* The initial sample period (overflow count) of an event. */
-       attr.sample_period = (-counter) & pmc->bitmask;
+       if (kvm_pmu_idx_is_64bit(vcpu, pmc->idx))
+               attr.sample_period = (-counter) & GENMASK(63, 0);
+       else
+               attr.sample_period = (-counter) & GENMASK(31, 0);
 
        event = perf_event_create_kernel_counter(&attr, -1, current,
                                                 kvm_pmu_perf_overflow, pmc);
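
The sample period arithmetic above arms perf to fire exactly when the
architected counter would wrap: for a 32-bit counter programmed to
0xFFFFFFF0, (-counter) & GENMASK(31, 0) yields 0x10, so the perf event
overflows after 16 increments. A standalone check of that arithmetic
(GENMASK(31, 0) is spelled out as a constant since this builds outside
the kernel):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t counter = 0xFFFFFFF0ull;	/* 16 ticks from a 32-bit wrap */
		/* Mirrors attr.sample_period for a 32-bit counter. */
		uint64_t period = (-counter) & 0xFFFFFFFFull;

		printf("sample_period = %llu\n", (unsigned long long)period);	/* prints 16 */
		return 0;
	}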