KVM: arm64: Do not change the PMU event filter after a VCPU has run
author    Marc Zyngier <maz@kernel.org>
          Thu, 27 Jan 2022 16:17:54 +0000 (16:17 +0000)
committer Marc Zyngier <maz@kernel.org>
          Tue, 8 Feb 2022 17:51:21 +0000 (17:51 +0000)
Userspace can specify which events a guest is allowed to use with the
KVM_ARM_VCPU_PMU_V3_FILTER attribute. A guest can identify the allowed
events by reading the PMCEID{0,1}_EL0 registers.

Changing the PMU event filter after a VCPU has run can cause reads of the
registers performed before the filter is changed to return different values
than reads performed with the new event filter in place. The architecture
defines the two registers as read-only, and this behaviour contradicts
that.

Keep track of when the first VCPU has run and deny subsequent changes to
the PMU event filter to prevent this from happening.
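
For context, a minimal userspace sketch (illustrative only, not part of this
patch) of how the filter is programmed through the vcpu device attribute
interface; the vcpu_fd and the chosen event range are assumptions made for
the example:

  #include <linux/kvm.h>
  #include <sys/ioctl.h>

  /*
   * Hypothetical helper: allow or deny a range of PMU events for the VM
   * that owns vcpu_fd. Must be called before any VCPU has run; after this
   * change the ioctl fails with -EBUSY once a VCPU has entered the guest.
   */
  static int set_pmu_filter(int vcpu_fd, __u16 base, __u16 nevents, __u8 action)
  {
          struct kvm_pmu_event_filter filter = {
                  .base_event = base,
                  .nevents    = nevents,
                  .action     = action, /* KVM_PMU_EVENT_ALLOW or KVM_PMU_EVENT_DENY */
          };
          struct kvm_device_attr attr = {
                  .group = KVM_ARM_VCPU_PMU_V3_CTRL,
                  .attr  = KVM_ARM_VCPU_PMU_V3_FILTER,
                  .addr  = (__u64)(unsigned long)&filter,
          };

          return ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);
  }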

Signed-off-by: Marc Zyngier <maz@kernel.org>
[ Alexandru E: Added commit message, updated ioctl documentation ]
Signed-off-by: Alexandru Elisei <alexandru.elisei@arm.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20220127161759.53553-2-alexandru.elisei@arm.com
Documentation/virt/kvm/devices/vcpu.rst
arch/arm64/include/asm/kvm_host.h
arch/arm64/kvm/arm.c
arch/arm64/kvm/pmu-emul.c

diff --git a/Documentation/virt/kvm/devices/vcpu.rst b/Documentation/virt/kvm/devices/vcpu.rst
index 60a29972d3f1b840681e71fc2bcd6a48f6013b7f..d063aaee5bb73b692ff4bb4a4977efc4f870ed40 100644
@@ -70,7 +70,7 @@ irqchip.
         -ENODEV  PMUv3 not supported or GIC not initialized
         -ENXIO   PMUv3 not properly configured or in-kernel irqchip not
                  configured as required prior to calling this attribute
-        -EBUSY   PMUv3 already initialized
+        -EBUSY   PMUv3 already initialized or a VCPU has already run
         -EINVAL  Invalid filter range
         =======  ======================================================
 
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 5bc01e62c08a03d55f12d1bafa9f6fc9ead49f10..2869259e10c000bc79967f9eb6efcfb12ecb3c95 100644
@@ -136,6 +136,7 @@ struct kvm_arch {
 
        /* Memory Tagging Extension enabled for the guest */
        bool mte_enabled;
+       bool ran_once;
 };
 
 struct kvm_vcpu_fault_info {
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index ecc5958e27fe2b3fc69b9b1121a626495cb13c46..4783dbf66df2dc745bd178369a41cb3c3b8fd950 100644
@@ -634,6 +634,10 @@ int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
        if (kvm_vm_is_protected(kvm))
                kvm_call_hyp_nvhe(__pkvm_vcpu_init_traps, vcpu);
 
+       mutex_lock(&kvm->lock);
+       kvm->arch.ran_once = true;
+       mutex_unlock(&kvm->lock);
+
        return ret;
 }
 
diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
index fbcfd4ec6f926905a1441fbd00d024d89fd3a9de..bc771bc1a0413a5796f8ea9efb594398a297b1ef 100644
@@ -924,6 +924,8 @@ static bool pmu_irq_is_valid(struct kvm *kvm, int irq)
 
 int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
 {
+       struct kvm *kvm = vcpu->kvm;
+
        if (!kvm_vcpu_has_pmu(vcpu))
                return -ENODEV;
 
@@ -941,7 +943,7 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
                int __user *uaddr = (int __user *)(long)attr->addr;
                int irq;
 
-               if (!irqchip_in_kernel(vcpu->kvm))
+               if (!irqchip_in_kernel(kvm))
                        return -EINVAL;
 
                if (get_user(irq, uaddr))
@@ -951,7 +953,7 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
                if (!(irq_is_ppi(irq) || irq_is_spi(irq)))
                        return -EINVAL;
 
-               if (!pmu_irq_is_valid(vcpu->kvm, irq))
+               if (!pmu_irq_is_valid(kvm, irq))
                        return -EINVAL;
 
                if (kvm_arm_pmu_irq_initialized(vcpu))
@@ -966,7 +968,7 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
                struct kvm_pmu_event_filter filter;
                int nr_events;
 
-               nr_events = kvm_pmu_event_mask(vcpu->kvm) + 1;
+               nr_events = kvm_pmu_event_mask(kvm) + 1;
 
                uaddr = (struct kvm_pmu_event_filter __user *)(long)attr->addr;
 
@@ -978,12 +980,17 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
                     filter.action != KVM_PMU_EVENT_DENY))
                        return -EINVAL;
 
-               mutex_lock(&vcpu->kvm->lock);
+               mutex_lock(&kvm->lock);
+
+               if (kvm->arch.ran_once) {
+                       mutex_unlock(&kvm->lock);
+                       return -EBUSY;
+               }
 
-               if (!vcpu->kvm->arch.pmu_filter) {
-                       vcpu->kvm->arch.pmu_filter = bitmap_alloc(nr_events, GFP_KERNEL_ACCOUNT);
-                       if (!vcpu->kvm->arch.pmu_filter) {
-                               mutex_unlock(&vcpu->kvm->lock);
+               if (!kvm->arch.pmu_filter) {
+                       kvm->arch.pmu_filter = bitmap_alloc(nr_events, GFP_KERNEL_ACCOUNT);
+                       if (!kvm->arch.pmu_filter) {
+                               mutex_unlock(&kvm->lock);
                                return -ENOMEM;
                        }
 
@@ -994,17 +1001,17 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
                         * events, the default is to allow.
                         */
                        if (filter.action == KVM_PMU_EVENT_ALLOW)
-                               bitmap_zero(vcpu->kvm->arch.pmu_filter, nr_events);
+                               bitmap_zero(kvm->arch.pmu_filter, nr_events);
                        else
-                               bitmap_fill(vcpu->kvm->arch.pmu_filter, nr_events);
+                               bitmap_fill(kvm->arch.pmu_filter, nr_events);
                }
 
                if (filter.action == KVM_PMU_EVENT_ALLOW)
-                       bitmap_set(vcpu->kvm->arch.pmu_filter, filter.base_event, filter.nevents);
+                       bitmap_set(kvm->arch.pmu_filter, filter.base_event, filter.nevents);
                else
-                       bitmap_clear(vcpu->kvm->arch.pmu_filter, filter.base_event, filter.nevents);
+                       bitmap_clear(kvm->arch.pmu_filter, filter.base_event, filter.nevents);
 
-               mutex_unlock(&vcpu->kvm->lock);
+               mutex_unlock(&kvm->lock);
 
                return 0;
        }