				 struct kvm_device_attr *attr,
				 u32 *reg, bool is_write)
{
-	return -ENXIO;
+	gpa_t addr;
+	int cpuid, ret, c;
+	struct kvm_vcpu *vcpu, *tmp_vcpu;
+	int vcpu_lock_idx = -1;
+
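+	/* The vcpu index and register offset are encoded in attr->attr. */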
+	cpuid = (attr->attr & KVM_DEV_ARM_VGIC_CPUID_MASK) >>
+		 KVM_DEV_ARM_VGIC_CPUID_SHIFT;
+	vcpu = kvm_get_vcpu(dev->kvm, cpuid);
+	addr = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
+
+	mutex_lock(&dev->kvm->lock);
+
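+	/* Make sure the vgic is initialized before accessing its state. */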
+	ret = vgic_init(dev->kvm);
+	if (ret)
+		goto out;
+
+	if (cpuid >= atomic_read(&dev->kvm->online_vcpus)) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/*
+	 * Any time a vcpu is run, vcpu_load is called which tries to grab the
+	 * vcpu->mutex. By grabbing the vcpu->mutex of all VCPUs, we ensure
+	 * that no other VCPUs can run and fiddle with the vgic state while we
+	 * access it.
+	 */
+	ret = -EBUSY;
+	kvm_for_each_vcpu(c, tmp_vcpu, dev->kvm) {
+		if (!mutex_trylock(&tmp_vcpu->mutex))
+			goto out;
+		vcpu_lock_idx = c;
+	}
+
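+	/* Only distributor registers are handled; others are rejected. */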
+	switch (attr->group) {
+	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
+		ret = -EINVAL;
+		break;
+	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
+		ret = vgic_v2_dist_uaccess(vcpu, is_write, addr, reg);
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+out:
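+	/* Unlock, in reverse order, the vcpu mutexes taken above. */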
+	for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
+		tmp_vcpu = kvm_get_vcpu(dev->kvm, vcpu_lock_idx);
+		mutex_unlock(&tmp_vcpu->mutex);
+	}
+
+	mutex_unlock(&dev->kvm->lock);
+	return ret;
}
/* V2 ops */