// SPDX-License-Identifier: GPL-2.0-only
/*
 * VGIC MMIO handling functions
 */

#include <linux/bitops.h>
#include <linux/bsearch.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <kvm/iodev.h>
#include <kvm/arm_arch_timer.h>
#include <kvm/arm_vgic.h>

#include "vgic.h"
#include "vgic-mmio.h"
unsigned long vgic_mmio_read_raz(struct kvm_vcpu *vcpu,
				 gpa_t addr, unsigned int len)
{
	return 0;
}

unsigned long vgic_mmio_read_rao(struct kvm_vcpu *vcpu,
				 gpa_t addr, unsigned int len)
{
	return -1UL;
}
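/*
 * The read handlers above and the write-ignore handlers below implement the
 * usual GIC register conventions for reserved or unimplemented ranges:
 * RAZ (read-as-zero), RAO (read-as-one) and WI (writes ignored).
 */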
void vgic_mmio_write_wi(struct kvm_vcpu *vcpu, gpa_t addr,
			unsigned int len, unsigned long val)
{
	/* Ignore */
}

int vgic_mmio_uaccess_write_wi(struct kvm_vcpu *vcpu, gpa_t addr,
			       unsigned int len, unsigned long val)
{
	/* Ignore */
	return 0;
}
unsigned long vgic_mmio_read_group(struct kvm_vcpu *vcpu,
				   gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 value = 0;
	int i;

	/* Loop over all IRQs affected by this read */
	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		if (irq->group)
			value |= BIT(i);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}
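/*
 * GICD_IGROUPR encodes one bit per interrupt: a set bit configures the
 * interrupt as Group 1, a clear bit as Group 0, which is why the write
 * handler below reduces each bit of the written value to a boolean.
 */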
void vgic_mmio_write_group(struct kvm_vcpu *vcpu, gpa_t addr,
			   unsigned int len, unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		irq->group = !!(val & BIT(i));
		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}
}
/*
 * Read accesses to both GICD_ICENABLER and GICD_ISENABLER return the value
 * of the enabled bit, so there is only one function for both here.
 */
unsigned long vgic_mmio_read_enable(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 value = 0;
	int i;

	/* Loop over all IRQs affected by this read */
	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		if (irq->enabled)
			value |= (1U << i);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}
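/*
 * Writes, by contrast, have set/clear semantics: GICD_ISENABLER enables every
 * interrupt whose bit is written as 1 and GICD_ICENABLER disables it, while
 * zero bits leave the interrupt untouched. That is why the write handlers
 * below only walk the set bits of the written value.
 */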
void vgic_mmio_write_senable(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		if (vgic_irq_is_mapped_level(irq)) {
			bool was_high = irq->line_level;

			/*
			 * We need to update the state of the interrupt because
			 * the guest might have changed the state of the device
			 * while the interrupt was disabled at the VGIC level.
			 */
			irq->line_level = vgic_get_phys_line_level(irq);
			/*
			 * Deactivate the physical interrupt so the GIC will let
			 * us know when it is asserted again.
			 */
			if (!irq->active && was_high && !irq->line_level)
				vgic_irq_set_phys_active(irq, false);
		}

		irq->enabled = true;
		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}
}
void vgic_mmio_write_cenable(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);

		irq->enabled = false;

		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}
unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
				     gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 value = 0;
	int i;
	unsigned long flags;

	/* Loop over all IRQs affected by this read */
	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		if (irq_is_pending(irq))
			value |= (1U << i);
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}
/*
 * This function will return the VCPU that performed the MMIO access and
 * trapped from within the VM, and will return NULL if this is a userspace
 * access.
 *
 * We can disable preemption locally around accessing the per-CPU variable,
 * and use the resolved vcpu pointer after enabling preemption again, because
 * even if the current thread is migrated to another CPU, reading the per-CPU
 * value later will give us the same value as we update the per-CPU variable
 * in the preempt notifier handlers.
 */
static struct kvm_vcpu *vgic_get_mmio_requester_vcpu(void)
{
	struct kvm_vcpu *vcpu;

	preempt_disable();
	vcpu = kvm_arm_get_running_vcpu();
	preempt_enable();

	return vcpu;
}
/* Must be called with irq->irq_lock held */
static void vgic_hw_irq_spending(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
				 bool is_uaccess)
{
	if (is_uaccess)
		return;

	irq->pending_latch = true;
	vgic_irq_set_phys_active(irq, true);
}
void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
			      gpa_t addr, unsigned int len,
			      unsigned long val)
{
	bool is_uaccess = !vgic_get_mmio_requester_vcpu();
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		if (irq->hw)
			vgic_hw_irq_spending(vcpu, irq, is_uaccess);
		else
			irq->pending_latch = true;
		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}
/* Must be called with irq->irq_lock held */
static void vgic_hw_irq_cpending(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
				 bool is_uaccess)
{
	if (is_uaccess)
		return;

	irq->pending_latch = false;

	/*
	 * We don't want the guest to effectively mask the physical
	 * interrupt by doing a write to SPENDR followed by a write to
	 * CPENDR for HW interrupts, so we clear the active state on
	 * the physical side if the virtual interrupt is not active.
	 * This may lead to taking an additional interrupt on the
	 * host, but that should not be a problem as the worst that
	 * can happen is an additional vgic injection. We also clear
	 * the pending state to maintain proper semantics for edge HW
	 * interrupts.
	 */
	vgic_irq_set_phys_pending(irq, false);
	if (!irq->active)
		vgic_irq_set_phys_active(irq, false);
}
void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
			      gpa_t addr, unsigned int len,
			      unsigned long val)
{
	bool is_uaccess = !vgic_get_mmio_requester_vcpu();
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);

		if (irq->hw)
			vgic_hw_irq_cpending(vcpu, irq, is_uaccess);
		else
			irq->pending_latch = false;

		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}
unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 value = 0;
	int i;

	/* Loop over all IRQs affected by this read */
	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		if (irq->active)
			value |= (1U << i);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}
/* Must be called with irq->irq_lock held */
static void vgic_hw_irq_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
				      bool active, bool is_uaccess)
{
	if (is_uaccess)
		return;

	irq->active = active;
	vgic_irq_set_phys_active(irq, active);
}
static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
				    bool active)
{
	unsigned long flags;
	struct kvm_vcpu *requester_vcpu = vgic_get_mmio_requester_vcpu();

	raw_spin_lock_irqsave(&irq->irq_lock, flags);

	if (irq->hw) {
		vgic_hw_irq_change_active(vcpu, irq, active, !requester_vcpu);
	} else {
		u32 model = vcpu->kvm->arch.vgic.vgic_model;
		u8 active_source;

		irq->active = active;

		/*
		 * The GICv2 architecture indicates that the source CPUID for
		 * an SGI should be provided during an EOI which implies that
		 * the active state is stored somewhere, but at the same time
		 * this state is not architecturally exposed anywhere and we
		 * have no way of knowing the right source.
		 *
		 * This may lead to a VCPU not being able to receive
		 * additional instances of a particular SGI after migration
		 * for a GICv2 VM on some GIC implementations. Oh well.
		 */
		active_source = (requester_vcpu) ? requester_vcpu->vcpu_id : 0;

		if (model == KVM_DEV_TYPE_ARM_VGIC_V2 &&
		    active && vgic_irq_is_sgi(irq->intid))
			irq->active_source = active_source;
	}

	if (irq->active)
		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
	else
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
}
/*
 * If we are fiddling with an IRQ's active state, we have to make sure the IRQ
 * is not queued on some running VCPU's LRs, because then the change to the
 * active state can be overwritten when the VCPU's state is synced coming back
 * from the guest.
 *
 * For shared interrupts, we have to stop all the VCPUs because interrupts can
 * be migrated while we don't hold the IRQ locks and we don't want to be
 * chasing moving targets.
 *
 * For private interrupts we don't have to do anything because userspace
 * accesses to the VGIC state already require all VCPUs to be stopped, and
 * only the VCPU itself can modify its private interrupts active state, which
 * guarantees that the VCPU is not running.
 */
static void vgic_change_active_prepare(struct kvm_vcpu *vcpu, u32 intid)
{
	if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
	    intid > VGIC_NR_PRIVATE_IRQS)
		kvm_arm_halt_guest(vcpu->kvm);
}

/* See vgic_change_active_prepare */
static void vgic_change_active_finish(struct kvm_vcpu *vcpu, u32 intid)
{
	if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
	    intid > VGIC_NR_PRIVATE_IRQS)
		kvm_arm_resume_guest(vcpu->kvm);
}
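/*
 * The guest-facing GICD_ICACTIVER/ISACTIVER handlers below bracket the
 * prepare/finish pair with the kvm->lock mutex, so halting and resuming all
 * VCPUs cannot race with another concurrent active-state update.
 */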
static void __vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
				      gpa_t addr, unsigned int len,
				      unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
		vgic_mmio_change_active(vcpu, irq, false);
		vgic_put_irq(vcpu->kvm, irq);
	}
}
void vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);

	mutex_lock(&vcpu->kvm->lock);
	vgic_change_active_prepare(vcpu, intid);

	__vgic_mmio_write_cactive(vcpu, addr, len, val);

	vgic_change_active_finish(vcpu, intid);
	mutex_unlock(&vcpu->kvm->lock);
}
int vgic_mmio_uaccess_write_cactive(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len,
				    unsigned long val)
{
	__vgic_mmio_write_cactive(vcpu, addr, len, val);
	return 0;
}
static void __vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
				      gpa_t addr, unsigned int len,
				      unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
		vgic_mmio_change_active(vcpu, irq, true);
		vgic_put_irq(vcpu->kvm, irq);
	}
}
void vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);

	mutex_lock(&vcpu->kvm->lock);
	vgic_change_active_prepare(vcpu, intid);

	__vgic_mmio_write_sactive(vcpu, addr, len, val);

	vgic_change_active_finish(vcpu, intid);
	mutex_unlock(&vcpu->kvm->lock);
}
int vgic_mmio_uaccess_write_sactive(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len,
				    unsigned long val)
{
	__vgic_mmio_write_sactive(vcpu, addr, len, val);
	return 0;
}
unsigned long vgic_mmio_read_priority(struct kvm_vcpu *vcpu,
				      gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
	int i;
	u64 val = 0;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		val |= (u64)irq->priority << (i * 8);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return val;
}
/*
 * We currently don't handle changing the priority of an interrupt that
 * is already pending on a VCPU. If there is a need for this, we would
 * need to make this VCPU exit and re-evaluate the priorities, potentially
 * leading to this interrupt getting presented now to the guest (if it has
 * been masked by the priority mask before).
 */
void vgic_mmio_write_priority(struct kvm_vcpu *vcpu,
			      gpa_t addr, unsigned int len,
			      unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
	int i;
	unsigned long flags;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		/* Narrow the priority range to what we actually support */
		irq->priority = (val >> (i * 8)) & GENMASK(7, 8 - VGIC_PRI_BITS);
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}
}
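/*
 * Worked example of the narrowing above, assuming VGIC_PRI_BITS is 5 as
 * defined in the vgic headers: GENMASK(7, 3) is 0xf8, so only the top five
 * priority bits are kept and a guest-written priority of 0x21 is stored
 * as 0x20.
 */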
unsigned long vgic_mmio_read_config(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 2);
	u32 value = 0;
	int i;

	for (i = 0; i < len * 4; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		if (irq->config == VGIC_CONFIG_EDGE)
			value |= (2U << (i * 2));

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}
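/*
 * GICD_ICFGR uses a two-bit field per interrupt, and only the upper bit of
 * each field is meaningful: 1 means edge-triggered, 0 means level-sensitive.
 * That is why the read above reports 2 (0b10) for edge interrupts and the
 * write below tests bit (i * 2 + 1) of the written value.
 */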
void vgic_mmio_write_config(struct kvm_vcpu *vcpu,
			    gpa_t addr, unsigned int len,
			    unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 2);
	int i;
	unsigned long flags;

	for (i = 0; i < len * 4; i++) {
		struct vgic_irq *irq;

		/*
		 * The configuration cannot be changed for SGIs in general,
		 * for PPIs this is IMPLEMENTATION DEFINED. The arch timer
		 * code relies on PPIs being level triggered, so we also
		 * make them read-only here.
		 */
		if (intid + i < VGIC_NR_PRIVATE_IRQS)
			continue;

		irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
		raw_spin_lock_irqsave(&irq->irq_lock, flags);

		if (test_bit(i * 2 + 1, &val))
			irq->config = VGIC_CONFIG_EDGE;
		else
			irq->config = VGIC_CONFIG_LEVEL;

		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}
u64 vgic_read_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid)
{
	int i;
	u64 val = 0;
	int nr_irqs = vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;

	for (i = 0; i < 32; i++) {
		struct vgic_irq *irq;

		if ((intid + i) < VGIC_NR_SGIS || (intid + i) >= nr_irqs)
			continue;

		irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
		if (irq->config == VGIC_CONFIG_LEVEL && irq->line_level)
			val |= (1U << i);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return val;
}
void vgic_write_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid,
				    const u64 val)
{
	int i;
	int nr_irqs = vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;
	unsigned long flags;

	for (i = 0; i < 32; i++) {
		struct vgic_irq *irq;
		bool new_level;

		if ((intid + i) < VGIC_NR_SGIS || (intid + i) >= nr_irqs)
			continue;

		irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		/*
		 * Line level is set irrespective of irq type
		 * (level or edge) to avoid dependency that VM should
		 * restore irq config before line level.
		 */
		new_level = !!(val & (1U << i));
		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		irq->line_level = new_level;
		if (new_level)
			vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
		else
			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}
}
static int match_region(const void *key, const void *elt)
{
	const unsigned int offset = (unsigned long)key;
	const struct vgic_register_region *region = elt;

	if (offset < region->reg_offset)
		return -1;

	if (offset >= region->reg_offset + region->len)
		return 1;

	return 0;
}
const struct vgic_register_region *
vgic_find_mmio_region(const struct vgic_register_region *regions,
		      int nr_regions, unsigned int offset)
{
	return bsearch((void *)(uintptr_t)offset, regions, nr_regions,
		       sizeof(regions[0]), match_region);
}
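/*
 * Note: bsearch() requires the regions table to be sorted by reg_offset;
 * match_region() then reports whether the offset falls below, inside or
 * above the candidate region.
 */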
void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_set_vmcr(vcpu, vmcr);
	else
		vgic_v3_set_vmcr(vcpu, vmcr);
}

void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_get_vmcr(vcpu, vmcr);
	else
		vgic_v3_get_vmcr(vcpu, vmcr);
}
/*
 * kvm_mmio_read_buf() returns a value in a format where it can be converted
 * to a byte array and be directly observed as the guest wanted it to appear
 * in memory if it had done the store itself, which is LE for the GIC, as the
 * guest knows the GIC is always LE.
 *
 * We convert this value to the CPUs native format to deal with it as a data
 * value.
 */
unsigned long vgic_data_mmio_bus_to_host(const void *val, unsigned int len)
{
	unsigned long data = kvm_mmio_read_buf(val, len);

	switch (len) {
	case 1:
		return data;
	case 2:
		return le16_to_cpu(data);
	case 4:
		return le32_to_cpu(data);
	default:
		return le64_to_cpu(data);
	}
}
/*
 * kvm_mmio_write_buf() expects a value in a format such that if converted to
 * a byte array it is observed as the guest would see it if it could perform
 * the load directly. Since the GIC is LE, and the guest knows this, the
 * guest expects a value in little endian format.
 *
 * We convert the data value from the CPUs native format to LE so that the
 * value is returned in the proper format.
 */
void vgic_data_host_to_mmio_bus(void *buf, unsigned int len,
				unsigned long data)
{
	switch (len) {
	case 1:
		break;
	case 2:
		data = cpu_to_le16(data);
		break;
	case 4:
		data = cpu_to_le32(data);
		break;
	default:
		data = cpu_to_le64(data);
	}

	kvm_mmio_write_buf(buf, len, data);
}
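/*
 * Illustration: on a little-endian host, a 4-byte distributor read that
 * computes 0x12345678 is placed on the MMIO bus as the bytes 78 56 34 12,
 * which is what a guest performing the load expects from an
 * always-little-endian GIC.
 */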
struct vgic_io_device *kvm_to_vgic_iodev(const struct kvm_io_device *dev)
{
	return container_of(dev, struct vgic_io_device, dev);
}
static bool check_region(const struct kvm *kvm,
			 const struct vgic_register_region *region,
			 gpa_t addr, int len)
{
	int flags, nr_irqs = kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;

	switch (len) {
	case sizeof(u8):
		flags = VGIC_ACCESS_8bit;
		break;
	case sizeof(u32):
		flags = VGIC_ACCESS_32bit;
		break;
	case sizeof(u64):
		flags = VGIC_ACCESS_64bit;
		break;
	default:
		return false;
	}

	if ((region->access_flags & flags) && IS_ALIGNED(addr, len)) {
		if (!region->bits_per_irq)
			return true;

		/* Do we access a non-allocated IRQ? */
		return VGIC_ADDR_TO_INTID(addr, region->bits_per_irq) < nr_irqs;
	}

	return false;
}
const struct vgic_register_region *
vgic_get_mmio_region(struct kvm_vcpu *vcpu, struct vgic_io_device *iodev,
		     gpa_t addr, int len)
{
	const struct vgic_register_region *region;

	region = vgic_find_mmio_region(iodev->regions, iodev->nr_regions,
				       addr - iodev->base_addr);
	if (!region || !check_region(vcpu->kvm, region, addr, len))
		return NULL;

	return region;
}
static int vgic_uaccess_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			     gpa_t addr, u32 *val)
{
	struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
	const struct vgic_register_region *region;
	struct kvm_vcpu *r_vcpu;

	region = vgic_get_mmio_region(vcpu, iodev, addr, sizeof(u32));
	if (!region) {
		*val = 0;
		return 0;
	}

	r_vcpu = iodev->redist_vcpu ? iodev->redist_vcpu : vcpu;
	if (region->uaccess_read)
		*val = region->uaccess_read(r_vcpu, addr, sizeof(u32));
	else
		*val = region->read(r_vcpu, addr, sizeof(u32));

	return 0;
}
static int vgic_uaccess_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			      gpa_t addr, const u32 *val)
{
	struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
	const struct vgic_register_region *region;
	struct kvm_vcpu *r_vcpu;

	region = vgic_get_mmio_region(vcpu, iodev, addr, sizeof(u32));
	if (!region)
		return 0;

	r_vcpu = iodev->redist_vcpu ? iodev->redist_vcpu : vcpu;
	if (region->uaccess_write)
		return region->uaccess_write(r_vcpu, addr, sizeof(u32), *val);

	region->write(r_vcpu, addr, sizeof(u32), *val);
	return 0;
}
/*
 * Userland access to VGIC registers.
 */
int vgic_uaccess(struct kvm_vcpu *vcpu, struct vgic_io_device *dev,
		 bool is_write, int offset, u32 *val)
{
	if (is_write)
		return vgic_uaccess_write(vcpu, &dev->dev, offset, val);
	else
		return vgic_uaccess_read(vcpu, &dev->dev, offset, val);
}
static int dispatch_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			      gpa_t addr, int len, void *val)
{
	struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
	const struct vgic_register_region *region;
	unsigned long data = 0;

	region = vgic_get_mmio_region(vcpu, iodev, addr, len);
	if (!region) {
		memset(val, 0, len);
		return 0;
	}

	switch (iodev->iodev_type) {
	case IODEV_CPUIF:
		data = region->read(vcpu, addr, len);
		break;
	case IODEV_DIST:
		data = region->read(vcpu, addr, len);
		break;
	case IODEV_REDIST:
		data = region->read(iodev->redist_vcpu, addr, len);
		break;
	case IODEV_ITS:
		data = region->its_read(vcpu->kvm, iodev->its, addr, len);
		break;
	}

	vgic_data_host_to_mmio_bus(val, len, data);
	return 0;
}
static int dispatch_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			       gpa_t addr, int len, const void *val)
{
	struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
	const struct vgic_register_region *region;
	unsigned long data = vgic_data_mmio_bus_to_host(val, len);

	region = vgic_get_mmio_region(vcpu, iodev, addr, len);
	if (!region)
		return 0;

	switch (iodev->iodev_type) {
	case IODEV_CPUIF:
		region->write(vcpu, addr, len, data);
		break;
	case IODEV_DIST:
		region->write(vcpu, addr, len, data);
		break;
	case IODEV_REDIST:
		region->write(iodev->redist_vcpu, addr, len, data);
		break;
	case IODEV_ITS:
		region->its_write(vcpu->kvm, iodev->its, addr, len, data);
		break;
	}

	return 0;
}
struct kvm_io_device_ops kvm_io_gic_ops = {
	.read = dispatch_mmio_read,
	.write = dispatch_mmio_write,
};
int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address,
			     enum vgic_type type)
{
	struct vgic_io_device *io_device = &kvm->arch.vgic.dist_iodev;
	unsigned int len;
	int ret = 0;

	switch (type) {
	case VGIC_V2:
		len = vgic_v2_init_dist_iodev(io_device);
		break;
	case VGIC_V3:
		len = vgic_v3_init_dist_iodev(io_device);
		break;
	default:
		BUG_ON(1);
	}

	io_device->base_addr = dist_base_address;
	io_device->iodev_type = IODEV_DIST;
	io_device->redist_vcpu = NULL;

	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, dist_base_address,
				      len, &io_device->dev);
	mutex_unlock(&kvm->slots_lock);

	return ret;
}