/*
 * KVM coalesced MMIO
 *
 * Copyright (c) 2008 Bull S.A.S.
 * Copyright 2009 Red Hat, Inc. and/or its affiliates.
 *
 * Author: Laurent Vivier <Laurent.Vivier@bull.net>
 */

#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <linux/kvm.h>

#include "coalesced_mmio.h"

static inline struct kvm_coalesced_mmio_dev *to_mmio(struct kvm_io_device *dev)
{
	return container_of(dev, struct kvm_coalesced_mmio_dev, dev);
}

static int coalesced_mmio_in_range(struct kvm_coalesced_mmio_dev *dev,
				   gpa_t addr, int len)
{
	/* Is it in a batchable area?
	 * (addr, len) must be fully contained in
	 * (zone->addr, zone->size).
	 */
	return (dev->zone.addr <= addr &&
		addr + len <= dev->zone.addr + dev->zone.size);
}

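/*
 * Note: only writes that fall entirely inside a registered zone are
 * coalesced; an access that straddles a zone boundary is rejected here
 * and handled as ordinary MMIO instead.
 */
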
static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev)
{
	struct kvm_coalesced_mmio_ring *ring;
	unsigned avail;

	/* Are we able to batch it?
	 * last is the first free entry; check that we do not meet the
	 * first used entry. There is always one unused entry in the
	 * buffer, so "full" and "empty" can be told apart.
	 */
	ring = dev->kvm->coalesced_mmio_ring;
	avail = (ring->first - ring->last - 1) % KVM_COALESCED_MMIO_MAX;

	return avail != 0;
}

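/*
 * The ring is a single-producer/single-consumer buffer: the kernel
 * advances ring->last as it adds entries, while userspace advances
 * ring->first as it drains them. Keeping one slot permanently unused
 * lets first == last mean "empty" rather than "full".
 */
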
static int coalesced_mmio_write(struct kvm_io_device *this,
				gpa_t addr, int len, const void *val)
{
	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);
	struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring;

	if (!coalesced_mmio_in_range(dev, addr, len))
		return -EOPNOTSUPP;

	spin_lock(&dev->kvm->ring_lock);

	if (!coalesced_mmio_has_room(dev)) {
		spin_unlock(&dev->kvm->ring_lock);
		return -EOPNOTSUPP;
	}

	/* copy data into the first free entry of the ring */
	ring->coalesced_mmio[ring->last].phys_addr = addr;
	ring->coalesced_mmio[ring->last].len = len;
	memcpy(ring->coalesced_mmio[ring->last].data, val, len);
	smp_wmb();	/* publish the entry before moving the producer index */
	ring->last = (ring->last + 1) % KVM_COALESCED_MMIO_MAX;
	spin_unlock(&dev->kvm->ring_lock);

	return 0;
}

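/*
 * Returning 0 tells the MMIO bus the access was handled, so the vcpu
 * re-enters the guest without a KVM_EXIT_MMIO. Returning -EOPNOTSUPP
 * (zone miss or full ring) makes the write fall back to the regular
 * userspace MMIO exit path. Userspace drains the entries between
 * ring->first and ring->last when it regains control; the ring page is
 * mapped alongside the vcpu's kvm_run area at
 * KVM_COALESCED_MMIO_PAGE_OFFSET.
 */
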
static void coalesced_mmio_destructor(struct kvm_io_device *this)
{
	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);

	list_del(&dev->list);
	kfree(dev);
}

static const struct kvm_io_device_ops coalesced_mmio_ops = {
	.write      = coalesced_mmio_write,
	.destructor = coalesced_mmio_destructor,
};

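/*
 * Only .write is implemented: reads inside a coalesced zone are not
 * handled by this device and still take the regular MMIO exit to
 * userspace.
 */
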
int kvm_coalesced_mmio_init(struct kvm *kvm)
{
	struct page *page;

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page)
		return -ENOMEM;
	kvm->coalesced_mmio_ring = page_address(page);

	/*
	 * We're using this spinlock to sync access to the coalesced ring.
	 * The list doesn't need its own lock since device registration and
	 * unregistration should only happen when kvm->slots_lock is held.
	 */
	spin_lock_init(&kvm->ring_lock);
	INIT_LIST_HEAD(&kvm->coalesced_zones);

	return 0;
}

void kvm_coalesced_mmio_free(struct kvm *kvm)
{
	if (kvm->coalesced_mmio_ring)
		free_page((unsigned long)kvm->coalesced_mmio_ring);
}

int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
					 struct kvm_coalesced_mmio_zone *zone)
{
	int ret;
	struct kvm_coalesced_mmio_dev *dev;

	dev = kzalloc(sizeof(struct kvm_coalesced_mmio_dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	kvm_iodevice_init(&dev->dev, &coalesced_mmio_ops);
	dev->kvm = kvm;
	dev->zone = *zone;

	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, zone->addr,
				      zone->size, &dev->dev);
	if (ret < 0) {
		mutex_unlock(&kvm->slots_lock);
		kfree(dev);
		return ret;
	}
	list_add_tail(&dev->list, &kvm->coalesced_zones);
	mutex_unlock(&kvm->slots_lock);

	return 0;
}

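/*
 * These two handlers back the KVM_REGISTER_COALESCED_MMIO and
 * KVM_UNREGISTER_COALESCED_MMIO vm ioctls: registration adds one device
 * covering the zone to the MMIO bus, and unregistration below removes
 * every device whose zone contains the requested range.
 */
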
int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
					   struct kvm_coalesced_mmio_zone *zone)
{
	struct kvm_coalesced_mmio_dev *dev, *tmp;

	mutex_lock(&kvm->slots_lock);

	list_for_each_entry_safe(dev, tmp, &kvm->coalesced_zones, list)
		if (coalesced_mmio_in_range(dev, zone->addr, zone->size)) {
			kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &dev->dev);
			kvm_iodevice_destructor(&dev->dev);
		}

	mutex_unlock(&kvm->slots_lock);

	return 0;
}