}
}
-
-void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
-{
- if (kvm_enabled())
- kvm_coalesce_mmio_region(addr, size);
-}
-
-void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
-{
- if (kvm_enabled())
- kvm_uncoalesce_mmio_region(addr, size);
-}
-
void qemu_flush_coalesced_mmio_buffer(void)
{
if (kvm_enabled())
kvm_flush_coalesced_mmio_buffer();
}
return ret;
}
-int kvm_coalesce_mmio_region(target_phys_addr_t start, ram_addr_t size)
+static void kvm_coalesce_mmio_region(MemoryListener *listener,
+ MemoryRegionSection *section,
+ target_phys_addr_t start, target_phys_addr_t size)
{
- int ret = -ENOSYS;
KVMState *s = kvm_state;
if (s->coalesced_mmio) {
struct kvm_coalesced_mmio_zone zone;
zone.addr = start;
zone.size = size;
zone.pad = 0;
- ret = kvm_vm_ioctl(s, KVM_REGISTER_COALESCED_MMIO, &zone);
+ (void)kvm_vm_ioctl(s, KVM_REGISTER_COALESCED_MMIO, &zone);
}
-
- return ret;
}
-int kvm_uncoalesce_mmio_region(target_phys_addr_t start, ram_addr_t size)
+static void kvm_uncoalesce_mmio_region(MemoryListener *listener,
+ MemoryRegionSection *section,
+ target_phys_addr_t start, target_phys_addr_t size)
{
- int ret = -ENOSYS;
KVMState *s = kvm_state;
if (s->coalesced_mmio) {
struct kvm_coalesced_mmio_zone zone;
zone.addr = start;
zone.size = size;
zone.pad = 0;
- ret = kvm_vm_ioctl(s, KVM_UNREGISTER_COALESCED_MMIO, &zone);
+ (void)kvm_vm_ioctl(s, KVM_UNREGISTER_COALESCED_MMIO, &zone);
}
-
- return ret;
}
int kvm_check_extension(KVMState *s, unsigned int extension)
.log_global_stop = kvm_log_global_stop,
.eventfd_add = kvm_mem_ioeventfd_add,
.eventfd_del = kvm_mem_ioeventfd_del,
+ .coalesced_mmio_add = kvm_coalesce_mmio_region,
+ .coalesced_mmio_del = kvm_uncoalesce_mmio_region,
.priority = 10,
};
return -ENOSYS;
}
-int kvm_coalesce_mmio_region(target_phys_addr_t start, ram_addr_t size)
-{
- return -ENOSYS;
-}
-
-int kvm_uncoalesce_mmio_region(target_phys_addr_t start, ram_addr_t size)
-{
- return -ENOSYS;
-}
-
int kvm_init(void)
{
return -ENOSYS;
void *kvm_arch_vmalloc(ram_addr_t size);
void kvm_setup_guest_memory(void *start, size_t size);
-int kvm_coalesce_mmio_region(target_phys_addr_t start, ram_addr_t size);
-int kvm_uncoalesce_mmio_region(target_phys_addr_t start, ram_addr_t size);
void kvm_flush_coalesced_mmio_buffer(void);
#endif
FlatRange *fr;
CoalescedMemoryRange *cmr;
AddrRange tmp;
+ MemoryRegionSection section;
FOR_EACH_FLAT_RANGE(fr, as->current_map) {
if (fr->mr == mr) {
- qemu_unregister_coalesced_mmio(int128_get64(fr->addr.start),
- int128_get64(fr->addr.size));
+ section = (MemoryRegionSection) {
+ .address_space = as->root,
+ .offset_within_address_space = int128_get64(fr->addr.start),
+ .size = int128_get64(fr->addr.size),
+ };
+
+ MEMORY_LISTENER_CALL(coalesced_mmio_del, Reverse, &section,
+ int128_get64(fr->addr.start),
+ int128_get64(fr->addr.size));
QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
tmp = addrrange_shift(cmr->addr,
int128_sub(fr->addr.start,
int128_make64(fr->offset_in_region)));
if (!addrrange_intersects(tmp, fr->addr)) {
continue;
}
tmp = addrrange_intersection(tmp, fr->addr);
- qemu_register_coalesced_mmio(int128_get64(tmp.start),
- int128_get64(tmp.size));
+ MEMORY_LISTENER_CALL(coalesced_mmio_add, Forward, &section,
+ int128_get64(tmp.start),
+ int128_get64(tmp.size));
}
}
}
bool match_data, uint64_t data, EventNotifier *e);
void (*eventfd_del)(MemoryListener *listener, MemoryRegionSection *section,
bool match_data, uint64_t data, EventNotifier *e);
+ void (*coalesced_mmio_add)(MemoryListener *listener, MemoryRegionSection *section,
+ target_phys_addr_t addr, target_phys_addr_t len);
+ void (*coalesced_mmio_del)(MemoryListener *listener, MemoryRegionSection *section,
+ target_phys_addr_t addr, target_phys_addr_t len);
/* Lower = earlier (during add), later (during del) */
unsigned priority;
MemoryRegion *address_space_filter;
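
Not part of the patch: a minimal sketch of how another MemoryListener could consume the two new hooks. The listener and function names here are hypothetical, and the sketch assumes the remaining MemoryListener callbacks are filled with no-op stubs elsewhere, since the dispatch macro in this tree appears to invoke every hook unconditionally. Note the directions used in memory.c above: coalesced_mmio_del is delivered in Reverse priority order and coalesced_mmio_add in Forward order, matching the "lower priority = earlier during add, later during del" rule documented on the struct.

/* Hypothetical example, not from the patch: trace coalesced MMIO zones
 * as the memory core adds and removes them.  Assumes the usual QEMU
 * includes (memory.h pulls in the types; stdio comes via qemu-common.h). */
static void trace_coalesced_mmio_add(MemoryListener *listener,
                                     MemoryRegionSection *section,
                                     target_phys_addr_t addr,
                                     target_phys_addr_t len)
{
    fprintf(stderr, "coalesced mmio add: " TARGET_FMT_plx "/" TARGET_FMT_plx "\n",
            addr, len);
}

static void trace_coalesced_mmio_del(MemoryListener *listener,
                                     MemoryRegionSection *section,
                                     target_phys_addr_t addr,
                                     target_phys_addr_t len)
{
    fprintf(stderr, "coalesced mmio del: " TARGET_FMT_plx "/" TARGET_FMT_plx "\n",
            addr, len);
}

static MemoryListener trace_coalesced_listener = {
    /* Remaining hooks omitted for brevity; in this tree they would need
     * no-op stubs, as kvm_memory_listener provides above. */
    .coalesced_mmio_add = trace_coalesced_mmio_add,
    .coalesced_mmio_del = trace_coalesced_mmio_del,
    .priority = 10,
};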