From b139bd300f4f579c526d153efa4960c380e2b6e3 Mon Sep 17 00:00:00 2001
From: Jan Kiszka
Date: Mon, 27 Aug 2012 08:28:40 +0200
Subject: [PATCH] kvm: i386: Add services required for PCI device assignment

These helpers abstract the interaction of the upcoming pci-assign code
with the KVM kernel services. They are put under i386 only, as other
archs will implement device pass-through via VFIO rather than this
classic interface.

Signed-off-by: Jan Kiszka
Acked-by: Michael S. Tsirkin
Signed-off-by: Avi Kivity
---
 target-i386/kvm.c      | 141 +++++++++++++++++++++++++++++++++++++++++
 target-i386/kvm_i386.h |  22 +++++++
 2 files changed, 163 insertions(+)

diff --git a/target-i386/kvm.c b/target-i386/kvm.c
index ffc294ec39..6790180b0a 100644
--- a/target-i386/kvm.c
+++ b/target-i386/kvm.c
@@ -31,6 +31,7 @@
 #include "hw/apic.h"
 #include "ioport.h"
 #include "hyperv.h"
+#include "hw/pci.h"
 
 //#define DEBUG_KVM
 
@@ -2068,3 +2069,143 @@ void kvm_arch_init_irq_routing(KVMState *s)
     kvm_msi_via_irqfd_allowed = true;
     kvm_gsi_routing_allowed = true;
 }
+
+/* Classic KVM device assignment interface. Will remain x86 only. */
+int kvm_device_pci_assign(KVMState *s, PCIHostDeviceAddress *dev_addr,
+                          uint32_t flags, uint32_t *dev_id)
+{
+    struct kvm_assigned_pci_dev dev_data = {
+        .segnr = dev_addr->domain,
+        .busnr = dev_addr->bus,
+        .devfn = PCI_DEVFN(dev_addr->slot, dev_addr->function),
+        .flags = flags,
+    };
+    int ret;
+
+    dev_data.assigned_dev_id =
+        (dev_addr->domain << 16) | (dev_addr->bus << 8) | dev_data.devfn;
+
+    ret = kvm_vm_ioctl(s, KVM_ASSIGN_PCI_DEVICE, &dev_data);
+    if (ret < 0) {
+        return ret;
+    }
+
+    *dev_id = dev_data.assigned_dev_id;
+
+    return 0;
+}
+
+int kvm_device_pci_deassign(KVMState *s, uint32_t dev_id)
+{
+    struct kvm_assigned_pci_dev dev_data = {
+        .assigned_dev_id = dev_id,
+    };
+
+    return kvm_vm_ioctl(s, KVM_DEASSIGN_PCI_DEVICE, &dev_data);
+}
+
+static int kvm_assign_irq_internal(KVMState *s, uint32_t dev_id,
+                                   uint32_t irq_type, uint32_t guest_irq)
+{
+    struct kvm_assigned_irq assigned_irq = {
+        .assigned_dev_id = dev_id,
+        .guest_irq = guest_irq,
+        .flags = irq_type,
+    };
+
+    if (kvm_check_extension(s, KVM_CAP_ASSIGN_DEV_IRQ)) {
+        return kvm_vm_ioctl(s, KVM_ASSIGN_DEV_IRQ, &assigned_irq);
+    } else {
+        return kvm_vm_ioctl(s, KVM_ASSIGN_IRQ, &assigned_irq);
+    }
+}
+
+int kvm_device_intx_assign(KVMState *s, uint32_t dev_id, bool use_host_msi,
+                           uint32_t guest_irq)
+{
+    uint32_t irq_type = KVM_DEV_IRQ_GUEST_INTX |
+        (use_host_msi ? KVM_DEV_IRQ_HOST_MSI : KVM_DEV_IRQ_HOST_INTX);
+
+    return kvm_assign_irq_internal(s, dev_id, irq_type, guest_irq);
+}
+
+int kvm_device_intx_set_mask(KVMState *s, uint32_t dev_id, bool masked)
+{
+    struct kvm_assigned_pci_dev dev_data = {
+        .assigned_dev_id = dev_id,
+        .flags = masked ? KVM_DEV_ASSIGN_MASK_INTX : 0,
+    };
+
+    return kvm_vm_ioctl(s, KVM_ASSIGN_SET_INTX_MASK, &dev_data);
+}
+
+static int kvm_deassign_irq_internal(KVMState *s, uint32_t dev_id,
+                                     uint32_t type)
+{
+    struct kvm_assigned_irq assigned_irq = {
+        .assigned_dev_id = dev_id,
+        .flags = type,
+    };
+
+    return kvm_vm_ioctl(s, KVM_DEASSIGN_DEV_IRQ, &assigned_irq);
+}
+
+int kvm_device_intx_deassign(KVMState *s, uint32_t dev_id, bool use_host_msi)
+{
+    return kvm_deassign_irq_internal(s, dev_id, KVM_DEV_IRQ_GUEST_INTX |
+                                     (use_host_msi ?
+                                      KVM_DEV_IRQ_HOST_MSI : KVM_DEV_IRQ_HOST_INTX));
+}
+
+int kvm_device_msi_assign(KVMState *s, uint32_t dev_id, int virq)
+{
+    return kvm_assign_irq_internal(s, dev_id, KVM_DEV_IRQ_HOST_MSI |
+                                   KVM_DEV_IRQ_GUEST_MSI, virq);
+}
+
+int kvm_device_msi_deassign(KVMState *s, uint32_t dev_id)
+{
+    return kvm_deassign_irq_internal(s, dev_id, KVM_DEV_IRQ_GUEST_MSI |
+                                     KVM_DEV_IRQ_HOST_MSI);
+}
+
+bool kvm_device_msix_supported(KVMState *s)
+{
+    /* The kernel lacks a corresponding KVM_CAP, so we probe by calling
+     * KVM_ASSIGN_SET_MSIX_NR with an invalid parameter. */
+    return kvm_vm_ioctl(s, KVM_ASSIGN_SET_MSIX_NR, NULL) == -EFAULT;
+}
+
+int kvm_device_msix_init_vectors(KVMState *s, uint32_t dev_id,
+                                 uint32_t nr_vectors)
+{
+    struct kvm_assigned_msix_nr msix_nr = {
+        .assigned_dev_id = dev_id,
+        .entry_nr = nr_vectors,
+    };
+
+    return kvm_vm_ioctl(s, KVM_ASSIGN_SET_MSIX_NR, &msix_nr);
+}
+
+int kvm_device_msix_set_vector(KVMState *s, uint32_t dev_id, uint32_t vector,
+                               int virq)
+{
+    struct kvm_assigned_msix_entry msix_entry = {
+        .assigned_dev_id = dev_id,
+        .gsi = virq,
+        .entry = vector,
+    };
+
+    return kvm_vm_ioctl(s, KVM_ASSIGN_SET_MSIX_ENTRY, &msix_entry);
+}
+
+int kvm_device_msix_assign(KVMState *s, uint32_t dev_id)
+{
+    return kvm_assign_irq_internal(s, dev_id, KVM_DEV_IRQ_HOST_MSIX |
+                                   KVM_DEV_IRQ_GUEST_MSIX, 0);
+}
+
+int kvm_device_msix_deassign(KVMState *s, uint32_t dev_id)
+{
+    return kvm_deassign_irq_internal(s, dev_id, KVM_DEV_IRQ_GUEST_MSIX |
+                                     KVM_DEV_IRQ_HOST_MSIX);
+}
diff --git a/target-i386/kvm_i386.h b/target-i386/kvm_i386.h
index b82bbf401e..f6ab82f93c 100644
--- a/target-i386/kvm_i386.h
+++ b/target-i386/kvm_i386.h
@@ -11,6 +11,28 @@
 #ifndef QEMU_KVM_I386_H
 #define QEMU_KVM_I386_H
 
+#include "kvm.h"
+
 bool kvm_allows_irq0_override(void);
 
+int kvm_device_pci_assign(KVMState *s, PCIHostDeviceAddress *dev_addr,
+                          uint32_t flags, uint32_t *dev_id);
+int kvm_device_pci_deassign(KVMState *s, uint32_t dev_id);
+
+int kvm_device_intx_assign(KVMState *s, uint32_t dev_id,
+                           bool use_host_msi, uint32_t guest_irq);
+int kvm_device_intx_set_mask(KVMState *s, uint32_t dev_id, bool masked);
+int kvm_device_intx_deassign(KVMState *s, uint32_t dev_id, bool use_host_msi);
+
+int kvm_device_msi_assign(KVMState *s, uint32_t dev_id, int virq);
+int kvm_device_msi_deassign(KVMState *s, uint32_t dev_id);
+
+bool kvm_device_msix_supported(KVMState *s);
+int kvm_device_msix_init_vectors(KVMState *s, uint32_t dev_id,
+                                 uint32_t nr_vectors);
+int kvm_device_msix_set_vector(KVMState *s, uint32_t dev_id, uint32_t vector,
+                               int virq);
+int kvm_device_msix_assign(KVMState *s, uint32_t dev_id);
+int kvm_device_msix_deassign(KVMState *s, uint32_t dev_id);
+
 #endif
-- 
2.34.1
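
For reference, a minimal caller-side sketch of how the upcoming pci-assign
code might combine these helpers: hand a host device to the kernel, then
route its legacy interrupt. This is not part of the patch; the host address
(0000:01:00.0), guest IRQ 11, and the error handling are illustrative
assumptions only.

/* Hypothetical caller, not part of this patch.
 * Assumes "kvm_i386.h" and "hw/pci.h" are included. */
static int example_intx_assign(KVMState *s)
{
    PCIHostDeviceAddress addr = {
        .domain = 0, .bus = 1, .slot = 0, .function = 0, /* assumed */
    };
    uint32_t dev_id;
    int ret;

    /* Hand the host device to the kernel; dev_id identifies it in all
     * later assignment ioctls. */
    ret = kvm_device_pci_assign(s, &addr, 0, &dev_id);
    if (ret < 0) {
        return ret;
    }

    /* Route host INTx to guest IRQ 11; passing true instead would bounce
     * the host side via MSI while the guest still sees INTx. */
    ret = kvm_device_intx_assign(s, dev_id, false, 11);
    if (ret < 0) {
        kvm_device_pci_deassign(s, dev_id);
    }
    return ret;
}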
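The MSI-X path is a small protocol of its own: probe support, size the
vector table, fill each entry, then enable. Again a hypothetical sketch
with assumed inputs (dev_id from kvm_device_pci_assign(), virqs from the
caller's MSI routing setup):

/* Hypothetical caller, not part of this patch. */
static int example_msix_assign(KVMState *s, uint32_t dev_id,
                               int *virqs, uint32_t nr_vectors)
{
    uint32_t i;
    int ret;

    if (!kvm_device_msix_supported(s)) {
        return -ENOTSUP;
    }

    /* Size the vector table first... */
    ret = kvm_device_msix_init_vectors(s, dev_id, nr_vectors);
    if (ret < 0) {
        return ret;
    }
    /* ...then bind each table entry to its routing entry... */
    for (i = 0; i < nr_vectors; i++) {
        ret = kvm_device_msix_set_vector(s, dev_id, i, virqs[i]);
        if (ret < 0) {
            return ret;
        }
    }
    /* ...and only then enable MSI-X delivery for the device. */
    return kvm_device_msix_assign(s, dev_id);
}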