#include <linux/types.h>
struct device;
+struct intel_vgpu;
/*
 * Specific GVT-g MPT modules function collections. Currently GVT-g supports
 * both Xen and KVM by providing dedicated hypervisor-related MPT modules.
 */
struct intel_gvt_mpt {
int (*host_init)(struct device *dev, void *gvt);
void (*host_exit)(struct device *dev, void *gvt);
- void (*detach_vgpu)(void *vgpu);
- int (*inject_msi)(unsigned long handle, u32 addr, u16 data);
- int (*enable_page_track)(unsigned long handle, u64 gfn);
- int (*disable_page_track)(unsigned long handle, u64 gfn);
- int (*read_gpa)(unsigned long handle, unsigned long gpa, void *buf,
+ void (*detach_vgpu)(struct intel_vgpu *vgpu);
+ int (*inject_msi)(struct intel_vgpu *vgpu, u32 addr, u16 data);
+ int (*enable_page_track)(struct intel_vgpu *vgpu, u64 gfn);
+ int (*disable_page_track)(struct intel_vgpu *vgpu, u64 gfn);
+ int (*read_gpa)(struct intel_vgpu *vgpu, unsigned long gpa, void *buf,
unsigned long len);
- int (*write_gpa)(unsigned long handle, unsigned long gpa, void *buf,
+ int (*write_gpa)(struct intel_vgpu *vgpu, unsigned long gpa, void *buf,
unsigned long len);
- unsigned long (*gfn_to_mfn)(unsigned long handle, unsigned long gfn);
+ unsigned long (*gfn_to_mfn)(struct intel_vgpu *vgpu, unsigned long gfn);
- int (*dma_map_guest_page)(unsigned long handle, unsigned long gfn,
+ int (*dma_map_guest_page)(struct intel_vgpu *vgpu, unsigned long gfn,
unsigned long size, dma_addr_t *dma_addr);
- void (*dma_unmap_guest_page)(unsigned long handle, dma_addr_t dma_addr);
+ void (*dma_unmap_guest_page)(struct intel_vgpu *vgpu,
+ dma_addr_t dma_addr);
- int (*dma_pin_guest_page)(unsigned long handle, dma_addr_t dma_addr);
+ int (*dma_pin_guest_page)(struct intel_vgpu *vgpu, dma_addr_t dma_addr);
- int (*set_opregion)(void *vgpu);
- int (*set_edid)(void *vgpu, int port_num);
- int (*get_vfio_device)(void *vgpu);
- void (*put_vfio_device)(void *vgpu);
- bool (*is_valid_gfn)(unsigned long handle, unsigned long gfn);
+ int (*set_opregion)(struct intel_vgpu *vgpu);
+ int (*set_edid)(struct intel_vgpu *vgpu, int port_num);
+ int (*get_vfio_device)(struct intel_vgpu *vgpu);
+ void (*put_vfio_device)(struct intel_vgpu *vgpu);
+ bool (*is_valid_gfn)(struct intel_vgpu *vgpu, unsigned long gfn);
};
#endif /* _GVT_HYPERCALL_H_ */
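For context, the KVM backend wires these hooks into a single ops table. A minimal sketch of that wiring, assuming a one-hook-per-field initializer built from the kvmgt_* helpers converted in this patch (the table definition itself is outside the hunks shown):

static const struct intel_gvt_mpt kvmgt_mpt = {
        /* .host_init / .host_exit omitted; their bodies are not shown here */
        .detach_vgpu = kvmgt_detach_vgpu,
        .inject_msi = kvmgt_inject_msi,
        .enable_page_track = kvmgt_page_track_add,
        .disable_page_track = kvmgt_page_track_remove,
        .read_gpa = kvmgt_read_gpa,
        .write_gpa = kvmgt_write_gpa,
        .gfn_to_mfn = kvmgt_gfn_to_pfn,
        .dma_map_guest_page = kvmgt_dma_map_guest_page,
        .dma_unmap_guest_page = kvmgt_dma_unmap_guest_page,
        .dma_pin_guest_page = kvmgt_dma_pin_guest_page,
        .set_opregion = kvmgt_set_opregion,
        .set_edid = kvmgt_set_edid,
        .get_vfio_device = kvmgt_get_vfio_device,
        .put_vfio_device = kvmgt_put_vfio_device,
        .is_valid_gfn = kvmgt_is_valid_gfn,
};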
struct kref ref;
};
-static inline bool handle_valid(unsigned long handle)
-{
- return !!(handle & ~0xff);
-}
-
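The removed check relied on the old encoding: the handle was the struct intel_vgpu pointer itself cast to unsigned long (see the removed "vgpu->handle = (unsigned long)vgpu" below), with the low byte reserved for sentinel values, so "valid" meant "some upper bit is set". The conversion replaces this pointer punning with an explicit flag. A sketch of the two checks side by side, assuming struct intel_vgpu now carries the boolean attached field the rest of this patch uses:

/* Old scheme: a valid handle was a real vgpu pointer, so it always
 * had bits set above the low byte. */
static inline bool old_handle_valid(unsigned long handle)
{
        return !!(handle & ~0xff);
}

/* New scheme (sketch): an explicit flag, set in kvmgt_guest_init()
 * and cleared on release, makes the state self-describing. */
static inline bool vgpu_attached(const struct intel_vgpu *vgpu)
{
        return vgpu->attached;
}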
static ssize_t available_instances_show(struct mdev_type *mtype,
struct mdev_type_attribute *attr,
char *buf)
return 0;
}
-static int kvmgt_get_vfio_device(void *p_vgpu)
+static int kvmgt_get_vfio_device(struct intel_vgpu *vgpu)
{
- struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
-
vgpu->vfio_device = vfio_device_get_from_dev(
mdev_dev(vgpu->mdev));
if (!vgpu->vfio_device) {
}
-static int kvmgt_set_opregion(void *p_vgpu)
+static int kvmgt_set_opregion(struct intel_vgpu *vgpu)
{
- struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
void *base;
int ret;
return ret;
}
-static int kvmgt_set_edid(void *p_vgpu, int port_num)
+static int kvmgt_set_edid(struct intel_vgpu *vgpu, int port_num)
{
- struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num);
struct vfio_edid_region *base;
int ret;
return ret;
}
-static void kvmgt_put_vfio_device(void *data)
+static void kvmgt_put_vfio_device(struct intel_vgpu *vgpu)
{
- struct intel_vgpu *vgpu = data;
-
if (WARN_ON(!vgpu->vfio_device))
return;
{
struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
- if (handle_valid(vgpu->handle))
+ if (vgpu->attached)
return -EBUSY;
intel_gvt_destroy_vgpu(vgpu);
struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
int ret;
- if (!handle_valid(vgpu->handle))
+ if (!vgpu->attached)
return;
if (atomic_cmpxchg(&vgpu->released, 0, 1))
vfio_group_put_external_user(vgpu->vfio_group);
vgpu->kvm = NULL;
- vgpu->handle = 0;
+ vgpu->attached = false;
}
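A short sketch of the double-release guard visible above: atomic_cmpxchg() returns the previous value, so only the first caller to flip "released" from 0 to 1 performs teardown. The helper name and the elided teardown steps are illustrative:

static void vgpu_release_once(struct intel_vgpu *vgpu)
{
        /* A non-zero return means another path already released us. */
        if (atomic_cmpxchg(&vgpu->released, 0, 1))
                return;

        /* ... teardown elided: drop the vfio group reference, etc. ... */
        vgpu->kvm = NULL;
        vgpu->attached = false;
}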
static void intel_vgpu_close_device(struct mdev_device *mdev)
intel_gvt_cleanup_vgpu_type_groups((struct intel_gvt *)gvt);
}
-static int kvmgt_page_track_add(unsigned long handle, u64 gfn)
+static int kvmgt_page_track_add(struct intel_vgpu *info, u64 gfn)
{
- struct intel_vgpu *info;
- struct kvm *kvm;
+ struct kvm *kvm = info->kvm;
struct kvm_memory_slot *slot;
int idx;
- if (!handle_valid(handle))
+ if (!info->attached)
return -ESRCH;
- info = (struct intel_vgpu *)handle;
- kvm = info->kvm;
-
idx = srcu_read_lock(&kvm->srcu);
slot = gfn_to_memslot(kvm, gfn);
if (!slot) {
return 0;
}
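Both page-track helpers share the SRCU pattern shown above: the memslot lookup must happen inside an SRCU read-side section so that a concurrent memslot update cannot free the slot underneath the reader. A condensed sketch of that pattern (the helper name is illustrative):

static bool gfn_has_memslot(struct kvm *kvm, gfn_t gfn)
{
        struct kvm_memory_slot *slot;
        bool ret;
        int idx;

        idx = srcu_read_lock(&kvm->srcu);
        slot = gfn_to_memslot(kvm, gfn);
        ret = slot != NULL;
        srcu_read_unlock(&kvm->srcu, idx);

        return ret;
}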
-static int kvmgt_page_track_remove(unsigned long handle, u64 gfn)
+static int kvmgt_page_track_remove(struct intel_vgpu *info, u64 gfn)
{
- struct intel_vgpu *info;
- struct kvm *kvm;
+ struct kvm *kvm = info->kvm;
struct kvm_memory_slot *slot;
int idx;
- if (!handle_valid(handle))
+ if (!info->attached)
return 0;
- info = (struct intel_vgpu *)handle;
- kvm = info->kvm;
-
idx = srcu_read_lock(&kvm->srcu);
slot = gfn_to_memslot(kvm, gfn);
if (!slot) {
mutex_lock(&vgpu->gvt->lock);
for_each_active_vgpu(vgpu->gvt, itr, id) {
- if (!handle_valid(itr->handle))
+ if (!itr->attached)
continue;
if (kvm && kvm == itr->kvm) {
static int kvmgt_guest_init(struct mdev_device *mdev)
{
- struct intel_vgpu *vgpu;
- struct kvm *kvm;
+ struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
+ struct kvm *kvm = vgpu->kvm;
- vgpu = mdev_get_drvdata(mdev);
- if (handle_valid(vgpu->handle))
+ if (vgpu->attached)
return -EEXIST;
- kvm = vgpu->kvm;
if (!kvm || kvm->mm != current->mm) {
gvt_vgpu_err("KVM is required to use Intel vGPU\n");
return -ESRCH;
if (__kvmgt_vgpu_exist(vgpu, kvm))
return -EEXIST;
- vgpu->handle = (unsigned long)vgpu;
+ vgpu->attached = true;
kvm_get_kvm(vgpu->kvm);
kvmgt_protect_table_init(vgpu);
return true;
}
-static void kvmgt_detach_vgpu(void *p_vgpu)
+static void kvmgt_detach_vgpu(struct intel_vgpu *vgpu)
{
int i;
- struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
if (!vgpu->region)
return;
vgpu->region = NULL;
}
-static int kvmgt_inject_msi(unsigned long handle, u32 addr, u16 data)
+static int kvmgt_inject_msi(struct intel_vgpu *vgpu, u32 addr, u16 data)
{
- struct intel_vgpu *vgpu;
-
- if (!handle_valid(handle))
+ if (!vgpu->attached)
return -ESRCH;
- vgpu = (struct intel_vgpu *)handle;
-
/*
 * When the guest is powered off, msi_trigger is set to NULL, but the
 * vgpu's config and MMIO registers aren't restored to defaults during
 * guest poweroff. If the vgpu is reused by the next VM, an injection
 * may still arrive through the out-of-date msi_trigger, so skip it
 * silently.
 */
return -EFAULT;
}
-static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn)
+static unsigned long kvmgt_gfn_to_pfn(struct intel_vgpu *vgpu,
+ unsigned long gfn)
{
- struct intel_vgpu *vgpu;
kvm_pfn_t pfn;
- if (!handle_valid(handle))
+ if (!vgpu->attached)
return INTEL_GVT_INVALID_ADDR;
- vgpu = (struct intel_vgpu *)handle;
-
pfn = gfn_to_pfn(vgpu->kvm, gfn);
if (is_error_noslot_pfn(pfn))
return INTEL_GVT_INVALID_ADDR;
return pfn;
}
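The translation above collapses KVM's several error-pfn encodings into GVT's single sentinel, so callers only ever compare against INTEL_GVT_INVALID_ADDR. A condensed sketch of that contract (helper name is illustrative):

static unsigned long translate_gfn(struct kvm *kvm, unsigned long gfn)
{
        kvm_pfn_t pfn = gfn_to_pfn(kvm, gfn);

        /* Error and no-slot pfns all map to the one GVT sentinel. */
        return is_error_noslot_pfn(pfn) ? INTEL_GVT_INVALID_ADDR : pfn;
}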
-static int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn,
+static int kvmgt_dma_map_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
unsigned long size, dma_addr_t *dma_addr)
{
- struct intel_vgpu *vgpu;
struct gvt_dma *entry;
int ret;
- if (!handle_valid(handle))
+ if (!vgpu->attached)
return -EINVAL;
- vgpu = (struct intel_vgpu *)handle;
-
mutex_lock(&vgpu->cache_lock);
entry = __gvt_cache_find_gfn(vgpu, gfn);
return ret;
}
-static int kvmgt_dma_pin_guest_page(unsigned long handle, dma_addr_t dma_addr)
+static int kvmgt_dma_pin_guest_page(struct intel_vgpu *vgpu,
+ dma_addr_t dma_addr)
{
- struct intel_vgpu *vgpu;
struct gvt_dma *entry;
int ret = 0;
- if (!handle_valid(handle))
+ if (!vgpu->attached)
return -ENODEV;
- vgpu = (struct intel_vgpu *)handle;
-
mutex_lock(&vgpu->cache_lock);
entry = __gvt_cache_find_dma_addr(vgpu, dma_addr);
if (entry)
__gvt_cache_remove_entry(entry->vgpu, entry);
}
-static void kvmgt_dma_unmap_guest_page(unsigned long handle, dma_addr_t dma_addr)
+static void kvmgt_dma_unmap_guest_page(struct intel_vgpu *vgpu,
+ dma_addr_t dma_addr)
{
- struct intel_vgpu *vgpu;
struct gvt_dma *entry;
- if (!handle_valid(handle))
+ if (!vgpu->attached)
return;
- vgpu = (struct intel_vgpu *)handle;
-
mutex_lock(&vgpu->cache_lock);
entry = __gvt_cache_find_dma_addr(vgpu, dma_addr);
if (entry)
mutex_unlock(&vgpu->cache_lock);
}
-static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa,
+static int kvmgt_rw_gpa(struct intel_vgpu *vgpu, unsigned long gpa,
void *buf, unsigned long len, bool write)
{
- struct intel_vgpu *vgpu;
-
- if (!handle_valid(handle))
+ if (!vgpu->attached)
return -ESRCH;
-
- vgpu = (struct intel_vgpu *)handle;
-
return vfio_dma_rw(vgpu->vfio_group, gpa, buf, len, write);
}
-static int kvmgt_read_gpa(unsigned long handle, unsigned long gpa,
+static int kvmgt_read_gpa(struct intel_vgpu *vgpu, unsigned long gpa,
void *buf, unsigned long len)
{
- return kvmgt_rw_gpa(handle, gpa, buf, len, false);
+ return kvmgt_rw_gpa(vgpu, gpa, buf, len, false);
}
-static int kvmgt_write_gpa(unsigned long handle, unsigned long gpa,
+static int kvmgt_write_gpa(struct intel_vgpu *vgpu, unsigned long gpa,
void *buf, unsigned long len)
{
- return kvmgt_rw_gpa(handle, gpa, buf, len, true);
+ return kvmgt_rw_gpa(vgpu, gpa, buf, len, true);
}
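Both wrappers now funnel into vfio_dma_rw() through the vgpu's vfio_group, differing only in the write flag. A hypothetical caller sketch (read_guest_pte and its gpa argument are illustrative, not part of the patch):

static int read_guest_pte(struct intel_vgpu *vgpu, unsigned long gpa,
                          u64 *pte)
{
        /* kvmgt_read_gpa() forwards to kvmgt_rw_gpa(..., false). */
        return kvmgt_read_gpa(vgpu, gpa, pte, sizeof(*pte));
}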
-static bool kvmgt_is_valid_gfn(unsigned long handle, unsigned long gfn)
+static bool kvmgt_is_valid_gfn(struct intel_vgpu *vgpu, unsigned long gfn)
{
- struct intel_vgpu *vgpu;
- struct kvm *kvm;
+ struct kvm *kvm = vgpu->kvm;
int idx;
bool ret;
- if (!handle_valid(handle))
+ if (!vgpu->attached)
return false;
- vgpu = (struct intel_vgpu *)handle;
- kvm = vgpu->kvm;
-
idx = srcu_read_lock(&kvm->srcu);
ret = kvm_is_visible_gfn(kvm, gfn);
srcu_read_unlock(&kvm->srcu, idx);
trace_inject_msi(vgpu->id, addr, data);
- ret = intel_gvt_host.mpt->inject_msi(vgpu->handle, addr, data);
+ ret = intel_gvt_host.mpt->inject_msi(vgpu, addr, data);
if (ret)
return ret;
return 0;
static inline int intel_gvt_hypervisor_enable_page_track(
struct intel_vgpu *vgpu, unsigned long gfn)
{
- return intel_gvt_host.mpt->enable_page_track(vgpu->handle, gfn);
+ return intel_gvt_host.mpt->enable_page_track(vgpu, gfn);
}
/**
static inline int intel_gvt_hypervisor_disable_page_track(
struct intel_vgpu *vgpu, unsigned long gfn)
{
- return intel_gvt_host.mpt->disable_page_track(vgpu->handle, gfn);
+ return intel_gvt_host.mpt->disable_page_track(vgpu, gfn);
}
/**
static inline int intel_gvt_hypervisor_read_gpa(struct intel_vgpu *vgpu,
unsigned long gpa, void *buf, unsigned long len)
{
- return intel_gvt_host.mpt->read_gpa(vgpu->handle, gpa, buf, len);
+ return intel_gvt_host.mpt->read_gpa(vgpu, gpa, buf, len);
}
/**
static inline int intel_gvt_hypervisor_write_gpa(struct intel_vgpu *vgpu,
unsigned long gpa, void *buf, unsigned long len)
{
- return intel_gvt_host.mpt->write_gpa(vgpu->handle, gpa, buf, len);
+ return intel_gvt_host.mpt->write_gpa(vgpu, gpa, buf, len);
}
/**
static inline unsigned long intel_gvt_hypervisor_gfn_to_mfn(
struct intel_vgpu *vgpu, unsigned long gfn)
{
- return intel_gvt_host.mpt->gfn_to_mfn(vgpu->handle, gfn);
+ return intel_gvt_host.mpt->gfn_to_mfn(vgpu, gfn);
}
/**
struct intel_vgpu *vgpu, unsigned long gfn, unsigned long size,
dma_addr_t *dma_addr)
{
- return intel_gvt_host.mpt->dma_map_guest_page(vgpu->handle, gfn, size,
+ return intel_gvt_host.mpt->dma_map_guest_page(vgpu, gfn, size,
dma_addr);
}
static inline void intel_gvt_hypervisor_dma_unmap_guest_page(
struct intel_vgpu *vgpu, dma_addr_t dma_addr)
{
- intel_gvt_host.mpt->dma_unmap_guest_page(vgpu->handle, dma_addr);
+ intel_gvt_host.mpt->dma_unmap_guest_page(vgpu, dma_addr);
}
/**
intel_gvt_hypervisor_dma_pin_guest_page(struct intel_vgpu *vgpu,
dma_addr_t dma_addr)
{
- return intel_gvt_host.mpt->dma_pin_guest_page(vgpu->handle, dma_addr);
+ return intel_gvt_host.mpt->dma_pin_guest_page(vgpu, dma_addr);
}
/**
if (!intel_gvt_host.mpt->is_valid_gfn)
return true;
- return intel_gvt_host.mpt->is_valid_gfn(vgpu->handle, gfn);
+ return intel_gvt_host.mpt->is_valid_gfn(vgpu, gfn);
}
#endif /* _GVT_MPT_H_ */
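Note the asymmetry in the last wrapper: is_valid_gfn is the one optional hook, NULL-checked before use with a conservative "valid" fallback, while the other hooks are assumed present once the MPT module registers. A hypothetical caller sketch, assuming the enclosing wrapper follows the intel_gvt_hypervisor_* naming used above (the -EINVAL policy is illustrative):

static int check_guest_gfn(struct intel_vgpu *vgpu, unsigned long gfn)
{
        if (!intel_gvt_hypervisor_is_valid_gfn(vgpu, gfn))
                return -EINVAL;

        return 0;
}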