#include <linux/vfio.h>
#include <linux/mdev.h>
-struct intel_gvt_host intel_gvt_host;
-
static const struct intel_gvt_ops intel_gvt_ops = {
.emulate_cfg_read = intel_vgpu_emulate_cfg_read,
.emulate_cfg_write = intel_vgpu_emulate_cfg_write,
* resources owned by a GVT device.
*
*/
-void intel_gvt_clean_device(struct drm_i915_private *i915)
+static void intel_gvt_clean_device(struct drm_i915_private *i915)
{
struct intel_gvt *gvt = fetch_and_zero(&i915->gvt);
if (drm_WARN_ON(&i915->drm, !gvt))
return;
+ intel_gvt_hypervisor_host_exit(i915->drm.dev, gvt);
intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu);
intel_gvt_clean_vgpu_types(gvt);
* Zero on success, negative error code if failed.
*
*/
-int intel_gvt_init_device(struct drm_i915_private *i915)
+static int intel_gvt_init_device(struct drm_i915_private *i915)
{
struct intel_gvt *gvt;
struct intel_vgpu *vgpu;
intel_gvt_debugfs_init(gvt);
+ ret = intel_gvt_hypervisor_host_init(i915->drm.dev, gvt,
+ &intel_gvt_ops);
+ if (ret)
+ goto out_destroy_idle_vgpu;
+
gvt_dbg_core("gvt device initialization is done\n");
- intel_gvt_host.dev = i915->drm.dev;
- intel_gvt_host.initialized = true;
return 0;
+out_destroy_idle_vgpu:
+ intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu);
+ intel_gvt_debugfs_clean(gvt);
out_clean_types:
intel_gvt_clean_vgpu_types(gvt);
out_clean_thread:
return ret;
}
-int
-intel_gvt_pm_resume(struct intel_gvt *gvt)
+static void intel_gvt_pm_resume(struct drm_i915_private *i915)
{
+ struct intel_gvt *gvt = i915->gvt;
+
intel_gvt_restore_fence(gvt);
intel_gvt_restore_mmio(gvt);
intel_gvt_restore_ggtt(gvt);
- return 0;
-}
-
-int
-intel_gvt_register_hypervisor(const struct intel_gvt_mpt *m)
-{
- int ret;
- void *gvt;
-
- if (!intel_gvt_host.initialized)
- return -ENODEV;
-
- intel_gvt_host.mpt = m;
- gvt = (void *)kdev_to_i915(intel_gvt_host.dev)->gvt;
-
- ret = intel_gvt_hypervisor_host_init(intel_gvt_host.dev, gvt,
- &intel_gvt_ops);
- if (ret < 0)
- return -ENODEV;
- return 0;
}
-EXPORT_SYMBOL_GPL(intel_gvt_register_hypervisor);
-void
-intel_gvt_unregister_hypervisor(void)
-{
- void *gvt = (void *)kdev_to_i915(intel_gvt_host.dev)->gvt;
- intel_gvt_hypervisor_host_exit(intel_gvt_host.dev, gvt);
-}
-EXPORT_SYMBOL_GPL(intel_gvt_unregister_hypervisor);
+const struct intel_vgpu_ops intel_gvt_vgpu_ops = {
+ .init_device = intel_gvt_init_device,
+ .clean_device = intel_gvt_clean_device,
+ .pm_resume = intel_gvt_pm_resume,
+};
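
With intel_gvt_vgpu_ops defined above, the module side only has to hand this table to i915 at load time and take it back at unload. A minimal sketch, assuming module init/exit hooks named kvmgt_init()/kvmgt_exit() (not part of this hunk, and omitting the mdev/VFIO driver registration the real hooks also perform; needs <linux/module.h>):

static int __init kvmgt_init(void)
{
	/*
	 * Hand the vGPU ops defined above to i915.  intel_gvt_set_ops()
	 * also walks the already-probed i915 devices and initializes GVT
	 * on each of them.
	 */
	return intel_gvt_set_ops(&intel_gvt_vgpu_ops);
}

static void __exit kvmgt_exit(void)
{
	/* Tear GVT down on every device and drop the ops pointer again. */
	intel_gvt_clear_ops(&intel_gvt_vgpu_ops);
}

module_init(kvmgt_init);
module_exit(kvmgt_exit);
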
#include "i915_drv.h"
#include "i915_vgpu.h"
#include "intel_gvt.h"
-#include "gvt/gvt.h"
+#include "gem/i915_gem_dmabuf.h"
+#include "gt/intel_context.h"
+#include "gt/intel_ring.h"
+#include "gt/shmem_utils.h"
/**
* DOC: Intel GVT-g host support
* doc is available on https://01.org/group/2230/documentation-list.
*/
+static LIST_HEAD(intel_gvt_devices);
+static const struct intel_vgpu_ops *intel_gvt_ops;
+static DEFINE_MUTEX(intel_gvt_mutex);
+
static bool is_supported_device(struct drm_i915_private *dev_priv)
{
if (IS_BROADWELL(dev_priv))
return true;

return false;
}
-/**
- * intel_gvt_sanitize_options - sanitize GVT related options
- * @dev_priv: drm i915 private data
- *
- * This function is called at the i915 options sanitize stage.
- */
-void intel_gvt_sanitize_options(struct drm_i915_private *dev_priv)
-{
- if (!dev_priv->params.enable_gvt)
- return;
-
- if (intel_vgpu_active(dev_priv)) {
- drm_info(&dev_priv->drm, "GVT-g is disabled for guest\n");
- goto bail;
- }
-
- if (!is_supported_device(dev_priv)) {
- drm_info(&dev_priv->drm,
- "Unsupported device. GVT-g is disabled\n");
- goto bail;
- }
-
- return;
-bail:
- dev_priv->params.enable_gvt = 0;
-}
-
static void free_initial_hw_state(struct drm_i915_private *dev_priv)
{
struct i915_virtual_gpu *vgpu = &dev_priv->vgpu;
return ret;
}
-/**
- * intel_gvt_init - initialize GVT components
- * @dev_priv: drm i915 private data
- *
- * This function is called at the initialization stage to create a GVT device.
- *
- * Returns:
- * Zero on success, negative error code if failed.
- *
- */
-int intel_gvt_init(struct drm_i915_private *dev_priv)
+static void intel_gvt_init_device(struct drm_i915_private *dev_priv)
{
- int ret;
-
- if (i915_inject_probe_failure(dev_priv))
- return -ENODEV;
-
if (!dev_priv->params.enable_gvt) {
drm_dbg(&dev_priv->drm,
"GVT-g is disabled by kernel params\n");
- return 0;
+ return;
+ }
+
+ if (intel_vgpu_active(dev_priv)) {
+ drm_info(&dev_priv->drm, "GVT-g is disabled for guest\n");
+ return;
+ }
+
+ if (!is_supported_device(dev_priv)) {
+ drm_info(&dev_priv->drm,
+ "Unsupported device. GVT-g is disabled\n");
+ return;
}
if (intel_uc_wants_guc_submission(&to_gt(dev_priv)->uc)) {
drm_err(&dev_priv->drm,
- "i915 GVT-g loading failed due to Graphics virtualization is not yet supported with GuC submission\n");
- return -EIO;
+ "Graphics virtualization is not yet supported with GuC submission\n");
+ return;
}
- ret = save_initial_hw_state(dev_priv);
- if (ret) {
- drm_dbg(&dev_priv->drm, "Fail to save initial HW state\n");
- goto err_save_hw_state;
+ if (save_initial_hw_state(dev_priv)) {
+ drm_dbg(&dev_priv->drm, "Failed to save initial HW state\n");
+ return;
}
- ret = intel_gvt_init_device(dev_priv);
- if (ret) {
+ if (intel_gvt_ops->init_device(dev_priv))
drm_dbg(&dev_priv->drm, "Fail to init GVT device\n");
- goto err_init_device;
+}
+
+static void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
+{
+ if (dev_priv->gvt)
+ intel_gvt_ops->clean_device(dev_priv);
+ free_initial_hw_state(dev_priv);
+}
+
+int intel_gvt_set_ops(const struct intel_vgpu_ops *ops)
+{
+ struct drm_i915_private *dev_priv;
+
+ mutex_lock(&intel_gvt_mutex);
+ if (intel_gvt_ops) {
+ mutex_unlock(&intel_gvt_mutex);
+ return -EINVAL;
}
+ intel_gvt_ops = ops;
- return 0;
+ list_for_each_entry(dev_priv, &intel_gvt_devices, vgpu.entry)
+ intel_gvt_init_device(dev_priv);
+ mutex_unlock(&intel_gvt_mutex);
-err_init_device:
- free_initial_hw_state(dev_priv);
-err_save_hw_state:
- dev_priv->params.enable_gvt = 0;
return 0;
}
+EXPORT_SYMBOL_NS_GPL(intel_gvt_set_ops, I915_GVT);
+
+void intel_gvt_clear_ops(const struct intel_vgpu_ops *ops)
+{
+ struct drm_i915_private *dev_priv;
+
+ mutex_lock(&intel_gvt_mutex);
+ if (intel_gvt_ops != ops) {
+ mutex_unlock(&intel_gvt_mutex);
+ return;
+ }
+
+ list_for_each_entry(dev_priv, &intel_gvt_devices, vgpu.entry)
+ intel_gvt_clean_device(dev_priv);
+
+ intel_gvt_ops = NULL;
+ mutex_unlock(&intel_gvt_mutex);
+}
+EXPORT_SYMBOL_NS_GPL(intel_gvt_clear_ops, I915_GVT);
-static inline bool intel_gvt_active(struct drm_i915_private *dev_priv)
+/**
+ * intel_gvt_init - initialize GVT components
+ * @dev_priv: drm i915 private data
+ *
+ * This function is called at the initialization stage to create a GVT device.
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ *
+ */
+int intel_gvt_init(struct drm_i915_private *dev_priv)
{
- return dev_priv->gvt;
+ if (i915_inject_probe_failure(dev_priv))
+ return -ENODEV;
+
+ mutex_lock(&intel_gvt_mutex);
+ list_add_tail(&dev_priv->vgpu.entry, &intel_gvt_devices);
+ if (intel_gvt_ops)
+ intel_gvt_init_device(dev_priv);
+ mutex_unlock(&intel_gvt_mutex);
+
+ return 0;
}
/**
*/
void intel_gvt_driver_remove(struct drm_i915_private *dev_priv)
{
- if (!intel_gvt_active(dev_priv))
- return;
-
+ mutex_lock(&intel_gvt_mutex);
intel_gvt_clean_device(dev_priv);
- free_initial_hw_state(dev_priv);
+ list_del(&dev_priv->vgpu.entry);
+ mutex_unlock(&intel_gvt_mutex);
}
/**
*/
void intel_gvt_resume(struct drm_i915_private *dev_priv)
{
- if (intel_gvt_active(dev_priv))
- intel_gvt_pm_resume(dev_priv->gvt);
+ mutex_lock(&intel_gvt_mutex);
+ if (dev_priv->gvt)
+ intel_gvt_ops->pm_resume(dev_priv);
+ mutex_unlock(&intel_gvt_mutex);
}
+
+/*
+ * Exported here so that the exports only get created when GVT support is
+ * actually enabled.
+ */
+EXPORT_SYMBOL_NS_GPL(i915_gem_object_alloc, I915_GVT);
+EXPORT_SYMBOL_NS_GPL(i915_gem_object_create_shmem, I915_GVT);
+EXPORT_SYMBOL_NS_GPL(i915_gem_object_init, I915_GVT);
+EXPORT_SYMBOL_NS_GPL(i915_gem_object_ggtt_pin_ww, I915_GVT);
+EXPORT_SYMBOL_NS_GPL(i915_gem_object_pin_map, I915_GVT);
+EXPORT_SYMBOL_NS_GPL(i915_gem_object_set_to_cpu_domain, I915_GVT);
+EXPORT_SYMBOL_NS_GPL(__i915_gem_object_flush_map, I915_GVT);
+EXPORT_SYMBOL_NS_GPL(__i915_gem_object_set_pages, I915_GVT);
+EXPORT_SYMBOL_NS_GPL(i915_gem_gtt_insert, I915_GVT);
+EXPORT_SYMBOL_NS_GPL(i915_gem_prime_export, I915_GVT);
+EXPORT_SYMBOL_NS_GPL(i915_gem_ww_ctx_init, I915_GVT);
+EXPORT_SYMBOL_NS_GPL(i915_gem_ww_ctx_backoff, I915_GVT);
+EXPORT_SYMBOL_NS_GPL(i915_gem_ww_ctx_fini, I915_GVT);
+EXPORT_SYMBOL_NS_GPL(i915_ppgtt_create, I915_GVT);
+EXPORT_SYMBOL_NS_GPL(i915_request_add, I915_GVT);
+EXPORT_SYMBOL_NS_GPL(i915_request_create, I915_GVT);
+EXPORT_SYMBOL_NS_GPL(i915_request_wait, I915_GVT);
+EXPORT_SYMBOL_NS_GPL(i915_reserve_fence, I915_GVT);
+EXPORT_SYMBOL_NS_GPL(i915_unreserve_fence, I915_GVT);
+EXPORT_SYMBOL_NS_GPL(i915_vm_release, I915_GVT);
+EXPORT_SYMBOL_NS_GPL(_i915_vma_move_to_active, I915_GVT);
+EXPORT_SYMBOL_NS_GPL(intel_context_create, I915_GVT);
+EXPORT_SYMBOL_NS_GPL(__intel_context_do_pin, I915_GVT);
+EXPORT_SYMBOL_NS_GPL(__intel_context_do_unpin, I915_GVT);
+EXPORT_SYMBOL_NS_GPL(intel_ring_begin, I915_GVT);
+EXPORT_SYMBOL_NS_GPL(intel_runtime_pm_get, I915_GVT);
+EXPORT_SYMBOL_NS_GPL(intel_runtime_pm_put_unchecked, I915_GVT);
+EXPORT_SYMBOL_NS_GPL(intel_uncore_forcewake_for_reg, I915_GVT);
+EXPORT_SYMBOL_NS_GPL(intel_uncore_forcewake_get, I915_GVT);
+EXPORT_SYMBOL_NS_GPL(intel_uncore_forcewake_put, I915_GVT);
+EXPORT_SYMBOL_NS_GPL(shmem_pin_map, I915_GVT);
+EXPORT_SYMBOL_NS_GPL(shmem_unpin_map, I915_GVT);
+EXPORT_SYMBOL_NS_GPL(__px_dma, I915_GVT);
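
Because the symbols above land in the I915_GVT namespace rather than the global one, the module that consumes them (KVMGT in this series) must opt in explicitly, or modpost will refuse the references. The consumer side needs only:

/* In the module that uses the I915_GVT exports (needs <linux/module.h>). */
MODULE_IMPORT_NS(I915_GVT);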