}
}
-void intel_vgpu_clean_execlist(struct intel_vgpu *vgpu)
+static void clean_execlist(struct intel_vgpu *vgpu)
{
enum intel_engine_id i;
struct intel_engine_cs *engine;
}
}
-int intel_vgpu_init_execlist(struct intel_vgpu *vgpu)
+static int init_execlist(struct intel_vgpu *vgpu)
{
enum intel_engine_id i;
struct intel_engine_cs *engine;
for_each_engine(engine, vgpu->gvt->dev_priv, i)
init_vgpu_execlist(vgpu, engine->id);

return 0;
}
+
+const struct intel_vgpu_submission_ops intel_vgpu_execlist_submission_ops = {
+ .name = "execlist",
+ .init = init_execlist,
+ .clean = clean_execlist,
+};
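The ops table is the extension point here: the enum added below already reserves INTEL_VGPU_GUC_SUBMISSION, so a second backend would register the same way. A hypothetical sketch, not part of this patch (guc_init()/guc_clean() and the table are assumed names):

	/* Hypothetical: how a future GuC backend would plug in. */
	static int guc_init(struct intel_vgpu *vgpu);
	static void guc_clean(struct intel_vgpu *vgpu);

	const struct intel_vgpu_submission_ops intel_vgpu_guc_submission_ops = {
		.name = "guc",
		.init = guc_init,
		.clean = guc_clean,
	};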
int weight;
};
+enum {
+ INTEL_VGPU_EXECLIST_SUBMISSION = 1,
+ INTEL_VGPU_GUC_SUBMISSION,
+};
+
+struct intel_vgpu_submission_ops {
+ const char *name;
+ int (*init)(struct intel_vgpu *vgpu);
+ void (*clean)(struct intel_vgpu *vgpu);
+};
+
struct intel_vgpu_submission {
struct intel_vgpu_execlist execlist[I915_NUM_ENGINES];
struct list_head workload_q_head[I915_NUM_ENGINES];
DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES);
void *ring_scan_buffer[I915_NUM_ENGINES];
int ring_scan_buffer_size[I915_NUM_ENGINES];
+ const struct intel_vgpu_submission_ops *ops;
+ int virtual_submission_interface;
+ bool active;
};
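The three new fields carry the runtime state of the abstraction: ops points at the selected backend, virtual_submission_interface records the guest's choice, and active gates every dispatch through the table. A minimal sketch of the guarded-dispatch pattern callers are expected to follow (the helper name is hypothetical):

	/* Hypothetical helper: never call through s->ops unless a
	 * backend has been selected and successfully initialized. */
	static void vgpu_submission_teardown(struct intel_vgpu *vgpu)
	{
		struct intel_vgpu_submission *s = &vgpu->submission;

		if (s->active)
			s->ops->clean(vgpu);
	}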
struct intel_vgpu {
static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
+ struct intel_vgpu_submission *s = &vgpu->submission;
u32 data = *(u32 *)p_data;
int ring_id = render_mmio_to_ring_id(vgpu->gvt, offset);
bool enable_execlist;
+ int ret;
write_vreg(vgpu, offset, p_data, bytes);
(enable_execlist ? "enabling" : "disabling"),
ring_id);
- if (enable_execlist)
- intel_vgpu_start_schedule(vgpu);
+ if (!enable_execlist)
+ return 0;
+
+ if (s->active)
+ return 0;
+
+ ret = intel_vgpu_select_submission_ops(vgpu,
+ INTEL_VGPU_EXECLIST_SUBMISSION);
+ if (ret)
+ return ret;
+
+ intel_vgpu_start_schedule(vgpu);
}
return 0;
}
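For context on the decode elided from this hunk: ring mode registers are masked registers, carrying a write-enable mask in bits 31:16, so the handler reacts only when the guest explicitly toggles the run-list bit. A sketch using the standard i915 helpers (the exact elided lines may differ):

	/* Only honor GFX_RUN_LIST_ENABLE when its mask bit was written. */
	if (data & _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE))
		enable_execlist = !!(data & GFX_RUN_LIST_ENABLE);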
{
struct intel_vgpu_submission *s = &vgpu->submission;
+ intel_vgpu_select_submission_ops(vgpu, 0);
i915_gem_context_put(s->shadow_ctx);
kmem_cache_destroy(s->workloads);
}
}
/**
+ * intel_vgpu_select_submission_ops - select virtual submission interface
+ * @vgpu: a vGPU
+ * @interface: expected vGPU virtual submission interface
+ *
+ * This function is called when a guest configures the virtual submission
+ * interface. Selecting interface zero deactivates the current submission
+ * interface without activating a new one.
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
+ unsigned int interface)
+{
+ struct intel_vgpu_submission *s = &vgpu->submission;
+ const struct intel_vgpu_submission_ops *ops[] = {
+ [INTEL_VGPU_EXECLIST_SUBMISSION] =
+ &intel_vgpu_execlist_submission_ops,
+ };
+ int ret;
+
+ if (WARN_ON(interface >= ARRAY_SIZE(ops)))
+ return -EINVAL;
+
+ if (s->active) {
+ s->ops->clean(vgpu);
+ s->active = false;
+ gvt_dbg_core("vgpu%d: de-select ops [ %s ] \n",
+ vgpu->id, s->ops->name);
+ }
+
+ if (interface == 0) {
+ s->ops = NULL;
+ s->virtual_submission_interface = 0;
+ gvt_dbg_core("vgpu%d: no submission ops\n", vgpu->id);
+ return 0;
+ }
+
+ ret = ops[interface]->init(vgpu);
+ if (ret)
+ return ret;
+
+ s->ops = ops[interface];
+ s->virtual_submission_interface = interface;
+ s->active = true;
+
+ gvt_dbg_core("vgpu%d: activate ops [ %s ]\n",
+ vgpu->id, s->ops->name);
+
+ return 0;
+}
+
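A usage sketch, mirroring the two call sites this patch adds: the ring mode MMIO handler activates execlist submission on the first guest enable, and the teardown path deselects by passing interface zero.

	/* Activate execlist submission when the guest enables it ... */
	ret = intel_vgpu_select_submission_ops(vgpu,
			INTEL_VGPU_EXECLIST_SUBMISSION);
	if (ret)
		return ret;

	/* ... and drop whatever backend is active at teardown. */
	intel_vgpu_select_submission_ops(vgpu, 0);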
+/**
* intel_vgpu_destroy_workload - destroy a vGPU workload
* @vgpu: a vGPU
*
void intel_vgpu_clean_submission(struct intel_vgpu *vgpu);
+int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
+ unsigned int interface);
+
+extern const struct intel_vgpu_submission_ops
+intel_vgpu_execlist_submission_ops;
+
struct intel_vgpu_workload *
intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
struct execlist_ctx_descriptor_format *desc);
idr_remove(&gvt->vgpu_idr, vgpu->id);
intel_vgpu_clean_sched_policy(vgpu);
intel_vgpu_clean_submission(vgpu);
- intel_vgpu_clean_execlist(vgpu);
intel_vgpu_clean_display(vgpu);
intel_vgpu_clean_opregion(vgpu);
intel_vgpu_clean_gtt(vgpu);
if (ret)
goto out_clean_gtt;
- ret = intel_vgpu_init_execlist(vgpu);
- if (ret)
- goto out_clean_display;
-
ret = intel_vgpu_setup_submission(vgpu);
if (ret)
- goto out_clean_execlist;
+ goto out_clean_display;
ret = intel_vgpu_init_sched_policy(vgpu);
if (ret)
- goto out_clean_shadow_ctx;
+ goto out_clean_submission;
mutex_unlock(&gvt->lock);
return vgpu;
-out_clean_shadow_ctx:
+out_clean_submission:
intel_vgpu_clean_submission(vgpu);
-out_clean_execlist:
- intel_vgpu_clean_execlist(vgpu);
out_clean_display:
intel_vgpu_clean_display(vgpu);
out_clean_gtt: