#define get_desc_from_elsp_dwords(ed, i) \
((struct execlist_ctx_descriptor_format *)&((ed)->data[i * 2]))
-static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
+static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
{
const int gmadr_bytes = workload->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
struct intel_shadow_bb_entry *entry_obj;
vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0, 4, 0);
if (IS_ERR(vma)) {
- return;
+ return PTR_ERR(vma);
}
/* FIXME: we are not tracking our pinned VMA leaving it
if (gmadr_bytes == 8)
entry_obj->bb_start_cmd_va[2] = 0;
}
+ return 0;
}
static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
return 0;
}
-static void prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
+static int prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
struct i915_vma *vma;
unsigned char *per_ctx_va =
wa_ctx->indirect_ctx.size;
if (wa_ctx->indirect_ctx.size == 0)
- return;
+ return 0;
vma = i915_gem_object_ggtt_pin(wa_ctx->indirect_ctx.obj, NULL,
0, CACHELINE_BYTES, 0);
if (IS_ERR(vma)) {
- return;
+ return PTR_ERR(vma);
}
/* FIXME: we are not tracking our pinned VMA leaving it
memset(per_ctx_va, 0, CACHELINE_BYTES);
update_wa_ctx_2_shadow_ctx(wa_ctx);
-}
-
-static int prepare_execlist_workload(struct intel_vgpu_workload *workload)
-{
- struct intel_vgpu *vgpu = workload->vgpu;
- struct execlist_ctx_descriptor_format ctx[2];
- int ring_id = workload->ring_id;
-
- intel_vgpu_pin_mm(workload->shadow_mm);
- intel_vgpu_sync_oos_pages(workload->vgpu);
- intel_vgpu_flush_post_shadow(workload->vgpu);
- prepare_shadow_batch_buffer(workload);
- prepare_shadow_wa_ctx(&workload->wa_ctx);
- if (!workload->emulate_schedule_in)
- return 0;
-
- ctx[0] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 1);
- ctx[1] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 0);
-
- return emulate_execlist_schedule_in(&vgpu->execlist[ring_id], ctx);
+ return 0;
}
static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
}
}
+static int prepare_execlist_workload(struct intel_vgpu_workload *workload)
+{
+ struct intel_vgpu *vgpu = workload->vgpu;
+ struct execlist_ctx_descriptor_format ctx[2];
+ int ring_id = workload->ring_id;
+ int ret;
+
+ ret = intel_vgpu_pin_mm(workload->shadow_mm);
+ if (ret) {
+ gvt_vgpu_err("fail to vgpu pin mm\n");
+ goto out;
+ }
+
+ ret = intel_vgpu_sync_oos_pages(workload->vgpu);
+ if (ret) {
+ gvt_vgpu_err("fail to vgpu sync oos pages\n");
+ goto err_unpin_mm;
+ }
+
+ ret = intel_vgpu_flush_post_shadow(workload->vgpu);
+ if (ret) {
+ gvt_vgpu_err("fail to flush post shadow\n");
+ goto err_unpin_mm;
+ }
+
+ ret = prepare_shadow_batch_buffer(workload);
+ if (ret) {
+ gvt_vgpu_err("fail to prepare_shadow_batch_buffer\n");
+ goto err_unpin_mm;
+ }
+
+ ret = prepare_shadow_wa_ctx(&workload->wa_ctx);
+ if (ret) {
+ gvt_vgpu_err("fail to prepare_shadow_wa_ctx\n");
+ goto err_shadow_batch;
+ }
+
+ if (!workload->emulate_schedule_in)
+ return 0;
+
+ ctx[0] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 1);
+ ctx[1] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 0);
+
+ ret = emulate_execlist_schedule_in(&vgpu->execlist[ring_id], ctx);
+ if (!ret)
+ goto out;
+
+ gvt_vgpu_err("fail to emulate execlist schedule in\n");
+
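+ /* Failure: unwind the prepare steps in reverse order. */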
+ release_shadow_wa_ctx(&workload->wa_ctx);
+err_shadow_batch:
+ release_shadow_batch_buffer(workload);
+err_unpin_mm:
+ intel_vgpu_unpin_mm(workload->shadow_mm);
+out:
+ return ret;
+}
+
static int complete_execlist_workload(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
gvt_dbg_el("complete workload %p status %d\n", workload,
workload->status);
- release_shadow_batch_buffer(workload);
- release_shadow_wa_ctx(&workload->wa_ctx);
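+ /*
+ * A workload that failed preparation has already had its shadow
+ * buffers released on prepare_execlist_workload()'s error path,
+ * so only release them here when the workload completed
+ * successfully.
+ */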
+ if (!workload->status) {
+ release_shadow_batch_buffer(workload);
+ release_shadow_wa_ctx(&workload->wa_ctx);
+ }
if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id))) {
/* if workload->status is not successful means HW GPU