[INTEL_ENGINE_CLASS_RENDER] = -1,
[INTEL_ENGINE_CLASS_COPY] = -1,
[INTEL_ENGINE_CLASS_COMPUTE] = -1,
+ [INTEL_ENGINE_CLASS_VIDEO] = -1,
};
int engine_counts[] = {
intel_engines_count(info, INTEL_ENGINE_CLASS_COPY),
[INTEL_ENGINE_CLASS_COMPUTE] =
intel_engines_count(info, INTEL_ENGINE_CLASS_COMPUTE),
+ [INTEL_ENGINE_CLASS_VIDEO] =
+ intel_engines_count(info, INTEL_ENGINE_CLASS_VIDEO),
};
/* For each queue, we look for the next instance that matches the class we
enum intel_engine_class engine_class = engine_classes[i];
assert(engine_class == INTEL_ENGINE_CLASS_RENDER ||
engine_class == INTEL_ENGINE_CLASS_COPY ||
- engine_class == INTEL_ENGINE_CLASS_COMPUTE);
+ engine_class == INTEL_ENGINE_CLASS_COMPUTE ||
+ engine_class == INTEL_ENGINE_CLASS_VIDEO);
if (engine_counts[engine_class] <= 0)
return false;
* * "gc" is for graphics queues with compute support
* * "g" is for graphics queues with no compute support
* * "c" is for compute queues with no graphics support
+ * * "v" is for video queues with no graphics support
*
* For example, ANV_QUEUE_OVERRIDE=gc=2,c=1 would override the number of
* advertised queues to be 2 queues with graphics+compute support, and 1 queue
* number of graphics+compute queues to be 0.
*/
static void
-anv_override_engine_counts(int *gc_count, int *g_count, int *c_count)
+anv_override_engine_counts(int *gc_count, int *g_count, int *c_count, int *v_count)
{
int gc_override = -1;
int g_override = -1;
int c_override = -1;
+ int v_override = -1;
char *env = getenv("ANV_QUEUE_OVERRIDE");
if (env == NULL)
g_override = strtol(next + 2, NULL, 0);
} else if (strncmp(next, "c=", 2) == 0) {
c_override = strtol(next + 2, NULL, 0);
+ } else if (strncmp(next, "v=", 2) == 0) {
+ v_override = strtol(next + 2, NULL, 0);
} else {
mesa_logw("Ignoring unsupported ANV_QUEUE_OVERRIDE token: %s", next);
}
"Vulkan specification");
if (c_override >= 0)
*c_count = c_override;
+ if (v_override >= 0)
+ *v_count = v_override;
}
static void
int gc_count =
intel_engines_count(pdevice->engine_info,
INTEL_ENGINE_CLASS_RENDER);
+ /* Number of available video engines. Use the generic
+ * INTEL_ENGINE_CLASS_VIDEO enumerator, not the i915 uAPI
+ * I915_ENGINE_CLASS_VIDEO: intel_engines_count() takes an
+ * enum intel_engine_class, and every other caller in this
+ * change (render/copy/compute counts, the queue-class assert)
+ * uses the intel_engine_class namespace.
+ */
+ int v_count =
+ intel_engines_count(pdevice->engine_info, INTEL_ENGINE_CLASS_VIDEO);
int g_count = 0;
int c_count = 0;
if (debug_get_bool_option("INTEL_COMPUTE_CLASS", false))
enum intel_engine_class compute_class =
c_count < 1 ? INTEL_ENGINE_CLASS_RENDER : INTEL_ENGINE_CLASS_COMPUTE;
- anv_override_engine_counts(&gc_count, &g_count, &c_count);
+ anv_override_engine_counts(&gc_count, &g_count, &c_count, &v_count);
if (gc_count > 0) {
pdevice->queue.families[family_count++] = (struct anv_queue_family) {
.engine_class = compute_class,
};
}
+ /* Advertise one video-decode queue family when the device exposes
+ * video engines. engine_class must hold an enum intel_engine_class
+ * value (the sibling families above use that enum, and the
+ * queue-class assert checks INTEL_ENGINE_CLASS_VIDEO), so use
+ * INTEL_ENGINE_CLASS_VIDEO rather than the i915 uAPI constant.
+ */
+ if (v_count > 0) {
+ pdevice->queue.families[family_count++] = (struct anv_queue_family) {
+ .queueFlags = VK_QUEUE_VIDEO_DECODE_BIT_KHR,
+ .queueCount = v_count,
+ .engine_class = INTEL_ENGINE_CLASS_VIDEO,
+ };
+ }
/* Increase count below when other families are added as a reminder to
* increase the ANV_MAX_QUEUE_FAMILIES value.
*/
return (queue_family->queueFlags & VK_QUEUE_GRAPHICS_BIT) != 0;
}
+/* Returns true when this command buffer belongs to a queue family that
+ * advertises VK_QUEUE_VIDEO_DECODE_BIT_KHR, i.e. it records for the video
+ * engine rather than a render/compute queue. Used to divert or skip
+ * 3D-specific state emission on the video-only paths.
+ */
+static bool
+is_video_queue_cmd_buffer(const struct anv_cmd_buffer *cmd_buffer)
+{
+ struct anv_queue_family *queue_family = cmd_buffer->queue_family;
+ return (queue_family->queueFlags & VK_QUEUE_VIDEO_DECODE_BIT_KHR) != 0;
+}
+
ALWAYS_INLINE static void
genX(emit_dummy_post_sync_op)(struct anv_cmd_buffer *cmd_buffer,
uint32_t vertex_count)
if (cmd_buffer->vk.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY)
cmd_buffer->usage_flags &= ~VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT;
+ if (is_video_queue_cmd_buffer(cmd_buffer))
+ return VK_SUCCESS;
+
trace_intel_begin_cmd_buffer(&cmd_buffer->trace);
genX(cmd_buffer_emit_state_base_address)(cmd_buffer);
if (anv_batch_has_error(&cmd_buffer->batch))
return cmd_buffer->batch.status;
+ if (is_video_queue_cmd_buffer(cmd_buffer)) {
+ anv_cmd_buffer_end_batch_buffer(cmd_buffer);
+ return VK_SUCCESS;
+ }
+
anv_measure_endcommandbuffer(cmd_buffer);
#if GFX_HAS_GENERATED_CMDS
VkAccessFlags2 src_flags = 0;
VkAccessFlags2 dst_flags = 0;
+ if (is_video_queue_cmd_buffer(cmd_buffer))
+ return;
+
for (uint32_t i = 0; i < dep_info->memoryBarrierCount; i++) {
src_flags |= dep_info->pMemoryBarriers[i].srcAccessMask;
dst_flags |= dep_info->pMemoryBarriers[i].dstAccessMask;
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
ANV_FROM_HANDLE(anv_event, event, _event);
+ if (is_video_queue_cmd_buffer(cmd_buffer)) {
+ anv_batch_emit(&cmd_buffer->batch, GENX(MI_FLUSH_DW), flush) {
+ flush.PostSyncOperation = WriteImmediateData;
+ flush.Address = anv_state_pool_state_address(
+ &cmd_buffer->device->dynamic_state_pool,
+ event->state);
+ flush.ImmediateData = VK_EVENT_SET;
+ }
+ return;
+ }
+
VkPipelineStageFlags2 src_stages = 0;
for (uint32_t i = 0; i < pDependencyInfo->memoryBarrierCount; i++)
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
ANV_FROM_HANDLE(anv_event, event, _event);
+ if (is_video_queue_cmd_buffer(cmd_buffer)) {
+ anv_batch_emit(&cmd_buffer->batch, GENX(MI_FLUSH_DW), flush) {
+ flush.PostSyncOperation = WriteImmediateData;
+ flush.Address = anv_state_pool_state_address(
+ &cmd_buffer->device->dynamic_state_pool,
+ event->state);
+ flush.ImmediateData = VK_EVENT_RESET;
+ }
+ return;
+ }
+
cmd_buffer->state.pending_pipe_bits |= ANV_PIPE_POST_SYNC_BIT;
genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
case INTEL_ENGINE_CLASS_COMPUTE:
res = init_compute_queue_state(queue);
break;
+ case INTEL_ENGINE_CLASS_VIDEO:
+ /* No dedicated init-state emission for video queues yet; the
+ * sibling case above (INTEL_ENGINE_CLASS_COMPUTE) shows this
+ * switch operates on enum intel_engine_class, so match that
+ * namespace instead of the i915 uAPI I915_ENGINE_CLASS_VIDEO.
+ */
+ res = VK_SUCCESS;
+ break;
default:
res = vk_error(device, VK_ERROR_INITIALIZATION_FAILED);
break;