VK_EXT_extended_dynamic_state2 DONE (anv, lvp, radv, tu, vn)
VK_EXT_inline_uniform_block DONE (anv, lvp, radv, v3dv, vn)
VK_EXT_pipeline_creation_cache_control DONE (anv, lvp, radv, tu, v3dv, vn)
- VK_EXT_pipeline_creation_feedback DONE (anv, lvp, radv, tu, v3dv)
+ VK_EXT_pipeline_creation_feedback DONE (anv, lvp, radv, tu, v3dv, vn)
VK_EXT_private_data DONE (anv, lvp, pvr, radv, tu, v3dv, vn)
VK_EXT_image_robustness DONE (anv, lvp, radv, tu, vn)
VK_EXT_shader_demote_to_helper_invocation DONE (anv, lvp, radv, tu, vn)
.EXT_image_robustness = true,
.EXT_inline_uniform_block = true,
.EXT_pipeline_creation_cache_control = true,
+   /* TODO(VK_EXT_pipeline_creation_feedback): The native implementation
+    * invalidates all feedback. Teach the venus protocol to receive valid
+    * feedback from the renderer.
+ *
+ * Even though we implement this natively, we still require host driver
+ * support to avoid invalid usage in the renderer, because we (the guest
+ * driver) do not scrub the extension bits from the
+ * VkGraphicsPipelineCreateInfo pNext chain. The host driver still writes
+ * feedback into VkPipelineCreationFeedback, which is harmless, but the
+ * renderer does not send the returned feedback to us due to protocol
+ * deficiencies.
+ */
+ .EXT_pipeline_creation_feedback = true,
.EXT_shader_demote_to_helper_invocation = true,
.EXT_subgroup_size_control = true,
.EXT_texel_buffer_alignment = true,
return fixes->create_infos;
}
+/**
+ * We invalidate each VkPipelineCreationFeedback. This is a legal but useless
+ * implementation.
+ *
+ * We invalidate because the venus protocol (as of 2022-08-25) does not know
+ * that the VkPipelineCreationFeedback structs in the
+ * VkGraphicsPipelineCreateInfo pNext are output parameters. Before
+ * VK_EXT_pipeline_creation_feedback, the pNext chain was input-only.
+ *
+ * \param chain  pNext chain of a pipeline create info; may be NULL.
+ */
+static void
+vn_invalidate_pipeline_creation_feedback(const VkBaseInStructure *chain)
+{
+   const VkPipelineCreationFeedbackCreateInfo *feedback_info =
+      vk_find_struct_const(chain, PIPELINE_CREATION_FEEDBACK_CREATE_INFO);
+
+   /* The app did not request creation feedback; nothing to invalidate. */
+   if (!feedback_info)
+      return;
+
+   /* The create-info struct is const, but pPipelineCreationFeedback and
+    * pPipelineStageCreationFeedbacks are non-const pointer members that the
+    * spec defines as output parameters, so writing through them is valid.
+    * Clearing VALID_BIT marks the feedback as not meaningful.
+    */
+   feedback_info->pPipelineCreationFeedback->flags &=
+      ~VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT;
+
+   for (uint32_t i = 0; i < feedback_info->pipelineStageCreationFeedbackCount;
+        i++) {
+      feedback_info->pPipelineStageCreationFeedbacks[i].flags &=
+         ~VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT;
+   }
+}
+
VkResult
vn_CreateGraphicsPipelines(VkDevice device,
VkPipelineCache pipelineCache,
}
for (uint32_t i = 0; i < createInfoCount; i++) {
- if ((pCreateInfos[i].flags & VN_PIPELINE_CREATE_SYNC_MASK)) {
+ if ((pCreateInfos[i].flags & VN_PIPELINE_CREATE_SYNC_MASK))
want_sync = true;
- break;
- }
+
+ vn_invalidate_pipeline_creation_feedback(
+ (const VkBaseInStructure *)pCreateInfos[i].pNext);
}
if (want_sync) {
return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
for (uint32_t i = 0; i < createInfoCount; i++) {
- if ((pCreateInfos[i].flags & VN_PIPELINE_CREATE_SYNC_MASK)) {
+ if ((pCreateInfos[i].flags & VN_PIPELINE_CREATE_SYNC_MASK))
want_sync = true;
- break;
- }
+
+ vn_invalidate_pipeline_creation_feedback(
+ (const VkBaseInStructure *)pCreateInfos[i].pNext);
}
if (want_sync) {