This should be useful for stress-testing dynamic rendering.
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/17378>
fraction = 10
[deqp.env]
IR3_SHADER_DEBUG = "spillall"
+
+# Test dynamic rendering via the emulated render pass path
+[[deqp]]
+deqp = "/deqp/external/vulkancts/modules/vulkan/deqp-vk"
+caselists = ["/deqp/mustpass/vk-master.txt"]
+skips = ["install/freedreno-a630-premerge-skips.txt"]
+include = ["dEQP-VK.renderpass2.*"]
+prefix = "dynamic-"
+fraction = 10
+[deqp.env]
+TU_DEBUG = "dynamic"
#include "adreno_common.xml.h"
#include "vk_format.h"
+#include "vk_render_pass.h"
#include "vk_util.h"
#include "tu_cs.h"
}
if (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
- if (pBeginInfo->pInheritanceInfo->renderPass) {
- cmd_buffer->state.pass = tu_render_pass_from_handle(pBeginInfo->pInheritanceInfo->renderPass);
- cmd_buffer->state.subpass =
- &cmd_buffer->state.pass->subpasses[pBeginInfo->pInheritanceInfo->subpass];
- } else {
- const VkCommandBufferInheritanceRenderingInfo *rendering_info =
- vk_find_struct_const(pBeginInfo->pInheritanceInfo->pNext,
- COMMAND_BUFFER_INHERITANCE_RENDERING_INFO);
+ const VkCommandBufferInheritanceRenderingInfo *rendering_info =
+ vk_find_struct_const(pBeginInfo->pInheritanceInfo->pNext,
+ COMMAND_BUFFER_INHERITANCE_RENDERING_INFO);
+
+ if (unlikely(cmd_buffer->device->instance->debug_flags & TU_DEBUG_DYNAMIC)) {
+ rendering_info =
+ vk_get_command_buffer_inheritance_rendering_info(cmd_buffer->vk.level,
+ pBeginInfo);
+ }
+
+ if (rendering_info) {
tu_setup_dynamic_inheritance(cmd_buffer, rendering_info);
cmd_buffer->state.pass = &cmd_buffer->dynamic_pass;
cmd_buffer->state.subpass = &cmd_buffer->dynamic_subpass;
+ } else {
+ cmd_buffer->state.pass = tu_render_pass_from_handle(pBeginInfo->pInheritanceInfo->renderPass);
+ cmd_buffer->state.subpass =
+ &cmd_buffer->state.pass->subpasses[pBeginInfo->pInheritanceInfo->subpass];
}
tu_lrz_begin_secondary_cmdbuf(cmd_buffer);
} else {
const VkSubpassBeginInfo *pSubpassBeginInfo)
{
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
+
+ if (unlikely(cmd->device->instance->debug_flags & TU_DEBUG_DYNAMIC)) {
+ vk_common_CmdBeginRenderPass2(commandBuffer, pRenderPassBegin,
+ pSubpassBeginInfo);
+ return;
+ }
+
TU_FROM_HANDLE(tu_render_pass, pass, pRenderPassBegin->renderPass);
TU_FROM_HANDLE(tu_framebuffer, fb, pRenderPassBegin->framebuffer);
}
}
+ if (unlikely(cmd->device->instance->debug_flags & TU_DEBUG_DYNAMIC)) {
+ const VkRenderingSelfDependencyInfoMESA *self_dependency =
+ vk_find_struct_const(pRenderingInfo->pNext, RENDERING_SELF_DEPENDENCY_INFO_MESA);
+ if (self_dependency &&
+ (self_dependency->colorSelfDependencies ||
+ self_dependency->depthSelfDependency ||
+ self_dependency->stencilSelfDependency)) {
+ /* Mesa's renderpass emulation requires us to use normal attachments
+ * for input attachments, and currently doesn't try to keep track of
+ * which color/depth attachment an input attachment corresponds to.
+ * So when there's a self-dependency, we have to use sysmem.
+ */
+ cmd->state.rp.disable_gmem = true;
+ }
+ }
+
cmd->state.renderpass_cache.pending_flush_bits =
cmd->state.cache.pending_flush_bits;
cmd->state.renderpass_cache.flush_bits = 0;
const VkSubpassEndInfo *pSubpassEndInfo)
{
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
+
+ if (unlikely(cmd->device->instance->debug_flags & TU_DEBUG_DYNAMIC)) {
+ vk_common_CmdNextSubpass2(commandBuffer, pSubpassBeginInfo,
+ pSubpassEndInfo);
+ return;
+ }
+
const struct tu_render_pass *pass = cmd->state.pass;
struct tu_cs *cs = &cmd->draw_cs;
const struct tu_subpass *last_subpass = cmd->state.subpass;
{
TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
+ if (unlikely(cmd_buffer->device->instance->debug_flags & TU_DEBUG_DYNAMIC)) {
+ vk_common_CmdEndRenderPass2(commandBuffer, pSubpassEndInfo);
+ return;
+ }
+
cmd_buffer->trace_renderpass_end = u_trace_end_iterator(&cmd_buffer->trace);
tu_cs_end(&cmd_buffer->draw_cs);
{
switch (type) {
case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
+ if (unlikely(dev->instance->debug_flags & TU_DEBUG_DYNAMIC))
+ return A6XX_TEX_CONST_DWORDS * 4;
+
/* Input attachment doesn't use descriptor sets at all */
return 0;
case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
break;
case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
/* nothing in descriptor set - framebuffer state is used instead */
+ if (unlikely(device->instance->debug_flags & TU_DEBUG_DYNAMIC))
+ write_image_descriptor(ptr, writeset->descriptorType, writeset->pImageInfo + j);
break;
default:
unreachable("unimplemented descriptor type");
break;
case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
/* nothing in descriptor set - framebuffer state is used instead */
+ if (unlikely(device->instance->debug_flags & TU_DEBUG_DYNAMIC))
+ write_image_descriptor(ptr, templ->entry[i].descriptor_type, src);
break;
default:
unreachable("unimplemented descriptor type");
{ "rast_order", TU_DEBUG_RAST_ORDER },
{ "unaligned_store", TU_DEBUG_UNALIGNED_STORE },
{ "log_skip_gmem_ops", TU_DEBUG_LOG_SKIP_GMEM_OPS },
+ { "dynamic", TU_DEBUG_DYNAMIC },
{ NULL, 0 }
};
VkFramebuffer *pFramebuffer)
{
TU_FROM_HANDLE(tu_device, device, _device);
+
+ if (unlikely(device->instance->debug_flags & TU_DEBUG_DYNAMIC))
+ return vk_common_CreateFramebuffer(_device, pCreateInfo, pAllocator,
+ pFramebuffer);
+
TU_FROM_HANDLE(tu_render_pass, pass, pCreateInfo->renderPass);
struct tu_framebuffer *framebuffer;
const VkAllocationCallbacks *pAllocator)
{
TU_FROM_HANDLE(tu_device, device, _device);
+
+ if (unlikely(device->instance->debug_flags & TU_DEBUG_DYNAMIC)) {
+ vk_common_DestroyFramebuffer(_device, _fb, pAllocator);
+ return;
+ }
+
TU_FROM_HANDLE(tu_framebuffer, fb, _fb);
if (!fb)
VkRenderPass *pRenderPass)
{
TU_FROM_HANDLE(tu_device, device, _device);
+
+ if (unlikely(device->instance->debug_flags & TU_DEBUG_DYNAMIC))
+ return vk_common_CreateRenderPass2(_device, pCreateInfo, pAllocator,
+ pRenderPass);
+
struct tu_render_pass *pass;
size_t size;
size_t attachments_offset;
const VkAllocationCallbacks *pAllocator)
{
TU_FROM_HANDLE(tu_device, device, _device);
+
+ if (unlikely(device->instance->debug_flags & TU_DEBUG_DYNAMIC)) {
+ vk_common_DestroyRenderPass(_device, _pass, pAllocator);
+ return;
+ }
+
TU_FROM_HANDLE(tu_render_pass, pass, _pass);
if (!_pass)
#include "util/u_atomic.h"
#include "vk_format.h"
#include "vk_pipeline.h"
+#include "vk_render_pass.h"
#include "vk_util.h"
#include "tu_cs.h"
const VkPipelineRenderingCreateInfo *rendering_info =
vk_find_struct_const(create_info->pNext, PIPELINE_RENDERING_CREATE_INFO);
+ if (unlikely(dev->instance->debug_flags & TU_DEBUG_DYNAMIC) && !rendering_info)
+ rendering_info = vk_get_pipeline_rendering_create_info(create_info);
+
if (rendering_info) {
builder->subpass_raster_order_attachment_access = false;
builder->subpass_feedback_loop_ds = false;
*/
builder->emit_msaa_state = !builder->rasterizer_discard;
+ const VkRenderingSelfDependencyInfoMESA *self_dependency =
+ vk_find_struct_const(rendering_info->pNext, RENDERING_SELF_DEPENDENCY_INFO_MESA);
+
+ if (self_dependency) {
+ builder->subpass_feedback_loop_ds =
+ self_dependency->depthSelfDependency ||
+ self_dependency->stencilSelfDependency;
+ builder->subpass_feedback_loop_color =
+ self_dependency->colorSelfDependencies;
+ }
+
if (!builder->rasterizer_discard) {
builder->depth_attachment_format =
rendering_info->depthAttachmentFormat == VK_FORMAT_UNDEFINED ?
#include <vulkan/vulkan.h>
#include "tu_entrypoints.h"
+#include "vulkan/runtime/vk_common_entrypoints.h"
#include "vk_format.h"
#include "vk_image.h"
TU_DEBUG_LOG_SKIP_GMEM_OPS = 1 << 17,
TU_DEBUG_PERF = 1 << 18,
TU_DEBUG_NOLRZFC = 1 << 19,
+ TU_DEBUG_DYNAMIC = 1 << 20,
};
struct tu_instance
}
static nir_ssa_def *
-build_bindless(nir_builder *b, nir_deref_instr *deref, bool is_sampler,
+build_bindless(struct tu_device *dev, nir_builder *b,
+ nir_deref_instr *deref, bool is_sampler,
struct tu_shader *shader,
const struct tu_pipeline_layout *layout)
{
&layout->set[set].layout->binding[binding];
/* input attachments use non bindless workaround */
- if (bind_layout->type == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT) {
+ if (bind_layout->type == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT &&
+ likely(!(dev->instance->debug_flags & TU_DEBUG_DYNAMIC))) {
const struct glsl_type *glsl_type = glsl_without_array(var->type);
uint32_t idx = var->data.index * 2;
}
static void
-lower_image_deref(nir_builder *b,
+lower_image_deref(struct tu_device *dev, nir_builder *b,
nir_intrinsic_instr *instr, struct tu_shader *shader,
const struct tu_pipeline_layout *layout)
{
nir_deref_instr *deref = nir_src_as_deref(instr->src[0]);
- nir_ssa_def *bindless = build_bindless(b, deref, false, shader, layout);
+ nir_ssa_def *bindless = build_bindless(dev, b, deref, false, shader, layout);
nir_rewrite_image_intrinsic(instr, bindless, true);
}
case nir_intrinsic_image_deref_atomic_comp_swap:
case nir_intrinsic_image_deref_size:
case nir_intrinsic_image_deref_samples:
- lower_image_deref(b, instr, shader, layout);
+ lower_image_deref(dev, b, instr, shader, layout);
return true;
default:
}
static bool
-lower_tex(nir_builder *b, nir_tex_instr *tex,
+lower_tex(nir_builder *b, nir_tex_instr *tex, struct tu_device *dev,
struct tu_shader *shader, const struct tu_pipeline_layout *layout)
{
lower_tex_ycbcr(layout, b, tex);
int sampler_src_idx = nir_tex_instr_src_index(tex, nir_tex_src_sampler_deref);
if (sampler_src_idx >= 0) {
nir_deref_instr *deref = nir_src_as_deref(tex->src[sampler_src_idx].src);
- nir_ssa_def *bindless = build_bindless(b, deref, true, shader, layout);
+ nir_ssa_def *bindless = build_bindless(dev, b, deref, true, shader, layout);
nir_instr_rewrite_src(&tex->instr, &tex->src[sampler_src_idx].src,
nir_src_for_ssa(bindless));
tex->src[sampler_src_idx].src_type = nir_tex_src_sampler_handle;
int tex_src_idx = nir_tex_instr_src_index(tex, nir_tex_src_texture_deref);
if (tex_src_idx >= 0) {
nir_deref_instr *deref = nir_src_as_deref(tex->src[tex_src_idx].src);
- nir_ssa_def *bindless = build_bindless(b, deref, false, shader, layout);
+ nir_ssa_def *bindless = build_bindless(dev, b, deref, false, shader, layout);
nir_instr_rewrite_src(&tex->instr, &tex->src[tex_src_idx].src,
nir_src_for_ssa(bindless));
tex->src[tex_src_idx].src_type = nir_tex_src_texture_handle;
b->cursor = nir_before_instr(instr);
switch (instr->type) {
case nir_instr_type_tex:
- return lower_tex(b, nir_instr_as_tex(instr), params->shader, params->layout);
+ return lower_tex(b, nir_instr_as_tex(instr), params->dev, params->shader, params->layout);
case nir_instr_type_intrinsic:
return lower_intrinsic(b, nir_instr_as_intrinsic(instr), params->dev, params->shader, params->layout);
default: