tu6_emit_flushes(struct tu_cmd_buffer *cmd_buffer,
                 struct tu_cs *cs,
                 enum tu_cmd_flush_bits flushes)
{
+   if (unlikely(cmd_buffer->device->physical_device->instance->debug_flags &
+                TU_DEBUG_FLUSHALL))
+      flushes |= TU_CMD_FLAG_ALL_FLUSH | TU_CMD_FLAG_GPU_INVALIDATE;
+
+   if (unlikely(cmd_buffer->device->physical_device->instance->debug_flags &
+                TU_DEBUG_SYNCDRAW))
+      flushes |= TU_CMD_FLAG_WAIT_MEM_WRITES |
+                 TU_CMD_FLAG_WAIT_FOR_IDLE |
+                 TU_CMD_FLAG_WAIT_FOR_ME;
+
   /* Experiments show that invalidating CCU while it still has data in it
    * doesn't work, so make sure to always flush before invalidating in case
    * any data remains that hasn't yet been made available through a barrier.
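Together these make every flush point maximally heavyweight for debugging: "flushall" ORs in every flush bit plus TU_CMD_FLAG_GPU_INVALIDATE regardless of what the caller asked for, and "syncdraw" further serializes execution by also waiting on pending memory writes (TU_CMD_FLAG_WAIT_MEM_WRITES), GPU idle (TU_CMD_FLAG_WAIT_FOR_IDLE), and the CP's ME (TU_CMD_FLAG_WAIT_FOR_ME). A rendering artifact that disappears under these options is strong evidence of a missing or under-specified barrier rather than a bug in the draw itself.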
tu_emit_cache_flush_renderpass(struct tu_cmd_buffer *cmd_buffer,
                               struct tu_cs *cs)
{
-   if (!cmd_buffer->state.renderpass_cache.flush_bits)
+   if (!cmd_buffer->state.renderpass_cache.flush_bits &&
+       likely(!cmd_buffer->device->physical_device->instance->debug_flags))
      return;
   tu6_emit_flushes(cmd_buffer, cs,
                    cmd_buffer->state.renderpass_cache.flush_bits);
   cmd_buffer->state.renderpass_cache.flush_bits = 0;
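Note that the early return now tests the entire debug_flags word rather than the two new bits individually: a single likely()-annotated compare keeps the common no-debug path cheap, and whenever any TU_DEBUG option is active, tu6_emit_flushes() is entered even with empty flush_bits, so the flushall/syncdraw ORs above can fire at every flush point inside the render pass.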
{ "nomultipos", TU_DEBUG_NOMULTIPOS },
{ "nolrz", TU_DEBUG_NOLRZ },
{ "perfc", TU_DEBUG_PERFC },
+ { "flushall", TU_DEBUG_FLUSHALL },
+ { "syncdraw", TU_DEBUG_SYNCDRAW },
{ NULL, 0 }
};
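This table is what makes the new options reachable at runtime: turnip parses the TU_DEBUG environment variable against tu_debug_options (via parse_debug_string() from Mesa's util/debug.h) when the instance is created, and stores the result in instance->debug_flags, which the hunks above test. Below is a minimal standalone sketch of that pattern; the parser is re-implemented here only so the example compiles on its own, and the real driver uses Mesa's helper rather than this simplified stand-in.

```c
/* Standalone sketch of how tu_debug_options is consumed. In Mesa,
 * instance creation does roughly:
 *
 *    instance->debug_flags =
 *       parse_debug_string(getenv("TU_DEBUG"), tu_debug_options);
 *
 * The tiny parser below stands in for util/debug.h's helper so the
 * example builds outside the Mesa tree. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

enum {
   TU_DEBUG_FLUSHALL = 1 << 10,
   TU_DEBUG_SYNCDRAW = 1 << 11,
};

struct debug_control {
   const char *string;
   uint64_t flag;
};

static const struct debug_control tu_debug_options[] = {
   { "flushall", TU_DEBUG_FLUSHALL },
   { "syncdraw", TU_DEBUG_SYNCDRAW },
   { NULL, 0 }
};

/* Set each option's flag if its name appears in the comma-separated
 * list (simplified to a substring match). */
static uint64_t
parse_debug(const char *str, const struct debug_control *control)
{
   uint64_t flags = 0;

   if (!str)
      return 0;
   for (; control->string; control++) {
      if (strstr(str, control->string))
         flags |= control->flag;
   }
   return flags;
}

int
main(void)
{
   /* Run as: TU_DEBUG=flushall,syncdraw ./a.out */
   uint64_t debug_flags = parse_debug(getenv("TU_DEBUG"), tu_debug_options);

   printf("flushall: %d, syncdraw: %d\n",
          !!(debug_flags & TU_DEBUG_FLUSHALL),
          !!(debug_flags & TU_DEBUG_SYNCDRAW));
   return 0;
}
```

So with this series applied, TU_DEBUG=flushall,syncdraw enables both behaviors for a single run, without rebuilding the driver.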
   TU_DEBUG_NOMULTIPOS = 1 << 7,
   TU_DEBUG_NOLRZ = 1 << 8,
   TU_DEBUG_PERFC = 1 << 9,
+   TU_DEBUG_FLUSHALL = 1 << 10,
+   TU_DEBUG_SYNCDRAW = 1 << 11,
};
struct tu_instance