{
   VkResult result;
-   bool u_trace_enabled = u_trace_context_actively_tracing(&queue->device->trace_context);
+   bool u_trace_enabled = u_trace_should_process(&queue->device->trace_context);
   bool has_trace_points = false;
   struct vk_command_buffer **vk_cmd_buffers = vk_submit->command_buffers;

      iris_bo_wait_rendering(batch->bo); /* if execbuf failed; this is a nop */
   }
-   if (u_trace_context_actively_tracing(&ice->ds.trace_context))
+   if (u_trace_should_process(&ice->ds.trace_context))
      iris_utrace_flush(batch, submission_id);
   /* Start a new batch buffer. */

intel_ds_end_submit(struct intel_ds_queue *queue,
                    uint64_t start_ts)
{
-   if (!u_trace_context_actively_tracing(&queue->device->trace_context)) {
+   if (!u_trace_should_process(&queue->device->trace_context)) {
      queue->device->sync_gpu_ts = 0;
      queue->device->next_clock_sync_ns = 0;
      return;

struct anv_cmd_buffer **cmd_buffers,
uint32_t *utrace_copies)
{
-   if (!u_trace_context_actively_tracing(&device->ds.trace_context))
+   if (!u_trace_should_process(&device->ds.trace_context))
      return 0;
   uint32_t utraces = 0;

struct anv_cmd_buffer **cmd_buffers,
uint32_t *utrace_copies)
{
-   if (!u_trace_context_actively_tracing(&device->ds.trace_context))
+   if (!u_trace_should_process(&device->ds.trace_context))
      return 0;
   uint32_t utraces = 0;

   return p_atomic_read_relaxed(&utctx->enabled_traces) != 0;
}

+/**
+ * Return whether trace chunks should be processed, i.e. whether any enabled
+ * trace type requires processing (U_TRACE_TYPE_REQUIRE_PROCESSING).
+ */
static ALWAYS_INLINE bool
-u_trace_context_actively_tracing(struct u_trace_context *utctx) {
+u_trace_should_process(struct u_trace_context *utctx) {
   return p_atomic_read_relaxed(&utctx->enabled_traces) & U_TRACE_TYPE_REQUIRE_PROCESSING;
}
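
/*
 * Illustrative sketch (not part of the patch above): after the rename, a
 * driver's submission path gates trace-chunk processing on
 * u_trace_should_process() instead of u_trace_context_actively_tracing().
 * Everything named "example_*" below is a hypothetical stand-in for a
 * driver's own code; the include path assumes Mesa's src/util/perf/u_trace.h.
 */
#include <stdint.h>
#include "util/perf/u_trace.h"

/* Hypothetical device wrapping the shared trace context. */
struct example_device {
   struct u_trace_context trace_context;
};

/* Hypothetical per-submission trace flush (timestamp copies, chunk
 * processing, etc.). */
static void
example_utrace_flush(struct example_device *device, uint32_t submission_id)
{
   (void)device;
   (void)submission_id;
}

static void
example_queue_submit(struct example_device *device, uint32_t submission_id)
{
   /* Only do the extra trace work when an enabled trace type actually
    * requires chunk processing (U_TRACE_TYPE_REQUIRE_PROCESSING). */
   if (u_trace_should_process(&device->trace_context))
      example_utrace_flush(device, submission_id);
}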