return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
VkResult result =
- vk_command_buffer_init(&cmd_buffer->vk, &device->vk);
+ vk_command_buffer_init(&cmd_buffer->vk, &device->vk, level);
if (result != VK_SUCCESS) {
vk_free(&cmd_buffer->pool->alloc, cmd_buffer);
return result;
cmd_buffer->device = device;
cmd_buffer->pool = pool;
- cmd_buffer->level = level;
list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
cmd_buffer->queue_family_index = pool->queue_family_index;
uint64_t va;
va = radv_buffer_get_va(device->trace_bo);
- if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY)
+ if (cmd_buffer->vk.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY)
va += 4;
++cmd_buffer->state.trace_id;
list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
result = radv_reset_cmd_buffer(cmd_buffer);
- cmd_buffer->level = pAllocateInfo->level;
vk_command_buffer_finish(&cmd_buffer->vk);
VkResult init_result =
- vk_command_buffer_init(&cmd_buffer->vk, &device->vk);
+ vk_command_buffer_init(&cmd_buffer->vk, &device->vk, pAllocateInfo->level);
if (init_result != VK_SUCCESS)
result = init_result;
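
The recycled-allocation path above is the pattern this change forces on every driver: because the level is now recorded only by vk_command_buffer_init(), a command buffer pulled back off the pool's free list can no longer just overwrite a driver-local level field on reuse; it has to finish and re-initialize the common vk object with the level of the new allocation. A minimal sketch of that pattern, with the drv_* names standing in for any driver:

   static VkResult
   drv_recycle_cmd_buffer(struct drv_cmd_buffer *cmd_buffer,
                          struct drv_device *device,
                          const VkCommandBufferAllocateInfo *pAllocateInfo)
   {
      /* Reset driver-side state first (drv_reset_cmd_buffer is hypothetical). */
      VkResult result = drv_reset_cmd_buffer(cmd_buffer);

      /* The level lives in the common vk_command_buffer now, so tear the
       * base object down and re-initialize it with the level requested by
       * this allocation instead of writing a driver-local field.
       */
      vk_command_buffer_finish(&cmd_buffer->vk);
      VkResult init_result =
         vk_command_buffer_init(&cmd_buffer->vk, &device->vk,
                                pAllocateInfo->level);
      if (init_result != VK_SUCCESS)
         result = init_result;

      return result;
   }
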
cmd_buffer->state.mesh_shading = false;
cmd_buffer->usage_flags = pBeginInfo->flags;
- if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY &&
+ if (cmd_buffer->vk.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY &&
(pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
struct radv_subpass *subpass = NULL;
for (uint32_t j = 0; j < submission->command_buffer_count; j++) {
struct radv_cmd_buffer *cmd_buffer =
(struct radv_cmd_buffer *)submission->command_buffers[j];
- assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);
+ assert(cmd_buffer->vk.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);
cs_array[j] = cmd_buffer->cs;
if ((cmd_buffer->usage_flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT))
struct list_head pool_link;
VkCommandBufferUsageFlags usage_flags;
- VkCommandBufferLevel level;
enum radv_cmd_buffer_status status;
struct radeon_cmdbuf *cs;
struct radv_cmd_state state;
static void
cmd_buffer_init(struct v3dv_cmd_buffer *cmd_buffer,
struct v3dv_device *device,
- struct v3dv_cmd_pool *pool,
- VkCommandBufferLevel level)
+ struct v3dv_cmd_pool *pool)
{
/* Do not reset the base object! If we are calling this from a command
* buffer reset that would reset the loader's dispatch table for the
cmd_buffer->device = device;
cmd_buffer->pool = pool;
- cmd_buffer->level = level;
list_inithead(&cmd_buffer->private_objs);
list_inithead(&cmd_buffer->jobs);
return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
VkResult result;
- result = vk_command_buffer_init(&cmd_buffer->vk, &device->vk);
+ result = vk_command_buffer_init(&cmd_buffer->vk, &device->vk, level);
if (result != VK_SUCCESS) {
vk_free2(&device->vk.alloc, &pool->alloc, cmd_buffer);
return result;
}
- cmd_buffer_init(cmd_buffer, device, pool, level);
+ cmd_buffer_init(cmd_buffer, device, pool);
*pCommandBuffer = v3dv_cmd_buffer_to_handle(cmd_buffer);
const struct v3dv_physical_device *physical_device =
&cmd_buffer->device->instance->physicalDevice;
- if (cmd_buffer->level != VK_COMMAND_BUFFER_LEVEL_PRIMARY)
+ if (cmd_buffer->vk.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY)
return false;
if (!cmd_buffer->state.job)
 * a transfer command. The only exception is secondary command buffers
 * inside a render pass.
*/
- assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY ||
+ assert(cmd_buffer->vk.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY ||
v3dv_cl_offset(&job->bcl) > 0);
/* When we merge multiple subpasses into the same job we must only emit one
* that case we want to defer this until we finish recording the primary
* job into which we execute the secondary.
*/
- if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY ||
+ if (cmd_buffer->vk.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY ||
!cmd_buffer->state.pass) {
cmd_buffer_add_cpu_jobs_for_pending_state(cmd_buffer);
}
cmd_buffer->state.dirty_descriptor_stages = ~0;
/* Honor inheritance of occlusion queries in secondaries if requested */
- if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY &&
+ if (cmd_buffer->vk.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY &&
cmd_buffer->state.inheritance.occlusion_query_enable) {
cmd_buffer->state.dirty &= ~V3DV_CMD_DIRTY_OCCLUSION_QUERY;
}
if (cmd_buffer->status != V3DV_CMD_BUFFER_STATUS_INITIALIZED) {
struct v3dv_device *device = cmd_buffer->device;
struct v3dv_cmd_pool *pool = cmd_buffer->pool;
- VkCommandBufferLevel level = cmd_buffer->level;
/* cmd_buffer_init below will re-add the command buffer to the pool,
 * so remove it here first to avoid adding it twice.
if (cmd_buffer->status != V3DV_CMD_BUFFER_STATUS_NEW)
cmd_buffer_free_resources(cmd_buffer);
- cmd_buffer_init(cmd_buffer, device, pool, level);
+ cmd_buffer_init(cmd_buffer, device, pool);
}
assert(cmd_buffer->status == V3DV_CMD_BUFFER_STATUS_INITIALIZED);
struct v3dv_cmd_buffer *cmd_buffer,
const VkCommandBufferInheritanceInfo *inheritance_info)
{
- assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY);
+ assert(cmd_buffer->vk.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY);
assert(cmd_buffer->usage_flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT);
assert(inheritance_info);
cmd_buffer->usage_flags = pBeginInfo->flags;
- if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) {
+ if (cmd_buffer->vk.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) {
if (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
result =
cmd_buffer_begin_render_pass_secondary(cmd_buffer,
} else if (framebuffer) {
state->attachments[i].image_view = framebuffer->attachments[i];
} else {
- assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY);
+ assert(cmd_buffer->vk.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY);
state->attachments[i].image_view = NULL;
}
}
static void
cmd_buffer_emit_subpass_clears(struct v3dv_cmd_buffer *cmd_buffer)
{
- assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);
+ assert(cmd_buffer->vk.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);
assert(cmd_buffer->state.pass);
assert(cmd_buffer->state.subpass_idx < cmd_buffer->state.pass->subpass_count);
* attachment load clears, but we don't have any instances of that right
* now.
*/
- if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY)
+ if (cmd_buffer->vk.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY)
cmd_buffer_emit_subpass_clears(cmd_buffer);
return job;
assert(subpass_idx < cmd_buffer->state.pass->subpass_count);
struct v3dv_job *job;
- if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
+ if (cmd_buffer->vk.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
job = cmd_buffer_subpass_create_job(cmd_buffer, subpass_idx,
V3DV_JOB_TYPE_GPU_CL);
} else {
- assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY);
+ assert(cmd_buffer->vk.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY);
job = cmd_buffer_subpass_create_job(cmd_buffer, subpass_idx,
V3DV_JOB_TYPE_GPU_CL_SECONDARY);
}
* inside a render pass.
*/
if (cmd_buffer->state.job) {
- assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY &&
+ assert(cmd_buffer->vk.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY &&
cmd_buffer->state.pass);
v3dv_cmd_buffer_finish_job(cmd_buffer);
}
* draw calls in them, and then using that info to decide if we need to
* restart the primary job into which they are being recorded.
*/
- if (cmd_buffer->level != VK_COMMAND_BUFFER_LEVEL_PRIMARY)
+ if (cmd_buffer->vk.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY)
return;
/* Drop the current job and restart it with MSAA enabled */
struct list_head list_link;
VkCommandBufferUsageFlags usage_flags;
- VkCommandBufferLevel level;
enum v3dv_cmd_buffer_status status;
} else if (cmd_buffer->state.framebuffer) {
num_layers = cmd_buffer->state.framebuffer->layers;
} else {
- assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY);
+ assert(cmd_buffer->vk.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY);
num_layers = 2048;
#if DEBUG
fprintf(stderr, "Skipping gl_LayerID shader sanity check for "
* buffer.
*/
if (!framebuffer) {
- assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY);
+ assert(cmd_buffer->vk.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY);
return;
}
struct v3dv_framebuffer *fb = state->framebuffer;
if (!fb) {
- assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY);
+ assert(cmd_buffer->vk.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY);
perf_debug("Loading depth aspect in a secondary command buffer "
"without framebuffer info disables early-z tests.\n");
job->first_ez_state = V3D_EZ_DISABLED;
if (cmd->state.tessfactor_addr_set)
return;
- assert(cmd->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);
+ assert(cmd->vk.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);
tu_cs_emit_regs(&cmd->cs, A6XX_PC_TESSFACTOR_ADDR(.qword = cmd->device->tess_bo.iova));
cmd->state.tessfactor_addr_set = true;
if (cmd_buffer == NULL)
return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
- VkResult result = vk_command_buffer_init(&cmd_buffer->vk, &device->vk);
+ VkResult result = vk_command_buffer_init(&cmd_buffer->vk, &device->vk, level);
if (result != VK_SUCCESS) {
vk_free2(&device->vk.alloc, NULL, cmd_buffer);
return result;
cmd_buffer->device = device;
cmd_buffer->pool = pool;
- cmd_buffer->level = level;
if (pool) {
list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
result = tu_reset_cmd_buffer(cmd_buffer);
- cmd_buffer->level = pAllocateInfo->level;
vk_command_buffer_finish(&cmd_buffer->vk);
VkResult init_result =
- vk_command_buffer_init(&cmd_buffer->vk, &device->vk);
+ vk_command_buffer_init(&cmd_buffer->vk, &device->vk, pAllocateInfo->level);
if (init_result != VK_SUCCESS)
result = init_result;
tu_cs_begin(&cmd_buffer->draw_epilogue_cs);
/* setup initial configuration into command buffer */
- if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
+ if (cmd_buffer->vk.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
switch (cmd_buffer->queue_family_index) {
case TU_QUEUE_GENERAL:
tu6_init_hw(cmd_buffer, &cmd_buffer->cs);
default:
break;
}
- } else if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) {
+ } else if (cmd_buffer->vk.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) {
assert(pBeginInfo->pInheritanceInfo);
vk_foreach_struct(ext, pBeginInfo->pInheritanceInfo) {
/* Set up the tess factor address if this is the first tess pipeline bound
* to the primary cmdbuf.
*/
- if (cmd->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY)
+ if (cmd->vk.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY)
tu6_lazy_emit_tessfactor_addr(cmd);
/* maximum number of patches that can fit in tess factor/param buffers */
struct list_head renderpass_autotune_results;
VkCommandBufferUsageFlags usage_flags;
- VkCommandBufferLevel level;
enum tu_cmd_buffer_status status;
struct tu_cmd_state state;
if (cmd_buffer == NULL)
return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
- VkResult result = vk_command_buffer_init(&cmd_buffer->vk, &device->vk);
+ VkResult result = vk_command_buffer_init(&cmd_buffer->vk, &device->vk, level);
if (result != VK_SUCCESS) {
vk_free(&pool->alloc, cmd_buffer);
return result;
list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
result = lvp_reset_cmd_buffer(cmd_buffer);
- cmd_buffer->level = pAllocateInfo->level;
vk_command_buffer_finish(&cmd_buffer->vk);
VkResult init_result =
- vk_command_buffer_init(&cmd_buffer->vk, &device->vk);
+ vk_command_buffer_init(&cmd_buffer->vk, &device->vk,
+ pAllocateInfo->level);
if (init_result != VK_SUCCESS)
result = init_result;
struct lvp_device * device;
- VkCommandBufferLevel level;
enum lvp_cmd_buffer_status status;
struct lvp_cmd_pool * pool;
struct list_head pool_link;
{
struct anv_batch_bo *batch_bo = anv_cmd_buffer_current_batch_bo(cmd_buffer);
- if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
+ if (cmd_buffer->vk.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
/* When we start a batch buffer, we subtract a certain amount of
* padding from the end to ensure that we always have room to emit a
* BATCH_BUFFER_START to chain to the next BO. We need to remove
cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_PRIMARY;
} else {
- assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY);
+ assert(cmd_buffer->vk.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY);
/* If this is a secondary command buffer, we need to determine the
* mode in which it will be executed with vkExecuteCommands. We
* determine this statically here so that this stays in sync with the
if (cmd_buffer == NULL)
return vk_error(pool, VK_ERROR_OUT_OF_HOST_MEMORY);
- result = vk_command_buffer_init(&cmd_buffer->vk, &device->vk);
+ result = vk_command_buffer_init(&cmd_buffer->vk, &device->vk, level);
if (result != VK_SUCCESS)
goto fail_alloc;
cmd_buffer->device = device;
cmd_buffer->pool = pool;
- cmd_buffer->level = level;
result = anv_cmd_buffer_init_batch_bo_chain(cmd_buffer);
if (result != VK_SUCCESS)
uintptr_t framebuffer = (uintptr_t)cmd_buffer->state.framebuffer;
if (!measure->base.framebuffer &&
- cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY)
+ cmd_buffer->vk.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY)
/* secondary command buffer inherited the framebuffer from the primary */
measure->base.framebuffer = framebuffer;
struct anv_state_stream general_state_stream;
VkCommandBufferUsageFlags usage_flags;
- VkCommandBufferLevel level;
struct anv_query_pool *perf_query_pool;
* secondary command buffer is considered to be entirely inside a render
* pass. If this is a primary command buffer, then this bit is ignored.
*/
- if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY)
+ if (cmd_buffer->vk.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY)
cmd_buffer->usage_flags &= ~VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT;
trace_intel_begin_cmd_buffer(&cmd_buffer->trace, cmd_buffer);
}
#if GFX_VERx10 >= 75
- if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) {
+ if (cmd_buffer->vk.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) {
const VkCommandBufferInheritanceConditionalRenderingInfoEXT *conditional_rendering_info =
vk_find_struct_const(pBeginInfo->pInheritanceInfo->pNext, COMMAND_BUFFER_INHERITANCE_CONDITIONAL_RENDERING_INFO_EXT);
emit_isp_disable(cmd_buffer);
- trace_intel_end_cmd_buffer(&cmd_buffer->trace, cmd_buffer, cmd_buffer->level);
+ trace_intel_end_cmd_buffer(&cmd_buffer->trace, cmd_buffer,
+ cmd_buffer->vk.level);
anv_cmd_buffer_end_batch_buffer(cmd_buffer);
{
ANV_FROM_HANDLE(anv_cmd_buffer, primary, commandBuffer);
- assert(primary->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);
+ assert(primary->vk.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);
if (anv_batch_has_error(&primary->batch))
return;
for (uint32_t i = 0; i < commandBufferCount; i++) {
ANV_FROM_HANDLE(anv_cmd_buffer, secondary, pCmdBuffers[i]);
- assert(secondary->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY);
+ assert(secondary->vk.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY);
assert(!anv_batch_has_error(&secondary->batch));
#if GFX_VERx10 >= 75
if (anv_batch_has_error(&cmd_buffer->batch))
return;
- assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);
+ assert(cmd_buffer->vk.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);
uint32_t prev_subpass = anv_get_subpass_id(&cmd_buffer->state);
cmd_buffer_end_subpass(cmd_buffer);
vp->x + vp->width - 1);
/* Do this math using int64_t so overflow gets clamped correctly. */
- if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
+ if (cmd_buffer->vk.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
y_min = clamp_int64((uint64_t) y_min,
cmd_buffer->state.render_area.offset.y, max);
x_min = clamp_int64((uint64_t) x_min,
struct list_head batches;
VkCommandBufferUsageFlags usage_flags;
- VkCommandBufferLevel level;
enum panvk_cmd_buffer_status status;
struct panvk_cmd_state state;
if (!cmdbuf)
return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
- VkResult result = vk_command_buffer_init(&cmdbuf->vk, &device->vk);
+ VkResult result = vk_command_buffer_init(&cmdbuf->vk, &device->vk, level);
if (result != VK_SUCCESS) {
vk_free(&device->vk.alloc, cmdbuf);
return result;
}
cmdbuf->device = device;
- cmdbuf->level = level;
cmdbuf->pool = pool;
if (pool) {
list_del(&cmdbuf->pool_link);
list_addtail(&cmdbuf->pool_link, &pool->active_cmd_buffers);
- cmdbuf->level = pAllocateInfo->level;
vk_command_buffer_finish(&cmdbuf->vk);
- result = vk_command_buffer_init(&cmdbuf->vk, &device->vk);
+ result = vk_command_buffer_init(&cmdbuf->vk, &device->vk, pAllocateInfo->level);
} else {
result = panvk_create_cmdbuf(device, pool, pAllocateInfo->level, &cmdbuf);
}
VkResult
vk_command_buffer_init(struct vk_command_buffer *command_buffer,
- struct vk_device *device)
+ struct vk_device *device,
+ VkCommandBufferLevel level)
{
memset(command_buffer, 0, sizeof(*command_buffer));
vk_object_base_init(device, &command_buffer->base,
VK_OBJECT_TYPE_COMMAND_BUFFER);
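+ /* Record the level handed in by the driver; from here on it is read back
+  * as cmd_buffer->vk.level instead of a driver-local copy.
+  */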
+ command_buffer->level = level;
util_dynarray_init(&command_buffer->labels, NULL);
command_buffer->region_begin = true;
struct vk_command_buffer {
struct vk_object_base base;
+ /** VkCommandBufferAllocateInfo::level */
+ VkCommandBufferLevel level;
+
/**
* VK_EXT_debug_utils
*
VkResult MUST_CHECK
vk_command_buffer_init(struct vk_command_buffer *command_buffer,
- struct vk_device *device);
+ struct vk_device *device,
+ VkCommandBufferLevel level);
void
vk_command_buffer_reset(struct vk_command_buffer *command_buffer);
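
Taken together, the new contract is: pass the level to the common runtime once at init time, and read it back through the base object everywhere else. A minimal sketch of the resulting driver-side convention (the drv_* names are hypothetical):

   static VkResult
   drv_create_cmd_buffer(struct drv_device *device,
                         VkCommandBufferLevel level,
                         struct drv_cmd_buffer *cmd_buffer)
   {
      /* The level is handed to the common runtime exactly once. */
      VkResult result =
         vk_command_buffer_init(&cmd_buffer->vk, &device->vk, level);
      if (result != VK_SUCCESS)
         return result;

      /* No driver-local copy of the level; later checks read it back from
       * the base object.
       */
      if (cmd_buffer->vk.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) {
         /* secondary-only setup would go here */
      }

      return VK_SUCCESS;
   }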