Also save is_secondary to the CS object.
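For reference, a minimal sketch of the interface change (simplified placeholder types, not the actual Mesa headers): the winsys `cs_create` hook gains an `is_secondary` flag, and the backend simply stores it on its CS object, mirroring what the hunks below do for the amdgpu winsys.

```c
/* Hedged sketch only: "example_cs" / "example_cs_create" are hypothetical
 * stand-ins for the real radv_amdgpu_cs types shown in the diff. */
#include <stdbool.h>
#include <stdlib.h>

enum amd_ip_type { AMD_IP_GFX, AMD_IP_COMPUTE, AMD_IP_SDMA };

struct radeon_cmdbuf { unsigned cdw; };

struct radeon_winsys {
   /* New: callers indicate whether the CS backs a secondary command buffer. */
   struct radeon_cmdbuf *(*cs_create)(struct radeon_winsys *ws,
                                      enum amd_ip_type ip_type,
                                      bool is_secondary);
};

struct example_cs {
   struct radeon_cmdbuf base;
   bool is_secondary; /* saved at creation time for later use */
};

static struct radeon_cmdbuf *
example_cs_create(struct radeon_winsys *ws, enum amd_ip_type ip_type,
                  bool is_secondary)
{
   struct example_cs *cs = calloc(1, sizeof(*cs));
   if (!cs)
      return NULL;
   cs->is_secondary = is_secondary;
   return &cs->base;
}
```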
Signed-off-by: Timur Kristóf <timur.kristof@gmail.com>
Reviewed-by: Bas Nieuwenhuizen <bas@basnieuwenhuizen.nl>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/22354>
ring = radv_queue_family_to_ring(device->physical_device, cmd_buffer->qf);
- cmd_buffer->cs = device->ws->cs_create(device->ws, ring);
+ cmd_buffer->cs = device->ws->cs_create(
+ device->ws, ring, cmd_buffer->vk.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY);
if (!cmd_buffer->cs) {
radv_destroy_cmd_buffer(&cmd_buffer->vk);
return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
{
assert(!cmd_buffer->ace_internal.cs);
struct radv_device *device = cmd_buffer->device;
- struct radeon_cmdbuf *ace_cs = device->ws->cs_create(device->ws, AMD_IP_COMPUTE);
+ struct radeon_cmdbuf *ace_cs = device->ws->cs_create(
+ device->ws, AMD_IP_COMPUTE, cmd_buffer->vk.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY);
if (!ace_cs)
vk_command_buffer_set_error(&cmd_buffer->vk, VK_ERROR_OUT_OF_HOST_MEMORY);
struct radeon_info *info = &device->physical_device->rad_info;
VkResult result;
- struct radeon_cmdbuf *cs = ws->cs_create(ws, AMD_IP_GFX);
+ struct radeon_cmdbuf *cs = ws->cs_create(ws, AMD_IP_GFX, false);
if (!cs)
return VK_ERROR_OUT_OF_HOST_MEMORY;
struct radeon_cmdbuf *cs;
VkResult result;
- cs = ws->cs_create(ws, AMD_IP_GFX);
+ cs = ws->cs_create(ws, AMD_IP_GFX, false);
if (!cs)
return VK_ERROR_OUT_OF_HOST_MEMORY;
radv_emit_shadow_regs_preamble(cs, device, &queue->state);
for (int i = 0; i < 3; ++i) {
enum rgp_flush_bits sqtt_flush_bits = 0;
struct radeon_cmdbuf *cs = NULL;
- cs = ws->cs_create(ws, radv_queue_family_to_ring(device->physical_device, queue->qf));
+ cs = ws->cs_create(ws, radv_queue_family_to_ring(device->physical_device, queue->qf), false);
if (!cs) {
result = VK_ERROR_OUT_OF_HOST_MEMORY;
goto fail;
if (r != VK_SUCCESS)
return r;
- struct radeon_cmdbuf *leader_pre_cs = ws->cs_create(ws, leader_ip);
- struct radeon_cmdbuf *leader_post_cs = ws->cs_create(ws, leader_ip);
- struct radeon_cmdbuf *ace_pre_cs = ws->cs_create(ws, AMD_IP_COMPUTE);
- struct radeon_cmdbuf *ace_post_cs = ws->cs_create(ws, AMD_IP_COMPUTE);
+ struct radeon_cmdbuf *leader_pre_cs = ws->cs_create(ws, leader_ip, false);
+ struct radeon_cmdbuf *leader_post_cs = ws->cs_create(ws, leader_ip, false);
+ struct radeon_cmdbuf *ace_pre_cs = ws->cs_create(ws, AMD_IP_COMPUTE, false);
+ struct radeon_cmdbuf *ace_post_cs = ws->cs_create(ws, AMD_IP_COMPUTE, false);
if (!leader_pre_cs || !leader_post_cs || !ace_pre_cs || !ace_post_cs)
goto fail;
if (*cs_ref)
return *cs_ref;
- cs = device->ws->cs_create(device->ws, AMD_IP_GFX);
+ cs = device->ws->cs_create(device->ws, AMD_IP_GFX, false);
if (!cs)
return NULL;
enum radeon_bo_domain (*cs_domain)(const struct radeon_winsys *ws);
- struct radeon_cmdbuf *(*cs_create)(struct radeon_winsys *ws, enum amd_ip_type amd_ip_type);
+ struct radeon_cmdbuf *(*cs_create)(struct radeon_winsys *ws, enum amd_ip_type amd_ip_type,
+ bool is_secondary);
void (*cs_destroy)(struct radeon_cmdbuf *cs);
for (unsigned i = 0; i < RADV_SHADER_UPLOAD_CS_COUNT; i++) {
struct radv_shader_dma_submission *submission = calloc(1, sizeof(struct radv_shader_dma_submission));
- submission->cs = ws->cs_create(ws, AMD_IP_SDMA);
+ submission->cs = ws->cs_create(ws, AMD_IP_SDMA, false);
if (!submission->cs)
return VK_ERROR_OUT_OF_HOST_MEMORY;
list_addtail(&submission->list, &device->shader_dma_submissions);
device->thread_trace.start_cs[family] = NULL;
}
- cs = ws->cs_create(ws, radv_queue_ring(queue));
+ cs = ws->cs_create(ws, radv_queue_ring(queue), false);
if (!cs)
return false;
device->thread_trace.stop_cs[family] = NULL;
}
- cs = ws->cs_create(ws, radv_queue_ring(queue));
+ cs = ws->cs_create(ws, radv_queue_ring(queue), false);
if (!cs)
return false;
void
cik_create_gfx_config(struct radv_device *device)
{
- struct radeon_cmdbuf *cs = device->ws->cs_create(device->ws, AMD_IP_GFX);
+ struct radeon_cmdbuf *cs = device->ws->cs_create(device->ws, AMD_IP_GFX, false);
if (!cs)
return;
VkResult status;
struct radv_amdgpu_cs *chained_to;
bool use_ib;
+ bool is_secondary;
int buffer_hash_table[1024];
unsigned hw_ip;
}
static struct radeon_cmdbuf *
-radv_amdgpu_cs_create(struct radeon_winsys *ws, enum amd_ip_type ip_type)
+radv_amdgpu_cs_create(struct radeon_winsys *ws, enum amd_ip_type ip_type, bool is_secondary)
{
struct radv_amdgpu_cs *cs;
uint32_t ib_pad_dw_mask = MAX2(3, radv_amdgpu_winsys(ws)->info.ib_pad_dw_mask[ip_type]);
if (!cs)
return NULL;
+ cs->is_secondary = is_secondary;
cs->ws = radv_amdgpu_winsys(ws);
radv_amdgpu_init_cs(cs, ip_type);
}
static struct radeon_cmdbuf *
-radv_null_cs_create(struct radeon_winsys *ws, enum amd_ip_type ip_type)
+radv_null_cs_create(struct radeon_winsys *ws, enum amd_ip_type ip_type, UNUSED bool is_secondary)
{
struct radv_null_cs *cs = calloc(1, sizeof(struct radv_null_cs));
if (!cs)