}
/* These are the "align_mask" values copied from the kernel, taking the maximum over all versions of each IP. */
- info->ib_pad_dw_mask[AMD_IP_GFX] = 0xff;
- info->ib_pad_dw_mask[AMD_IP_COMPUTE] = 0xff;
- info->ib_pad_dw_mask[AMD_IP_SDMA] = 0xf;
- info->ib_pad_dw_mask[AMD_IP_UVD] = 0xf;
- info->ib_pad_dw_mask[AMD_IP_VCE] = 0x3f;
- info->ib_pad_dw_mask[AMD_IP_UVD_ENC] = 0x3f;
- info->ib_pad_dw_mask[AMD_IP_VCN_DEC] = 0xf;
- info->ib_pad_dw_mask[AMD_IP_VCN_ENC] = 0x3f;
- info->ib_pad_dw_mask[AMD_IP_VCN_JPEG] = 0xf;
+ info->ip[AMD_IP_GFX].ib_pad_dw_mask = 0xff;
+ info->ip[AMD_IP_COMPUTE].ib_pad_dw_mask = 0xff;
+ info->ip[AMD_IP_SDMA].ib_pad_dw_mask = 0xf;
+ info->ip[AMD_IP_UVD].ib_pad_dw_mask = 0xf;
+ info->ip[AMD_IP_VCE].ib_pad_dw_mask = 0x3f;
+ info->ip[AMD_IP_UVD_ENC].ib_pad_dw_mask = 0x3f;
+ info->ip[AMD_IP_VCN_DEC].ib_pad_dw_mask = 0xf;
+ info->ip[AMD_IP_VCN_ENC].ib_pad_dw_mask = 0x3f;
+ info->ip[AMD_IP_VCN_JPEG].ib_pad_dw_mask = 0xf;
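+ /* Illustrative note (not from the kernel sources): a mask of N means IBs for
+  * that IP must be padded to a multiple of N + 1 dwords, e.g. 0xff -> 256-dword
+  * multiples, 0x3f -> 64, 0xf -> 16; the winsys padding loops below consume
+  * these masks. */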
/* Only require gfx or compute. */
if (!info->ip[AMD_IP_GFX].num_queues && !info->ip[AMD_IP_COMPUTE].num_queues) {
for (unsigned i = 0; i < AMD_NUM_IP_TYPES; i++) {
if (info->ip[i].num_queues) {
- fprintf(f, " IP %-7s %2u.%u \tqueues:%u (align:%u, pad_dw:0x%x)\n", ip_string[i],
+ fprintf(f, " IP %-7s %2u.%u \tqueues:%u \talign:%u \tpad_dw:0x%x\n", ip_string[i],
info->ip[i].ver_major, info->ip[i].ver_minor, info->ip[i].num_queues,
- info->ip[i].ib_alignment, info->ib_pad_dw_mask[i]);
+ info->ip[i].ib_alignment, info->ip[i].ib_pad_dw_mask);
}
}
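/* Sample of the new tab-separated output (values illustrative):
 *  IP GFX      10.3   queues:1   align:256   pad_dw:0xff */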
uint8_t ver_rev;
uint8_t num_queues;
uint32_t ib_alignment;
+ uint32_t ib_pad_dw_mask;
};
struct radeon_info {
bool family_overridden; /* AMD_FORCE_FAMILY was used, skip command submission */
bool is_pro_graphics;
bool has_graphics; /* false if the chip is compute-only */
- uint32_t ib_pad_dw_mask[AMD_NUM_IP_TYPES];
bool has_clear_state;
bool has_distributed_tess;
bool has_dcc_constant_encode;
memcpy(map, preamble_ib, preamble_num_dw * 4);
/* Pad the IB. */
- uint32_t ib_pad_dw_mask = ws->info.ib_pad_dw_mask[cs->ip_type];
+ uint32_t ib_pad_dw_mask = ws->info.ip[cs->ip_type].ib_pad_dw_mask;
while (preamble_num_dw & ib_pad_dw_mask)
map[preamble_num_dw++] = PKT3_NOP_PAD;
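/* Example: with the GFX mask 0xff, a 130-dword preamble keeps receiving NOPs
 * until preamble_num_dw reaches 256, the next multiple of mask + 1. */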
amdgpu_bo_unmap(&ws->dummy_ws.base, preamble_bo);
rcs->current.max_dw += cs_epilog_dw;
/* Pad with NOPs but leave 4 dwords for INDIRECT_BUFFER. */
- uint32_t ib_pad_dw_mask = cs->ws->info.ib_pad_dw_mask[cs->ip_type];
+ uint32_t ib_pad_dw_mask = cs->ws->info.ip[cs->ip_type].ib_pad_dw_mask;
while ((rcs->current.cdw & ib_pad_dw_mask) != ib_pad_dw_mask - 3)
radeon_emit(rcs, PKT3_NOP_PAD);
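/* Worked example: with mask 0xff the loop stops once (cdw & 0xff) == 0xfc,
 * i.e. 4 dwords short of the 256-dword boundary, so the 4-dword
 * INDIRECT_BUFFER packet that ends the IB lands exactly on an aligned size. */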
struct amdgpu_cs *cs = amdgpu_cs(rcs);
struct amdgpu_winsys *ws = cs->ws;
int error_code = 0;
- uint32_t ib_pad_dw_mask = ws->info.ib_pad_dw_mask[cs->ip_type];
+ uint32_t ib_pad_dw_mask = ws->info.ip[cs->ip_type].ib_pad_dw_mask;
rcs->current.max_dw += amdgpu_cs_epilog_dws(cs);