return AMD_IP_COMPUTE;
case RADV_QUEUE_TRANSFER:
return AMD_IP_SDMA;
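+ /* Decode runs on the UVD block on older ASICs and on VCN on newer ones. */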
+ case RADV_QUEUE_VIDEO_DEC:
+ return radv_has_uvd(physical_device) ? AMD_IP_UVD : AMD_IP_VCN_DEC;
+ case RADV_QUEUE_VIDEO_ENC:
+ return AMD_IP_VCN_ENC;
default:
unreachable("Unknown queue family");
}
radv_emit_mip_change_flush_default(cmd_buffer);
- if (cmd_buffer->qf != RADV_QUEUE_TRANSFER) {
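+ /* Only queues that execute GFX/compute work need the end-of-command-buffer cache flushes below. */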
+ if (cmd_buffer->qf == RADV_QUEUE_GENERAL ||
+ cmd_buffer->qf == RADV_QUEUE_COMPUTE) {
if (cmd_buffer->device->physical_device->rad_info.gfx_level == GFX6)
cmd_buffer->state.flush_bits |=
RADV_CMD_FLAG_CS_PARTIAL_FLUSH | RADV_CMD_FLAG_PS_PARTIAL_FLUSH | RADV_CMD_FLAG_WB_L2;
/* Make sure CP DMA is idle at the end of IBs because the kernel
* doesn't wait for it.
*/
- si_cp_dma_wait_for_idle(cmd_buffer);
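+ /* The decode ring is not driven by the CP, so there is no CP DMA to drain on it. */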
+ if (cmd_buffer->qf != RADV_QUEUE_VIDEO_DEC)
+ si_cp_dma_wait_for_idle(cmd_buffer);
radv_describe_end_cmd_buffer(cmd_buffer);
struct vk_command_buffer *const *cmd_buffers, uint32_t cmd_buffer_count,
bool *use_perf_counters, bool *use_ace)
{
- if (queue->qf == RADV_QUEUE_TRANSFER)
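+ /* Only GFX and compute submissions need a preamble (scratch setup, perf counters, ACE); other queue types have nothing to update here. */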
+ if (queue->qf != RADV_QUEUE_GENERAL &&
+ queue->qf != RADV_QUEUE_COMPUTE)
return VK_SUCCESS;
/* Figure out the needs of the current submission.
RADV_QUEUE_GENERAL,
RADV_QUEUE_COMPUTE,
RADV_QUEUE_TRANSFER,
+ RADV_QUEUE_VIDEO_DEC,
+ RADV_QUEUE_VIDEO_ENC,
RADV_MAX_QUEUE_FAMILIES,
RADV_QUEUE_FOREIGN = RADV_MAX_QUEUE_FAMILIES,
RADV_QUEUE_IGNORED,
enum amd_ip_type radv_queue_family_to_ring(struct radv_physical_device *physical_device,
enum radv_queue_family f);
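+/* True if the device exposes a usable UVD decode engine (i.e. a pre-VCN ASIC with a UVD queue). */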
+static inline bool
+radv_has_uvd(struct radv_physical_device *phys_dev)
+{
+ enum radeon_family family = phys_dev->rad_info.family;
+ /* UVD is only supported on TONGA and newer. */
+ if (family < CHIP_TONGA)
+ return false;
+ return phys_dev->rad_info.ip[AMD_IP_UVD].num_queues > 0;
+}
+
struct radv_queue_ring_info {
uint32_t scratch_size_per_wave;
uint32_t scratch_waves;