struct radv_image *image, VkImageLayout layout,
const VkBufferImageCopy2 *region)
{
+ struct radv_device *device = cmd_buffer->device;
if (cmd_buffer->qf == RADV_QUEUE_TRANSFER) {
+ struct radeon_cmdbuf *cs = cmd_buffer->cs;
/* RADV_QUEUE_TRANSFER should only be used for the prime blit */
assert(!region->imageOffset.x && !region->imageOffset.y && !region->imageOffset.z);
assert(image->vk.image_type == VK_IMAGE_TYPE_2D);
assert(image->info.width == region->imageExtent.width);
assert(image->info.height == region->imageExtent.height);
- ASSERTED bool res = radv_sdma_copy_image(cmd_buffer, image, buffer, region);
+ ASSERTED bool res = radv_sdma_copy_image(device, cs, image, buffer, region);
assert(res);
- radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, image->bindings[0].bo);
- radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, buffer->bo);
+ radv_cs_add_buffer(device->ws, cs, image->bindings[0].bo);
+ radv_cs_add_buffer(device->ws, cs, buffer->bo);
return;
}
if (!radv_is_buffer_format_supported(img_info.format, NULL)) {
uint32_t queue_mask = radv_image_queue_family_mask(image, cmd_buffer->qf,
cmd_buffer->qf);
- bool compressed =
- radv_layout_dcc_compressed(cmd_buffer->device, image, region->imageSubresource.mipLevel,
- layout, queue_mask);
+ bool compressed = radv_layout_dcc_compressed(device, image, region->imageSubresource.mipLevel,
+ layout, queue_mask);
if (compressed) {
radv_decompress_dcc(cmd_buffer, image,
&(VkImageSubresourceRange){
#include "radv_private.h"
static bool
-radv_sdma_v4_v5_copy_image_to_buffer(struct radv_cmd_buffer *cmd_buffer, struct radv_image *image,
- struct radv_buffer *buffer,
+radv_sdma_v4_v5_copy_image_to_buffer(struct radv_device *device, struct radeon_cmdbuf *cs,
+ struct radv_image *image, struct radv_buffer *buffer,
const VkBufferImageCopy2 *region)
{
assert(image->plane_count == 1);
- struct radv_device *device = cmd_buffer->device;
unsigned bpp = image->planes[0].surface.bpe;
uint64_t dst_address = buffer->bo->va;
uint64_t src_address = image->bindings[0].bo->va + image->planes[0].surface.u.gfx9.surf_offset;
/* Linear -> linear sub-window copy. */
if (image->planes[0].surface.is_linear) {
- ASSERTED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 7);
+ ASSERTED unsigned cdw_max = radeon_check_space(device->ws, cs, 7);
unsigned bytes = src_pitch * copy_height * bpp;
if (!(bytes < (1u << 22)))
return false;
src_address += image->planes[0].surface.u.gfx9.offset[0];
- radeon_emit(cmd_buffer->cs, CIK_SDMA_PACKET(CIK_SDMA_OPCODE_COPY,
+ radeon_emit(cs, CIK_SDMA_PACKET(CIK_SDMA_OPCODE_COPY,
CIK_SDMA_COPY_SUB_OPCODE_LINEAR, (tmz ? 4 : 0)));
- radeon_emit(cmd_buffer->cs, bytes - 1);
- radeon_emit(cmd_buffer->cs, 0);
- radeon_emit(cmd_buffer->cs, src_address);
- radeon_emit(cmd_buffer->cs, src_address >> 32);
- radeon_emit(cmd_buffer->cs, dst_address);
- radeon_emit(cmd_buffer->cs, dst_address >> 32);
+ radeon_emit(cs, bytes - 1);
+ radeon_emit(cs, 0);
+ radeon_emit(cs, src_address);
+ radeon_emit(cs, src_address >> 32);
+ radeon_emit(cs, dst_address);
+ radeon_emit(cs, dst_address >> 32);
- assert(cmd_buffer->cs->cdw <= cdw_max);
+ assert(cs->cdw <= cdw_max);
return true;
}
linear_slice_pitch < (1 << 28) && copy_width < (1 << 14) && copy_height < (1 << 14)))
return false;
- ASSERTED unsigned cdw_max =
- radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 14 + (dcc ? 3 : 0));
+ ASSERTED unsigned cdw_max = radeon_check_space(device->ws, cs, 14 + (dcc ? 3 : 0));
- radeon_emit(cmd_buffer->cs,
+ radeon_emit(cs,
CIK_SDMA_PACKET(CIK_SDMA_OPCODE_COPY, CIK_SDMA_COPY_SUB_OPCODE_TILED_SUB_WINDOW,
(tmz ? 4 : 0)) |
dcc << 19 | (is_v5 ? 0 : 0 /* tiled->buffer.b.b.last_level */) << 20 |
1u << 31);
- radeon_emit(
- cmd_buffer->cs,
+ radeon_emit(cs,
(uint32_t)tiled_address | (image->planes[0].surface.tile_swizzle << 8));
- radeon_emit(cmd_buffer->cs, (uint32_t)(tiled_address >> 32));
- radeon_emit(cmd_buffer->cs, 0);
- radeon_emit(cmd_buffer->cs, ((tiled_width - 1) << 16));
- radeon_emit(cmd_buffer->cs, (tiled_height - 1));
+ radeon_emit(cs, (uint32_t)(tiled_address >> 32));
+ radeon_emit(cs, 0);
+ radeon_emit(cs, ((tiled_width - 1) << 16));
+ radeon_emit(cs, (tiled_height - 1));
radeon_emit(
- cmd_buffer->cs,
+ cs,
util_logbase2(bpp) | image->planes[0].surface.u.gfx9.swizzle_mode << 3 |
image->planes[0].surface.u.gfx9.resource_type << 9 |
(is_v5 ? 0 /* tiled->buffer.b.b.last_level */ : image->planes[0].surface.u.gfx9.epitch)
<< 16);
- radeon_emit(cmd_buffer->cs, (uint32_t)linear_address);
- radeon_emit(cmd_buffer->cs, (uint32_t)(linear_address >> 32));
- radeon_emit(cmd_buffer->cs, 0);
- radeon_emit(cmd_buffer->cs, ((linear_pitch - 1) << 16));
- radeon_emit(cmd_buffer->cs, linear_slice_pitch - 1);
- radeon_emit(cmd_buffer->cs, (copy_width - 1) | ((copy_height - 1) << 16));
- radeon_emit(cmd_buffer->cs, 0);
+ radeon_emit(cs, (uint32_t)linear_address);
+ radeon_emit(cs, (uint32_t)(linear_address >> 32));
+ radeon_emit(cs, 0);
+ radeon_emit(cs, ((linear_pitch - 1) << 16));
+ radeon_emit(cs, linear_slice_pitch - 1);
+ radeon_emit(cs, (copy_width - 1) | ((copy_height - 1) << 16));
+ radeon_emit(cs, 0);
if (dcc) {
uint64_t md_address = tiled_address + image->planes[0].surface.meta_offset;
hw_type = radv_translate_buffer_numformat(desc, vk_format_get_first_non_void_channel(format));
/* Add metadata */
- radeon_emit(cmd_buffer->cs, (uint32_t)md_address);
- radeon_emit(cmd_buffer->cs, (uint32_t)(md_address >> 32));
- radeon_emit(cmd_buffer->cs,
+ radeon_emit(cs, (uint32_t)md_address);
+ radeon_emit(cs, (uint32_t)(md_address >> 32));
+ radeon_emit(cs,
hw_fmt | vi_alpha_is_on_msb(device, format) << 8 | hw_type << 9 |
image->planes[0].surface.u.gfx9.color.dcc.max_compressed_block_size << 24 |
V_028C78_MAX_BLOCK_SIZE_256B << 26 | tmz << 29 |
image->planes[0].surface.u.gfx9.color.dcc.pipe_aligned << 31);
}
- assert(cmd_buffer->cs->cdw <= cdw_max);
+ assert(cs->cdw <= cdw_max);
return true;
}
}
bool
-radv_sdma_copy_image(struct radv_cmd_buffer *cmd_buffer, struct radv_image *image,
+radv_sdma_copy_image(struct radv_device *device, struct radeon_cmdbuf *cs, struct radv_image *image,
struct radv_buffer *buffer, const VkBufferImageCopy2 *region)
{
- assert(cmd_buffer->device->physical_device->rad_info.gfx_level >= GFX9);
- return radv_sdma_v4_v5_copy_image_to_buffer(cmd_buffer, image, buffer, region);
+ assert(device->physical_device->rad_info.gfx_level >= GFX9);
+ return radv_sdma_v4_v5_copy_image_to_buffer(device, cs, image, buffer, region);
}