struct pipe_video_buffer *target,
struct pipe_picture_desc *picture)
{
- struct r600_texture *luma = (struct r600_texture *)
- ((struct vl_video_buffer *)target)->resources[0];
- struct r600_texture *chroma = (struct r600_texture *)
- ((struct vl_video_buffer *)target)->resources[1];
+ struct si_texture *luma = (struct si_texture *)
+ ((struct vl_video_buffer *)target)->resources[0];
+ struct si_texture *chroma = (struct si_texture *)
+ ((struct vl_video_buffer *)target)->resources[1];
rvcn_dec_message_header_t *header;
rvcn_dec_message_index_t *index;
rvcn_dec_message_decode_t *decode;
}
static unsigned encode_tile_info(struct si_context *sctx,
- struct r600_texture *tex, unsigned level,
+ struct si_texture *tex, unsigned level,
bool set_bpp)
{
struct radeon_info *info = &sctx->screen->info;
const struct pipe_box *src_box)
{
struct radeon_info *info = &sctx->screen->info;
- struct r600_texture *rsrc = (struct r600_texture*)src;
- struct r600_texture *rdst = (struct r600_texture*)dst;
- unsigned bpp = rdst->surface.bpe;
- uint64_t dst_address = rdst->buffer.gpu_address +
- rdst->surface.u.legacy.level[dst_level].offset;
- uint64_t src_address = rsrc->buffer.gpu_address +
- rsrc->surface.u.legacy.level[src_level].offset;
- unsigned dst_mode = rdst->surface.u.legacy.level[dst_level].mode;
- unsigned src_mode = rsrc->surface.u.legacy.level[src_level].mode;
- unsigned dst_tile_index = rdst->surface.u.legacy.tiling_index[dst_level];
- unsigned src_tile_index = rsrc->surface.u.legacy.tiling_index[src_level];
+ struct si_texture *ssrc = (struct si_texture*)src;
+ struct si_texture *sdst = (struct si_texture*)dst;
+ unsigned bpp = sdst->surface.bpe;
+ uint64_t dst_address = sdst->buffer.gpu_address +
+ sdst->surface.u.legacy.level[dst_level].offset;
+ uint64_t src_address = ssrc->buffer.gpu_address +
+ ssrc->surface.u.legacy.level[src_level].offset;
+ unsigned dst_mode = sdst->surface.u.legacy.level[dst_level].mode;
+ unsigned src_mode = ssrc->surface.u.legacy.level[src_level].mode;
+ unsigned dst_tile_index = sdst->surface.u.legacy.tiling_index[dst_level];
+ unsigned src_tile_index = ssrc->surface.u.legacy.tiling_index[src_level];
unsigned dst_tile_mode = info->si_tile_mode_array[dst_tile_index];
unsigned src_tile_mode = info->si_tile_mode_array[src_tile_index];
unsigned dst_micro_mode = G_009910_MICRO_TILE_MODE_NEW(dst_tile_mode);
unsigned src_micro_mode = G_009910_MICRO_TILE_MODE_NEW(src_tile_mode);
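/* G_009910_MICRO_TILE_MODE_NEW decodes the micro tile mode (display,
* thin, rotated, depth) from the GB_TILE_MODE dword; the copy paths
* below use it to match source/destination layouts and reject ones
* the SDMA engine can't handle. */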
unsigned dst_tile_swizzle = dst_mode == RADEON_SURF_MODE_2D ?
- rdst->surface.tile_swizzle : 0;
+ sdst->surface.tile_swizzle : 0;
unsigned src_tile_swizzle = src_mode == RADEON_SURF_MODE_2D ?
- rsrc->surface.tile_swizzle : 0;
- unsigned dst_pitch = rdst->surface.u.legacy.level[dst_level].nblk_x;
- unsigned src_pitch = rsrc->surface.u.legacy.level[src_level].nblk_x;
- uint64_t dst_slice_pitch = ((uint64_t)rdst->surface.u.legacy.level[dst_level].slice_size_dw * 4) / bpp;
- uint64_t src_slice_pitch = ((uint64_t)rsrc->surface.u.legacy.level[src_level].slice_size_dw * 4) / bpp;
- unsigned dst_width = minify_as_blocks(rdst->buffer.b.b.width0,
- dst_level, rdst->surface.blk_w);
- unsigned src_width = minify_as_blocks(rsrc->buffer.b.b.width0,
- src_level, rsrc->surface.blk_w);
- unsigned dst_height = minify_as_blocks(rdst->buffer.b.b.height0,
- dst_level, rdst->surface.blk_h);
- unsigned src_height = minify_as_blocks(rsrc->buffer.b.b.height0,
- src_level, rsrc->surface.blk_h);
- unsigned srcx = src_box->x / rsrc->surface.blk_w;
- unsigned srcy = src_box->y / rsrc->surface.blk_h;
+ ssrc->surface.tile_swizzle : 0;
+ unsigned dst_pitch = sdst->surface.u.legacy.level[dst_level].nblk_x;
+ unsigned src_pitch = ssrc->surface.u.legacy.level[src_level].nblk_x;
+ uint64_t dst_slice_pitch = ((uint64_t)sdst->surface.u.legacy.level[dst_level].slice_size_dw * 4) / bpp;
+ uint64_t src_slice_pitch = ((uint64_t)ssrc->surface.u.legacy.level[src_level].slice_size_dw * 4) / bpp;
+ unsigned dst_width = minify_as_blocks(sdst->buffer.b.b.width0,
+ dst_level, sdst->surface.blk_w);
+ unsigned src_width = minify_as_blocks(ssrc->buffer.b.b.width0,
+ src_level, ssrc->surface.blk_w);
+ unsigned dst_height = minify_as_blocks(sdst->buffer.b.b.height0,
+ dst_level, sdst->surface.blk_h);
+ unsigned src_height = minify_as_blocks(ssrc->buffer.b.b.height0,
+ src_level, ssrc->surface.blk_h);
+ unsigned srcx = src_box->x / ssrc->surface.blk_w;
+ unsigned srcy = src_box->y / ssrc->surface.blk_h;
unsigned srcz = src_box->z;
- unsigned copy_width = DIV_ROUND_UP(src_box->width, rsrc->surface.blk_w);
- unsigned copy_height = DIV_ROUND_UP(src_box->height, rsrc->surface.blk_h);
+ unsigned copy_width = DIV_ROUND_UP(src_box->width, ssrc->surface.blk_w);
+ unsigned copy_height = DIV_ROUND_UP(src_box->height, ssrc->surface.blk_h);
unsigned copy_depth = src_box->depth;
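/* All widths, heights and coordinates above are in units of surface
* blocks (blk_w x blk_h pixels), so block-compressed formats are
* copied whole blocks at a time. */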
assert(src_level <= src->last_level);
assert(dst_level <= dst->last_level);
- assert(rdst->surface.u.legacy.level[dst_level].offset +
+ assert(sdst->surface.u.legacy.level[dst_level].offset +
dst_slice_pitch * bpp * (dstz + src_box->depth) <=
- rdst->buffer.buf->size);
- assert(rsrc->surface.u.legacy.level[src_level].offset +
+ sdst->buffer.buf->size);
+ assert(ssrc->surface.u.legacy.level[src_level].offset +
src_slice_pitch * bpp * (srcz + src_box->depth) <=
- rsrc->buffer.buf->size);
+ ssrc->buffer.buf->size);
- if (!si_prepare_for_dma_blit(sctx, rdst, dst_level, dstx, dsty,
- dstz, rsrc, src_level, src_box))
+ if (!si_prepare_for_dma_blit(sctx, sdst, dst_level, dstx, dsty,
+ dstz, ssrc, src_level, src_box))
return false;
- dstx /= rdst->surface.blk_w;
- dsty /= rdst->surface.blk_h;
+ dstx /= sdst->surface.blk_w;
+ dsty /= sdst->surface.blk_h;
if (srcx >= (1 << 14) ||
srcy >= (1 << 14) ||
srcy + copy_height != (1 << 14)))) {
struct radeon_cmdbuf *cs = sctx->dma_cs;
- si_need_dma_space(sctx, 13, &rdst->buffer, &rsrc->buffer);
+ si_need_dma_space(sctx, 13, &sdst->buffer, &ssrc->buffer);
radeon_emit(cs, CIK_SDMA_PACKET(CIK_SDMA_OPCODE_COPY,
CIK_SDMA_COPY_SUB_OPCODE_LINEAR_SUB_WINDOW, 0) |
/* Tiled <-> linear sub-window copy. */
if ((src_mode >= RADEON_SURF_MODE_1D) != (dst_mode >= RADEON_SURF_MODE_1D)) {
- struct r600_texture *tiled = src_mode >= RADEON_SURF_MODE_1D ? rsrc : rdst;
- struct r600_texture *linear = tiled == rsrc ? rdst : rsrc;
- unsigned tiled_level = tiled == rsrc ? src_level : dst_level;
- unsigned linear_level = linear == rsrc ? src_level : dst_level;
- unsigned tiled_x = tiled == rsrc ? srcx : dstx;
- unsigned linear_x = linear == rsrc ? srcx : dstx;
- unsigned tiled_y = tiled == rsrc ? srcy : dsty;
- unsigned linear_y = linear == rsrc ? srcy : dsty;
- unsigned tiled_z = tiled == rsrc ? srcz : dstz;
- unsigned linear_z = linear == rsrc ? srcz : dstz;
- unsigned tiled_width = tiled == rsrc ? src_width : dst_width;
- unsigned linear_width = linear == rsrc ? src_width : dst_width;
- unsigned tiled_pitch = tiled == rsrc ? src_pitch : dst_pitch;
- unsigned linear_pitch = linear == rsrc ? src_pitch : dst_pitch;
- unsigned tiled_slice_pitch = tiled == rsrc ? src_slice_pitch : dst_slice_pitch;
- unsigned linear_slice_pitch = linear == rsrc ? src_slice_pitch : dst_slice_pitch;
- uint64_t tiled_address = tiled == rsrc ? src_address : dst_address;
- uint64_t linear_address = linear == rsrc ? src_address : dst_address;
- unsigned tiled_micro_mode = tiled == rsrc ? src_micro_mode : dst_micro_mode;
+ struct si_texture *tiled = src_mode >= RADEON_SURF_MODE_1D ? ssrc : sdst;
+ struct si_texture *linear = tiled == ssrc ? sdst : ssrc;
+ unsigned tiled_level = tiled == ssrc ? src_level : dst_level;
+ unsigned linear_level = linear == ssrc ? src_level : dst_level;
+ unsigned tiled_x = tiled == ssrc ? srcx : dstx;
+ unsigned linear_x = linear == ssrc ? srcx : dstx;
+ unsigned tiled_y = tiled == ssrc ? srcy : dsty;
+ unsigned linear_y = linear == ssrc ? srcy : dsty;
+ unsigned tiled_z = tiled == ssrc ? srcz : dstz;
+ unsigned linear_z = linear == ssrc ? srcz : dstz;
+ unsigned tiled_width = tiled == ssrc ? src_width : dst_width;
+ unsigned linear_width = linear == ssrc ? src_width : dst_width;
+ unsigned tiled_pitch = tiled == ssrc ? src_pitch : dst_pitch;
+ unsigned linear_pitch = linear == ssrc ? src_pitch : dst_pitch;
+ unsigned tiled_slice_pitch = tiled == ssrc ? src_slice_pitch : dst_slice_pitch;
+ unsigned linear_slice_pitch = linear == ssrc ? src_slice_pitch : dst_slice_pitch;
+ uint64_t tiled_address = tiled == ssrc ? src_address : dst_address;
+ uint64_t linear_address = linear == ssrc ? src_address : dst_address;
+ unsigned tiled_micro_mode = tiled == ssrc ? src_micro_mode : dst_micro_mode;
assert(tiled_pitch % 8 == 0);
assert(tiled_slice_pitch % 64 == 0);
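/* The tiled pitch must be a multiple of 8 blocks and the tiled slice
* pitch a multiple of 64 blocks; these look like encoding constraints
* of the tiled sub-window copy packet. */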
copy_height <= (1 << 14) &&
copy_depth <= (1 << 11)) {
struct radeon_cmdbuf *cs = sctx->dma_cs;
- uint32_t direction = linear == rdst ? 1u << 31 : 0;
+ uint32_t direction = linear == sdst ? 1u << 31 : 0;
- si_need_dma_space(sctx, 14, &rdst->buffer, &rsrc->buffer);
+ si_need_dma_space(sctx, 14, &sdst->buffer, &ssrc->buffer);
radeon_emit(cs, CIK_SDMA_PACKET(CIK_SDMA_OPCODE_COPY,
CIK_SDMA_COPY_SUB_OPCODE_TILED_SUB_WINDOW, 0) |
/* check if these fit into the bitfields */
src_address % 256 == 0 &&
dst_address % 256 == 0 &&
- rsrc->surface.u.legacy.tile_split <= 4096 &&
- rdst->surface.u.legacy.tile_split <= 4096 &&
+ ssrc->surface.u.legacy.tile_split <= 4096 &&
+ sdst->surface.u.legacy.tile_split <= 4096 &&
dstx % 8 == 0 &&
dsty % 8 == 0 &&
srcx % 8 == 0 &&
dstx + copy_width != (1 << 14)))) {
struct radeon_cmdbuf *cs = sctx->dma_cs;
- si_need_dma_space(sctx, 15, &rdst->buffer, &rsrc->buffer);
+ si_need_dma_space(sctx, 15, &sdst->buffer, &ssrc->buffer);
radeon_emit(cs, CIK_SDMA_PACKET(CIK_SDMA_OPCODE_COPY,
CIK_SDMA_COPY_SUB_OPCODE_T2T_SUB_WINDOW, 0));
radeon_emit(cs, srcx | (srcy << 16));
radeon_emit(cs, srcz | (src_pitch_tile_max << 16));
radeon_emit(cs, src_slice_tile_max);
- radeon_emit(cs, encode_tile_info(sctx, rsrc, src_level, true));
+ radeon_emit(cs, encode_tile_info(sctx, ssrc, src_level, true));
radeon_emit(cs, dst_address);
radeon_emit(cs, dst_address >> 32);
radeon_emit(cs, dstx | (dsty << 16));
radeon_emit(cs, dstz | (dst_pitch_tile_max << 16));
radeon_emit(cs, dst_slice_tile_max);
- radeon_emit(cs, encode_tile_info(sctx, rdst, dst_level, false));
+ radeon_emit(cs, encode_tile_info(sctx, sdst, dst_level, false));
if (sctx->chip_class == CIK) {
radeon_emit(cs, copy_width_aligned |
(copy_height_aligned << 16));
static unsigned
si_blit_dbcb_copy(struct si_context *sctx,
- struct r600_texture *src,
- struct r600_texture *dst,
+ struct si_texture *src,
+ struct si_texture *dst,
unsigned planes, unsigned level_mask,
unsigned first_layer, unsigned last_layer,
unsigned first_sample, unsigned last_sample)
}
void si_blit_decompress_depth(struct pipe_context *ctx,
- struct r600_texture *texture,
- struct r600_texture *staging,
+ struct si_texture *texture,
+ struct si_texture *staging,
unsigned first_level, unsigned last_level,
unsigned first_layer, unsigned last_layer,
unsigned first_sample, unsigned last_sample)
*/
static void
si_blit_decompress_zs_planes_in_place(struct si_context *sctx,
- struct r600_texture *texture,
+ struct si_texture *texture,
unsigned planes, unsigned level_mask,
unsigned first_layer, unsigned last_layer)
{
*/
static void
si_blit_decompress_zs_in_place(struct si_context *sctx,
- struct r600_texture *texture,
+ struct si_texture *texture,
unsigned levels_z, unsigned levels_s,
unsigned first_layer, unsigned last_layer)
{
static void
si_decompress_depth(struct si_context *sctx,
- struct r600_texture *tex,
+ struct si_texture *tex,
unsigned required_planes,
unsigned first_level, unsigned last_level,
unsigned first_layer, unsigned last_layer)
if (copy_planes &&
(tex->flushed_depth_texture ||
si_init_flushed_depth_texture(&sctx->b, &tex->buffer.b.b, NULL))) {
- struct r600_texture *dst = tex->flushed_depth_texture;
+ struct si_texture *dst = tex->flushed_depth_texture;
unsigned fully_copied_levels;
unsigned levels = 0;
while (mask) {
struct pipe_sampler_view *view;
struct si_sampler_view *sview;
- struct r600_texture *tex;
+ struct si_texture *tex;
i = u_bit_scan(&mask);
assert(view);
sview = (struct si_sampler_view*)view;
- tex = (struct r600_texture *)view->texture;
+ tex = (struct si_texture *)view->texture;
assert(tex->db_compatible);
si_decompress_depth(sctx, tex,
}
static void si_blit_decompress_color(struct si_context *sctx,
- struct r600_texture *rtex,
- unsigned first_level, unsigned last_level,
- unsigned first_layer, unsigned last_layer,
- bool need_dcc_decompress)
+ struct si_texture *tex,
+ unsigned first_level, unsigned last_level,
+ unsigned first_layer, unsigned last_layer,
+ bool need_dcc_decompress)
{
void* custom_blend;
unsigned layer, checked_last_layer, max_layer;
u_bit_consecutive(first_level, last_level - first_level + 1);
if (!need_dcc_decompress)
- level_mask &= rtex->dirty_level_mask;
+ level_mask &= tex->dirty_level_mask;
if (!level_mask)
return;
if (need_dcc_decompress) {
custom_blend = sctx->custom_blend_dcc_decompress;
- assert(rtex->dcc_offset);
+ assert(tex->dcc_offset);
/* disable levels without DCC */
for (int i = first_level; i <= last_level; i++) {
- if (!vi_dcc_enabled(rtex, i))
+ if (!vi_dcc_enabled(tex, i))
level_mask &= ~(1 << i);
}
- } else if (rtex->surface.fmask_size) {
+ } else if (tex->surface.fmask_size) {
custom_blend = sctx->custom_blend_fmask_decompress;
} else {
custom_blend = sctx->custom_blend_eliminate_fastclear;
/* The smaller the mipmap (i.e. the higher the level), the fewer layers
* there are, as far as 3D textures are concerned. */
- max_layer = util_max_layer(&rtex->buffer.b.b, level);
+ max_layer = util_max_layer(&tex->buffer.b.b, level);
checked_last_layer = MIN2(last_layer, max_layer);
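/* Example: a 3D texture with depth 8 has max_layer 7 at level 0 but
* only 3 at level 1, so last_layer must be re-clamped per level. */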
for (layer = first_layer; layer <= checked_last_layer; layer++) {
struct pipe_surface *cbsurf, surf_tmpl;
- surf_tmpl.format = rtex->buffer.b.b.format;
+ surf_tmpl.format = tex->buffer.b.b.format;
surf_tmpl.u.tex.level = level;
surf_tmpl.u.tex.first_layer = layer;
surf_tmpl.u.tex.last_layer = layer;
- cbsurf = sctx->b.create_surface(&sctx->b, &rtex->buffer.b.b, &surf_tmpl);
+ cbsurf = sctx->b.create_surface(&sctx->b, &tex->buffer.b.b, &surf_tmpl);
/* Required before and after FMASK and DCC_DECOMPRESS. */
if (custom_blend == sctx->custom_blend_fmask_decompress ||
/* The texture will always be dirty if some layers aren't flushed.
* I don't think this case occurs often though. */
if (first_layer == 0 && last_layer >= max_layer) {
- rtex->dirty_level_mask &= ~(1 << level);
+ tex->dirty_level_mask &= ~(1 << level);
}
}
sctx->decompression_enabled = false;
- si_make_CB_shader_coherent(sctx, rtex->buffer.b.b.nr_samples,
- vi_dcc_enabled(rtex, first_level));
+ si_make_CB_shader_coherent(sctx, tex->buffer.b.b.nr_samples,
+ vi_dcc_enabled(tex, first_level));
}
static void
-si_decompress_color_texture(struct si_context *sctx, struct r600_texture *tex,
+si_decompress_color_texture(struct si_context *sctx, struct si_texture *tex,
unsigned first_level, unsigned last_level)
{
/* CMASK or DCC can be discarded and we can still end up here. */
while (mask) {
struct pipe_sampler_view *view;
- struct r600_texture *tex;
+ struct si_texture *tex;
i = u_bit_scan(&mask);
view = textures->views[i];
assert(view);
- tex = (struct r600_texture *)view->texture;
+ tex = (struct si_texture *)view->texture;
si_decompress_color_texture(sctx, tex, view->u.tex.first_level,
view->u.tex.last_level);
while (mask) {
const struct pipe_image_view *view;
- struct r600_texture *tex;
+ struct si_texture *tex;
i = u_bit_scan(&mask);
view = &images->views[i];
assert(view->resource->target != PIPE_BUFFER);
- tex = (struct r600_texture *)view->resource;
+ tex = (struct si_texture *)view->resource;
si_decompress_color_texture(sctx, tex, view->u.tex.level,
view->u.tex.level);
}
static void si_check_render_feedback_texture(struct si_context *sctx,
- struct r600_texture *tex,
+ struct si_texture *tex,
unsigned first_level,
unsigned last_level,
unsigned first_layer,
surf = (struct r600_surface*)sctx->framebuffer.state.cbufs[j];
- if (tex == (struct r600_texture *)surf->base.texture &&
+ if (tex == (struct si_texture *)surf->base.texture &&
surf->base.u.tex.level >= first_level &&
surf->base.u.tex.level <= last_level &&
surf->base.u.tex.first_layer <= last_layer &&
while (mask) {
const struct pipe_sampler_view *view;
- struct r600_texture *tex;
+ struct si_texture *tex;
unsigned i = u_bit_scan(&mask);
if (view->texture->target == PIPE_BUFFER)
continue;
- tex = (struct r600_texture *)view->texture;
+ tex = (struct si_texture *)view->texture;
si_check_render_feedback_texture(sctx, tex,
view->u.tex.first_level,
while (mask) {
const struct pipe_image_view *view;
- struct r600_texture *tex;
+ struct si_texture *tex;
unsigned i = u_bit_scan(&mask);
if (view->resource->target == PIPE_BUFFER)
continue;
- tex = (struct r600_texture *)view->resource;
+ tex = (struct si_texture *)view->resource;
si_check_render_feedback_texture(sctx, tex,
view->u.tex.level,
util_dynarray_foreach(&sctx->resident_tex_handles,
struct si_texture_handle *, tex_handle) {
struct pipe_sampler_view *view;
- struct r600_texture *tex;
+ struct si_texture *tex;
view = (*tex_handle)->view;
if (view->texture->target == PIPE_BUFFER)
continue;
- tex = (struct r600_texture *)view->texture;
+ tex = (struct si_texture *)view->texture;
si_check_render_feedback_texture(sctx, tex,
view->u.tex.first_level,
util_dynarray_foreach(&sctx->resident_img_handles,
struct si_image_handle *, img_handle) {
struct pipe_image_view *view;
- struct r600_texture *tex;
+ struct si_texture *tex;
view = &(*img_handle)->view;
if (view->resource->target == PIPE_BUFFER)
continue;
- tex = (struct r600_texture *)view->resource;
+ tex = (struct si_texture *)view->resource;
si_check_render_feedback_texture(sctx, tex,
view->u.tex.level,
util_dynarray_foreach(&sctx->resident_tex_needs_color_decompress,
struct si_texture_handle *, tex_handle) {
struct pipe_sampler_view *view = (*tex_handle)->view;
- struct r600_texture *tex = (struct r600_texture *)view->texture;
+ struct si_texture *tex = (struct si_texture *)view->texture;
si_decompress_color_texture(sctx, tex, view->u.tex.first_level,
view->u.tex.last_level);
struct si_texture_handle *, tex_handle) {
struct pipe_sampler_view *view = (*tex_handle)->view;
struct si_sampler_view *sview = (struct si_sampler_view *)view;
- struct r600_texture *tex = (struct r600_texture *)view->texture;
+ struct si_texture *tex = (struct si_texture *)view->texture;
si_decompress_depth(sctx, tex,
sview->is_stencil_sampler ? PIPE_MASK_S : PIPE_MASK_Z,
util_dynarray_foreach(&sctx->resident_img_needs_color_decompress,
struct si_image_handle *, img_handle) {
struct pipe_image_view *view = &(*img_handle)->view;
- struct r600_texture *tex = (struct r600_texture *)view->resource;
+ struct si_texture *tex = (struct si_texture *)view->resource;
si_decompress_color_texture(sctx, tex, view->u.tex.level,
view->u.tex.level);
if (sctx->ps_uses_fbfetch) {
struct pipe_surface *cb0 = sctx->framebuffer.state.cbufs[0];
si_decompress_color_texture(sctx,
- (struct r600_texture*)cb0->texture,
+ (struct si_texture*)cb0->texture,
cb0->u.tex.first_layer,
cb0->u.tex.last_layer);
}
unsigned first_layer, unsigned last_layer)
{
struct si_context *sctx = (struct si_context *)ctx;
- struct r600_texture *rtex = (struct r600_texture*)tex;
+ struct si_texture *stex = (struct si_texture*)tex;
- if (rtex->db_compatible) {
+ if (stex->db_compatible) {
planes &= PIPE_MASK_Z | PIPE_MASK_S;
- if (!rtex->surface.has_stencil)
+ if (!stex->surface.has_stencil)
planes &= ~PIPE_MASK_S;
/* If we've rendered into the framebuffer and it's a blitting
sctx->framebuffer.state.zsbuf->texture == tex)
si_update_fb_dirtiness_after_rendering(sctx);
- si_decompress_depth(sctx, rtex, planes,
+ si_decompress_depth(sctx, stex, planes,
level, level,
first_layer, last_layer);
- } else if (rtex->surface.fmask_size || rtex->cmask.size || rtex->dcc_offset) {
+ } else if (stex->surface.fmask_size || stex->cmask.size || stex->dcc_offset) {
/* If we've rendered into the framebuffer and it's a blitting
* source, make sure the decompression pass is invoked
* by dirtying the framebuffer.
}
}
- si_blit_decompress_color(sctx, rtex, level, level,
+ si_blit_decompress_color(sctx, stex, level, level,
first_layer, last_layer, false);
}
}
const struct pipe_box *src_box)
{
struct si_context *sctx = (struct si_context *)ctx;
- struct r600_texture *rsrc = (struct r600_texture*)src;
+ struct si_texture *ssrc = (struct si_texture*)src;
struct pipe_surface *dst_view, dst_templ;
struct pipe_sampler_view src_templ, *src_view;
unsigned dst_width, dst_height, src_width0, src_height0;
if (util_format_is_compressed(src->format) ||
util_format_is_compressed(dst->format)) {
- unsigned blocksize = rsrc->surface.bpe;
+ unsigned blocksize = ssrc->surface.bpe;
if (blocksize == 8)
src_templ.format = PIPE_FORMAT_R16G16B16A16_UINT; /* 64-bit block */
sbox.width = util_format_get_nblocksx(src->format, src_box->width);
src_box = &sbox;
} else {
- unsigned blocksize = rsrc->surface.bpe;
+ unsigned blocksize = ssrc->surface.bpe;
switch (blocksize) {
case 1:
const struct pipe_blit_info *info)
{
struct si_context *sctx = (struct si_context*)ctx;
- struct r600_texture *src = (struct r600_texture*)info->src.resource;
- struct r600_texture *dst = (struct r600_texture*)info->dst.resource;
- MAYBE_UNUSED struct r600_texture *rtmp;
+ struct si_texture *src = (struct si_texture*)info->src.resource;
+ struct si_texture *dst = (struct si_texture*)info->dst.resource;
+ MAYBE_UNUSED struct si_texture *stmp;
unsigned dst_width = u_minify(info->dst.resource->width0, info->dst.level);
unsigned dst_height = u_minify(info->dst.resource->height0, info->dst.level);
enum pipe_format format = info->src.format;
tmp = ctx->screen->resource_create(ctx->screen, &templ);
if (!tmp)
return false;
- rtmp = (struct r600_texture*)tmp;
+ stmp = (struct si_texture*)tmp;
- assert(!rtmp->surface.is_linear);
- assert(src->surface.micro_tile_mode == rtmp->surface.micro_tile_mode);
+ assert(!stmp->surface.is_linear);
+ assert(src->surface.micro_tile_mode == stmp->surface.micro_tile_mode);
/* resolve */
si_do_CB_resolve(sctx, info, tmp, 0, 0, format);
const struct pipe_blit_info *info)
{
struct si_context *sctx = (struct si_context*)ctx;
- struct r600_texture *rdst = (struct r600_texture *)info->dst.resource;
+ struct si_texture *dst = (struct si_texture *)info->dst.resource;
if (do_hardware_msaa_resolve(ctx, info)) {
return;
* resource_copy_region can't do this yet, because dma_copy calls it
* on failure (recursion).
*/
- if (rdst->surface.is_linear &&
+ if (dst->surface.is_linear &&
sctx->dma_copy &&
util_can_blit_via_copy_region(info, false)) {
sctx->dma_copy(ctx, info->dst.resource, info->dst.level,
unsigned first_layer, unsigned last_layer)
{
struct si_context *sctx = (struct si_context*)ctx;
- struct r600_texture *rtex = (struct r600_texture *)tex;
+ struct si_texture *stex = (struct si_texture *)tex;
if (!util_blitter_is_copy_supported(sctx->blitter, tex, tex))
return false;
/* Clear dirty_level_mask for the levels that will be overwritten. */
assert(base_level < last_level);
- rtex->dirty_level_mask &= ~u_bit_consecutive(base_level + 1,
+ stex->dirty_level_mask &= ~u_bit_consecutive(base_level + 1,
last_level - base_level);
- sctx->generate_mipmap_for_depth = rtex->is_depth;
+ sctx->generate_mipmap_for_depth = stex->is_depth;
si_blitter_begin(sctx, SI_BLIT | SI_DISABLE_RENDER_COND);
util_blitter_generate_mipmap(sctx->blitter, tex, format,
struct pipe_resource *res)
{
struct si_context *sctx = (struct si_context*)ctx;
- struct r600_texture *rtex = (struct r600_texture*)res;
+ struct si_texture *tex = (struct si_texture*)res;
assert(res->target != PIPE_BUFFER);
- assert(!rtex->dcc_separate_buffer || rtex->dcc_gather_statistics);
+ assert(!tex->dcc_separate_buffer || tex->dcc_gather_statistics);
/* st/dri calls flush twice per frame (not a bug), this prevents double
* decompression. */
- if (rtex->dcc_separate_buffer && !rtex->separate_dcc_dirty)
+ if (tex->dcc_separate_buffer && !tex->separate_dcc_dirty)
return;
- if (!rtex->is_depth && (rtex->cmask.size || rtex->dcc_offset)) {
- si_blit_decompress_color(sctx, rtex, 0, res->last_level,
+ if (!tex->is_depth && (tex->cmask.size || tex->dcc_offset)) {
+ si_blit_decompress_color(sctx, tex, 0, res->last_level,
0, util_max_layer(res, 0),
- rtex->dcc_separate_buffer != NULL);
+ tex->dcc_separate_buffer != NULL);
}
/* Always do the analysis even if DCC is disabled at the moment. */
- if (rtex->dcc_gather_statistics && rtex->separate_dcc_dirty) {
- rtex->separate_dcc_dirty = false;
- vi_separate_dcc_process_and_reset_stats(ctx, rtex);
+ if (tex->dcc_gather_statistics && tex->separate_dcc_dirty) {
+ tex->separate_dcc_dirty = false;
+ vi_separate_dcc_process_and_reset_stats(ctx, tex);
}
}
-void si_decompress_dcc(struct si_context *sctx, struct r600_texture *rtex)
+void si_decompress_dcc(struct si_context *sctx, struct si_texture *tex)
{
- if (!rtex->dcc_offset)
+ if (!tex->dcc_offset)
return;
- si_blit_decompress_color(sctx, rtex, 0, rtex->buffer.b.b.last_level,
- 0, util_max_layer(&rtex->buffer.b.b, 0),
+ si_blit_decompress_color(sctx, tex, 0, tex->buffer.b.b.last_level,
+ 0, util_max_layer(&tex->buffer.b.b, 0),
true);
}
struct r600_resource *res,
uint64_t size, unsigned alignment)
{
- struct r600_texture *rtex = (struct r600_texture*)res;
+ struct si_texture *tex = (struct si_texture*)res;
res->bo_size = size;
res->bo_alignment = alignment;
}
/* Tiled textures are unmappable. Always put them in VRAM. */
- if ((res->b.b.target != PIPE_BUFFER && !rtex->surface.is_linear) ||
+ if ((res->b.b.target != PIPE_BUFFER && !tex->surface.is_linear) ||
res->b.b.flags & SI_RESOURCE_FLAG_UNMAPPABLE) {
res->domains = RADEON_DOMAIN_VRAM;
res->flags |= RADEON_FLAG_NO_CPU_ACCESS |
};
static void si_alloc_separate_cmask(struct si_screen *sscreen,
- struct r600_texture *rtex)
+ struct si_texture *tex)
{
- if (rtex->cmask_buffer)
+ if (tex->cmask_buffer)
return;
- assert(rtex->cmask.size == 0);
+ assert(tex->cmask.size == 0);
- si_texture_get_cmask_info(sscreen, rtex, &rtex->cmask);
- if (!rtex->cmask.size)
+ si_texture_get_cmask_info(sscreen, tex, &tex->cmask);
+ if (!tex->cmask.size)
return;
- rtex->cmask_buffer =
+ tex->cmask_buffer =
si_aligned_buffer_create(&sscreen->b,
SI_RESOURCE_FLAG_UNMAPPABLE,
PIPE_USAGE_DEFAULT,
- rtex->cmask.size,
- rtex->cmask.alignment);
- if (rtex->cmask_buffer == NULL) {
- rtex->cmask.size = 0;
+ tex->cmask.size,
+ tex->cmask.alignment);
+ if (tex->cmask_buffer == NULL) {
+ tex->cmask.size = 0;
return;
}
/* update colorbuffer state bits */
- rtex->cmask.base_address_reg = rtex->cmask_buffer->gpu_address >> 8;
+ tex->cmask.base_address_reg = tex->cmask_buffer->gpu_address >> 8;
- rtex->cb_color_info |= S_028C70_FAST_CLEAR(1);
+ tex->cb_color_info |= S_028C70_FAST_CLEAR(1);
p_atomic_inc(&sscreen->compressed_colortex_counter);
}
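/* Pack the clear color for the surface format and update the tracked
* value; returns true if it changed, so the caller knows the fast
* clear state must be updated. */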
-static bool si_set_clear_color(struct r600_texture *rtex,
+static bool si_set_clear_color(struct si_texture *tex,
enum pipe_format surface_format,
const union pipe_color_union *color)
{
memset(&uc, 0, sizeof(uc));
- if (rtex->surface.bpe == 16) {
+ if (tex->surface.bpe == 16) {
/* DCC fast clear only:
* CLEAR_WORD0 = R = G = B
* CLEAR_WORD1 = A
util_pack_color(color->f, surface_format, &uc);
}
- if (memcmp(rtex->color_clear_value, &uc, 2 * sizeof(uint32_t)) == 0)
+ if (memcmp(tex->color_clear_value, &uc, 2 * sizeof(uint32_t)) == 0)
return false;
- memcpy(rtex->color_clear_value, &uc, 2 * sizeof(uint32_t));
+ memcpy(tex->color_clear_value, &uc, 2 * sizeof(uint32_t));
return true;
}
}
void vi_dcc_clear_level(struct si_context *sctx,
- struct r600_texture *rtex,
+ struct si_texture *tex,
unsigned level, unsigned clear_value)
{
struct pipe_resource *dcc_buffer;
uint64_t dcc_offset, clear_size;
- assert(vi_dcc_enabled(rtex, level));
+ assert(vi_dcc_enabled(tex, level));
- if (rtex->dcc_separate_buffer) {
- dcc_buffer = &rtex->dcc_separate_buffer->b.b;
+ if (tex->dcc_separate_buffer) {
+ dcc_buffer = &tex->dcc_separate_buffer->b.b;
dcc_offset = 0;
} else {
- dcc_buffer = &rtex->buffer.b.b;
- dcc_offset = rtex->dcc_offset;
+ dcc_buffer = &tex->buffer.b.b;
+ dcc_offset = tex->dcc_offset;
}
if (sctx->chip_class >= GFX9) {
/* Mipmap level clears aren't implemented. */
- assert(rtex->buffer.b.b.last_level == 0);
+ assert(tex->buffer.b.b.last_level == 0);
/* 4x and 8x MSAA needs a sophisticated compute shader for
* the clear. See AMDVLK. */
- assert(rtex->num_color_samples <= 2);
- clear_size = rtex->surface.dcc_size;
+ assert(tex->num_color_samples <= 2);
+ clear_size = tex->surface.dcc_size;
} else {
- unsigned num_layers = util_num_layers(&rtex->buffer.b.b, level);
+ unsigned num_layers = util_num_layers(&tex->buffer.b.b, level);
/* If this is 0, fast clear isn't possible. (can occur with MSAA) */
- assert(rtex->surface.u.legacy.level[level].dcc_fast_clear_size);
+ assert(tex->surface.u.legacy.level[level].dcc_fast_clear_size);
/* Layered 4x and 8x MSAA DCC fast clears need to clear
* dcc_fast_clear_size bytes for each layer. A compute shader
* would be more efficient than separate per-layer clear operations.
*/
- assert(rtex->num_color_samples <= 2 || num_layers == 1);
+ assert(tex->num_color_samples <= 2 || num_layers == 1);
- dcc_offset += rtex->surface.u.legacy.level[level].dcc_offset;
- clear_size = rtex->surface.u.legacy.level[level].dcc_fast_clear_size *
+ dcc_offset += tex->surface.u.legacy.level[level].dcc_offset;
+ clear_size = tex->surface.u.legacy.level[level].dcc_fast_clear_size *
num_layers;
}
* src and dst micro tile modes match.
*/
static void si_set_optimal_micro_tile_mode(struct si_screen *sscreen,
- struct r600_texture *rtex)
+ struct si_texture *tex)
{
- if (rtex->buffer.b.is_shared ||
- rtex->buffer.b.b.nr_samples <= 1 ||
- rtex->surface.micro_tile_mode == rtex->last_msaa_resolve_target_micro_mode)
+ if (tex->buffer.b.is_shared ||
+ tex->buffer.b.b.nr_samples <= 1 ||
+ tex->surface.micro_tile_mode == tex->last_msaa_resolve_target_micro_mode)
return;
assert(sscreen->info.chip_class >= GFX9 ||
- rtex->surface.u.legacy.level[0].mode == RADEON_SURF_MODE_2D);
- assert(rtex->buffer.b.b.last_level == 0);
+ tex->surface.u.legacy.level[0].mode == RADEON_SURF_MODE_2D);
+ assert(tex->buffer.b.b.last_level == 0);
if (sscreen->info.chip_class >= GFX9) {
/* 4K or larger tiles only. 0 is linear. 1-3 are 256B tiles. */
- assert(rtex->surface.u.gfx9.surf.swizzle_mode >= 4);
+ assert(tex->surface.u.gfx9.surf.swizzle_mode >= 4);
/* If you do swizzle_mode % 4, you'll get:
* 0 = Depth
* 1 = Standard (thin)
* 2 = Displayable
* 3 = Rotated
*
* Depth-sample order isn't allowed:
*/
- assert(rtex->surface.u.gfx9.surf.swizzle_mode % 4 != 0);
+ assert(tex->surface.u.gfx9.surf.swizzle_mode % 4 != 0);
- switch (rtex->last_msaa_resolve_target_micro_mode) {
+ switch (tex->last_msaa_resolve_target_micro_mode) {
case RADEON_MICRO_MODE_DISPLAY:
- rtex->surface.u.gfx9.surf.swizzle_mode &= ~0x3;
- rtex->surface.u.gfx9.surf.swizzle_mode += 2; /* D */
+ tex->surface.u.gfx9.surf.swizzle_mode &= ~0x3;
+ tex->surface.u.gfx9.surf.swizzle_mode += 2; /* D */
break;
case RADEON_MICRO_MODE_THIN:
- rtex->surface.u.gfx9.surf.swizzle_mode &= ~0x3;
- rtex->surface.u.gfx9.surf.swizzle_mode += 1; /* S */
+ tex->surface.u.gfx9.surf.swizzle_mode &= ~0x3;
+ tex->surface.u.gfx9.surf.swizzle_mode += 1; /* S */
break;
case RADEON_MICRO_MODE_ROTATED:
- rtex->surface.u.gfx9.surf.swizzle_mode &= ~0x3;
- rtex->surface.u.gfx9.surf.swizzle_mode += 3; /* R */
+ tex->surface.u.gfx9.surf.swizzle_mode &= ~0x3;
+ tex->surface.u.gfx9.surf.swizzle_mode += 3; /* R */
break;
default: /* depth */
assert(!"unexpected micro mode");
* any definitions for them either. They are all 2D_TILED_THIN1
* modes with different bpp and micro tile mode.
*/
- switch (rtex->last_msaa_resolve_target_micro_mode) {
+ switch (tex->last_msaa_resolve_target_micro_mode) {
case RADEON_MICRO_MODE_DISPLAY:
- rtex->surface.u.legacy.tiling_index[0] = 10;
+ tex->surface.u.legacy.tiling_index[0] = 10;
break;
case RADEON_MICRO_MODE_THIN:
- rtex->surface.u.legacy.tiling_index[0] = 14;
+ tex->surface.u.legacy.tiling_index[0] = 14;
break;
case RADEON_MICRO_MODE_ROTATED:
- rtex->surface.u.legacy.tiling_index[0] = 28;
+ tex->surface.u.legacy.tiling_index[0] = 28;
break;
default: /* depth, thick */
assert(!"unexpected micro mode");
return;
}
} else { /* SI */
- switch (rtex->last_msaa_resolve_target_micro_mode) {
+ switch (tex->last_msaa_resolve_target_micro_mode) {
case RADEON_MICRO_MODE_DISPLAY:
- switch (rtex->surface.bpe) {
+ switch (tex->surface.bpe) {
case 1:
- rtex->surface.u.legacy.tiling_index[0] = 10;
+ tex->surface.u.legacy.tiling_index[0] = 10;
break;
case 2:
- rtex->surface.u.legacy.tiling_index[0] = 11;
+ tex->surface.u.legacy.tiling_index[0] = 11;
break;
default: /* 4, 8 */
- rtex->surface.u.legacy.tiling_index[0] = 12;
+ tex->surface.u.legacy.tiling_index[0] = 12;
break;
}
break;
case RADEON_MICRO_MODE_THIN:
- switch (rtex->surface.bpe) {
+ switch (tex->surface.bpe) {
case 1:
- rtex->surface.u.legacy.tiling_index[0] = 14;
+ tex->surface.u.legacy.tiling_index[0] = 14;
break;
case 2:
- rtex->surface.u.legacy.tiling_index[0] = 15;
+ tex->surface.u.legacy.tiling_index[0] = 15;
break;
case 4:
- rtex->surface.u.legacy.tiling_index[0] = 16;
+ tex->surface.u.legacy.tiling_index[0] = 16;
break;
default: /* 8, 16 */
- rtex->surface.u.legacy.tiling_index[0] = 17;
+ tex->surface.u.legacy.tiling_index[0] = 17;
break;
}
break;
}
}
- rtex->surface.micro_tile_mode = rtex->last_msaa_resolve_target_micro_mode;
+ tex->surface.micro_tile_mode = tex->last_msaa_resolve_target_micro_mode;
p_atomic_inc(&sscreen->dirty_tex_counter);
}
return;
for (i = 0; i < fb->nr_cbufs; i++) {
- struct r600_texture *tex;
+ struct si_texture *tex;
unsigned clear_bit = PIPE_CLEAR_COLOR0 << i;
if (!fb->cbufs[i])
if (level > 0)
continue;
- tex = (struct r600_texture *)fb->cbufs[i]->texture;
+ tex = (struct si_texture *)fb->cbufs[i]->texture;
/* TODO: GFX9: Implement DCC fast clear for level 0 of
* mipmapped textures. Mipmapped DCC has to clear a rectangular
struct si_context *sctx = (struct si_context *)ctx;
struct pipe_framebuffer_state *fb = &sctx->framebuffer.state;
struct pipe_surface *zsbuf = fb->zsbuf;
- struct r600_texture *zstex =
- zsbuf ? (struct r600_texture*)zsbuf->texture : NULL;
+ struct si_texture *zstex =
+ zsbuf ? (struct si_texture*)zsbuf->texture : NULL;
if (buffers & PIPE_CLEAR_COLOR) {
si_do_fast_color_clear(sctx, &buffers, color);
/* These buffers cannot use fast clear, make sure to disable expansion. */
for (unsigned i = 0; i < fb->nr_cbufs; i++) {
- struct r600_texture *tex;
+ struct si_texture *tex;
/* If not clearing this buffer, skip. */
if (!(buffers & (PIPE_CLEAR_COLOR0 << i)) || !fb->cbufs[i])
continue;
- tex = (struct r600_texture *)fb->cbufs[i]->texture;
+ tex = (struct si_texture *)fb->cbufs[i]->texture;
if (tex->surface.fmask_size == 0)
tex->dirty_level_mask &= ~(1 << fb->cbufs[i]->u.tex.level);
}
const void *data)
{
struct pipe_screen *screen = pipe->screen;
- struct r600_texture *rtex = (struct r600_texture*)tex;
+ struct si_texture *stex = (struct si_texture*)tex;
struct pipe_surface tmpl = {{0}};
struct pipe_surface *sf;
const struct util_format_description *desc =
if (!sf)
return;
- if (rtex->is_depth) {
+ if (stex->is_depth) {
unsigned clear;
float depth;
uint8_t stencil = 0;
clear = PIPE_CLEAR_DEPTH;
desc->unpack_z_float(&depth, 0, data, 0, 1, 1);
- if (rtex->surface.has_stencil) {
+ if (stex->surface.has_stencil) {
clear |= PIPE_CLEAR_STENCIL;
desc->unpack_s_8uint(&stencil, 0, data, 0, 1, 1);
}
static void si_dump_framebuffer(struct si_context *sctx, struct u_log_context *log)
{
struct pipe_framebuffer_state *state = &sctx->framebuffer.state;
- struct r600_texture *rtex;
+ struct si_texture *tex;
int i;
for (i = 0; i < state->nr_cbufs; i++) {
if (!state->cbufs[i])
continue;
- rtex = (struct r600_texture*)state->cbufs[i]->texture;
+ tex = (struct si_texture*)state->cbufs[i]->texture;
u_log_printf(log, COLOR_YELLOW "Color buffer %i:" COLOR_RESET "\n", i);
- si_print_texture_info(sctx->screen, rtex, log);
+ si_print_texture_info(sctx->screen, tex, log);
u_log_printf(log, "\n");
}
if (state->zsbuf) {
- rtex = (struct r600_texture*)state->zsbuf->texture;
+ tex = (struct si_texture*)state->zsbuf->texture;
u_log_printf(log, COLOR_YELLOW "Depth-stencil buffer:" COLOR_RESET "\n");
- si_print_texture_info(sctx->screen, rtex, log);
+ si_print_texture_info(sctx->screen, tex, log);
u_log_printf(log, "\n");
}
}
bool is_stencil_sampler,
bool check_mem)
{
- struct r600_resource *rres;
- struct r600_texture *rtex;
+ struct si_texture *tex = (struct si_texture*)resource;
enum radeon_bo_priority priority;
if (!resource)
return;
- if (resource->target != PIPE_BUFFER) {
- struct r600_texture *tex = (struct r600_texture*)resource;
-
- if (tex->is_depth && !si_can_sample_zs(tex, is_stencil_sampler))
- resource = &tex->flushed_depth_texture->buffer.b.b;
- }
-
- rres = r600_resource(resource);
- priority = si_get_sampler_view_priority(rres);
+ /* Use the flushed depth texture if direct sampling is unsupported. */
+ if (resource->target != PIPE_BUFFER &&
+ tex->is_depth && !si_can_sample_zs(tex, is_stencil_sampler))
+ tex = tex->flushed_depth_texture;
- radeon_add_to_gfx_buffer_list_check_mem(sctx, rres, usage, priority,
+ priority = si_get_sampler_view_priority(&tex->buffer);
+ radeon_add_to_gfx_buffer_list_check_mem(sctx, &tex->buffer, usage, priority,
check_mem);
if (resource->target == PIPE_BUFFER)
return;
- /* Now add separate DCC or HTILE. */
- rtex = (struct r600_texture*)resource;
- if (rtex->dcc_separate_buffer) {
- radeon_add_to_gfx_buffer_list_check_mem(sctx, rtex->dcc_separate_buffer,
+ /* Add separate DCC. */
+ if (tex->dcc_separate_buffer) {
+ radeon_add_to_gfx_buffer_list_check_mem(sctx, tex->dcc_separate_buffer,
usage, RADEON_PRIO_DCC, check_mem);
}
}
* \param state descriptor to update
*/
void si_set_mutable_tex_desc_fields(struct si_screen *sscreen,
- struct r600_texture *tex,
+ struct si_texture *tex,
const struct legacy_surf_level *base_level_info,
unsigned base_level, unsigned first_level,
unsigned block_width, bool is_stencil,
static void si_set_sampler_state_desc(struct si_sampler_state *sstate,
struct si_sampler_view *sview,
- struct r600_texture *tex,
+ struct si_texture *tex,
uint32_t *desc)
{
if (sview && sview->is_integer)
uint32_t *desc)
{
struct pipe_sampler_view *view = &sview->base;
- struct r600_texture *rtex = (struct r600_texture *)view->texture;
- bool is_buffer = rtex->buffer.b.b.target == PIPE_BUFFER;
+ struct si_texture *tex = (struct si_texture *)view->texture;
+ bool is_buffer = tex->buffer.b.b.target == PIPE_BUFFER;
if (unlikely(!is_buffer && sview->dcc_incompatible)) {
- if (vi_dcc_enabled(rtex, view->u.tex.first_level))
- if (!si_texture_disable_dcc(sctx, rtex))
- si_decompress_dcc(sctx, rtex);
+ if (vi_dcc_enabled(tex, view->u.tex.first_level))
+ if (!si_texture_disable_dcc(sctx, tex))
+ si_decompress_dcc(sctx, tex);
sview->dcc_incompatible = false;
}
- assert(rtex); /* views with texture == NULL aren't supported */
+ assert(tex); /* views with texture == NULL aren't supported */
memcpy(desc, sview->state, 8*4);
if (is_buffer) {
- si_set_buf_desc_address(&rtex->buffer,
+ si_set_buf_desc_address(&tex->buffer,
sview->base.u.buf.offset,
desc + 4);
} else {
- bool is_separate_stencil = rtex->db_compatible &&
+ bool is_separate_stencil = tex->db_compatible &&
sview->is_stencil_sampler;
- si_set_mutable_tex_desc_fields(sctx->screen, rtex,
+ si_set_mutable_tex_desc_fields(sctx->screen, tex,
sview->base_level_info,
sview->base_level,
sview->base.u.tex.first_level,
desc);
}
- if (!is_buffer && rtex->surface.fmask_size) {
+ if (!is_buffer && tex->surface.fmask_size) {
memcpy(desc + 8, sview->fmask_state, 8*4);
} else {
/* Disable FMASK and bind sampler state in [12:15]. */
if (sstate)
si_set_sampler_state_desc(sstate, sview,
- is_buffer ? NULL : rtex,
+ is_buffer ? NULL : tex,
desc + 12);
}
}
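/* A color texture needs a decompress pass before sampling if it has
* FMASK (MSAA compression), or if any dirty level may still hold a
* fast clear tracked by CMASK or DCC. */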
-static bool color_needs_decompression(struct r600_texture *rtex)
+static bool color_needs_decompression(struct si_texture *tex)
{
- return rtex->surface.fmask_size ||
- (rtex->dirty_level_mask &&
- (rtex->cmask.size || rtex->dcc_offset));
+ return tex->surface.fmask_size ||
+ (tex->dirty_level_mask &&
+ (tex->cmask.size || tex->dcc_offset));
}
-static bool depth_needs_decompression(struct r600_texture *rtex)
+static bool depth_needs_decompression(struct si_texture *tex)
{
/* If the depth/stencil texture is TC-compatible, no decompression
* will be done. The decompression function will only flush DB caches
* to make it coherent with shaders. That's necessary because the driver
* doesn't flush DB caches in any other case.
*/
- return rtex->db_compatible;
+ return tex->db_compatible;
}
static void si_set_sampler_view(struct si_context *sctx,
return;
if (view) {
- struct r600_texture *rtex = (struct r600_texture *)view->texture;
+ struct si_texture *tex = (struct si_texture *)view->texture;
si_set_sampler_view_desc(sctx, rview,
samplers->sampler_states[slot], desc);
- if (rtex->buffer.b.b.target == PIPE_BUFFER) {
- rtex->buffer.bind_history |= PIPE_BIND_SAMPLER_VIEW;
+ if (tex->buffer.b.b.target == PIPE_BUFFER) {
+ tex->buffer.bind_history |= PIPE_BIND_SAMPLER_VIEW;
samplers->needs_depth_decompress_mask &= ~(1u << slot);
samplers->needs_color_decompress_mask &= ~(1u << slot);
} else {
- if (depth_needs_decompression(rtex)) {
+ if (depth_needs_decompression(tex)) {
samplers->needs_depth_decompress_mask |= 1u << slot;
} else {
samplers->needs_depth_decompress_mask &= ~(1u << slot);
}
- if (color_needs_decompression(rtex)) {
+ if (color_needs_decompression(tex)) {
samplers->needs_color_decompress_mask |= 1u << slot;
} else {
samplers->needs_color_decompress_mask &= ~(1u << slot);
}
- if (rtex->dcc_offset &&
- p_atomic_read(&rtex->framebuffers_bound))
+ if (tex->dcc_offset &&
+ p_atomic_read(&tex->framebuffers_bound))
sctx->need_check_render_feedback = true;
}
struct pipe_resource *res = samplers->views[i]->texture;
if (res && res->target != PIPE_BUFFER) {
- struct r600_texture *rtex = (struct r600_texture *)res;
+ struct si_texture *tex = (struct si_texture *)res;
- if (color_needs_decompression(rtex)) {
+ if (color_needs_decompression(tex)) {
samplers->needs_color_decompress_mask |= 1u << i;
} else {
samplers->needs_color_decompress_mask &= ~(1u << i);
si_set_buf_desc_address(res, view->u.buf.offset, desc + 4);
} else {
static const unsigned char swizzle[4] = { 0, 1, 2, 3 };
- struct r600_texture *tex = (struct r600_texture *)res;
+ struct si_texture *tex = (struct si_texture *)res;
unsigned level = view->u.tex.level;
unsigned width, height, depth, hw_level;
bool uses_dcc = vi_dcc_enabled(tex, level);
images->needs_color_decompress_mask &= ~(1 << slot);
res->bind_history |= PIPE_BIND_SHADER_IMAGE;
} else {
- struct r600_texture *tex = (struct r600_texture *)res;
+ struct si_texture *tex = (struct si_texture *)res;
unsigned level = view->u.tex.level;
if (color_needs_decompression(tex)) {
struct pipe_resource *res = images->views[i].resource;
if (res && res->target != PIPE_BUFFER) {
- struct r600_texture *rtex = (struct r600_texture *)res;
+ struct si_texture *tex = (struct si_texture *)res;
- if (color_needs_decompression(rtex)) {
+ if (color_needs_decompression(tex)) {
images->needs_color_decompress_mask |= 1 << i;
} else {
images->needs_color_decompress_mask &= ~(1 << i);
si_update_ps_iter_samples(sctx);
if (surf) {
- struct r600_texture *tex = (struct r600_texture*)surf->texture;
+ struct si_texture *tex = (struct si_texture*)surf->texture;
struct pipe_image_view view;
assert(tex);
struct si_sampler_view *sview =
(struct si_sampler_view *)samplers->views[slot];
- struct r600_texture *tex = NULL;
+ struct si_texture *tex = NULL;
if (sview && sview->base.texture &&
sview->base.texture->target != PIPE_BUFFER)
- tex = (struct r600_texture *)sview->base.texture;
+ tex = (struct si_texture *)sview->base.texture;
if (tex && tex->surface.fmask_size)
continue;
util_dynarray_foreach(&sctx->resident_tex_handles,
struct si_texture_handle *, tex_handle) {
struct pipe_resource *res = (*tex_handle)->view->texture;
- struct r600_texture *rtex;
+ struct si_texture *tex;
if (!res || res->target == PIPE_BUFFER)
continue;
- rtex = (struct r600_texture *)res;
- if (!color_needs_decompression(rtex))
+ tex = (struct si_texture *)res;
+ if (!color_needs_decompression(tex))
continue;
util_dynarray_append(&sctx->resident_tex_needs_color_decompress,
struct si_image_handle *, img_handle) {
struct pipe_image_view *view = &(*img_handle)->view;
struct pipe_resource *res = view->resource;
- struct r600_texture *rtex;
+ struct si_texture *tex;
if (!res || res->target == PIPE_BUFFER)
continue;
- rtex = (struct r600_texture *)res;
- if (!color_needs_decompression(rtex))
+ tex = (struct si_texture *)res;
+ if (!color_needs_decompression(tex))
continue;
util_dynarray_append(&sctx->resident_img_needs_color_decompress,
if (resident) {
if (sview->base.texture->target != PIPE_BUFFER) {
- struct r600_texture *rtex =
- (struct r600_texture *)sview->base.texture;
+ struct si_texture *tex =
+ (struct si_texture *)sview->base.texture;
- if (depth_needs_decompression(rtex)) {
+ if (depth_needs_decompression(tex)) {
util_dynarray_append(
&sctx->resident_tex_needs_depth_decompress,
struct si_texture_handle *,
tex_handle);
}
- if (color_needs_decompression(rtex)) {
+ if (color_needs_decompression(tex)) {
util_dynarray_append(
&sctx->resident_tex_needs_color_decompress,
struct si_texture_handle *,
tex_handle);
}
- if (rtex->dcc_offset &&
- p_atomic_read(&rtex->framebuffers_bound))
+ if (tex->dcc_offset &&
+ p_atomic_read(&tex->framebuffers_bound))
sctx->need_check_render_feedback = true;
si_update_bindless_texture_descriptor(sctx, tex_handle);
if (resident) {
if (res->b.b.target != PIPE_BUFFER) {
- struct r600_texture *rtex = (struct r600_texture *)res;
+ struct si_texture *tex = (struct si_texture *)res;
unsigned level = view->u.tex.level;
- if (color_needs_decompression(rtex)) {
+ if (color_needs_decompression(tex)) {
util_dynarray_append(
&sctx->resident_img_needs_color_decompress,
struct si_image_handle *,
img_handle);
}
- if (vi_dcc_enabled(rtex, level) &&
- p_atomic_read(&rtex->framebuffers_bound))
+ if (vi_dcc_enabled(tex, level) &&
+ p_atomic_read(&tex->framebuffers_bound))
sctx->need_check_render_feedback = true;
si_update_bindless_image_descriptor(sctx, img_handle);
unsigned bpp)
{
struct radeon_cmdbuf *cs = ctx->dma_cs;
- struct r600_texture *rsrc = (struct r600_texture*)src;
- struct r600_texture *rdst = (struct r600_texture*)dst;
- unsigned dst_mode = rdst->surface.u.legacy.level[dst_level].mode;
+ struct si_texture *ssrc = (struct si_texture*)src;
+ struct si_texture *sdst = (struct si_texture*)dst;
+ unsigned dst_mode = sdst->surface.u.legacy.level[dst_level].mode;
bool detile = dst_mode == RADEON_SURF_MODE_LINEAR_ALIGNED;
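/* detile == true means a tiled source is copied into a linear
* destination; otherwise a linear source is tiled into the
* destination. */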
- struct r600_texture *rlinear = detile ? rdst : rsrc;
- struct r600_texture *rtiled = detile ? rsrc : rdst;
+ struct si_texture *linear = detile ? sdst : ssrc;
+ struct si_texture *tiled = detile ? ssrc : sdst;
unsigned linear_lvl = detile ? dst_level : src_level;
unsigned tiled_lvl = detile ? src_level : dst_level;
struct radeon_info *info = &ctx->screen->info;
- unsigned index = rtiled->surface.u.legacy.tiling_index[tiled_lvl];
+ unsigned index = tiled->surface.u.legacy.tiling_index[tiled_lvl];
unsigned tile_mode = info->si_tile_mode_array[index];
unsigned array_mode, lbpp, pitch_tile_max, slice_tile_max, size;
unsigned ncopy, height, cheight, i;
uint64_t base, addr;
unsigned pipe_config;
- assert(dst_mode != rsrc->surface.u.legacy.level[src_level].mode);
+ assert(dst_mode != ssrc->surface.u.legacy.level[src_level].mode);
sub_cmd = SI_DMA_COPY_TILED;
lbpp = util_logbase2(bpp);
tiled_y = detile ? src_y : dst_y;
tiled_z = detile ? src_z : dst_z;
- assert(!util_format_is_depth_and_stencil(rtiled->buffer.b.b.format));
+ assert(!util_format_is_depth_and_stencil(tiled->buffer.b.b.format));
array_mode = G_009910_ARRAY_MODE(tile_mode);
- slice_tile_max = (rtiled->surface.u.legacy.level[tiled_lvl].nblk_x *
- rtiled->surface.u.legacy.level[tiled_lvl].nblk_y) / (8*8) - 1;
+ slice_tile_max = (tiled->surface.u.legacy.level[tiled_lvl].nblk_x *
+ tiled->surface.u.legacy.level[tiled_lvl].nblk_y) / (8*8) - 1;
/* The linear height must be the same as the slice tile max height. It's
* OK even if the linear destination/source has a smaller height, because
* the DMA packet uses copy_height, which is always less than or equal to
* the linear height.
*/
- height = rtiled->surface.u.legacy.level[tiled_lvl].nblk_y;
- base = rtiled->surface.u.legacy.level[tiled_lvl].offset;
- addr = rlinear->surface.u.legacy.level[linear_lvl].offset;
- addr += (uint64_t)rlinear->surface.u.legacy.level[linear_lvl].slice_size_dw * 4 * linear_z;
+ height = tiled->surface.u.legacy.level[tiled_lvl].nblk_y;
+ base = tiled->surface.u.legacy.level[tiled_lvl].offset;
+ addr = linear->surface.u.legacy.level[linear_lvl].offset;
+ addr += (uint64_t)linear->surface.u.legacy.level[linear_lvl].slice_size_dw * 4 * linear_z;
addr += linear_y * pitch + linear_x * bpp;
bank_h = G_009910_BANK_HEIGHT(tile_mode);
bank_w = G_009910_BANK_WIDTH(tile_mode);
mt_aspect = G_009910_MACRO_TILE_ASPECT(tile_mode);
/* Non-depth modes don't have TILE_SPLIT set. */
- tile_split = util_logbase2(rtiled->surface.u.legacy.tile_split >> 6);
+ tile_split = util_logbase2(tiled->surface.u.legacy.tile_split >> 6);
nbanks = G_009910_NUM_BANKS(tile_mode);
- base += rtiled->buffer.gpu_address;
- addr += rlinear->buffer.gpu_address;
+ base += tiled->buffer.gpu_address;
+ addr += linear->buffer.gpu_address;
pipe_config = G_009910_PIPE_CONFIG(tile_mode);
mt = G_009910_MICRO_TILE_MODE(tile_mode);
size = copy_height * pitch;
ncopy = DIV_ROUND_UP(size, SI_DMA_COPY_MAX_DWORD_ALIGNED_SIZE);
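/* Split the copy into ncopy packets, each bounded by the engine's
* maximum dword-aligned DMA transfer size. */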
- si_need_dma_space(ctx, ncopy * 9, &rdst->buffer, &rsrc->buffer);
+ si_need_dma_space(ctx, ncopy * 9, &sdst->buffer, &ssrc->buffer);
for (i = 0; i < ncopy; i++) {
cheight = copy_height;
const struct pipe_box *src_box)
{
struct si_context *sctx = (struct si_context *)ctx;
- struct r600_texture *rsrc = (struct r600_texture*)src;
- struct r600_texture *rdst = (struct r600_texture*)dst;
+ struct si_texture *ssrc = (struct si_texture*)src;
+ struct si_texture *sdst = (struct si_texture*)dst;
unsigned dst_pitch, src_pitch, bpp, dst_mode, src_mode;
unsigned src_w, dst_w;
unsigned src_x, src_y;
goto fallback;
if (src_box->depth > 1 ||
- !si_prepare_for_dma_blit(sctx, rdst, dst_level, dstx, dsty,
- dstz, rsrc, src_level, src_box))
+ !si_prepare_for_dma_blit(sctx, sdst, dst_level, dstx, dsty,
+ dstz, ssrc, src_level, src_box))
goto fallback;
src_x = util_format_get_nblocksx(src->format, src_box->x);
src_y = util_format_get_nblocksy(src->format, src_box->y);
dst_y = util_format_get_nblocksy(src->format, dst_y);
- bpp = rdst->surface.bpe;
- dst_pitch = rdst->surface.u.legacy.level[dst_level].nblk_x * rdst->surface.bpe;
- src_pitch = rsrc->surface.u.legacy.level[src_level].nblk_x * rsrc->surface.bpe;
- src_w = u_minify(rsrc->buffer.b.b.width0, src_level);
- dst_w = u_minify(rdst->buffer.b.b.width0, dst_level);
+ bpp = sdst->surface.bpe;
+ dst_pitch = sdst->surface.u.legacy.level[dst_level].nblk_x * sdst->surface.bpe;
+ src_pitch = ssrc->surface.u.legacy.level[src_level].nblk_x * ssrc->surface.bpe;
+ src_w = u_minify(ssrc->buffer.b.b.width0, src_level);
+ dst_w = u_minify(sdst->buffer.b.b.width0, dst_level);
- dst_mode = rdst->surface.u.legacy.level[dst_level].mode;
- src_mode = rsrc->surface.u.legacy.level[src_level].mode;
+ dst_mode = sdst->surface.u.legacy.level[dst_level].mode;
+ src_mode = ssrc->surface.u.legacy.level[src_level].mode;
if (src_pitch != dst_pitch || src_box->x || dst_x || src_w != dst_w ||
src_box->width != src_w ||
- src_box->height != u_minify(rsrc->buffer.b.b.height0, src_level) ||
- src_box->height != u_minify(rdst->buffer.b.b.height0, dst_level) ||
- rsrc->surface.u.legacy.level[src_level].nblk_y !=
- rdst->surface.u.legacy.level[dst_level].nblk_y) {
+ src_box->height != u_minify(ssrc->buffer.b.b.height0, src_level) ||
+ src_box->height != u_minify(sdst->buffer.b.b.height0, dst_level) ||
+ ssrc->surface.u.legacy.level[src_level].nblk_y !=
+ sdst->surface.u.legacy.level[dst_level].nblk_y) {
/* FIXME si can do partial blit */
goto fallback;
}
* dst_x/y == 0
* dst_pitch == src_pitch
*/
- src_offset= rsrc->surface.u.legacy.level[src_level].offset;
- src_offset += (uint64_t)rsrc->surface.u.legacy.level[src_level].slice_size_dw * 4 * src_box->z;
+ src_offset = ssrc->surface.u.legacy.level[src_level].offset;
+ src_offset += (uint64_t)ssrc->surface.u.legacy.level[src_level].slice_size_dw * 4 * src_box->z;
src_offset += src_y * src_pitch + src_x * bpp;
- dst_offset = rdst->surface.u.legacy.level[dst_level].offset;
- dst_offset += (uint64_t)rdst->surface.u.legacy.level[dst_level].slice_size_dw * 4 * dst_z;
+ dst_offset = sdst->surface.u.legacy.level[dst_level].offset;
+ dst_offset += (uint64_t)sdst->surface.u.legacy.level[dst_level].slice_size_dw * 4 * dst_z;
dst_offset += dst_y * dst_pitch + dst_x * bpp;
si_dma_copy_buffer(sctx, dst, src, dst_offset, src_offset,
- (uint64_t)rsrc->surface.u.legacy.level[src_level].slice_size_dw * 4);
+ (uint64_t)ssrc->surface.u.legacy.level[src_level].slice_size_dw * 4);
} else {
si_dma_copy_tile(sctx, dst, dst_level, dst_x, dst_y, dst_z,
src, src_level, src_x, src_y, src_box->z,
- src_box->height / rsrc->surface.blk_h,
+ src_box->height / ssrc->surface.blk_h,
dst_pitch, bpp);
}
return;
sctx->b.destroy_query(&sctx->b,
sctx->dcc_stats[i].ps_stats[j]);
- r600_texture_reference(&sctx->dcc_stats[i].tex, NULL);
+ si_texture_reference(&sctx->dcc_stats[i].tex, NULL);
}
if (sctx->query_result_shader)
uint64_t base_address_reg;
};
-struct r600_texture {
+struct si_texture {
struct r600_resource buffer;
struct radeon_surf surface;
uint64_t size;
- struct r600_texture *flushed_depth_texture;
+ struct si_texture *flushed_depth_texture;
/* Colorbuffer compression and fast clear. */
uint64_t fmask_offset;
bool render_cond_force_off; /* for u_blitter */
/* Statistics gathering for the DCC enablement heuristic. It can't be
- * in r600_texture because r600_texture can be shared by multiple
+ * in si_texture because si_texture can be shared by multiple
* contexts. This is for back buffers only. We shouldn't get too many
* of those.
*
* enabled by DCC stat gathering.
*/
struct {
- struct r600_texture *tex;
+ struct si_texture *tex;
/* Query queue: 0 = usually active, 1 = waiting, 2 = readback. */
struct pipe_query *ps_stats[3];
/* If all slots are used and another slot is needed,
struct pipe_resource *src,
unsigned src_level,
const struct pipe_box *src_box);
-void si_decompress_dcc(struct si_context *sctx, struct r600_texture *rtex);
+void si_decompress_dcc(struct si_context *sctx, struct si_texture *tex);
void si_blit_decompress_depth(struct pipe_context *ctx,
- struct r600_texture *texture,
- struct r600_texture *staging,
+ struct si_texture *texture,
+ struct si_texture *staging,
unsigned first_level, unsigned last_level,
unsigned first_layer, unsigned last_layer,
unsigned first_sample, unsigned last_sample);
enum pipe_format si_simplify_cb_format(enum pipe_format format);
bool vi_alpha_is_on_msb(enum pipe_format format);
void vi_dcc_clear_level(struct si_context *sctx,
- struct r600_texture *rtex,
+ struct si_texture *tex,
unsigned level, unsigned clear_value);
void si_init_clear_functions(struct si_context *sctx);
void si_update_vs_viewport_state(struct si_context *ctx);
void si_init_viewport_functions(struct si_context *ctx);
-/* r600_texture.c */
+/* si_texture.c */
bool si_prepare_for_dma_blit(struct si_context *sctx,
- struct r600_texture *rdst,
+ struct si_texture *dst,
unsigned dst_level, unsigned dstx,
unsigned dsty, unsigned dstz,
- struct r600_texture *rsrc,
+ struct si_texture *src,
unsigned src_level,
const struct pipe_box *src_box);
void si_texture_get_cmask_info(struct si_screen *sscreen,
- struct r600_texture *rtex,
+ struct si_texture *tex,
struct r600_cmask_info *out);
void si_eliminate_fast_color_clear(struct si_context *sctx,
- struct r600_texture *rtex);
+ struct si_texture *tex);
void si_texture_discard_cmask(struct si_screen *sscreen,
- struct r600_texture *rtex);
+ struct si_texture *tex);
bool si_init_flushed_depth_texture(struct pipe_context *ctx,
struct pipe_resource *texture,
- struct r600_texture **staging);
+ struct si_texture **staging);
void si_print_texture_info(struct si_screen *sscreen,
- struct r600_texture *rtex, struct u_log_context *log);
+ struct si_texture *tex, struct u_log_context *log);
struct pipe_resource *si_texture_create(struct pipe_screen *screen,
const struct pipe_resource *templ);
bool vi_dcc_formats_compatible(enum pipe_format format1,
unsigned width, unsigned height);
unsigned si_translate_colorswap(enum pipe_format format, bool do_endian_swap);
void vi_separate_dcc_try_enable(struct si_context *sctx,
- struct r600_texture *tex);
+ struct si_texture *tex);
void vi_separate_dcc_start_query(struct si_context *sctx,
- struct r600_texture *tex);
+ struct si_texture *tex);
void vi_separate_dcc_stop_query(struct si_context *sctx,
- struct r600_texture *tex);
+ struct si_texture *tex);
void vi_separate_dcc_process_and_reset_stats(struct pipe_context *ctx,
- struct r600_texture *tex);
+ struct si_texture *tex);
bool si_texture_disable_dcc(struct si_context *sctx,
- struct r600_texture *rtex);
+ struct si_texture *tex);
void si_init_screen_texture_functions(struct si_screen *sscreen);
void si_init_context_texture_functions(struct si_context *sctx);
}
static inline void
-r600_texture_reference(struct r600_texture **ptr, struct r600_texture *res)
+si_texture_reference(struct si_texture **ptr, struct si_texture *res)
{
pipe_resource_reference((struct pipe_resource **)ptr, &res->buffer.b.b);
}
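The renamed helper stays a thin typed wrapper around pipe_resource_reference(), i.e. the usual replace-a-reference idiom. As a self-contained model of what that call does (names invented here for illustration, not Mesa API):

    #include <stdlib.h>

    struct res { int refcount; };

    /* Point *ptr at src: take a reference on src, drop the one held
     * through *ptr, and free the old object when its count hits zero. */
    static void res_reference(struct res **ptr, struct res *src)
    {
            struct res *old = *ptr;

            if (src)
                    src->refcount++;
            if (old && --old->refcount == 0)
                    free(old);
            *ptr = src;
    }

Callers therefore only see the type and name change: r600_texture_reference(&rtex, NULL) becomes si_texture_reference(&tex, NULL).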
static inline bool
-vi_dcc_enabled(struct r600_texture *tex, unsigned level)
+vi_dcc_enabled(struct si_texture *tex, unsigned level)
{
return tex->dcc_offset && level < tex->surface.num_dcc_levels;
}
static inline unsigned
-si_tile_mode_index(struct r600_texture *rtex, unsigned level, bool stencil)
+si_tile_mode_index(struct si_texture *tex, unsigned level, bool stencil)
{
if (stencil)
- return rtex->surface.u.legacy.stencil_tiling_index[level];
+ return tex->surface.u.legacy.stencil_tiling_index[level];
else
- return rtex->surface.u.legacy.tiling_index[level];
+ return tex->surface.u.legacy.tiling_index[level];
}
static inline void
}
static inline bool
-si_can_sample_zs(struct r600_texture *tex, bool stencil_sampler)
+si_can_sample_zs(struct si_texture *tex, bool stencil_sampler)
{
return (stencil_sampler && tex->can_sample_s) ||
(!stencil_sampler && tex->can_sample_z);
}
static inline bool
-si_htile_enabled(struct r600_texture *tex, unsigned level)
+si_htile_enabled(struct si_texture *tex, unsigned level)
{
return tex->htile_offset && level == 0;
}
static inline bool
-vi_tc_compat_htile_enabled(struct r600_texture *tex, unsigned level)
+vi_tc_compat_htile_enabled(struct si_texture *tex, unsigned level)
{
assert(!tex->tc_compatible_htile || tex->htile_offset);
return tex->tc_compatible_htile && level == 0;
}
}
-static unsigned si_tex_dim(struct si_screen *sscreen, struct r600_texture *rtex,
+static unsigned si_tex_dim(struct si_screen *sscreen, struct si_texture *tex,
unsigned view_target, unsigned nr_samples)
{
- unsigned res_target = rtex->buffer.b.b.target;
+ unsigned res_target = tex->buffer.b.b.target;
if (view_target == PIPE_TEXTURE_CUBE ||
view_target == PIPE_TEXTURE_CUBE_ARRAY)
if ((res_target == PIPE_TEXTURE_1D ||
res_target == PIPE_TEXTURE_1D_ARRAY) &&
sscreen->info.chip_class >= GFX9 &&
- rtex->surface.u.gfx9.resource_type == RADEON_RESOURCE_2D) {
+ tex->surface.u.gfx9.resource_type == RADEON_RESOURCE_2D) {
if (res_target == PIPE_TEXTURE_1D)
res_target = PIPE_TEXTURE_2D;
else
static void si_initialize_color_surface(struct si_context *sctx,
struct r600_surface *surf)
{
- struct r600_texture *rtex = (struct r600_texture*)surf->base.texture;
+ struct si_texture *tex = (struct si_texture*)surf->base.texture;
unsigned color_info, color_attrib;
unsigned format, swap, ntype, endian;
const struct util_format_description *desc;
color_attrib = S_028C74_FORCE_DST_ALPHA_1(desc->swizzle[3] == PIPE_SWIZZLE_1 ||
util_format_is_intensity(surf->base.format));
- if (rtex->buffer.b.b.nr_samples > 1) {
- unsigned log_samples = util_logbase2(rtex->buffer.b.b.nr_samples);
- unsigned log_fragments = util_logbase2(rtex->num_color_samples);
+ if (tex->buffer.b.b.nr_samples > 1) {
+ unsigned log_samples = util_logbase2(tex->buffer.b.b.nr_samples);
+ unsigned log_fragments = util_logbase2(tex->num_color_samples);
color_attrib |= S_028C74_NUM_SAMPLES(log_samples) |
S_028C74_NUM_FRAGMENTS(log_fragments);
- if (rtex->surface.fmask_size) {
+ if (tex->surface.fmask_size) {
color_info |= S_028C70_COMPRESSION(1);
- unsigned fmask_bankh = util_logbase2(rtex->surface.u.legacy.fmask.bankh);
+ unsigned fmask_bankh = util_logbase2(tex->surface.u.legacy.fmask.bankh);
if (sctx->chip_class == SI) {
/* due to a hw bug, FMASK_BANK_HEIGHT must be set on SI too */
if (!sctx->screen->info.has_dedicated_vram)
min_compressed_block_size = V_028C78_MIN_BLOCK_SIZE_64B;
- if (rtex->num_color_samples > 1) {
- if (rtex->surface.bpe == 1)
+ if (tex->num_color_samples > 1) {
+ if (tex->surface.bpe == 1)
max_uncompressed_block_size = V_028C78_MAX_BLOCK_SIZE_64B;
- else if (rtex->surface.bpe == 2)
+ else if (tex->surface.bpe == 2)
max_uncompressed_block_size = V_028C78_MAX_BLOCK_SIZE_128B;
}
}
/* This must be set for fast clear to work without FMASK. */
- if (!rtex->surface.fmask_size && sctx->chip_class == SI) {
- unsigned bankh = util_logbase2(rtex->surface.u.legacy.bankh);
+ if (!tex->surface.fmask_size && sctx->chip_class == SI) {
+ unsigned bankh = util_logbase2(tex->surface.u.legacy.bankh);
color_attrib |= S_028C74_FMASK_BANK_HEIGHT(bankh);
}
S_028C6C_SLICE_MAX(surf->base.u.tex.last_layer);
if (sctx->chip_class >= GFX9) {
- unsigned mip0_depth = util_max_layer(&rtex->buffer.b.b, 0);
+ unsigned mip0_depth = util_max_layer(&tex->buffer.b.b, 0);
color_view |= S_028C6C_MIP_LEVEL(surf->base.u.tex.level);
color_attrib |= S_028C74_MIP0_DEPTH(mip0_depth) |
- S_028C74_RESOURCE_TYPE(rtex->surface.u.gfx9.resource_type);
+ S_028C74_RESOURCE_TYPE(tex->surface.u.gfx9.resource_type);
surf->cb_color_attrib2 = S_028C68_MIP0_WIDTH(surf->width0 - 1) |
S_028C68_MIP0_HEIGHT(surf->height0 - 1) |
- S_028C68_MAX_MIP(rtex->buffer.b.b.last_level);
+ S_028C68_MAX_MIP(tex->buffer.b.b.last_level);
}
surf->cb_color_view = color_view;
surf->cb_color_attrib = color_attrib;
/* Determine pixel shader export format */
- si_choose_spi_color_formats(surf, format, swap, ntype, rtex->is_depth);
+ si_choose_spi_color_formats(surf, format, swap, ntype, tex->is_depth);
surf->color_initialized = true;
}
static void si_init_depth_surface(struct si_context *sctx,
struct r600_surface *surf)
{
- struct r600_texture *rtex = (struct r600_texture*)surf->base.texture;
+ struct si_texture *tex = (struct si_texture*)surf->base.texture;
unsigned level = surf->base.u.tex.level;
unsigned format, stencil_format;
uint32_t z_info, s_info;
- format = si_translate_dbformat(rtex->db_render_format);
- stencil_format = rtex->surface.has_stencil ?
+ format = si_translate_dbformat(tex->db_render_format);
+ stencil_format = tex->surface.has_stencil ?
V_028044_STENCIL_8 : V_028044_STENCIL_INVALID;
assert(format != V_028040_Z_INVALID);
if (format == V_028040_Z_INVALID)
- PRINT_ERR("Invalid DB format: %d, disabling DB.\n", rtex->buffer.b.b.format);
+ PRINT_ERR("Invalid DB format: %d, disabling DB.\n", tex->buffer.b.b.format);
surf->db_depth_view = S_028008_SLICE_START(surf->base.u.tex.first_layer) |
S_028008_SLICE_MAX(surf->base.u.tex.last_layer);
surf->db_htile_surface = 0;
if (sctx->chip_class >= GFX9) {
- assert(rtex->surface.u.gfx9.surf_offset == 0);
- surf->db_depth_base = rtex->buffer.gpu_address >> 8;
- surf->db_stencil_base = (rtex->buffer.gpu_address +
- rtex->surface.u.gfx9.stencil_offset) >> 8;
+ assert(tex->surface.u.gfx9.surf_offset == 0);
+ surf->db_depth_base = tex->buffer.gpu_address >> 8;
+ surf->db_stencil_base = (tex->buffer.gpu_address +
+ tex->surface.u.gfx9.stencil_offset) >> 8;
z_info = S_028038_FORMAT(format) |
- S_028038_NUM_SAMPLES(util_logbase2(rtex->buffer.b.b.nr_samples)) |
- S_028038_SW_MODE(rtex->surface.u.gfx9.surf.swizzle_mode) |
- S_028038_MAXMIP(rtex->buffer.b.b.last_level);
+ S_028038_NUM_SAMPLES(util_logbase2(tex->buffer.b.b.nr_samples)) |
+ S_028038_SW_MODE(tex->surface.u.gfx9.surf.swizzle_mode) |
+ S_028038_MAXMIP(tex->buffer.b.b.last_level);
s_info = S_02803C_FORMAT(stencil_format) |
- S_02803C_SW_MODE(rtex->surface.u.gfx9.stencil.swizzle_mode);
- surf->db_z_info2 = S_028068_EPITCH(rtex->surface.u.gfx9.surf.epitch);
- surf->db_stencil_info2 = S_02806C_EPITCH(rtex->surface.u.gfx9.stencil.epitch);
+ S_02803C_SW_MODE(tex->surface.u.gfx9.stencil.swizzle_mode);
+ surf->db_z_info2 = S_028068_EPITCH(tex->surface.u.gfx9.surf.epitch);
+ surf->db_stencil_info2 = S_02806C_EPITCH(tex->surface.u.gfx9.stencil.epitch);
surf->db_depth_view |= S_028008_MIPID(level);
- surf->db_depth_size = S_02801C_X_MAX(rtex->buffer.b.b.width0 - 1) |
- S_02801C_Y_MAX(rtex->buffer.b.b.height0 - 1);
+ surf->db_depth_size = S_02801C_X_MAX(tex->buffer.b.b.width0 - 1) |
+ S_02801C_Y_MAX(tex->buffer.b.b.height0 - 1);
- if (si_htile_enabled(rtex, level)) {
+ if (si_htile_enabled(tex, level)) {
z_info |= S_028038_TILE_SURFACE_ENABLE(1) |
S_028038_ALLOW_EXPCLEAR(1);
- if (rtex->tc_compatible_htile) {
+ if (tex->tc_compatible_htile) {
unsigned max_zplanes = 4;
- if (rtex->db_render_format == PIPE_FORMAT_Z16_UNORM &&
- rtex->buffer.b.b.nr_samples > 1)
+ if (tex->db_render_format == PIPE_FORMAT_Z16_UNORM &&
+ tex->buffer.b.b.nr_samples > 1)
max_zplanes = 2;
z_info |= S_028038_DECOMPRESS_ON_N_ZPLANES(max_zplanes + 1) |
S_028038_ITERATE_FLUSH(1);
s_info |= S_02803C_ITERATE_FLUSH(1);
}
- if (rtex->surface.has_stencil) {
+ if (tex->surface.has_stencil) {
/* Stencil buffer workaround ported from the SI-CI-VI code.
* See that for explanation.
*/
- s_info |= S_02803C_ALLOW_EXPCLEAR(rtex->buffer.b.b.nr_samples <= 1);
+ s_info |= S_02803C_ALLOW_EXPCLEAR(tex->buffer.b.b.nr_samples <= 1);
} else {
/* Use all HTILE for depth if there's no stencil. */
s_info |= S_02803C_TILE_STENCIL_DISABLE(1);
}
- surf->db_htile_data_base = (rtex->buffer.gpu_address +
- rtex->htile_offset) >> 8;
+ surf->db_htile_data_base = (tex->buffer.gpu_address +
+ tex->htile_offset) >> 8;
surf->db_htile_surface = S_028ABC_FULL_CACHE(1) |
- S_028ABC_PIPE_ALIGNED(rtex->surface.u.gfx9.htile.pipe_aligned) |
- S_028ABC_RB_ALIGNED(rtex->surface.u.gfx9.htile.rb_aligned);
+ S_028ABC_PIPE_ALIGNED(tex->surface.u.gfx9.htile.pipe_aligned) |
+ S_028ABC_RB_ALIGNED(tex->surface.u.gfx9.htile.rb_aligned);
}
} else {
/* SI-CI-VI */
- struct legacy_surf_level *levelinfo = &rtex->surface.u.legacy.level[level];
+ struct legacy_surf_level *levelinfo = &tex->surface.u.legacy.level[level];
assert(levelinfo->nblk_x % 8 == 0 && levelinfo->nblk_y % 8 == 0);
- surf->db_depth_base = (rtex->buffer.gpu_address +
- rtex->surface.u.legacy.level[level].offset) >> 8;
- surf->db_stencil_base = (rtex->buffer.gpu_address +
- rtex->surface.u.legacy.stencil_level[level].offset) >> 8;
+ surf->db_depth_base = (tex->buffer.gpu_address +
+ tex->surface.u.legacy.level[level].offset) >> 8;
+ surf->db_stencil_base = (tex->buffer.gpu_address +
+ tex->surface.u.legacy.stencil_level[level].offset) >> 8;
z_info = S_028040_FORMAT(format) |
- S_028040_NUM_SAMPLES(util_logbase2(rtex->buffer.b.b.nr_samples));
+ S_028040_NUM_SAMPLES(util_logbase2(tex->buffer.b.b.nr_samples));
s_info = S_028044_FORMAT(stencil_format);
- surf->db_depth_info = S_02803C_ADDR5_SWIZZLE_MASK(!rtex->tc_compatible_htile);
+ surf->db_depth_info = S_02803C_ADDR5_SWIZZLE_MASK(!tex->tc_compatible_htile);
if (sctx->chip_class >= CIK) {
struct radeon_info *info = &sctx->screen->info;
- unsigned index = rtex->surface.u.legacy.tiling_index[level];
- unsigned stencil_index = rtex->surface.u.legacy.stencil_tiling_index[level];
- unsigned macro_index = rtex->surface.u.legacy.macro_tile_index;
+ unsigned index = tex->surface.u.legacy.tiling_index[level];
+ unsigned stencil_index = tex->surface.u.legacy.stencil_tiling_index[level];
+ unsigned macro_index = tex->surface.u.legacy.macro_tile_index;
unsigned tile_mode = info->si_tile_mode_array[index];
unsigned stencil_tile_mode = info->si_tile_mode_array[stencil_index];
unsigned macro_mode = info->cik_macrotile_mode_array[macro_index];
z_info |= S_028040_TILE_SPLIT(G_009910_TILE_SPLIT(tile_mode));
s_info |= S_028044_TILE_SPLIT(G_009910_TILE_SPLIT(stencil_tile_mode));
} else {
- unsigned tile_mode_index = si_tile_mode_index(rtex, level, false);
+ unsigned tile_mode_index = si_tile_mode_index(tex, level, false);
z_info |= S_028040_TILE_MODE_INDEX(tile_mode_index);
- tile_mode_index = si_tile_mode_index(rtex, level, true);
+ tile_mode_index = si_tile_mode_index(tex, level, true);
s_info |= S_028044_TILE_MODE_INDEX(tile_mode_index);
}
surf->db_depth_slice = S_02805C_SLICE_TILE_MAX((levelinfo->nblk_x *
levelinfo->nblk_y) / 64 - 1);
- if (si_htile_enabled(rtex, level)) {
+ if (si_htile_enabled(tex, level)) {
z_info |= S_028040_TILE_SURFACE_ENABLE(1) |
S_028040_ALLOW_EXPCLEAR(1);
- if (rtex->surface.has_stencil) {
+ if (tex->surface.has_stencil) {
/* Workaround: For a not yet understood reason, the
* combination of MSAA, fast stencil clear and stencil
* decompress messes with subsequent stencil buffer
* use. Disabling EXPCLEAR works around the problem.
*
* Check piglit's arb_texture_multisample-stencil-clear
* test if you want to try changing this.
*/
- if (rtex->buffer.b.b.nr_samples <= 1)
+ if (tex->buffer.b.b.nr_samples <= 1)
s_info |= S_028044_ALLOW_EXPCLEAR(1);
- } else if (!rtex->tc_compatible_htile) {
+ } else if (!tex->tc_compatible_htile) {
/* Use all of the htile_buffer for depth if there's no stencil.
* This must not be set when TC-compatible HTILE is enabled
* due to a hw bug.
*/
s_info |= S_028044_TILE_STENCIL_DISABLE(1);
}
- surf->db_htile_data_base = (rtex->buffer.gpu_address +
- rtex->htile_offset) >> 8;
+ surf->db_htile_data_base = (tex->buffer.gpu_address +
+ tex->htile_offset) >> 8;
surf->db_htile_surface = S_028ABC_FULL_CACHE(1);
- if (rtex->tc_compatible_htile) {
+ if (tex->tc_compatible_htile) {
surf->db_htile_surface |= S_028ABC_TC_COMPATIBLE(1);
/* 0 = full compression. N = only compress up to N-1 Z planes. */
- if (rtex->buffer.b.b.nr_samples <= 1)
+ if (tex->buffer.b.b.nr_samples <= 1)
z_info |= S_028040_DECOMPRESS_ON_N_ZPLANES(5);
- else if (rtex->buffer.b.b.nr_samples <= 4)
+ else if (tex->buffer.b.b.nr_samples <= 4)
z_info |= S_028040_DECOMPRESS_ON_N_ZPLANES(3);
else
z_info |= S_028040_DECOMPRESS_ON_N_ZPLANES(2);
if (sctx->framebuffer.state.zsbuf) {
struct pipe_surface *surf = sctx->framebuffer.state.zsbuf;
- struct r600_texture *rtex = (struct r600_texture *)surf->texture;
+ struct si_texture *tex = (struct si_texture *)surf->texture;
- rtex->dirty_level_mask |= 1 << surf->u.tex.level;
+ tex->dirty_level_mask |= 1 << surf->u.tex.level;
- if (rtex->surface.has_stencil)
- rtex->stencil_dirty_level_mask |= 1 << surf->u.tex.level;
+ if (tex->surface.has_stencil)
+ tex->stencil_dirty_level_mask |= 1 << surf->u.tex.level;
}
unsigned compressed_cb_mask = sctx->framebuffer.compressed_cb_mask;
while (compressed_cb_mask) {
unsigned i = u_bit_scan(&compressed_cb_mask);
struct pipe_surface *surf = sctx->framebuffer.state.cbufs[i];
- struct r600_texture *rtex = (struct r600_texture*)surf->texture;
+ struct si_texture *tex = (struct si_texture*)surf->texture;
- if (rtex->surface.fmask_size)
- rtex->dirty_level_mask |= 1 << surf->u.tex.level;
- if (rtex->dcc_gather_statistics)
- rtex->separate_dcc_dirty = true;
+ if (tex->surface.fmask_size)
+ tex->dirty_level_mask |= 1 << surf->u.tex.level;
+ if (tex->dcc_gather_statistics)
+ tex->separate_dcc_dirty = true;
}
}
{
for (int i = 0; i < state->nr_cbufs; ++i) {
struct r600_surface *surf = NULL;
- struct r600_texture *rtex;
+ struct si_texture *tex;
if (!state->cbufs[i])
continue;
surf = (struct r600_surface*)state->cbufs[i];
- rtex = (struct r600_texture*)surf->base.texture;
+ tex = (struct si_texture*)surf->base.texture;
- p_atomic_dec(&rtex->framebuffers_bound);
+ p_atomic_dec(&tex->framebuffers_bound);
}
}
struct si_context *sctx = (struct si_context *)ctx;
struct pipe_constant_buffer constbuf = {0};
struct r600_surface *surf = NULL;
- struct r600_texture *rtex;
+ struct si_texture *tex;
bool old_any_dst_linear = sctx->framebuffer.any_dst_linear;
unsigned old_nr_samples = sctx->framebuffer.nr_samples;
unsigned old_colorbuf_enabled_4bit = sctx->framebuffer.colorbuf_enabled_4bit;
bool old_has_zsbuf = !!sctx->framebuffer.state.zsbuf;
bool old_has_stencil =
old_has_zsbuf &&
- ((struct r600_texture*)sctx->framebuffer.state.zsbuf->texture)->surface.has_stencil;
+ ((struct si_texture*)sctx->framebuffer.state.zsbuf->texture)->surface.has_stencil;
bool unbound = false;
int i;
if (!sctx->framebuffer.state.cbufs[i])
continue;
- rtex = (struct r600_texture*)sctx->framebuffer.state.cbufs[i]->texture;
- if (rtex->dcc_gather_statistics)
- vi_separate_dcc_stop_query(sctx, rtex);
+ tex = (struct si_texture*)sctx->framebuffer.state.cbufs[i]->texture;
+ if (tex->dcc_gather_statistics)
+ vi_separate_dcc_stop_query(sctx, tex);
}
/* Disable DCC if the formats are incompatible. */
continue;
surf = (struct r600_surface*)state->cbufs[i];
- rtex = (struct r600_texture*)surf->base.texture;
+ tex = (struct si_texture*)surf->base.texture;
if (!surf->dcc_incompatible)
continue;
unbound = true;
}
- if (vi_dcc_enabled(rtex, surf->base.u.tex.level))
- if (!si_texture_disable_dcc(sctx, rtex))
- si_decompress_dcc(sctx, rtex);
+ if (vi_dcc_enabled(tex, surf->base.u.tex.level))
+ if (!si_texture_disable_dcc(sctx, tex))
+ si_decompress_dcc(sctx, tex);
surf->dcc_incompatible = false;
}
continue;
surf = (struct r600_surface*)state->cbufs[i];
- rtex = (struct r600_texture*)surf->base.texture;
+ tex = (struct si_texture*)surf->base.texture;
if (!surf->color_initialized) {
si_initialize_color_surface(sctx, surf);
if (surf->color_is_int10)
sctx->framebuffer.color_is_int10 |= 1 << i;
- if (rtex->surface.fmask_size)
+ if (tex->surface.fmask_size)
sctx->framebuffer.compressed_cb_mask |= 1 << i;
else
sctx->framebuffer.uncompressed_cb_mask |= 1 << i;
/* Don't update nr_color_samples for non-AA buffers.
* (e.g. destination of MSAA resolve)
*/
- if (rtex->buffer.b.b.nr_samples >= 2 &&
- rtex->num_color_samples < rtex->buffer.b.b.nr_samples) {
+ if (tex->buffer.b.b.nr_samples >= 2 &&
+ tex->num_color_samples < tex->buffer.b.b.nr_samples) {
sctx->framebuffer.nr_color_samples =
MIN2(sctx->framebuffer.nr_color_samples,
- rtex->num_color_samples);
+ tex->num_color_samples);
}
- if (rtex->surface.is_linear)
+ if (tex->surface.is_linear)
sctx->framebuffer.any_dst_linear = true;
- if (vi_dcc_enabled(rtex, surf->base.u.tex.level))
+ if (vi_dcc_enabled(tex, surf->base.u.tex.level))
sctx->framebuffer.CB_has_shader_readable_metadata = true;
si_context_add_resource_size(sctx, surf->base.texture);
- p_atomic_inc(&rtex->framebuffers_bound);
+ p_atomic_inc(&tex->framebuffers_bound);
- if (rtex->dcc_gather_statistics) {
+ if (tex->dcc_gather_statistics) {
/* Dirty tracking must be enabled for DCC usage analysis. */
sctx->framebuffer.compressed_cb_mask |= 1 << i;
- vi_separate_dcc_start_query(sctx, rtex);
+ vi_separate_dcc_start_query(sctx, tex);
}
}
- struct r600_texture *zstex = NULL;
+ struct si_texture *zstex = NULL;
if (state->zsbuf) {
surf = (struct r600_surface*)state->zsbuf;
- zstex = (struct r600_texture*)surf->base.texture;
+ zstex = (struct si_texture*)surf->base.texture;
if (!surf->depth_initialized) {
si_init_depth_surface(sctx, surf);
struct radeon_cmdbuf *cs = sctx->gfx_cs;
struct pipe_framebuffer_state *state = &sctx->framebuffer.state;
unsigned i, nr_cbufs = state->nr_cbufs;
- struct r600_texture *tex = NULL;
+ struct si_texture *tex = NULL;
struct r600_surface *cb = NULL;
unsigned cb_color_info = 0;
continue;
}
- tex = (struct r600_texture *)cb->base.texture;
+ tex = (struct si_texture *)cb->base.texture;
radeon_add_to_buffer_list(sctx, sctx->gfx_cs,
&tex->buffer, RADEON_USAGE_READWRITE,
tex->buffer.b.b.nr_samples > 1 ?
/* ZS buffer. */
if (state->zsbuf && sctx->framebuffer.dirty_zsbuf) {
struct r600_surface *zb = (struct r600_surface*)state->zsbuf;
- struct r600_texture *rtex = (struct r600_texture*)zb->base.texture;
+ struct si_texture *tex = (struct si_texture*)zb->base.texture;
radeon_add_to_buffer_list(sctx, sctx->gfx_cs,
- &rtex->buffer, RADEON_USAGE_READWRITE,
+ &tex->buffer, RADEON_USAGE_READWRITE,
zb->base.texture->nr_samples > 1 ?
RADEON_PRIO_DEPTH_BUFFER_MSAA :
RADEON_PRIO_DEPTH_BUFFER);
radeon_set_context_reg_seq(cs, R_028038_DB_Z_INFO, 10);
radeon_emit(cs, zb->db_z_info | /* DB_Z_INFO */
- S_028038_ZRANGE_PRECISION(rtex->depth_clear_value != 0));
+ S_028038_ZRANGE_PRECISION(tex->depth_clear_value != 0));
radeon_emit(cs, zb->db_stencil_info); /* DB_STENCIL_INFO */
radeon_emit(cs, zb->db_depth_base); /* DB_Z_READ_BASE */
radeon_emit(cs, S_028044_BASE_HI(zb->db_depth_base >> 32)); /* DB_Z_READ_BASE_HI */
radeon_set_context_reg_seq(cs, R_02803C_DB_DEPTH_INFO, 9);
radeon_emit(cs, zb->db_depth_info); /* DB_DEPTH_INFO */
radeon_emit(cs, zb->db_z_info | /* DB_Z_INFO */
- S_028040_ZRANGE_PRECISION(rtex->depth_clear_value != 0));
+ S_028040_ZRANGE_PRECISION(tex->depth_clear_value != 0));
radeon_emit(cs, zb->db_stencil_info); /* DB_STENCIL_INFO */
radeon_emit(cs, zb->db_depth_base); /* DB_Z_READ_BASE */
radeon_emit(cs, zb->db_stencil_base); /* DB_STENCIL_READ_BASE */
}
radeon_set_context_reg_seq(cs, R_028028_DB_STENCIL_CLEAR, 2);
- radeon_emit(cs, rtex->stencil_clear_value); /* R_028028_DB_STENCIL_CLEAR */
- radeon_emit(cs, fui(rtex->depth_clear_value)); /* R_02802C_DB_DEPTH_CLEAR */
+ radeon_emit(cs, tex->stencil_clear_value); /* R_028028_DB_STENCIL_CLEAR */
+ radeon_emit(cs, fui(tex->depth_clear_value)); /* R_02802C_DB_DEPTH_CLEAR */
radeon_set_context_reg(cs, R_028008_DB_DEPTH_VIEW, zb->db_depth_view);
radeon_set_context_reg(cs, R_028ABC_DB_HTILE_SURFACE, zb->db_htile_surface);
};
if (sctx->framebuffer.state.zsbuf) {
- struct r600_texture *zstex =
- (struct r600_texture*)sctx->framebuffer.state.zsbuf->texture;
+ struct si_texture *zstex =
+ (struct si_texture*)sctx->framebuffer.state.zsbuf->texture;
bool has_stencil = zstex->surface.has_stencil;
dsa_order_invariant = dsa->order_invariance[has_stencil];
if (!dsa_order_invariant.zs)
*/
void
si_make_texture_descriptor(struct si_screen *screen,
- struct r600_texture *tex,
+ struct si_texture *tex,
bool sampler,
enum pipe_texture_target target,
enum pipe_format pipe_format,
{
struct si_context *sctx = (struct si_context*)ctx;
struct si_sampler_view *view = CALLOC_STRUCT(si_sampler_view);
- struct r600_texture *tmp = (struct r600_texture*)texture;
+ struct si_texture *tex = (struct si_texture*)texture;
unsigned base_level, first_level, last_level;
unsigned char state_swizzle[4];
unsigned height, depth, width;
pipe_format = state->format;
/* Depth/stencil texturing sometimes needs separate texture. */
- if (tmp->is_depth && !si_can_sample_zs(tmp, view->is_stencil_sampler)) {
- if (!tmp->flushed_depth_texture &&
+ if (tex->is_depth && !si_can_sample_zs(tex, view->is_stencil_sampler)) {
+ if (!tex->flushed_depth_texture &&
!si_init_flushed_depth_texture(ctx, texture, NULL)) {
pipe_resource_reference(&view->base.texture, NULL);
FREE(view);
return NULL;
}
- assert(tmp->flushed_depth_texture);
+ assert(tex->flushed_depth_texture);
/* Override format for the case where the flushed texture
* contains only Z or only S.
*/
- if (tmp->flushed_depth_texture->buffer.b.b.format != tmp->buffer.b.b.format)
- pipe_format = tmp->flushed_depth_texture->buffer.b.b.format;
+ if (tex->flushed_depth_texture->buffer.b.b.format != tex->buffer.b.b.format)
+ pipe_format = tex->flushed_depth_texture->buffer.b.b.format;
- tmp = tmp->flushed_depth_texture;
+ tex = tex->flushed_depth_texture;
}
- surflevel = tmp->surface.u.legacy.level;
+ surflevel = tex->surface.u.legacy.level;
- if (tmp->db_compatible) {
+ if (tex->db_compatible) {
if (!view->is_stencil_sampler)
- pipe_format = tmp->db_render_format;
+ pipe_format = tex->db_render_format;
switch (pipe_format) {
case PIPE_FORMAT_Z32_FLOAT_S8X24_UINT:
case PIPE_FORMAT_S8X24_UINT:
case PIPE_FORMAT_X32_S8X24_UINT:
pipe_format = PIPE_FORMAT_S8_UINT;
- surflevel = tmp->surface.u.legacy.stencil_level;
+ surflevel = tex->surface.u.legacy.stencil_level;
break;
default:;
}
state->u.tex.first_level,
state->format);
- si_make_texture_descriptor(sctx->screen, tmp, true,
+ si_make_texture_descriptor(sctx->screen, tex, true,
state->target, pipe_format, state_swizzle,
first_level, last_level,
state->u.tex.first_layer, last_layer,
struct si_screen;
struct si_shader;
struct si_shader_selector;
-struct r600_texture;
+struct si_texture;
struct si_qbo_state;
/* State atoms are callbacks which write a sequence of packets into a GPU
/* si_descriptors.c */
void si_set_mutable_tex_desc_fields(struct si_screen *sscreen,
- struct r600_texture *tex,
+ struct si_texture *tex,
const struct legacy_surf_level *base_level_info,
unsigned base_level, unsigned first_level,
unsigned block_width, bool is_stencil,
uint32_t *state);
void
si_make_texture_descriptor(struct si_screen *screen,
- struct r600_texture *tex,
+ struct si_texture *tex,
bool sampler,
enum pipe_texture_target target,
enum pipe_format pipe_format,
if (!(cb_target_enabled_4bit & (0xf << (i * 4))))
continue;
- struct r600_texture *rtex =
- (struct r600_texture*)sctx->framebuffer.state.cbufs[i]->texture;
- sum += rtex->surface.bpe;
+ struct si_texture *tex =
+ (struct si_texture*)sctx->framebuffer.state.cbufs[i]->texture;
+ sum += tex->surface.bpe;
}
/* Multiply the sum by some function of the number of samples. */
return size;
}
- struct r600_texture *rtex =
- (struct r600_texture*)sctx->framebuffer.state.zsbuf->texture;
+ struct si_texture *tex =
+ (struct si_texture*)sctx->framebuffer.state.zsbuf->texture;
unsigned depth_coeff = dsa->depth_enabled ? 5 : 0;
- unsigned stencil_coeff = rtex->surface.has_stencil &&
+ unsigned stencil_coeff = tex->surface.has_stencil &&
dsa->stencil_enabled ? 1 : 0;
unsigned sum = 4 * (depth_coeff + stencil_coeff) *
- rtex->buffer.b.b.nr_samples;
+ tex->buffer.b.b.nr_samples;
static const si_bin_size_subtable table[] = {
{
*/
for (i = 0; i < iterations; i++) {
struct pipe_resource tsrc = {}, tdst = {}, *src, *dst;
- struct r600_texture *rdst;
- struct r600_texture *rsrc;
+ struct si_texture *sdst;
+ struct si_texture *ssrc;
struct cpu_texture src_cpu, dst_cpu;
unsigned bpp, max_width, max_height, max_depth, j, num;
unsigned gfx_blits = 0, dma_blits = 0, max_tex_side_gen;
dst = screen->resource_create(screen, &tdst);
assert(src);
assert(dst);
- rdst = (struct r600_texture*)dst;
- rsrc = (struct r600_texture*)src;
+ sdst = (struct si_texture*)dst;
+ ssrc = (struct si_texture*)src;
alloc_cpu_texture(&src_cpu, &tsrc, bpp);
alloc_cpu_texture(&dst_cpu, &tdst, bpp);
printf("%4u: dst = (%5u x %5u x %u, %s), "
" src = (%5u x %5u x %u, %s), bpp = %2u, ",
i, tdst.width0, tdst.height0, tdst.array_size,
- array_mode_to_string(sscreen, &rdst->surface),
+ array_mode_to_string(sscreen, &sdst->surface),
tsrc.width0, tsrc.height0, tsrc.array_size,
- array_mode_to_string(sscreen, &rsrc->surface), bpp);
+ array_mode_to_string(sscreen, &ssrc->surface), bpp);
fflush(stdout);
/* set src pixels */
set_random_pixels(ctx, src, &src_cpu);
/* clear dst pixels */
- si_clear_buffer(sctx, dst, 0, rdst->surface.surf_size, 0, true);
+ si_clear_buffer(sctx, dst, 0, sdst->surface.surf_size, 0, true);
memset(dst_cpu.ptr, 0, dst_cpu.layer_stride * tdst.array_size);
/* preparation */
dstz = rand() % (tdst.array_size - depth + 1);
/* special code path to hit the tiled partial copies */
- if (!rsrc->surface.is_linear &&
- !rdst->surface.is_linear &&
+ if (!ssrc->surface.is_linear &&
+ !sdst->surface.is_linear &&
rand() & 1) {
if (max_width < 8 || max_height < 8)
continue;
}
/* special code path to hit out-of-bounds reads in L2T */
- if (rsrc->surface.is_linear &&
- !rdst->surface.is_linear &&
+ if (ssrc->surface.is_linear &&
+ !sdst->surface.is_linear &&
rand() % 4 == 0) {
srcx = 0;
srcy = 0;
bool si_prepare_for_dma_blit(struct si_context *sctx,
- struct r600_texture *rdst,
+ struct si_texture *dst,
unsigned dst_level, unsigned dstx,
unsigned dsty, unsigned dstz,
- struct r600_texture *rsrc,
+ struct si_texture *src,
unsigned src_level,
const struct pipe_box *src_box)
{
if (!sctx->dma_cs)
return false;
- if (rdst->surface.bpe != rsrc->surface.bpe)
+ if (dst->surface.bpe != src->surface.bpe)
return false;
/* MSAA: Blits don't exist in the real world. */
- if (rsrc->buffer.b.b.nr_samples > 1 ||
- rdst->buffer.b.b.nr_samples > 1)
+ if (src->buffer.b.b.nr_samples > 1 ||
+ dst->buffer.b.b.nr_samples > 1)
return false;
/* Depth-stencil surfaces:
* When dst is linear, the DB->CB copy preserves HTILE.
* When dst is tiled, the 3D path must be used to update HTILE.
*/
- if (rsrc->is_depth || rdst->is_depth)
+ if (src->is_depth || dst->is_depth)
return false;
/* DCC as:
* src: Use the 3D path. DCC decompression is expensive.
* dst: Use the 3D path to compress the pixels with DCC.
*/
- if (vi_dcc_enabled(rsrc, src_level) ||
- vi_dcc_enabled(rdst, dst_level))
+ if (vi_dcc_enabled(src, src_level) ||
+ vi_dcc_enabled(dst, dst_level))
return false;
/* CMASK as:
* dst: If overwriting the whole texture, discard CMASK and use
* SDMA. Otherwise, use the 3D path.
*/
- if (rdst->cmask.size && rdst->dirty_level_mask & (1 << dst_level)) {
+ if (dst->cmask.size && dst->dirty_level_mask & (1 << dst_level)) {
/* The CMASK clear is only enabled for the first level. */
assert(dst_level == 0);
- if (!util_texrange_covers_whole_level(&rdst->buffer.b.b, dst_level,
+ if (!util_texrange_covers_whole_level(&dst->buffer.b.b, dst_level,
dstx, dsty, dstz, src_box->width,
src_box->height, src_box->depth))
return false;
- si_texture_discard_cmask(sctx->screen, rdst);
+ si_texture_discard_cmask(sctx->screen, dst);
}
/* All requirements are met. Prepare textures for SDMA. */
- if (rsrc->cmask.size && rsrc->dirty_level_mask & (1 << src_level))
- sctx->b.flush_resource(&sctx->b, &rsrc->buffer.b.b);
+ if (src->cmask.size && src->dirty_level_mask & (1 << src_level))
+ sctx->b.flush_resource(&sctx->b, &src->buffer.b.b);
- assert(!(rsrc->dirty_level_mask & (1 << src_level)));
- assert(!(rdst->dirty_level_mask & (1 << dst_level)));
+ assert(!(src->dirty_level_mask & (1 << src_level)));
+ assert(!(dst->dirty_level_mask & (1 << dst_level)));
return true;
}
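The function is deliberately a ladder of early-outs: any capability mismatch rejects SDMA and the caller falls back to the gfx (3D) blit. A minimal self-contained sketch of that shape (struct and field names invented for illustration):

    #include <stdbool.h>

    struct blit_caps {
            unsigned bpe, nr_samples;
            bool is_depth, dcc_enabled;
    };

    /* Every mismatch is an immediate "no"; only a fully clean pair of
     * surfaces takes the SDMA path. */
    static bool can_use_sdma(const struct blit_caps *dst,
                             const struct blit_caps *src)
    {
            if (dst->bpe != src->bpe)
                    return false;
            if (src->nr_samples > 1 || dst->nr_samples > 1)
                    return false;
            if (src->is_depth || dst->is_depth)
                    return false;
            if (src->dcc_enabled || dst->dcc_enabled)
                    return false;
            return true;
    }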
}
static unsigned si_texture_get_offset(struct si_screen *sscreen,
- struct r600_texture *rtex, unsigned level,
+ struct si_texture *tex, unsigned level,
const struct pipe_box *box,
unsigned *stride,
unsigned *layer_stride)
{
if (sscreen->info.chip_class >= GFX9) {
- *stride = rtex->surface.u.gfx9.surf_pitch * rtex->surface.bpe;
- *layer_stride = rtex->surface.u.gfx9.surf_slice_size;
+ *stride = tex->surface.u.gfx9.surf_pitch * tex->surface.bpe;
+ *layer_stride = tex->surface.u.gfx9.surf_slice_size;
if (!box)
return 0;
/* Each texture is an array of slices. Each slice is an array
* of mipmap levels. */
- return box->z * rtex->surface.u.gfx9.surf_slice_size +
- rtex->surface.u.gfx9.offset[level] +
- (box->y / rtex->surface.blk_h *
- rtex->surface.u.gfx9.surf_pitch +
- box->x / rtex->surface.blk_w) * rtex->surface.bpe;
+ return box->z * tex->surface.u.gfx9.surf_slice_size +
+ tex->surface.u.gfx9.offset[level] +
+ (box->y / tex->surface.blk_h *
+ tex->surface.u.gfx9.surf_pitch +
+ box->x / tex->surface.blk_w) * tex->surface.bpe;
} else {
- *stride = rtex->surface.u.legacy.level[level].nblk_x *
- rtex->surface.bpe;
- assert((uint64_t)rtex->surface.u.legacy.level[level].slice_size_dw * 4 <= UINT_MAX);
- *layer_stride = (uint64_t)rtex->surface.u.legacy.level[level].slice_size_dw * 4;
+ *stride = tex->surface.u.legacy.level[level].nblk_x *
+ tex->surface.bpe;
+ assert((uint64_t)tex->surface.u.legacy.level[level].slice_size_dw * 4 <= UINT_MAX);
+ *layer_stride = (uint64_t)tex->surface.u.legacy.level[level].slice_size_dw * 4;
if (!box)
- return rtex->surface.u.legacy.level[level].offset;
+ return tex->surface.u.legacy.level[level].offset;
/* Each texture is an array of mipmap levels. Each level is
* an array of slices. */
- return rtex->surface.u.legacy.level[level].offset +
- box->z * (uint64_t)rtex->surface.u.legacy.level[level].slice_size_dw * 4 +
- (box->y / rtex->surface.blk_h *
- rtex->surface.u.legacy.level[level].nblk_x +
- box->x / rtex->surface.blk_w) * rtex->surface.bpe;
+ return tex->surface.u.legacy.level[level].offset +
+ box->z * (uint64_t)tex->surface.u.legacy.level[level].slice_size_dw * 4 +
+ (box->y / tex->surface.blk_h *
+ tex->surface.u.legacy.level[level].nblk_x +
+ box->x / tex->surface.blk_w) * tex->surface.bpe;
}
}
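For intuition, the legacy branch is plain block arithmetic: skip to the level, skip whole slices, then index into the block grid. A runnable toy with hypothetical numbers (not driver code):

    #include <stdint.h>
    #include <stdio.h>

    /* Same math as the legacy path above, with invented inputs. */
    static uint64_t texel_offset(uint64_t level_offset, uint64_t slice_size,
                                 unsigned nblk_x, unsigned bpe,
                                 unsigned blk_w, unsigned blk_h,
                                 unsigned x, unsigned y, unsigned z)
    {
            return level_offset + z * slice_size +
                   ((uint64_t)(y / blk_h) * nblk_x + x / blk_w) * bpe;
    }

    int main(void)
    {
            /* e.g. a BC1-like format: 4x4 blocks, 8 bytes each,
             * 64 blocks per row, 32768-byte slices */
            printf("%llu\n", (unsigned long long)
                   texel_offset(0, 32768, 64, 8, 4, 4, 16, 8, 1));
            /* prints 33824 = 32768 + (2*64 + 4)*8 */
            return 0;
    }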
}
static void si_texture_init_metadata(struct si_screen *sscreen,
- struct r600_texture *rtex,
+ struct si_texture *tex,
struct radeon_bo_metadata *metadata)
{
- struct radeon_surf *surface = &rtex->surface;
+ struct radeon_surf *surface = &tex->surface;
memset(metadata, 0, sizeof(*metadata));
}
void si_eliminate_fast_color_clear(struct si_context *sctx,
- struct r600_texture *rtex)
+ struct si_texture *tex)
{
struct si_screen *sscreen = sctx->screen;
struct pipe_context *ctx = &sctx->b;
mtx_lock(&sscreen->aux_context_lock);
unsigned n = sctx->num_decompress_calls;
- ctx->flush_resource(ctx, &rtex->buffer.b.b);
+ ctx->flush_resource(ctx, &tex->buffer.b.b);
/* Flush only if any fast clear elimination took place. */
if (n != sctx->num_decompress_calls)
}
void si_texture_discard_cmask(struct si_screen *sscreen,
- struct r600_texture *rtex)
+ struct si_texture *tex)
{
- if (!rtex->cmask.size)
+ if (!tex->cmask.size)
return;
- assert(rtex->buffer.b.b.nr_samples <= 1);
+ assert(tex->buffer.b.b.nr_samples <= 1);
/* Disable CMASK. */
- memset(&rtex->cmask, 0, sizeof(rtex->cmask));
- rtex->cmask.base_address_reg = rtex->buffer.gpu_address >> 8;
- rtex->dirty_level_mask = 0;
+ memset(&tex->cmask, 0, sizeof(tex->cmask));
+ tex->cmask.base_address_reg = tex->buffer.gpu_address >> 8;
+ tex->dirty_level_mask = 0;
- rtex->cb_color_info &= ~S_028C70_FAST_CLEAR(1);
+ tex->cb_color_info &= ~S_028C70_FAST_CLEAR(1);
- if (rtex->cmask_buffer != &rtex->buffer)
- r600_resource_reference(&rtex->cmask_buffer, NULL);
+ if (tex->cmask_buffer != &tex->buffer)
+ r600_resource_reference(&tex->cmask_buffer, NULL);
/* Notify all contexts about the change. */
p_atomic_inc(&sscreen->dirty_tex_counter);
p_atomic_inc(&sscreen->compressed_colortex_counter);
}
-static bool si_can_disable_dcc(struct r600_texture *rtex)
+static bool si_can_disable_dcc(struct si_texture *tex)
{
/* We can't disable DCC if it can be written by another process. */
- return rtex->dcc_offset &&
- (!rtex->buffer.b.is_shared ||
- !(rtex->buffer.external_usage & PIPE_HANDLE_USAGE_WRITE));
+ return tex->dcc_offset &&
+ (!tex->buffer.b.is_shared ||
+ !(tex->buffer.external_usage & PIPE_HANDLE_USAGE_WRITE));
}
static bool si_texture_discard_dcc(struct si_screen *sscreen,
- struct r600_texture *rtex)
+ struct si_texture *tex)
{
- if (!si_can_disable_dcc(rtex))
+ if (!si_can_disable_dcc(tex))
return false;
- assert(rtex->dcc_separate_buffer == NULL);
+ assert(tex->dcc_separate_buffer == NULL);
/* Disable DCC. */
- rtex->dcc_offset = 0;
+ tex->dcc_offset = 0;
/* Notify all contexts about the change. */
p_atomic_inc(&sscreen->dirty_tex_counter);
* if you don't.
*/
bool si_texture_disable_dcc(struct si_context *sctx,
- struct r600_texture *rtex)
+ struct si_texture *tex)
{
struct si_screen *sscreen = sctx->screen;
- if (!si_can_disable_dcc(rtex))
+ if (!si_can_disable_dcc(tex))
return false;
if (&sctx->b == sscreen->aux_context)
mtx_lock(&sscreen->aux_context_lock);
/* Decompress DCC. */
- si_decompress_dcc(sctx, rtex);
+ si_decompress_dcc(sctx, tex);
sctx->b.flush(&sctx->b, NULL, 0);
if (&sctx->b == sscreen->aux_context)
mtx_unlock(&sscreen->aux_context_lock);
- return si_texture_discard_dcc(sscreen, rtex);
+ return si_texture_discard_dcc(sscreen, tex);
}
static void si_reallocate_texture_inplace(struct si_context *sctx,
- struct r600_texture *rtex,
+ struct si_texture *tex,
unsigned new_bind_flag,
bool invalidate_storage)
{
struct pipe_screen *screen = sctx->b.screen;
- struct r600_texture *new_tex;
- struct pipe_resource templ = rtex->buffer.b.b;
+ struct si_texture *new_tex;
+ struct pipe_resource templ = tex->buffer.b.b;
unsigned i;
templ.bind |= new_bind_flag;
- if (rtex->buffer.b.is_shared)
+ if (tex->buffer.b.is_shared)
return;
if (new_bind_flag == PIPE_BIND_LINEAR) {
- if (rtex->surface.is_linear)
+ if (tex->surface.is_linear)
return;
/* This fails with MSAA, depth, and compressed textures. */
return;
}
- new_tex = (struct r600_texture*)screen->resource_create(screen, &templ);
+ new_tex = (struct si_texture*)screen->resource_create(screen, &templ);
if (!new_tex)
return;
util_num_layers(&templ, i), &box);
sctx->dma_copy(&sctx->b, &new_tex->buffer.b.b, i, 0, 0, 0,
- &rtex->buffer.b.b, i, &box);
+ &tex->buffer.b.b, i, &box);
}
}
if (new_bind_flag == PIPE_BIND_LINEAR) {
- si_texture_discard_cmask(sctx->screen, rtex);
- si_texture_discard_dcc(sctx->screen, rtex);
+ si_texture_discard_cmask(sctx->screen, tex);
+ si_texture_discard_dcc(sctx->screen, tex);
}
- /* Replace the structure fields of rtex. */
- rtex->buffer.b.b.bind = templ.bind;
- pb_reference(&rtex->buffer.buf, new_tex->buffer.buf);
- rtex->buffer.gpu_address = new_tex->buffer.gpu_address;
- rtex->buffer.vram_usage = new_tex->buffer.vram_usage;
- rtex->buffer.gart_usage = new_tex->buffer.gart_usage;
- rtex->buffer.bo_size = new_tex->buffer.bo_size;
- rtex->buffer.bo_alignment = new_tex->buffer.bo_alignment;
- rtex->buffer.domains = new_tex->buffer.domains;
- rtex->buffer.flags = new_tex->buffer.flags;
- rtex->size = new_tex->size;
- rtex->db_render_format = new_tex->db_render_format;
- rtex->db_compatible = new_tex->db_compatible;
- rtex->can_sample_z = new_tex->can_sample_z;
- rtex->can_sample_s = new_tex->can_sample_s;
- rtex->surface = new_tex->surface;
- rtex->fmask_offset = new_tex->fmask_offset;
- rtex->cmask = new_tex->cmask;
- rtex->cb_color_info = new_tex->cb_color_info;
- rtex->last_msaa_resolve_target_micro_mode = new_tex->last_msaa_resolve_target_micro_mode;
- rtex->htile_offset = new_tex->htile_offset;
- rtex->tc_compatible_htile = new_tex->tc_compatible_htile;
- rtex->depth_cleared = new_tex->depth_cleared;
- rtex->stencil_cleared = new_tex->stencil_cleared;
- rtex->dcc_gather_statistics = new_tex->dcc_gather_statistics;
- rtex->framebuffers_bound = new_tex->framebuffers_bound;
+ /* Replace the structure fields of tex. */
+ tex->buffer.b.b.bind = templ.bind;
+ pb_reference(&tex->buffer.buf, new_tex->buffer.buf);
+ tex->buffer.gpu_address = new_tex->buffer.gpu_address;
+ tex->buffer.vram_usage = new_tex->buffer.vram_usage;
+ tex->buffer.gart_usage = new_tex->buffer.gart_usage;
+ tex->buffer.bo_size = new_tex->buffer.bo_size;
+ tex->buffer.bo_alignment = new_tex->buffer.bo_alignment;
+ tex->buffer.domains = new_tex->buffer.domains;
+ tex->buffer.flags = new_tex->buffer.flags;
+ tex->size = new_tex->size;
+ tex->db_render_format = new_tex->db_render_format;
+ tex->db_compatible = new_tex->db_compatible;
+ tex->can_sample_z = new_tex->can_sample_z;
+ tex->can_sample_s = new_tex->can_sample_s;
+ tex->surface = new_tex->surface;
+ tex->fmask_offset = new_tex->fmask_offset;
+ tex->cmask = new_tex->cmask;
+ tex->cb_color_info = new_tex->cb_color_info;
+ tex->last_msaa_resolve_target_micro_mode = new_tex->last_msaa_resolve_target_micro_mode;
+ tex->htile_offset = new_tex->htile_offset;
+ tex->tc_compatible_htile = new_tex->tc_compatible_htile;
+ tex->depth_cleared = new_tex->depth_cleared;
+ tex->stencil_cleared = new_tex->stencil_cleared;
+ tex->dcc_gather_statistics = new_tex->dcc_gather_statistics;
+ tex->framebuffers_bound = new_tex->framebuffers_bound;
if (new_bind_flag == PIPE_BIND_LINEAR) {
- assert(!rtex->htile_offset);
- assert(!rtex->cmask.size);
- assert(!rtex->surface.fmask_size);
- assert(!rtex->dcc_offset);
- assert(!rtex->is_depth);
+ assert(!tex->htile_offset);
+ assert(!tex->cmask.size);
+ assert(!tex->surface.fmask_size);
+ assert(!tex->dcc_offset);
+ assert(!tex->is_depth);
}
- r600_texture_reference(&new_tex, NULL);
+ si_texture_reference(&new_tex, NULL);
p_atomic_inc(&sctx->screen->dirty_tex_counter);
}
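The in-place reallocation above follows a transplant pattern: create a fresh texture with the new bind flags, copy the contents over, move the new buffer and derived state into the existing struct so every live pointer to it stays valid, then drop the temporary wrapper. In outline (pseudocode over the names used above):

    new_tex = screen->resource_create(screen, &templ);
    copy all levels/layers from tex to new_tex;      /* unless storage is invalidated */
    move new_tex's buffer and surface state into tex; /* pb_reference + field copies */
    si_texture_reference(&new_tex, NULL);            /* only the wrapper dies */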
}
static void si_query_opaque_metadata(struct si_screen *sscreen,
- struct r600_texture *rtex,
+ struct si_texture *tex,
struct radeon_bo_metadata *md)
{
- struct pipe_resource *res = &rtex->buffer.b.b;
+ struct pipe_resource *res = &tex->buffer.b.b;
static const unsigned char swizzle[] = {
PIPE_SWIZZLE_X,
PIPE_SWIZZLE_Y,
if (!sscreen->info.has_bo_metadata)
return;
- assert(rtex->dcc_separate_buffer == NULL);
- assert(rtex->surface.fmask_size == 0);
+ assert(tex->dcc_separate_buffer == NULL);
+ assert(tex->surface.fmask_size == 0);
/* Metadata image format version 1:
* [0] = 1 (metadata format identifier)
/* TILE_MODE_INDEX is ambiguous without a PCI ID. */
md->metadata[1] = si_get_bo_metadata_word1(sscreen);
- si_make_texture_descriptor(sscreen, rtex, true,
+ si_make_texture_descriptor(sscreen, tex, true,
res->target, res->format,
swizzle, 0, res->last_level, 0,
is_array ? res->array_size - 1 : 0,
res->width0, res->height0, res->depth0,
desc, NULL);
- si_set_mutable_tex_desc_fields(sscreen, rtex, &rtex->surface.u.legacy.level[0],
- 0, 0, rtex->surface.blk_w, false, desc);
+ si_set_mutable_tex_desc_fields(sscreen, tex, &tex->surface.u.legacy.level[0],
+ 0, 0, tex->surface.blk_w, false, desc);
/* Clear the base address and set the relative DCC offset. */
desc[0] = 0;
desc[1] &= C_008F14_BASE_ADDRESS_HI;
- desc[7] = rtex->dcc_offset >> 8;
+ desc[7] = tex->dcc_offset >> 8;
/* Dwords [2:9] contain the image descriptor. */
memcpy(&md->metadata[2], desc, sizeof(desc));
/* Dwords [10:..] contain the mipmap level offsets. */
if (sscreen->info.chip_class <= VI) {
for (i = 0; i <= res->last_level; i++)
- md->metadata[10+i] = rtex->surface.u.legacy.level[i].offset >> 8;
+ md->metadata[10+i] = tex->surface.u.legacy.level[i].offset >> 8;
md->size_metadata += (1 + res->last_level) * 4;
}
}
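Given the layout built above (word 0: format identifier, word 1: tiling info, words 2..9: the image descriptor, words 10 onward: per-level offsets shifted right by 8), a hypothetical consumer would unpack it like this (illustrative sketch only, not a Mesa function):

    #include <stdint.h>
    #include <stdio.h>

    static void dump_metadata(const uint32_t md[], unsigned last_level)
    {
            const uint32_t *desc = &md[2]; /* dwords [2:9]: image descriptor */

            printf("dcc offset: %llu\n",
                   (unsigned long long)desc[7] << 8);
            for (unsigned i = 0; i <= last_level; i++)
                    printf("level %u offset: %llu\n", i,
                           (unsigned long long)md[10 + i] << 8);
    }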
static void si_apply_opaque_metadata(struct si_screen *sscreen,
- struct r600_texture *rtex,
+ struct si_texture *tex,
struct radeon_bo_metadata *md)
{
uint32_t *desc = &md->metadata[2];
md->metadata[0] != 0 &&
md->metadata[1] == si_get_bo_metadata_word1(sscreen) &&
G_008F28_COMPRESSION_EN(desc[6])) {
- rtex->dcc_offset = (uint64_t)desc[7] << 8;
+ tex->dcc_offset = (uint64_t)desc[7] << 8;
return;
}
/* Disable DCC. These are always set by texture_from_handle and must
* be cleared here.
*/
- rtex->dcc_offset = 0;
+ tex->dcc_offset = 0;
}
static boolean si_texture_get_handle(struct pipe_screen* screen,
struct si_screen *sscreen = (struct si_screen*)screen;
struct si_context *sctx;
struct r600_resource *res = r600_resource(resource);
- struct r600_texture *rtex = (struct r600_texture*)resource;
+ struct si_texture *tex = (struct si_texture*)resource;
struct radeon_bo_metadata metadata;
bool update_metadata = false;
unsigned stride, offset, slice_size;
/* This is not supported now, but it might be required for OpenCL
* interop in the future.
*/
- if (resource->nr_samples > 1 || rtex->is_depth)
+ if (resource->nr_samples > 1 || tex->is_depth)
return false;
/* Move a suballocated texture into a non-suballocated allocation. */
if (sscreen->ws->buffer_is_suballocated(res->buf) ||
- rtex->surface.tile_swizzle ||
- (rtex->buffer.flags & RADEON_FLAG_NO_INTERPROCESS_SHARING &&
+ tex->surface.tile_swizzle ||
+ (tex->buffer.flags & RADEON_FLAG_NO_INTERPROCESS_SHARING &&
sscreen->info.has_local_buffers &&
whandle->type != WINSYS_HANDLE_TYPE_KMS)) {
assert(!res->b.is_shared);
- si_reallocate_texture_inplace(sctx, rtex,
+ si_reallocate_texture_inplace(sctx, tex,
PIPE_BIND_SHARED, false);
flush = true;
assert(res->b.b.bind & PIPE_BIND_SHARED);
assert(res->flags & RADEON_FLAG_NO_SUBALLOC);
assert(!(res->flags & RADEON_FLAG_NO_INTERPROCESS_SHARING));
- assert(rtex->surface.tile_swizzle == 0);
+ assert(tex->surface.tile_swizzle == 0);
}
/* Since shader image stores don't support DCC on VI,
* disable it for external clients that want write
* access.
*/
- if (usage & PIPE_HANDLE_USAGE_WRITE && rtex->dcc_offset) {
- if (si_texture_disable_dcc(sctx, rtex)) {
+ if (usage & PIPE_HANDLE_USAGE_WRITE && tex->dcc_offset) {
+ if (si_texture_disable_dcc(sctx, tex)) {
update_metadata = true;
/* si_texture_disable_dcc flushes the context */
flush = false;
}
if (!(usage & PIPE_HANDLE_USAGE_EXPLICIT_FLUSH) &&
- (rtex->cmask.size || rtex->dcc_offset)) {
+ (tex->cmask.size || tex->dcc_offset)) {
/* Eliminate fast clear (both CMASK and DCC) */
- si_eliminate_fast_color_clear(sctx, rtex);
+ si_eliminate_fast_color_clear(sctx, tex);
/* eliminate_fast_color_clear flushes the context */
flush = false;
/* Disable CMASK if flush_resource isn't going
* to be called.
*/
- if (rtex->cmask.size)
- si_texture_discard_cmask(sscreen, rtex);
+ if (tex->cmask.size)
+ si_texture_discard_cmask(sscreen, tex);
}
/* Set metadata. */
if (!res->b.is_shared || update_metadata) {
- si_texture_init_metadata(sscreen, rtex, &metadata);
- si_query_opaque_metadata(sscreen, rtex, &metadata);
+ si_texture_init_metadata(sscreen, tex, &metadata);
+ si_query_opaque_metadata(sscreen, tex, &metadata);
sscreen->ws->buffer_set_metadata(res->buf, &metadata);
}
if (sscreen->info.chip_class >= GFX9) {
- offset = rtex->surface.u.gfx9.surf_offset;
- stride = rtex->surface.u.gfx9.surf_pitch *
- rtex->surface.bpe;
- slice_size = rtex->surface.u.gfx9.surf_slice_size;
+ offset = tex->surface.u.gfx9.surf_offset;
+ stride = tex->surface.u.gfx9.surf_pitch *
+ tex->surface.bpe;
+ slice_size = tex->surface.u.gfx9.surf_slice_size;
} else {
- offset = rtex->surface.u.legacy.level[0].offset;
- stride = rtex->surface.u.legacy.level[0].nblk_x *
- rtex->surface.bpe;
- slice_size = (uint64_t)rtex->surface.u.legacy.level[0].slice_size_dw * 4;
+ offset = tex->surface.u.legacy.level[0].offset;
+ stride = tex->surface.u.legacy.level[0].nblk_x *
+ tex->surface.bpe;
+ slice_size = (uint64_t)tex->surface.u.legacy.level[0].slice_size_dw * 4;
}
} else {
/* Buffer exports are for the OpenCL interop. */
/* Move a suballocated buffer into a non-suballocated allocation. */
if (sscreen->ws->buffer_is_suballocated(res->buf) ||
/* A DMABUF export always fails if the BO is local. */
- (rtex->buffer.flags & RADEON_FLAG_NO_INTERPROCESS_SHARING &&
+ (tex->buffer.flags & RADEON_FLAG_NO_INTERPROCESS_SHARING &&
sscreen->info.has_local_buffers)) {
assert(!res->b.is_shared);
static void si_texture_destroy(struct pipe_screen *screen,
struct pipe_resource *ptex)
{
- struct r600_texture *rtex = (struct r600_texture*)ptex;
- struct r600_resource *resource = &rtex->buffer;
+ struct si_texture *tex = (struct si_texture*)ptex;
+ struct r600_resource *resource = &tex->buffer;
- r600_texture_reference(&rtex->flushed_depth_texture, NULL);
+ si_texture_reference(&tex->flushed_depth_texture, NULL);
- if (rtex->cmask_buffer != &rtex->buffer) {
- r600_resource_reference(&rtex->cmask_buffer, NULL);
+ if (tex->cmask_buffer != &tex->buffer) {
+ r600_resource_reference(&tex->cmask_buffer, NULL);
}
pb_reference(&resource->buf, NULL);
- r600_resource_reference(&rtex->dcc_separate_buffer, NULL);
- r600_resource_reference(&rtex->last_dcc_separate_buffer, NULL);
- FREE(rtex);
+ r600_resource_reference(&tex->dcc_separate_buffer, NULL);
+ r600_resource_reference(&tex->last_dcc_separate_buffer, NULL);
+ FREE(tex);
}
static const struct u_resource_vtbl si_texture_vtbl;
void si_texture_get_cmask_info(struct si_screen *sscreen,
- struct r600_texture *rtex,
+ struct si_texture *tex,
struct r600_cmask_info *out)
{
unsigned pipe_interleave_bytes = sscreen->info.pipe_interleave_bytes;
unsigned cl_width, cl_height;
if (sscreen->info.chip_class >= GFX9) {
- out->alignment = rtex->surface.u.gfx9.cmask_alignment;
- out->size = rtex->surface.u.gfx9.cmask_size;
+ out->alignment = tex->surface.u.gfx9.cmask_alignment;
+ out->size = tex->surface.u.gfx9.cmask_size;
return;
}
unsigned base_align = num_pipes * pipe_interleave_bytes;
- unsigned width = align(rtex->buffer.b.b.width0, cl_width*8);
- unsigned height = align(rtex->buffer.b.b.height0, cl_height*8);
+ unsigned width = align(tex->buffer.b.b.width0, cl_width*8);
+ unsigned height = align(tex->buffer.b.b.height0, cl_height*8);
unsigned slice_elements = (width * height) / (8*8);
/* Each element of CMASK is a nibble. */
out->slice_tile_max -= 1;
out->alignment = MAX2(256, base_align);
- out->size = util_num_layers(&rtex->buffer.b.b, 0) *
+ out->size = util_num_layers(&tex->buffer.b.b, 0) *
align(slice_bytes, base_align);
}
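On the legacy path each 8x8-pixel tile gets one 4-bit CMASK element, so the sizing above is straightforward once the dimensions are aligned to the cache-line grid. A worked example with made-up parameters (cl_width = 64, cl_height = 32, a 1000x800 single-layer texture):

    width          = align(1000, 64 * 8) = 1024
    height         = align(800,  32 * 8) = 1024
    slice_elements = (1024 * 1024) / 64  = 16384  /* one per 8x8 tile */
    slice_bytes    = 16384 / 2           = 8192   /* two nibbles per byte */

The final size is then that slice size aligned to base_align, multiplied by the layer count.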
static void si_texture_allocate_cmask(struct si_screen *sscreen,
- struct r600_texture *rtex)
+ struct si_texture *tex)
{
- si_texture_get_cmask_info(sscreen, rtex, &rtex->cmask);
+ si_texture_get_cmask_info(sscreen, tex, &tex->cmask);
- rtex->cmask.offset = align64(rtex->size, rtex->cmask.alignment);
- rtex->size = rtex->cmask.offset + rtex->cmask.size;
+ tex->cmask.offset = align64(tex->size, tex->cmask.alignment);
+ tex->size = tex->cmask.offset + tex->cmask.size;
- rtex->cb_color_info |= S_028C70_FAST_CLEAR(1);
+ tex->cb_color_info |= S_028C70_FAST_CLEAR(1);
}
static void si_texture_get_htile_size(struct si_screen *sscreen,
- struct r600_texture *rtex)
+ struct si_texture *tex)
{
unsigned cl_width, cl_height, width, height;
unsigned slice_elements, slice_bytes, pipe_interleave_bytes, base_align;
assert(sscreen->info.chip_class <= VI);
- rtex->surface.htile_size = 0;
+ tex->surface.htile_size = 0;
- if (rtex->surface.u.legacy.level[0].mode == RADEON_SURF_MODE_1D &&
+ if (tex->surface.u.legacy.level[0].mode == RADEON_SURF_MODE_1D &&
!sscreen->info.htile_cmask_support_1d_tiling)
return;
return;
}
- width = align(rtex->buffer.b.b.width0, cl_width * 8);
- height = align(rtex->buffer.b.b.height0, cl_height * 8);
+ width = align(tex->buffer.b.b.width0, cl_width * 8);
+ height = align(tex->buffer.b.b.height0, cl_height * 8);
slice_elements = (width * height) / (8 * 8);
slice_bytes = slice_elements * 4;
pipe_interleave_bytes = sscreen->info.pipe_interleave_bytes;
base_align = num_pipes * pipe_interleave_bytes;
- rtex->surface.htile_alignment = base_align;
- rtex->surface.htile_size =
- util_num_layers(&rtex->buffer.b.b, 0) *
+ tex->surface.htile_alignment = base_align;
+ tex->surface.htile_size =
+ util_num_layers(&tex->buffer.b.b, 0) *
align(slice_bytes, base_align);
}
static void si_texture_allocate_htile(struct si_screen *sscreen,
- struct r600_texture *rtex)
+ struct si_texture *tex)
{
- if (sscreen->info.chip_class <= VI && !rtex->tc_compatible_htile)
- si_texture_get_htile_size(sscreen, rtex);
+ if (sscreen->info.chip_class <= VI && !tex->tc_compatible_htile)
+ si_texture_get_htile_size(sscreen, tex);
- if (!rtex->surface.htile_size)
+ if (!tex->surface.htile_size)
return;
- rtex->htile_offset = align(rtex->size, rtex->surface.htile_alignment);
- rtex->size = rtex->htile_offset + rtex->surface.htile_size;
+ tex->htile_offset = align(tex->size, tex->surface.htile_alignment);
+ tex->size = tex->htile_offset + tex->surface.htile_size;
}
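CMASK, HTILE (and DCC) are all appended to the texture allocation with the same idiom: align the running size up to the sub-buffer's alignment, record that as the offset, then grow the size. As a generic sketch (assuming a power-of-two alignment; helper name invented):

    #include <stdint.h>

    /* Append a sub-allocation of sub_size at *size, aligned up. */
    static uint64_t append_aligned(uint64_t *size, uint64_t sub_size,
                                   uint64_t alignment /* power of two */)
    {
            uint64_t offset = (*size + alignment - 1) & ~(alignment - 1);

            *size = offset + sub_size;
            return offset;
    }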
void si_print_texture_info(struct si_screen *sscreen,
- struct r600_texture *rtex, struct u_log_context *log)
+ struct si_texture *tex, struct u_log_context *log)
{
int i;
u_log_printf(log, " Info: npix_x=%u, npix_y=%u, npix_z=%u, blk_w=%u, "
"blk_h=%u, array_size=%u, last_level=%u, "
"bpe=%u, nsamples=%u, flags=0x%x, %s\n",
- rtex->buffer.b.b.width0, rtex->buffer.b.b.height0,
- rtex->buffer.b.b.depth0, rtex->surface.blk_w,
- rtex->surface.blk_h,
- rtex->buffer.b.b.array_size, rtex->buffer.b.b.last_level,
- rtex->surface.bpe, rtex->buffer.b.b.nr_samples,
- rtex->surface.flags, util_format_short_name(rtex->buffer.b.b.format));
+ tex->buffer.b.b.width0, tex->buffer.b.b.height0,
+ tex->buffer.b.b.depth0, tex->surface.blk_w,
+ tex->surface.blk_h,
+ tex->buffer.b.b.array_size, tex->buffer.b.b.last_level,
+ tex->surface.bpe, tex->buffer.b.b.nr_samples,
+ tex->surface.flags, util_format_short_name(tex->buffer.b.b.format));
if (sscreen->info.chip_class >= GFX9) {
u_log_printf(log, " Surf: size=%"PRIu64", slice_size=%"PRIu64", "
"alignment=%u, swmode=%u, epitch=%u, pitch=%u\n",
- rtex->surface.surf_size,
- rtex->surface.u.gfx9.surf_slice_size,
- rtex->surface.surf_alignment,
- rtex->surface.u.gfx9.surf.swizzle_mode,
- rtex->surface.u.gfx9.surf.epitch,
- rtex->surface.u.gfx9.surf_pitch);
-
- if (rtex->surface.fmask_size) {
+ tex->surface.surf_size,
+ tex->surface.u.gfx9.surf_slice_size,
+ tex->surface.surf_alignment,
+ tex->surface.u.gfx9.surf.swizzle_mode,
+ tex->surface.u.gfx9.surf.epitch,
+ tex->surface.u.gfx9.surf_pitch);
+
+ if (tex->surface.fmask_size) {
u_log_printf(log, " FMASK: offset=%"PRIu64", size=%"PRIu64", "
"alignment=%u, swmode=%u, epitch=%u\n",
- rtex->fmask_offset,
- rtex->surface.fmask_size,
- rtex->surface.fmask_alignment,
- rtex->surface.u.gfx9.fmask.swizzle_mode,
- rtex->surface.u.gfx9.fmask.epitch);
+ tex->fmask_offset,
+ tex->surface.fmask_size,
+ tex->surface.fmask_alignment,
+ tex->surface.u.gfx9.fmask.swizzle_mode,
+ tex->surface.u.gfx9.fmask.epitch);
}
- if (rtex->cmask.size) {
+ if (tex->cmask.size) {
u_log_printf(log, " CMask: offset=%"PRIu64", size=%"PRIu64", "
"alignment=%u, rb_aligned=%u, pipe_aligned=%u\n",
- rtex->cmask.offset,
- rtex->surface.u.gfx9.cmask_size,
- rtex->surface.u.gfx9.cmask_alignment,
- rtex->surface.u.gfx9.cmask.rb_aligned,
- rtex->surface.u.gfx9.cmask.pipe_aligned);
+ tex->cmask.offset,
+ tex->surface.u.gfx9.cmask_size,
+ tex->surface.u.gfx9.cmask_alignment,
+ tex->surface.u.gfx9.cmask.rb_aligned,
+ tex->surface.u.gfx9.cmask.pipe_aligned);
}
- if (rtex->htile_offset) {
+ if (tex->htile_offset) {
u_log_printf(log, " HTile: offset=%"PRIu64", size=%u, alignment=%u, "
"rb_aligned=%u, pipe_aligned=%u\n",
- rtex->htile_offset,
- rtex->surface.htile_size,
- rtex->surface.htile_alignment,
- rtex->surface.u.gfx9.htile.rb_aligned,
- rtex->surface.u.gfx9.htile.pipe_aligned);
+ tex->htile_offset,
+ tex->surface.htile_size,
+ tex->surface.htile_alignment,
+ tex->surface.u.gfx9.htile.rb_aligned,
+ tex->surface.u.gfx9.htile.pipe_aligned);
}
- if (rtex->dcc_offset) {
+ if (tex->dcc_offset) {
u_log_printf(log, " DCC: offset=%"PRIu64", size=%u, "
"alignment=%u, pitch_max=%u, num_dcc_levels=%u\n",
- rtex->dcc_offset, rtex->surface.dcc_size,
- rtex->surface.dcc_alignment,
- rtex->surface.u.gfx9.dcc_pitch_max,
- rtex->surface.num_dcc_levels);
+ tex->dcc_offset, tex->surface.dcc_size,
+ tex->surface.dcc_alignment,
+ tex->surface.u.gfx9.dcc_pitch_max,
+ tex->surface.num_dcc_levels);
}
- if (rtex->surface.u.gfx9.stencil_offset) {
+ if (tex->surface.u.gfx9.stencil_offset) {
u_log_printf(log, " Stencil: offset=%"PRIu64", swmode=%u, epitch=%u\n",
- rtex->surface.u.gfx9.stencil_offset,
- rtex->surface.u.gfx9.stencil.swizzle_mode,
- rtex->surface.u.gfx9.stencil.epitch);
+ tex->surface.u.gfx9.stencil_offset,
+ tex->surface.u.gfx9.stencil.swizzle_mode,
+ tex->surface.u.gfx9.stencil.epitch);
}
return;
}
u_log_printf(log, " Layout: size=%"PRIu64", alignment=%u, bankw=%u, "
"bankh=%u, nbanks=%u, mtilea=%u, tilesplit=%u, pipeconfig=%u, scanout=%u\n",
- rtex->surface.surf_size, rtex->surface.surf_alignment, rtex->surface.u.legacy.bankw,
- rtex->surface.u.legacy.bankh, rtex->surface.u.legacy.num_banks, rtex->surface.u.legacy.mtilea,
- rtex->surface.u.legacy.tile_split, rtex->surface.u.legacy.pipe_config,
- (rtex->surface.flags & RADEON_SURF_SCANOUT) != 0);
+ tex->surface.surf_size, tex->surface.surf_alignment, tex->surface.u.legacy.bankw,
+ tex->surface.u.legacy.bankh, tex->surface.u.legacy.num_banks, tex->surface.u.legacy.mtilea,
+ tex->surface.u.legacy.tile_split, tex->surface.u.legacy.pipe_config,
+ (tex->surface.flags & RADEON_SURF_SCANOUT) != 0);
- if (rtex->surface.fmask_size)
+ if (tex->surface.fmask_size)
u_log_printf(log, " FMask: offset=%"PRIu64", size=%"PRIu64", alignment=%u, pitch_in_pixels=%u, "
"bankh=%u, slice_tile_max=%u, tile_mode_index=%u\n",
- rtex->fmask_offset, rtex->surface.fmask_size, rtex->surface.fmask_alignment,
- rtex->surface.u.legacy.fmask.pitch_in_pixels,
- rtex->surface.u.legacy.fmask.bankh,
- rtex->surface.u.legacy.fmask.slice_tile_max,
- rtex->surface.u.legacy.fmask.tiling_index);
+ tex->fmask_offset, tex->surface.fmask_size, tex->surface.fmask_alignment,
+ tex->surface.u.legacy.fmask.pitch_in_pixels,
+ tex->surface.u.legacy.fmask.bankh,
+ tex->surface.u.legacy.fmask.slice_tile_max,
+ tex->surface.u.legacy.fmask.tiling_index);
- if (rtex->cmask.size)
+ if (tex->cmask.size)
u_log_printf(log, " CMask: offset=%"PRIu64", size=%"PRIu64", alignment=%u, "
"slice_tile_max=%u\n",
- rtex->cmask.offset, rtex->cmask.size, rtex->cmask.alignment,
- rtex->cmask.slice_tile_max);
+ tex->cmask.offset, tex->cmask.size, tex->cmask.alignment,
+ tex->cmask.slice_tile_max);
- if (rtex->htile_offset)
+ if (tex->htile_offset)
u_log_printf(log, " HTile: offset=%"PRIu64", size=%u, "
"alignment=%u, TC_compatible = %u\n",
- rtex->htile_offset, rtex->surface.htile_size,
- rtex->surface.htile_alignment,
- rtex->tc_compatible_htile);
+ tex->htile_offset, tex->surface.htile_size,
+ tex->surface.htile_alignment,
+ tex->tc_compatible_htile);
- if (rtex->dcc_offset) {
+ if (tex->dcc_offset) {
u_log_printf(log, " DCC: offset=%"PRIu64", size=%u, alignment=%u\n",
- rtex->dcc_offset, rtex->surface.dcc_size,
- rtex->surface.dcc_alignment);
- for (i = 0; i <= rtex->buffer.b.b.last_level; i++)
+ tex->dcc_offset, tex->surface.dcc_size,
+ tex->surface.dcc_alignment);
+ for (i = 0; i <= tex->buffer.b.b.last_level; i++)
u_log_printf(log, " DCCLevel[%i]: enabled=%u, offset=%u, "
"fast_clear_size=%u\n",
- i, i < rtex->surface.num_dcc_levels,
- rtex->surface.u.legacy.level[i].dcc_offset,
- rtex->surface.u.legacy.level[i].dcc_fast_clear_size);
+ i, i < tex->surface.num_dcc_levels,
+ tex->surface.u.legacy.level[i].dcc_offset,
+ tex->surface.u.legacy.level[i].dcc_fast_clear_size);
}
- for (i = 0; i <= rtex->buffer.b.b.last_level; i++)
+ for (i = 0; i <= tex->buffer.b.b.last_level; i++)
u_log_printf(log, " Level[%i]: offset=%"PRIu64", slice_size=%"PRIu64", "
"npix_x=%u, npix_y=%u, npix_z=%u, nblk_x=%u, nblk_y=%u, "
"mode=%u, tiling_index = %u\n",
- i, rtex->surface.u.legacy.level[i].offset,
- (uint64_t)rtex->surface.u.legacy.level[i].slice_size_dw * 4,
- u_minify(rtex->buffer.b.b.width0, i),
- u_minify(rtex->buffer.b.b.height0, i),
- u_minify(rtex->buffer.b.b.depth0, i),
- rtex->surface.u.legacy.level[i].nblk_x,
- rtex->surface.u.legacy.level[i].nblk_y,
- rtex->surface.u.legacy.level[i].mode,
- rtex->surface.u.legacy.tiling_index[i]);
-
- if (rtex->surface.has_stencil) {
+ i, tex->surface.u.legacy.level[i].offset,
+ (uint64_t)tex->surface.u.legacy.level[i].slice_size_dw * 4,
+ u_minify(tex->buffer.b.b.width0, i),
+ u_minify(tex->buffer.b.b.height0, i),
+ u_minify(tex->buffer.b.b.depth0, i),
+ tex->surface.u.legacy.level[i].nblk_x,
+ tex->surface.u.legacy.level[i].nblk_y,
+ tex->surface.u.legacy.level[i].mode,
+ tex->surface.u.legacy.tiling_index[i]);
+
+ if (tex->surface.has_stencil) {
u_log_printf(log, " StencilLayout: tilesplit=%u\n",
- rtex->surface.u.legacy.stencil_tile_split);
- for (i = 0; i <= rtex->buffer.b.b.last_level; i++) {
+ tex->surface.u.legacy.stencil_tile_split);
+ for (i = 0; i <= tex->buffer.b.b.last_level; i++) {
u_log_printf(log, " StencilLevel[%i]: offset=%"PRIu64", "
"slice_size=%"PRIu64", npix_x=%u, "
"npix_y=%u, npix_z=%u, nblk_x=%u, nblk_y=%u, "
"mode=%u, tiling_index = %u\n",
- i, rtex->surface.u.legacy.stencil_level[i].offset,
- (uint64_t)rtex->surface.u.legacy.stencil_level[i].slice_size_dw * 4,
- u_minify(rtex->buffer.b.b.width0, i),
- u_minify(rtex->buffer.b.b.height0, i),
- u_minify(rtex->buffer.b.b.depth0, i),
- rtex->surface.u.legacy.stencil_level[i].nblk_x,
- rtex->surface.u.legacy.stencil_level[i].nblk_y,
- rtex->surface.u.legacy.stencil_level[i].mode,
- rtex->surface.u.legacy.stencil_tiling_index[i]);
+ i, tex->surface.u.legacy.stencil_level[i].offset,
+ (uint64_t)tex->surface.u.legacy.stencil_level[i].slice_size_dw * 4,
+ u_minify(tex->buffer.b.b.width0, i),
+ u_minify(tex->buffer.b.b.height0, i),
+ u_minify(tex->buffer.b.b.depth0, i),
+ tex->surface.u.legacy.stencil_level[i].nblk_x,
+ tex->surface.u.legacy.stencil_level[i].nblk_y,
+ tex->surface.u.legacy.stencil_level[i].mode,
+ tex->surface.u.legacy.stencil_tiling_index[i]);
}
}
}
-/* Common processing for r600_texture_create and r600_texture_from_handle */
-static struct r600_texture *
+/* Common processing for si_texture_create and si_texture_from_handle */
+static struct si_texture *
si_texture_create_object(struct pipe_screen *screen,
const struct pipe_resource *base,
unsigned num_color_samples,
struct pb_buffer *buf,
struct radeon_surf *surface)
{
- struct r600_texture *rtex;
+ struct si_texture *tex;
struct r600_resource *resource;
struct si_screen *sscreen = (struct si_screen*)screen;
- rtex = CALLOC_STRUCT(r600_texture);
- if (!rtex)
+ tex = CALLOC_STRUCT(si_texture);
+ if (!tex)
return NULL;
- resource = &rtex->buffer;
+ resource = &tex->buffer;
resource->b.b = *base;
resource->b.b.next = NULL;
resource->b.vtbl = &si_texture_vtbl;
resource->b.b.screen = screen;
	/* Don't include stencil-only formats, which we don't support for rendering. */
- rtex->is_depth = util_format_has_depth(util_format_description(rtex->buffer.b.b.format));
+ tex->is_depth = util_format_has_depth(util_format_description(tex->buffer.b.b.format));
- rtex->surface = *surface;
- rtex->size = rtex->surface.surf_size;
- rtex->num_color_samples = num_color_samples;
+ tex->surface = *surface;
+ tex->size = tex->surface.surf_size;
+ tex->num_color_samples = num_color_samples;
- rtex->tc_compatible_htile = rtex->surface.htile_size != 0 &&
- (rtex->surface.flags &
- RADEON_SURF_TC_COMPATIBLE_HTILE);
+ tex->tc_compatible_htile = tex->surface.htile_size != 0 &&
+ (tex->surface.flags &
+ RADEON_SURF_TC_COMPATIBLE_HTILE);
/* TC-compatible HTILE:
* - VI only supports Z32_FLOAT.
* - GFX9 only supports Z32_FLOAT and Z16_UNORM. */
- if (rtex->tc_compatible_htile) {
+ if (tex->tc_compatible_htile) {
if (sscreen->info.chip_class >= GFX9 &&
base->format == PIPE_FORMAT_Z16_UNORM)
- rtex->db_render_format = base->format;
+ tex->db_render_format = base->format;
else {
- rtex->db_render_format = PIPE_FORMAT_Z32_FLOAT;
- rtex->upgraded_depth = base->format != PIPE_FORMAT_Z32_FLOAT &&
+ tex->db_render_format = PIPE_FORMAT_Z32_FLOAT;
+ tex->upgraded_depth = base->format != PIPE_FORMAT_Z32_FLOAT &&
base->format != PIPE_FORMAT_Z32_FLOAT_S8X24_UINT;
}
} else {
- rtex->db_render_format = base->format;
+ tex->db_render_format = base->format;
}
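	/* Illustration (not part of the patch): the selection above reduces to
	 * the mapping below. The enum and helper names are invented for this
	 * sketch; only the behavior is taken from the code. */
	#include <stdbool.h>
	enum zfmt { ZFMT_Z16_UNORM, ZFMT_Z32_FLOAT, ZFMT_Z32_FLOAT_S8X24, ZFMT_OTHER };
	static enum zfmt db_render_format_for(bool tc_compatible_htile,
					      bool gfx9_plus, enum zfmt base)
	{
		if (!tc_compatible_htile)
			return base;            /* no constraint: keep the base format */
		if (gfx9_plus && base == ZFMT_Z16_UNORM)
			return ZFMT_Z16_UNORM;  /* GFX9 additionally supports Z16 */
		return ZFMT_Z32_FLOAT;          /* everything else is upgraded to Z32;
						 * upgraded_depth records the change */
	}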
/* Applies to GCN. */
- rtex->last_msaa_resolve_target_micro_mode = rtex->surface.micro_tile_mode;
+ tex->last_msaa_resolve_target_micro_mode = tex->surface.micro_tile_mode;
/* Disable separate DCC at the beginning. DRI2 doesn't reuse buffers
* between frames, so the only thing that can enable separate DCC
* with DRI2 is multiple slow clears within a frame.
*/
- rtex->ps_draw_ratio = 0;
+ tex->ps_draw_ratio = 0;
- if (rtex->is_depth) {
+ if (tex->is_depth) {
if (sscreen->info.chip_class >= GFX9) {
- rtex->can_sample_z = true;
- rtex->can_sample_s = true;
+ tex->can_sample_z = true;
+ tex->can_sample_s = true;
} else {
- rtex->can_sample_z = !rtex->surface.u.legacy.depth_adjusted;
- rtex->can_sample_s = !rtex->surface.u.legacy.stencil_adjusted;
+ tex->can_sample_z = !tex->surface.u.legacy.depth_adjusted;
+ tex->can_sample_s = !tex->surface.u.legacy.stencil_adjusted;
}
if (!(base->flags & (SI_RESOURCE_FLAG_TRANSFER |
SI_RESOURCE_FLAG_FLUSHED_DEPTH))) {
- rtex->db_compatible = true;
+ tex->db_compatible = true;
if (!(sscreen->debug_flags & DBG(NO_HYPERZ)))
- si_texture_allocate_htile(sscreen, rtex);
+ si_texture_allocate_htile(sscreen, tex);
}
} else {
if (base->nr_samples > 1 &&
!buf &&
!(sscreen->debug_flags & DBG(NO_FMASK))) {
/* Allocate FMASK. */
- rtex->fmask_offset = align64(rtex->size,
- rtex->surface.fmask_alignment);
- rtex->size = rtex->fmask_offset + rtex->surface.fmask_size;
+ tex->fmask_offset = align64(tex->size,
+ tex->surface.fmask_alignment);
+ tex->size = tex->fmask_offset + tex->surface.fmask_size;
- si_texture_allocate_cmask(sscreen, rtex);
- rtex->cmask_buffer = &rtex->buffer;
+ si_texture_allocate_cmask(sscreen, tex);
+ tex->cmask_buffer = &tex->buffer;
- if (!rtex->surface.fmask_size || !rtex->cmask.size) {
- FREE(rtex);
+ if (!tex->surface.fmask_size || !tex->cmask.size) {
+ FREE(tex);
return NULL;
}
}
* If it's not present, it will be disabled by
* apply_opaque_metadata later.
*/
- if (rtex->surface.dcc_size &&
+ if (tex->surface.dcc_size &&
(buf || !(sscreen->debug_flags & DBG(NO_DCC))) &&
- !(rtex->surface.flags & RADEON_SURF_SCANOUT)) {
+ !(tex->surface.flags & RADEON_SURF_SCANOUT)) {
/* Reserve space for the DCC buffer. */
- rtex->dcc_offset = align64(rtex->size, rtex->surface.dcc_alignment);
- rtex->size = rtex->dcc_offset + rtex->surface.dcc_size;
+ tex->dcc_offset = align64(tex->size, tex->surface.dcc_alignment);
+ tex->size = tex->dcc_offset + tex->surface.dcc_size;
}
}
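	/* Illustration (not part of the patch): FMASK, CMASK and DCC are packed
	 * into the texture's own buffer by aligning and bumping a running size,
	 * exactly as above. The helper name is invented; align64() behaves like
	 * this for power-of-two alignments. */
	#include <stdint.h>
	static uint64_t suballoc_metadata(uint64_t *tex_size, uint64_t meta_size,
					  uint64_t alignment)
	{
		uint64_t offset = (*tex_size + alignment - 1) & ~(alignment - 1);
		*tex_size = offset + meta_size; /* the allocation grows to cover it */
		return offset;
	}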
/* Now create the backing buffer. */
if (!buf) {
- si_init_resource_fields(sscreen, resource, rtex->size,
- rtex->surface.surf_alignment);
+ si_init_resource_fields(sscreen, resource, tex->size,
+ tex->surface.surf_alignment);
if (!si_alloc_resource(sscreen, resource)) {
- FREE(rtex);
+ FREE(tex);
return NULL;
}
} else {
resource->gart_usage = buf->size;
}
- if (rtex->cmask.size) {
+ if (tex->cmask.size) {
		/* Initialize the CMASK to 0xCC (= compressed state). */
- si_screen_clear_buffer(sscreen, &rtex->cmask_buffer->b.b,
- rtex->cmask.offset, rtex->cmask.size,
+ si_screen_clear_buffer(sscreen, &tex->cmask_buffer->b.b,
+ tex->cmask.offset, tex->cmask.size,
0xCCCCCCCC);
}
- if (rtex->htile_offset) {
+ if (tex->htile_offset) {
uint32_t clear_value = 0;
- if (sscreen->info.chip_class >= GFX9 || rtex->tc_compatible_htile)
+ if (sscreen->info.chip_class >= GFX9 || tex->tc_compatible_htile)
clear_value = 0x0000030F;
- si_screen_clear_buffer(sscreen, &rtex->buffer.b.b,
- rtex->htile_offset,
- rtex->surface.htile_size,
+ si_screen_clear_buffer(sscreen, &tex->buffer.b.b,
+ tex->htile_offset,
+ tex->surface.htile_size,
clear_value);
}
/* Initialize DCC only if the texture is not being imported. */
- if (!buf && rtex->dcc_offset) {
- si_screen_clear_buffer(sscreen, &rtex->buffer.b.b,
- rtex->dcc_offset,
- rtex->surface.dcc_size,
+ if (!buf && tex->dcc_offset) {
+ si_screen_clear_buffer(sscreen, &tex->buffer.b.b,
+ tex->dcc_offset,
+ tex->surface.dcc_size,
0xFFFFFFFF);
}
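	/* Note: each piece of metadata is initialized to its "nothing compressed
	 * yet" encoding before first use, so the GPU never reads undefined state:
	 * CMASK = 0xCCCCCCCC (= compressed state, per the comment above),
	 * HTILE = 0x0000030F (the fully expanded value) on GFX9 or with
	 * TC-compatible HTILE, and DCC = 0xFFFFFFFF (uncompressed). */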
/* Initialize the CMASK base register value. */
- rtex->cmask.base_address_reg =
- (rtex->buffer.gpu_address + rtex->cmask.offset) >> 8;
+ tex->cmask.base_address_reg =
+ (tex->buffer.gpu_address + tex->cmask.offset) >> 8;
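	/* Illustration (not part of the patch): the register stores the CMASK
	 * virtual address in 256-byte units, hence the >> 8. This only
	 * round-trips if the address is 256-byte aligned, which the alignments
	 * used above should guarantee. Hypothetical numbers for the example: */
	#include <stdint.h>
	#include <assert.h>
	static uint64_t cmask_base_reg(uint64_t gpu_address, uint64_t cmask_offset)
	{
		uint64_t va = gpu_address + cmask_offset;
		assert((va & 0xff) == 0);  /* must be 256-byte aligned */
		return va >> 8;            /* e.g. 0x100000200 becomes 0x1000002 */
	}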
if (sscreen->debug_flags & DBG(VM)) {
fprintf(stderr, "VM start=0x%"PRIX64" end=0x%"PRIX64" | Texture %ix%ix%i, %i levels, %i samples, %s\n",
- rtex->buffer.gpu_address,
- rtex->buffer.gpu_address + rtex->buffer.buf->size,
+ tex->buffer.gpu_address,
+ tex->buffer.gpu_address + tex->buffer.buf->size,
base->width0, base->height0, util_num_layers(base, 0), base->last_level+1,
base->nr_samples ? base->nr_samples : 1, util_format_short_name(base->format));
}
puts("Texture:");
struct u_log_context log;
u_log_context_init(&log);
- si_print_texture_info(sscreen, rtex, &log);
+ si_print_texture_info(sscreen, tex, &log);
u_log_new_page_print(&log, stdout);
fflush(stdout);
u_log_context_destroy(&log);
}
- return rtex;
+ return tex;
}
static enum radeon_surf_mode
struct radeon_surf surface = {};
int r;
struct radeon_bo_metadata metadata = {};
- struct r600_texture *rtex;
+ struct si_texture *tex;
bool is_scanout;
/* Support only 2D textures without mipmaps */
return NULL;
}
- rtex = si_texture_create_object(screen, templ, num_color_samples,
+ tex = si_texture_create_object(screen, templ, num_color_samples,
buf, &surface);
- if (!rtex)
+ if (!tex)
return NULL;
- rtex->buffer.b.is_shared = true;
- rtex->buffer.external_usage = usage;
+ tex->buffer.b.is_shared = true;
+ tex->buffer.external_usage = usage;
- si_apply_opaque_metadata(sscreen, rtex, &metadata);
+ si_apply_opaque_metadata(sscreen, tex, &metadata);
- assert(rtex->surface.tile_swizzle == 0);
- return &rtex->buffer.b.b;
+ assert(tex->surface.tile_swizzle == 0);
+ return &tex->buffer.b.b;
}
bool si_init_flushed_depth_texture(struct pipe_context *ctx,
struct pipe_resource *texture,
- struct r600_texture **staging)
+ struct si_texture **staging)
{
- struct r600_texture *rtex = (struct r600_texture*)texture;
+ struct si_texture *tex = (struct si_texture*)texture;
struct pipe_resource resource;
- struct r600_texture **flushed_depth_texture = staging ?
- staging : &rtex->flushed_depth_texture;
+ struct si_texture **flushed_depth_texture = staging ?
+ staging : &tex->flushed_depth_texture;
enum pipe_format pipe_format = texture->format;
if (!staging) {
- if (rtex->flushed_depth_texture)
+ if (tex->flushed_depth_texture)
return true; /* it's ready */
- if (!rtex->can_sample_z && rtex->can_sample_s) {
+ if (!tex->can_sample_z && tex->can_sample_s) {
switch (pipe_format) {
case PIPE_FORMAT_Z32_FLOAT_S8X24_UINT:
/* Save memory by not allocating the S plane. */
break;
default:;
}
- } else if (!rtex->can_sample_s && rtex->can_sample_z) {
+ } else if (!tex->can_sample_s && tex->can_sample_z) {
assert(util_format_has_stencil(util_format_description(pipe_format)));
/* DB->CB copies to an 8bpp surface don't work. */
if (staging)
resource.flags |= SI_RESOURCE_FLAG_TRANSFER;
- *flushed_depth_texture = (struct r600_texture *)ctx->screen->resource_create(ctx->screen, &resource);
+ *flushed_depth_texture = (struct si_texture *)ctx->screen->resource_create(ctx->screen, &resource);
if (*flushed_depth_texture == NULL) {
PRINT_ERR("failed to create temporary texture to hold flushed depth\n");
return false;
}
static bool si_can_invalidate_texture(struct si_screen *sscreen,
- struct r600_texture *rtex,
+ struct si_texture *tex,
unsigned transfer_usage,
const struct pipe_box *box)
{
- return !rtex->buffer.b.is_shared &&
+ return !tex->buffer.b.is_shared &&
!(transfer_usage & PIPE_TRANSFER_READ) &&
- rtex->buffer.b.b.last_level == 0 &&
- util_texrange_covers_whole_level(&rtex->buffer.b.b, 0,
+ tex->buffer.b.b.last_level == 0 &&
+ util_texrange_covers_whole_level(&tex->buffer.b.b, 0,
box->x, box->y, box->z,
box->width, box->height,
box->depth);
}
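/* Note: invalidation is legal only when no one can observe the old contents:
 * the buffer isn't shared, the map is write-only, there is a single mip
 * level, and the box covers all of it. For a 2D texture, a qualifying call
 * could look like this (u_box_2d sets depth = 1):
 *
 *	struct pipe_box box;
 *	u_box_2d(0, 0, tex->buffer.b.b.width0, tex->buffer.b.b.height0, &box);
 *	si_can_invalidate_texture(sctx->screen, tex, PIPE_TRANSFER_WRITE, &box);
 */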
static void si_texture_invalidate_storage(struct si_context *sctx,
- struct r600_texture *rtex)
+ struct si_texture *tex)
{
struct si_screen *sscreen = sctx->screen;
/* There is no point in discarding depth and tiled buffers. */
- assert(!rtex->is_depth);
- assert(rtex->surface.is_linear);
+ assert(!tex->is_depth);
+ assert(tex->surface.is_linear);
/* Reallocate the buffer in the same pipe_resource. */
- si_alloc_resource(sscreen, &rtex->buffer);
+ si_alloc_resource(sscreen, &tex->buffer);
/* Initialize the CMASK base address (needed even without CMASK). */
- rtex->cmask.base_address_reg =
- (rtex->buffer.gpu_address + rtex->cmask.offset) >> 8;
+ tex->cmask.base_address_reg =
+ (tex->buffer.gpu_address + tex->cmask.offset) >> 8;
p_atomic_inc(&sscreen->dirty_tex_counter);
- sctx->num_alloc_tex_transfer_bytes += rtex->size;
+ sctx->num_alloc_tex_transfer_bytes += tex->size;
}
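/* Note: si_alloc_resource() reallocates inside the existing pipe_resource, so
 * callers keep their pointers and only the GPU address changes. That is why
 * the CMASK base register is recomputed here and dirty_tex_counter is bumped,
 * presumably so that cached descriptors pick up the new address. */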
static void *si_texture_transfer_map(struct pipe_context *ctx,
struct pipe_transfer **ptransfer)
{
struct si_context *sctx = (struct si_context*)ctx;
- struct r600_texture *rtex = (struct r600_texture*)texture;
+ struct si_texture *tex = (struct si_texture*)texture;
struct r600_transfer *trans;
struct r600_resource *buf;
unsigned offset = 0;
assert(box->width && box->height && box->depth);
/* Depth textures use staging unconditionally. */
- if (!rtex->is_depth) {
+ if (!tex->is_depth) {
/* Degrade the tile mode if we get too many transfers on APUs.
* On dGPUs, the staging texture is always faster.
* Only count uploads that are at least 4x4 pixels large.
if (!sctx->screen->info.has_dedicated_vram &&
level == 0 &&
box->width >= 4 && box->height >= 4 &&
- p_atomic_inc_return(&rtex->num_level0_transfers) == 10) {
+ p_atomic_inc_return(&tex->num_level0_transfers) == 10) {
bool can_invalidate =
- si_can_invalidate_texture(sctx->screen, rtex,
+ si_can_invalidate_texture(sctx->screen, tex,
usage, box);
- si_reallocate_texture_inplace(sctx, rtex,
+ si_reallocate_texture_inplace(sctx, tex,
PIPE_BIND_LINEAR,
can_invalidate);
}
* Use the staging texture for uploads if the underlying BO
* is busy.
*/
- if (!rtex->surface.is_linear)
+ if (!tex->surface.is_linear)
use_staging_texture = true;
else if (usage & PIPE_TRANSFER_READ)
use_staging_texture =
- rtex->buffer.domains & RADEON_DOMAIN_VRAM ||
- rtex->buffer.flags & RADEON_FLAG_GTT_WC;
+ tex->buffer.domains & RADEON_DOMAIN_VRAM ||
+ tex->buffer.flags & RADEON_FLAG_GTT_WC;
/* Write & linear only: */
- else if (si_rings_is_buffer_referenced(sctx, rtex->buffer.buf,
+ else if (si_rings_is_buffer_referenced(sctx, tex->buffer.buf,
RADEON_USAGE_READWRITE) ||
- !sctx->ws->buffer_wait(rtex->buffer.buf, 0,
+ !sctx->ws->buffer_wait(tex->buffer.buf, 0,
RADEON_USAGE_READWRITE)) {
/* It's busy. */
- if (si_can_invalidate_texture(sctx->screen, rtex,
+ if (si_can_invalidate_texture(sctx->screen, tex,
usage, box))
- si_texture_invalidate_storage(sctx, rtex);
+ si_texture_invalidate_storage(sctx, tex);
else
use_staging_texture = true;
}
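		/* Illustration (not part of the patch): the staging decision
		 * above restated as a self-contained predicate; the parameter
		 * names are invented. */
		#include <stdbool.h>
		static bool needs_staging(bool is_linear, bool reading,
					  bool vram_or_gtt_wc, bool bo_busy,
					  bool can_invalidate)
		{
			if (!is_linear)
				return true;            /* tiled: always stage */
			if (reading)
				return vram_or_gtt_wc;  /* CPU reads of VRAM/WC memory are slow */
			if (bo_busy)
				return !can_invalidate; /* reallocate rather than stall when legal */
			return false;                   /* idle + linear + write-only: map in place */
		}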
trans->b.b.usage = usage;
trans->b.b.box = *box;
- if (rtex->is_depth) {
- struct r600_texture *staging_depth;
+ if (tex->is_depth) {
+ struct si_texture *staging_depth;
- if (rtex->buffer.b.b.nr_samples > 1) {
+ if (tex->buffer.b.b.nr_samples > 1) {
/* MSAA depth buffers need to be converted to single sample buffers.
*
* Mapping MSAA depth buffers can occur if ReadPixels is called
}
si_copy_region_with_blit(ctx, temp, 0, 0, 0, 0, texture, level, box);
- si_blit_decompress_depth(ctx, (struct r600_texture*)temp, staging_depth,
+ si_blit_decompress_depth(ctx, (struct si_texture*)temp, staging_depth,
0, 0, 0, box->depth, 0, 0);
pipe_resource_reference(&temp, NULL);
}
goto fail_trans;
}
- si_blit_decompress_depth(ctx, rtex, staging_depth,
+ si_blit_decompress_depth(ctx, tex, staging_depth,
level, level,
box->z, box->z + box->depth - 1,
0, 0);
buf = trans->staging;
} else if (use_staging_texture) {
struct pipe_resource resource;
- struct r600_texture *staging;
+ struct si_texture *staging;
si_init_temp_resource_from_box(&resource, texture, box, level,
SI_RESOURCE_FLAG_TRANSFER);
PIPE_USAGE_STAGING : PIPE_USAGE_STREAM;
/* Create the temporary texture. */
- staging = (struct r600_texture*)ctx->screen->resource_create(ctx->screen, &resource);
+ staging = (struct si_texture*)ctx->screen->resource_create(ctx->screen, &resource);
if (!staging) {
PRINT_ERR("failed to create temporary texture to hold untiled copy\n");
goto fail_trans;
buf = trans->staging;
} else {
/* the resource is mapped directly */
- offset = si_texture_get_offset(sctx->screen, rtex, level, box,
+ offset = si_texture_get_offset(sctx->screen, tex, level, box,
&trans->b.b.stride,
&trans->b.b.layer_stride);
- buf = &rtex->buffer;
+ buf = &tex->buffer;
}
if (!(map = si_buffer_map_sync_with_rings(sctx, buf, usage)))
struct si_context *sctx = (struct si_context*)ctx;
struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
struct pipe_resource *texture = transfer->resource;
- struct r600_texture *rtex = (struct r600_texture*)texture;
+ struct si_texture *tex = (struct si_texture*)texture;
if ((transfer->usage & PIPE_TRANSFER_WRITE) && rtransfer->staging) {
- if (rtex->is_depth && rtex->buffer.b.b.nr_samples <= 1) {
+ if (tex->is_depth && tex->buffer.b.b.nr_samples <= 1) {
ctx->resource_copy_region(ctx, texture, transfer->level,
transfer->box.x, transfer->box.y, transfer->box.z,
&rtransfer->staging->b.b, transfer->level,
unsigned level,
enum pipe_format view_format)
{
- struct r600_texture *rtex = (struct r600_texture *)tex;
+ struct si_texture *stex = (struct si_texture *)tex;
- return vi_dcc_enabled(rtex, level) &&
+ return vi_dcc_enabled(stex, level) &&
!vi_dcc_formats_compatible(tex->format, view_format);
}
unsigned level,
enum pipe_format view_format)
{
- struct r600_texture *rtex = (struct r600_texture *)tex;
+ struct si_texture *stex = (struct si_texture *)tex;
if (vi_dcc_formats_are_incompatible(tex, level, view_format))
- if (!si_texture_disable_dcc(sctx, (struct r600_texture*)tex))
- si_decompress_dcc(sctx, rtex);
+ if (!si_texture_disable_dcc(sctx, stex))
+ si_decompress_dcc(sctx, stex);
}
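/* Note: when a view uses a format DCC can't represent, the cheaper fix is
 * disabling DCC for the whole texture; only if that fails does the driver
 * decompress in place so that reads through the view stay correct. */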
struct pipe_surface *si_create_surface_custom(struct pipe_context *pipe,
sctx->dcc_stats[slot].ps_stats[i] = NULL;
}
- r600_texture_reference(&sctx->dcc_stats[slot].tex, NULL);
+ si_texture_reference(&sctx->dcc_stats[slot].tex, NULL);
}
/**
* Return the per-context slot where DCC statistics queries for the texture live.
*/
static unsigned vi_get_context_dcc_stats_index(struct si_context *sctx,
- struct r600_texture *tex)
+ struct si_texture *tex)
{
int i, empty_slot = -1;
}
/* Add the texture to the new slot. */
- r600_texture_reference(&sctx->dcc_stats[empty_slot].tex, tex);
+ si_texture_reference(&sctx->dcc_stats[empty_slot].tex, tex);
sctx->dcc_stats[empty_slot].last_use_timestamp = os_time_get();
return empty_slot;
}
* Called when binding a color buffer.
*/
void vi_separate_dcc_start_query(struct si_context *sctx,
- struct r600_texture *tex)
+ struct si_texture *tex)
{
unsigned i = vi_get_context_dcc_stats_index(sctx, tex);
* Called when unbinding a color buffer.
*/
void vi_separate_dcc_stop_query(struct si_context *sctx,
- struct r600_texture *tex)
+ struct si_texture *tex)
{
unsigned i = vi_get_context_dcc_stats_index(sctx, tex);
sctx->dcc_stats[i].query_active = false;
}
-static bool vi_should_enable_separate_dcc(struct r600_texture *tex)
+static bool vi_should_enable_separate_dcc(struct si_texture *tex)
{
/* The minimum number of fullscreen draws per frame that is required
* to enable DCC. */
/* Called by fast clear. */
void vi_separate_dcc_try_enable(struct si_context *sctx,
- struct r600_texture *tex)
+ struct si_texture *tex)
{
/* The intent is to use this with shared displayable back buffers,
	 * but it's not strictly limited to them.
* takes place.
*/
void vi_separate_dcc_process_and_reset_stats(struct pipe_context *ctx,
- struct r600_texture *tex)
+ struct si_texture *tex)
{
struct si_context *sctx = (struct si_context*)ctx;
struct pipe_query *tmp;
int r;
struct si_screen *sscreen = (struct si_screen*)screen;
struct r600_memory_object *memobj = (struct r600_memory_object *)_memobj;
- struct r600_texture *rtex;
+ struct si_texture *tex;
struct radeon_surf surface = {};
struct radeon_bo_metadata metadata = {};
enum radeon_surf_mode array_mode;
if (r)
return NULL;
- rtex = si_texture_create_object(screen, templ, num_color_samples,
+ tex = si_texture_create_object(screen, templ, num_color_samples,
memobj->buf, &surface);
- if (!rtex)
+ if (!tex)
return NULL;
- /* r600_texture_create_object doesn't increment refcount of
+ /* si_texture_create_object doesn't increment refcount of
* memobj->buf, so increment it here.
*/
pb_reference(&buf, memobj->buf);
- rtex->buffer.b.is_shared = true;
- rtex->buffer.external_usage = PIPE_HANDLE_USAGE_READ_WRITE;
+ tex->buffer.b.is_shared = true;
+ tex->buffer.external_usage = PIPE_HANDLE_USAGE_READ_WRITE;
- si_apply_opaque_metadata(sscreen, rtex, &metadata);
+ si_apply_opaque_metadata(sscreen, tex, &metadata);
- return &rtex->buffer.b.b;
+ return &tex->buffer.b.b;
}
static bool si_check_resource_capability(struct pipe_screen *screen,
struct pipe_resource *resource,
unsigned bind)
{
- struct r600_texture *tex = (struct r600_texture*)resource;
+ struct si_texture *tex = (struct si_texture*)resource;
/* Buffers only support the linear flag. */
if (resource->target == PIPE_BUFFER)
const struct pipe_video_buffer *tmpl)
{
struct si_context *ctx = (struct si_context *)pipe;
- struct r600_texture *resources[VL_NUM_COMPONENTS] = {};
+ struct si_texture *resources[VL_NUM_COMPONENTS] = {};
struct radeon_surf *surfaces[VL_NUM_COMPONENTS] = {};
struct pb_buffer **pbs[VL_NUM_COMPONENTS] = {};
const enum pipe_format *resource_formats;
vl_video_buffer_template(&templ, &vidtemplate,
resource_formats[i], 1,
array_size, PIPE_USAGE_DEFAULT, i);
- /* Set PIPE_BIND_SHARED to avoid reallocation in r600_texture_get_handle,
+ /* Set PIPE_BIND_SHARED to avoid reallocation in si_texture_get_handle,
* which can't handle joined surfaces. */
/* TODO: get tiling working */
templ.bind = PIPE_BIND_LINEAR | PIPE_BIND_SHARED;
- resources[i] = (struct r600_texture *)
+ resources[i] = (struct si_texture *)
pipe->screen->resource_create(pipe->screen, &templ);
if (!resources[i])
goto error;
error:
for (i = 0; i < VL_NUM_COMPONENTS; ++i)
- r600_texture_reference(&resources[i], NULL);
+ si_texture_reference(&resources[i], NULL);
return NULL;
}
static struct pb_buffer* si_uvd_set_dtb(struct ruvd_msg *msg, struct vl_video_buffer *buf)
{
struct si_screen *sscreen = (struct si_screen*)buf->base.context->screen;
- struct r600_texture *luma = (struct r600_texture *)buf->resources[0];
- struct r600_texture *chroma = (struct r600_texture *)buf->resources[1];
+ struct si_texture *luma = (struct si_texture *)buf->resources[0];
+ struct si_texture *chroma = (struct si_texture *)buf->resources[1];
enum ruvd_surface_type type = (sscreen->info.chip_class >= GFX9) ?
RUVD_SURFACE_TYPE_GFX9 :
RUVD_SURFACE_TYPE_LEGACY;
struct pb_buffer **handle,
struct radeon_surf **surface)
{
- struct r600_texture *res = (struct r600_texture *)resource;
+ struct si_texture *res = (struct si_texture *)resource;
if (handle)
*handle = res->buffer.buf;