   if (res->b.b.flags & SI_RESOURCE_FLAG_DRIVER_INTERNAL)
      res->flags |= RADEON_FLAG_DRIVER_INTERNAL;
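+   /* Allocate the buffer as a sparse resource when the gallium sparse flag is set. */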
+   if (res->b.b.flags & PIPE_RESOURCE_FLAG_SPARSE)
+      res->flags |= RADEON_FLAG_SPARSE;
+
   /* For higher throughput and lower latency over PCIe assuming sequential access.
    * Only CP DMA and optimized compute benefit from this.
    * GFX8 and older don't support RADEON_FLAG_UNCACHED.
    */
   si_init_resource_fields(sscreen, buf, templ->width0, alignment);
-   if (templ->flags & PIPE_RESOURCE_FLAG_SPARSE)
-      buf->flags |= RADEON_FLAG_SPARSE;
-
   buf->b.buffer_id_unique = util_idalloc_mt_alloc(&sscreen->buffer_ids);
   if (!si_alloc_resource(sscreen, buf)) {
      surface->u.gfx9.swizzle_mode = ADDR_SW_64KB_R_X;
   }
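+   /* Sparse textures are allocated as PRT surfaces without FMASK, HTILE, or DCC. */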
+   if (ptex->flags & PIPE_RESOURCE_FLAG_SPARSE) {
+      flags |=
+         RADEON_SURF_PRT |
+         RADEON_SURF_NO_FMASK |
+         RADEON_SURF_NO_HTILE |
+         RADEON_SURF_DISABLE_DCC;
+   }
+
   surface->modifier = modifier;
   r = sscreen->ws->surface_init(sscreen->ws, ptex, flags, bpe, array_mode, surface);
      radeon_bo_reference(sscreen->ws, &resource->buf, plane0->buffer.buf);
      resource->gpu_address = plane0->buffer.gpu_address;
   } else if (!(surface->flags & RADEON_SURF_IMPORTED)) {
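+      /* The sparse backing buffer can't be CPU-mapped directly, so mark the
+       * texture unmappable. */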
+      if (base->flags & PIPE_RESOURCE_FLAG_SPARSE)
+         resource->b.b.flags |= SI_RESOURCE_FLAG_UNMAPPABLE;
+
      /* Create the backing buffer. */
      si_init_resource_fields(sscreen, resource, alloc_size, alignment);
                           is_flushed_depth, tc_compatible_htile))
         return NULL;
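+      /* Mip levels below the first mip tail level support page-granular residency. */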
+      plane_templ[i].nr_sparse_levels = surface[i].first_mip_tail_level;
+
      plane_offset[i] = align64(total_size, 1 << surface[i].surf_alignment_log2);
      total_size = plane_offset[i] + surface[i].total_size;
      max_alignment = MAX2(max_alignment, 1 << surface[i].surf_alignment_log2);