/**
 * Secure context: ws_uses_secure_bo reports whether the app has allocated
 * at least one encrypted (TMZ) buffer; cs_is_secure/cs_set_secure query and
 * set whether a command stream is submitted as a secure (TMZ) submission.
 */
- bool (*ws_is_secure)(struct radeon_winsys *ws);
+ bool (*ws_uses_secure_bo)(struct radeon_winsys *ws);
bool (*cs_is_secure)(struct radeon_cmdbuf *cs);
void (*cs_set_secure)(struct radeon_cmdbuf *cs, bool secure);
};
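Taken together, the dispatch/draw hunks below drive these hooks with one pattern: compute whether the bound resources need encryption, and if that differs from cs_is_secure(), flush so the next IB can be submitted in the right mode. A minimal sketch of that pattern, assuming a hypothetical helper name (si_ensure_cs_security) and that the CS is re-armed via cs_set_secure() around the flush:

/* Sketch only: flush when the required TMZ mode differs from the current CS. */
static void si_ensure_cs_security(struct si_context *sctx, bool need_secure)
{
   if (need_secure != sctx->ws->cs_is_secure(sctx->gfx_cs)) {
      /* An IB is either fully secure or fully non-secure, so start a new one. */
      si_flush_gfx_cs(sctx, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
      sctx->ws->cs_set_secure(sctx->gfx_cs, need_secure);
   }
}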
else
res->flags |= RADEON_FLAG_NO_INTERPROCESS_SHARING;
- if (sscreen->ws->ws_is_secure(sscreen->ws)) {
- if (res->b.b.bind & (PIPE_BIND_SCANOUT | PIPE_BIND_DEPTH_STENCIL))
- res->flags |= RADEON_FLAG_ENCRYPTED;
- if (res->b.b.flags & PIPE_RESOURCE_FLAG_ENCRYPTED)
- res->flags |= RADEON_FLAG_ENCRYPTED;
- }
+ /* With AMD_DEBUG=tmz, force scanout/depth/stencil buffers to be allocated as encrypted */
+ if (sscreen->debug_flags & DBG(TMZ) &&
+ res->b.b.bind & (PIPE_BIND_SCANOUT | PIPE_BIND_DEPTH_STENCIL))
+ res->flags |= RADEON_FLAG_ENCRYPTED;
+
+ if (res->b.b.flags & PIPE_RESOURCE_FLAG_ENCRYPTED)
+ res->flags |= RADEON_FLAG_ENCRYPTED;
if (sscreen->debug_flags & DBG(NO_WC))
res->flags &= ~RADEON_FLAG_GTT_WC;
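For reference, PIPE_RESOURCE_FLAG_ENCRYPTED is how a state tracker opts a resource into TMZ; the hunk above simply propagates it to RADEON_FLAG_ENCRYPTED. A minimal sketch of a caller, with a hypothetical helper name and illustrative buffer parameters (not part of this patch):

/* Sketch only: request a TMZ-protected buffer through gallium. */
static struct pipe_resource *create_encrypted_buffer(struct pipe_screen *screen)
{
   struct pipe_resource templ = {0};
   templ.target = PIPE_BUFFER;
   templ.format = PIPE_FORMAT_R8_UNORM;
   templ.bind = PIPE_BIND_CONSTANT_BUFFER;
   templ.width0 = 4096;
   templ.height0 = templ.depth0 = templ.array_size = 1;
   templ.flags = PIPE_RESOURCE_FLAG_ENCRYPTED; /* becomes RADEON_FLAG_ENCRYPTED */
   return screen->resource_create(screen, &templ);
}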
si_need_gfx_cs_space(sctx);
/* If we're using a secure context, determine if cs must be secure or not */
- if (unlikely(sctx->ws->ws_is_secure(sctx->ws))) {
+ if (unlikely(sctx->ws->ws_uses_secure_bo(sctx->ws))) {
bool secure = si_compute_resources_check_encrypted(sctx);
if (secure != sctx->ws->cs_is_secure(sctx->gfx_cs)) {
si_flush_gfx_cs(sctx, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
}
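The flush here is required because a submission is either entirely secure or entirely non-secure (see the AMDGPU_IB_FLAGS_SECURE handling in the CS hunk further down); when the security requirement of the bound resources changes, the current IB must be submitted and a new one started in the other mode.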
/* TMZ handling */
- if (unlikely(sctx->ws->ws_is_secure(sctx->ws) &&
+ if (unlikely(sctx->ws->ws_uses_secure_bo(sctx->ws) &&
!(user_flags & SI_CPDMA_SKIP_TMZ))) {
bool secure = src && (si_resource(src)->flags & RADEON_FLAG_ENCRYPTED);
assert(!secure || (!dst || (si_resource(dst)->flags & RADEON_FLAG_ENCRYPTED)));
assert(size % 4 == 0);
if (!cs || dst->flags & PIPE_RESOURCE_FLAG_SPARSE ||
- sctx->screen->debug_flags & DBG(NO_SDMA_CLEARS) || sctx->ws->ws_is_secure(sctx->ws)) {
+ sctx->screen->debug_flags & DBG(NO_SDMA_CLEARS) ||
+ sctx->ws->ws_uses_secure_bo(sctx->ws)) {
sctx->b.clear_buffer(&sctx->b, dst, offset, size, &clear_value, 4);
return;
}
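Note the asymmetry: once secure BOs are in use, SDMA buffer clears are skipped entirely in favor of the gfx clear_buffer path, while SDMA copies (next hunk) are taught to pick a secure command stream via use_secure_cmd.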
si_flush_gfx_cs(ctx, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
bool use_secure_cmd = false;
- /* if TMZ is supported and enabled */
- if (ctx->ws->ws_is_secure(ctx->ws)) {
+ if (unlikely(ctx->ws->ws_uses_secure_bo(ctx->ws))) {
if (src && src->flags & RADEON_FLAG_ENCRYPTED) {
assert(!dst || (dst->flags & RADEON_FLAG_ENCRYPTED));
use_secure_cmd = true;
{"nodccmsaa", DBG(NO_DCC_MSAA), "Disable DCC for MSAA"},
{"nofmask", DBG(NO_FMASK), "Disable MSAA compression"},
+ {"tmz", DBG(TMZ), "Force allocation of scanout/depth/stencil buffer as encrypted"},
+
DEBUG_NAMED_VALUE_END /* must be last */
};
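With this entry the feature is enabled via AMD_DEBUG=tmz, replacing the raw strstr() probe of AMD_DEBUG that is deleted from amdgpu_winsys.c further down.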
if (sscreen->debug_flags & DBG(NO_GFX))
sscreen->info.has_graphics = false;
+ if ((sscreen->debug_flags & DBG(TMZ)) &&
+ !sscreen->info.has_tmz_support) {
+ fprintf(stderr, "radeonsi: requesting TMZ features but TMZ is not supported\n");
+ FREE(sscreen);
+ return NULL;
+ }
+
/* Set functions first. */
sscreen->b.context_create = si_pipe_create_context;
sscreen->b.destroy = si_destroy_screen;
DBG_NO_DCC_MSAA,
DBG_NO_FMASK,
+ DBG_TMZ,
DBG_COUNT
};
si_need_gfx_cs_space(sctx);
/* If we're using a secure context, determine if cs must be secure or not */
- if (unlikely(sctx->ws->ws_is_secure(sctx->ws))) {
+ if (unlikely(sctx->ws->ws_uses_secure_bo(sctx->ws))) {
bool secure = si_gfx_resources_check_encrypted(sctx);
if (secure != sctx->ws->cs_is_secure(sctx->gfx_cs)) {
si_flush_gfx_cs(sctx, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
{
for (unsigned i = 0; i < NUM_SLAB_ALLOCATORS; i++) {
pb_slabs_reclaim(&ws->bo_slabs[i]);
- if (ws->secure)
- pb_slabs_reclaim(&ws->bo_slabs_encrypted[i]);
+ if (ws->info.has_tmz_support)
+ pb_slabs_reclaim(&ws->bo_slabs_encrypted[i]);
}
pb_cache_release_all_buffers(&ws->bo_cache);
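Reclaim is now keyed on has_tmz_support instead of the old global ws->secure flag, matching the pb_slabs_init() condition below: the encrypted slab allocators exist whenever the device supports TMZ, even before any secure BO has been allocated.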
if (ws->zero_all_vram_allocs &&
(request.preferred_heap & AMDGPU_GEM_DOMAIN_VRAM))
request.flags |= AMDGPU_GEM_CREATE_VRAM_CLEARED;
- if ((flags & RADEON_FLAG_ENCRYPTED) && ws->secure)
+ if ((flags & RADEON_FLAG_ENCRYPTED) &&
+ ws->info.has_tmz_support) {
request.flags |= AMDGPU_GEM_CREATE_ENCRYPTED;
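+ /* Only app-visible encrypted allocations flip the winsys into secure
+ * mode; driver-internal TMZ BOs must not affect CS submission. */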
+ if (!(flags & RADEON_FLAG_DRIVER_INTERNAL))
+ ws->uses_secure_bos = true;
+ }
+
r = amdgpu_bo_alloc(ws->dev, &request, &buf_handle);
if (r) {
fprintf(stderr, "amdgpu: Failed to allocate a buffer:\n");
static struct pb_slabs *get_slabs(struct amdgpu_winsys *ws, uint64_t size,
enum radeon_bo_flag flags)
{
- struct pb_slabs *bo_slabs = ((flags & RADEON_FLAG_ENCRYPTED) && ws->secure) ?
+ struct pb_slabs *bo_slabs = ((flags & RADEON_FLAG_ENCRYPTED) && ws->info.has_tmz_support) ?
ws->bo_slabs_encrypted : ws->bo_slabs;
/* Find the correct slab allocator for the given size. */
for (unsigned i = 0; i < NUM_SLAB_ALLOCATORS; i++) {
if (encrypted)
flags |= RADEON_FLAG_ENCRYPTED;
- struct pb_slabs *slabs = (flags & RADEON_FLAG_ENCRYPTED && ws->secure) ?
+ struct pb_slabs *slabs = ((flags & RADEON_FLAG_ENCRYPTED) && ws->info.has_tmz_support) ?
ws->bo_slabs_encrypted : ws->bo_slabs;
/* Determine the slab buffer size. */
/* Sparse buffers must have NO_CPU_ACCESS set. */
assert(!(flags & RADEON_FLAG_SPARSE) || flags & RADEON_FLAG_NO_CPU_ACCESS);
- struct pb_slabs *slabs = (flags & RADEON_FLAG_ENCRYPTED && ws->secure) ?
+ struct pb_slabs *slabs = ((flags & RADEON_FLAG_ENCRYPTED) && ws->info.has_tmz_support) ?
ws->bo_slabs_encrypted : ws->bo_slabs;
struct pb_slabs *last_slab = &slabs[NUM_SLAB_ALLOCATORS - 1];
unsigned max_slab_entry_size = 1 << (last_slab->min_order + last_slab->num_orders - 1);
chunks[num_chunks].chunk_data = (uintptr_t)&cs->ib[IB_MAIN];
num_chunks++;
- if (ws->secure && cs->secure) {
+ if (cs->secure) {
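+ /* A CS can be secure only if the app has allocated encrypted BOs. */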
+ assert(ws->uses_secure_bos);
cs->ib[IB_PREAMBLE].flags |= AMDGPU_IB_FLAGS_SECURE;
cs->ib[IB_MAIN].flags |= AMDGPU_IB_FLAGS_SECURE;
} else {
ws->zero_all_vram_allocs = strstr(debug_get_option("R600_DEBUG", ""), "zerovram") != NULL ||
strstr(debug_get_option("AMD_DEBUG", ""), "zerovram") != NULL ||
driQueryOptionb(config->options, "radeonsi_zerovram");
- ws->secure = strstr(debug_get_option("AMD_DEBUG", ""), "tmz");
-
- if (ws->secure) {
- fprintf(stderr, "=== TMZ usage enabled ===\n");
- }
return true;
return a == b;
}
-static bool amdgpu_ws_is_secure(struct radeon_winsys *rws)
+static bool amdgpu_ws_uses_secure_bo(struct radeon_winsys *rws)
{
struct amdgpu_winsys *ws = amdgpu_winsys(rws);
- return ws->secure;
+ return ws->uses_secure_bos;
}
static bool amdgpu_cs_is_secure(struct radeon_cmdbuf *rcs)
return NULL;
}
- if (aws->secure && !pb_slabs_init(&aws->bo_slabs_encrypted[i],
- min_order, max_order,
- RADEON_MAX_SLAB_HEAPS,
- aws,
- amdgpu_bo_can_reclaim_slab,
- amdgpu_bo_slab_alloc_encrypted,
- amdgpu_bo_slab_free)) {
+ if (aws->info.has_tmz_support &&
+ !pb_slabs_init(&aws->bo_slabs_encrypted[i],
+ min_order, max_order,
+ RADEON_MAX_SLAB_HEAPS,
+ aws,
+ amdgpu_bo_can_reclaim_slab,
+ amdgpu_bo_slab_alloc_encrypted,
+ amdgpu_bo_slab_free)) {
amdgpu_winsys_destroy(&ws->base);
simple_mtx_unlock(&dev_tab_mutex);
return NULL;
ws->base.query_value = amdgpu_query_value;
ws->base.read_registers = amdgpu_read_registers;
ws->base.pin_threads_to_L3_cache = amdgpu_pin_threads_to_L3_cache;
- ws->base.ws_is_secure = amdgpu_ws_is_secure;
+ ws->base.ws_uses_secure_bo = amdgpu_ws_uses_secure_bo;
ws->base.cs_is_secure = amdgpu_cs_is_secure;
ws->base.cs_set_secure = amdgpu_cs_set_secure;
bool debug_all_bos;
bool reserve_vmid;
bool zero_all_vram_allocs;
- bool secure;
+ bool uses_secure_bos;
/* List of all allocated buffers */
simple_mtx_t global_bo_list_lock;
}
}
-static bool radeon_ws_is_secure(struct radeon_winsys* ws)
+static bool radeon_ws_uses_secure_bo(struct radeon_winsys *ws)
{
return false;
}
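The radeon winsys reports false unconditionally, since TMZ is only implemented in the amdgpu kernel driver; the legacy radeon driver never allocates secure BOs.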
ws->base.cs_request_feature = radeon_cs_request_feature;
ws->base.query_value = radeon_query_value;
ws->base.read_registers = radeon_read_registers;
- ws->base.ws_is_secure = radeon_ws_is_secure;
- ws->base.cs_is_secure = radeon_cs_is_secure;
- ws->base.cs_set_secure = radeon_cs_set_secure;
+ ws->base.ws_uses_secure_bo = radeon_ws_uses_secure_bo;
+ ws->base.cs_is_secure = radeon_cs_is_secure;
+ ws->base.cs_set_secure = radeon_cs_set_secure;
radeon_drm_bo_init_functions(ws);
radeon_drm_cs_init_functions(ws);