/* Whether the next IB can start immediately and not wait for draws and
 * dispatches from the current IB to finish. */
#define RADEON_FLUSH_START_NEXT_GFX_IB_NOW (1u << 31)
+/* Toggle the secure submission boolean after the flush */
+#define RADEON_FLUSH_TOGGLE_SECURE_SUBMISSION (1u << 30)
+
#define RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW \
(PIPE_FLUSH_ASYNC | RADEON_FLUSH_START_NEXT_GFX_IB_NOW)
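
For orientation before the hunks below: the old scheme flushed and then had the
caller poke the secure bit into the CS from outside the winsys; the new flag
folds that state change into the flush itself. A sketch of the two calling
patterns, using the same functions the hunks touch (a fragment for comparison,
not runnable on its own):

/* Old pattern (removed below): flush, then mutate the CS state from the
 * caller's thread. */
si_flush_gfx_cs(sctx, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
sctx->ws->cs_set_secure(sctx->gfx_cs, secure);

/* New pattern: the winsys flips the secure bit itself as part of the
 * flush. */
si_flush_gfx_cs(sctx, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW |
                RADEON_FLUSH_TOGGLE_SECURE_SUBMISSION, NULL);
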
*/
bool (*ws_uses_secure_bo)(struct radeon_winsys *ws);
bool (*cs_is_secure)(struct radeon_cmdbuf *cs);
- void (*cs_set_secure)(struct radeon_cmdbuf *cs, bool secure);
};
static inline bool radeon_emitted(struct radeon_cmdbuf *cs, unsigned num_dw)
if (unlikely(sctx->ws->ws_uses_secure_bo(sctx->ws))) {
bool secure = si_compute_resources_check_encrypted(sctx);
if (secure != sctx->ws->cs_is_secure(sctx->gfx_cs)) {
- si_flush_gfx_cs(sctx, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
- sctx->ws->cs_set_secure(sctx->gfx_cs, secure);
+ si_flush_gfx_cs(sctx, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW |
+ RADEON_FLUSH_TOGGLE_SECURE_SUBMISSION,
+ NULL);
}
}
bool secure = src && (si_resource(src)->flags & RADEON_FLAG_ENCRYPTED);
assert(!secure || (!dst || (si_resource(dst)->flags & RADEON_FLAG_ENCRYPTED)));
if (secure != sctx->ws->cs_is_secure(sctx->gfx_cs)) {
- si_flush_gfx_cs(sctx, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
- sctx->ws->cs_set_secure(sctx->gfx_cs, secure);
+ si_flush_gfx_cs(sctx, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW |
+ RADEON_FLUSH_TOGGLE_SECURE_SUBMISSION, NULL);
}
}
if (!cs || dst->flags & PIPE_RESOURCE_FLAG_SPARSE ||
sctx->screen->debug_flags & DBG(NO_SDMA_CLEARS) ||
- sctx->ws->ws_uses_secure_bo(sctx->ws)) {
+ unlikely(sctx->ws->ws_uses_secure_bo(sctx->ws))) {
sctx->b.clear_buffer(&sctx->b, dst, offset, size, &clear_value, 4);
return;
}
!ws->cs_check_space(ctx->sdma_cs, num_dw, false) ||
ctx->sdma_cs->used_vram + ctx->sdma_cs->used_gart > 64 * 1024 * 1024 ||
!radeon_cs_memory_below_limit(ctx->screen, ctx->sdma_cs, vram, gtt))) {
- si_flush_dma_cs(ctx, PIPE_FLUSH_ASYNC, NULL);
+ si_flush_dma_cs(ctx, PIPE_FLUSH_ASYNC | RADEON_FLUSH_TOGGLE_SECURE_SUBMISSION, NULL);
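+ /* The flush must leave the new IB in the secure state this transfer
+ * needs. */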
+ assert(ctx->ws->cs_is_secure(ctx->sdma_cs) == use_secure_cmd);
assert((num_dw + ctx->sdma_cs->current.cdw) <= ctx->sdma_cs->current.max_dw);
}
- ctx->ws->cs_set_secure(ctx->sdma_cs, use_secure_cmd);
/* Wait for idle if either buffer has been used in the IB before to
* prevent read-after-write hazards.
struct radeon_saved_cs saved;
bool check_vm = (ctx->screen->debug_flags & DBG(CHECK_VM)) != 0;
- if (!radeon_emitted(cs, 0)) {
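+ /* Even an empty SDMA IB must be submitted when a secure-state toggle
+ * was requested, because the toggle only takes effect at flush. */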
+ if (!radeon_emitted(cs, 0) &&
+ !(flags & RADEON_FLUSH_TOGGLE_SECURE_SUBMISSION)) {
if (fence)
ctx->ws->fence_reference(fence, ctx->last_sdma_fence);
return;
}
/* Drop this flush if it's a no-op. */
- if (!radeon_emitted(cs, ctx->initial_gfx_cs_size) && (!wait_flags || !ctx->gfx_last_ib_is_busy))
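+ /* ...but never drop a flush that must toggle the secure state; the
+ * winsys applies the toggle at submission time. */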
+ if (!radeon_emitted(cs, ctx->initial_gfx_cs_size) &&
+ (!wait_flags || !ctx->gfx_last_ib_is_busy) &&
+ !(flags & RADEON_FLUSH_TOGGLE_SECURE_SUBMISSION))
return;
if (ctx->b.get_device_reset_status(&ctx->b) != PIPE_NO_RESET)
if (unlikely(sctx->ws->ws_uses_secure_bo(sctx->ws))) {
bool secure = si_gfx_resources_check_encrypted(sctx);
if (secure != sctx->ws->cs_is_secure(sctx->gfx_cs)) {
- si_flush_gfx_cs(sctx, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
- sctx->ws->cs_set_secure(sctx->gfx_cs, secure);
+ si_flush_gfx_cs(sctx, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW |
+ RADEON_FLUSH_TOGGLE_SECURE_SUBMISSION, NULL);
}
}
return true;
}
-void amdgpu_cs_submit_ib(void *job, int thread_index)
+static void amdgpu_cs_submit_ib(void *job, int thread_index)
{
struct amdgpu_cs *acs = (struct amdgpu_cs*)job;
struct amdgpu_winsys *ws = acs->ctx->ws;
/* Submit. */
util_queue_add_job(&ws->cs_queue, cs, &cs->flush_completed,
amdgpu_cs_submit_ib, NULL, 0);
+
+ if (flags & RADEON_FLUSH_TOGGLE_SECURE_SUBMISSION)
+ cs->csc->secure = !cs->cst->secure;
+ else
+ cs->csc->secure = cs->cst->secure;
+
/* The submission has been queued, unlock the fence now. */
simple_mtx_unlock(&ws->bo_fence_lock);
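
Why the hunk above reads cs->cst but writes cs->csc: by the time
util_queue_add_job runs, amdgpu_cs_flush has already swapped the recording
context (cs->csc) with the submission context (cs->cst), so the fresh
context's secure bit is derived from the IB that was just queued. Below is a
minimal, self-contained model of that hand-off; the types and the flush helper
are illustrative stand-ins, and only the csc/cst double-buffering plus the
toggle rule mirror the patch:

#include <assert.h>
#include <stdbool.h>

struct cs_context {
   bool secure;
};

struct cs {
   struct cs_context ctx[2];
   struct cs_context *csc; /* context being recorded */
   struct cs_context *cst; /* context being submitted */
};

static void flush(struct cs *cs, bool toggle)
{
   /* Hand the recorded context to the (modeled) submission thread. */
   struct cs_context *tmp = cs->csc;
   cs->csc = cs->cst;
   cs->cst = tmp;

   /* The next IB inherits the submitted IB's secure state, flipped when
    * the toggle flag was passed -- the rule in the hunk above. */
   cs->csc->secure = toggle ? !cs->cst->secure : cs->cst->secure;
}

int main(void)
{
   struct cs cs = { .csc = &cs.ctx[0], .cst = &cs.ctx[1] };

   flush(&cs, true);    /* toggle: the next IB becomes secure */
   assert(cs.csc->secure);

   flush(&cs, false);   /* no toggle: the secure state carries over */
   assert(cs.csc->secure);
   return 0;
}
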
error_code = cur->error_code;
}
} else {
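+ /* Nothing was submitted (empty IB), so the contexts were not swapped;
+ * flip the current context's secure bit directly. */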
+ if (flags & RADEON_FLUSH_TOGGLE_SECURE_SUBMISSION)
+ cs->csc->secure = !cs->csc->secure;
amdgpu_cs_context_cleanup(cs->csc);
}
struct pipe_fence_handle **fences);
void amdgpu_cs_sync_flush(struct radeon_cmdbuf *rcs);
void amdgpu_cs_init_functions(struct amdgpu_screen_winsys *ws);
-void amdgpu_cs_submit_ib(void *job, int thread_index);
#endif
return cs->csc->secure;
}
-static void amdgpu_cs_set_secure(struct radeon_cmdbuf *rcs, bool secure)
-{
- struct amdgpu_cs *cs = amdgpu_cs(rcs);
- cs->csc->secure = secure;
-}
-
PUBLIC struct radeon_winsys *
amdgpu_winsys_create(int fd, const struct pipe_screen_config *config,
radeon_screen_create_t screen_create)
ws->base.pin_threads_to_L3_cache = amdgpu_pin_threads_to_L3_cache;
ws->base.ws_uses_secure_bo = amdgpu_ws_uses_secure_bo;
ws->base.cs_is_secure = amdgpu_cs_is_secure;
- ws->base.cs_set_secure = amdgpu_cs_set_secure;
amdgpu_bo_init_functions(ws);
amdgpu_cs_init_functions(ws);
return false;
}
-static void radeon_cs_set_secure(struct radeon_cmdbuf* cs, bool enable)
-{
-}
-
PUBLIC struct radeon_winsys *
radeon_drm_winsys_create(int fd, const struct pipe_screen_config *config,
radeon_screen_create_t screen_create)
ws->base.read_registers = radeon_read_registers;
ws->base.ws_uses_secure_bo = radeon_ws_uses_secure_bo;
ws->base.cs_is_secure = radeon_cs_is_secure;
- ws->base.cs_set_secure = radeon_cs_set_secure;
radeon_drm_bo_init_functions(ws);
radeon_drm_cs_init_functions(ws);