to apply it to all command streams of each context.
Reviewed-by: Pierre-Eric Pelloux-Prayer <pierre-eric.pelloux-prayer@amd.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/24732>
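Illustration only, not part of the patch: a minimal sketch of the reworked calling convention, assuming hypothetical caller-side names (ws, flags, gfx_cs, flush_cb, flush_data). The robustness flag is now given once to ctx_create and inherited by every command stream created from that context, so cs_create loses its trailing bool parameter.

   /* Hypothetical caller sketching the new winsys API (names are not from this patch). */
   bool allow_context_lost = flags & PIPE_CONTEXT_LOSE_CONTEXT_ON_RESET;

   /* The reset behavior is decided once, at context creation. */
   struct radeon_winsys_ctx *ctx =
      ws->ctx_create(ws, RADEON_CTX_PRIORITY_MEDIUM, allow_context_lost);
   if (!ctx)
      return false;

   /* Every command stream of the context inherits the setting, so
    * cs_create no longer takes an allow_context_lost argument. */
   if (!ws->cs_create(&gfx_cs, ctx, AMD_IP_GFX, flush_cb, flush_data))
      return false;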
slab_create_child(&r300->pool_transfers, &r300screen->pool_transfers);
- r300->ctx = rws->ctx_create(rws, RADEON_CTX_PRIORITY_MEDIUM);
+ r300->ctx = rws->ctx_create(rws, RADEON_CTX_PRIORITY_MEDIUM, false);
if (!r300->ctx)
goto fail;
- if (!rws->cs_create(&r300->cs, r300->ctx, AMD_IP_GFX, r300_flush_callback, r300, false))
+ if (!rws->cs_create(&r300->cs, r300->ctx, AMD_IP_GFX, r300_flush_callback, r300))
goto fail;
if (!r300screen->caps.has_tcl) {
}
ws->cs_create(&rctx->b.gfx.cs, rctx->b.ctx, AMD_IP_GFX,
- r600_context_gfx_flush, rctx, false);
+ r600_context_gfx_flush, rctx);
rctx->b.gfx.flush = r600_context_gfx_flush;
u_suballocator_init(&rctx->allocator_fetch_shader, &rctx->b.b, 64 * 1024,
if (!rctx->b.const_uploader)
return false;
- rctx->ctx = rctx->ws->ctx_create(rctx->ws, RADEON_CTX_PRIORITY_MEDIUM);
+ rctx->ctx = rctx->ws->ctx_create(rctx->ws, RADEON_CTX_PRIORITY_MEDIUM, false);
if (!rctx->ctx)
return false;
if (rscreen->info.ip[AMD_IP_SDMA].num_queues && !(rscreen->debug_flags & DBG_NO_ASYNC_DMA)) {
rctx->ws->cs_create(&rctx->dma.cs, rctx->ctx, AMD_IP_SDMA,
- r600_flush_dma_ring, rctx, false);
+ r600_flush_dma_ring, rctx);
rctx->dma.flush = r600_flush_dma_ring;
}
dec->screen = context->screen;
dec->ws = ws;
- if (!ws->cs_create(&dec->cs, rctx->ctx, AMD_IP_UVD, NULL, NULL, false)) {
+ if (!ws->cs_create(&dec->cs, rctx->ctx, AMD_IP_UVD, NULL, NULL)) {
RVID_ERR("Can't get command submission context.\n");
goto error;
}
enc->screen = context->screen;
enc->ws = ws;
- if (!ws->cs_create(&enc->cs, rctx->ctx, AMD_IP_VCE, rvce_cs_flush, enc, false)) {
+ if (!ws->cs_create(&enc->cs, rctx->ctx, AMD_IP_VCE, rvce_cs_flush, enc)) {
RVID_ERR("Can't get command submission context.\n");
goto error;
}
dec->screen = context->screen;
dec->ws = ws;
- if (!ws->cs_create(&dec->cs, sctx->ctx, AMD_IP_UVD, NULL, NULL, false)) {
+ if (!ws->cs_create(&dec->cs, sctx->ctx, AMD_IP_UVD, NULL, NULL)) {
RVID_ERR("Can't get command submission context.\n");
goto error;
}
enc->screen = context->screen;
enc->ws = ws;
- if (!ws->cs_create(&enc->cs, sctx->ctx, AMD_IP_UVD_ENC, radeon_uvd_enc_cs_flush, enc, false)) {
+ if (!ws->cs_create(&enc->cs, sctx->ctx, AMD_IP_UVD_ENC, radeon_uvd_enc_cs_flush, enc)) {
RVID_ERR("Can't get command submission context.\n");
goto error;
}
enc->screen = context->screen;
enc->ws = ws;
- if (!ws->cs_create(&enc->cs, sctx->ctx, AMD_IP_VCE, rvce_cs_flush, enc, false)) {
+ if (!ws->cs_create(&enc->cs, sctx->ctx, AMD_IP_VCE, rvce_cs_flush, enc)) {
RVID_ERR("Can't get command submission context.\n");
goto error;
}
dec->sq.ib_total_size_in_dw = NULL;
dec->sq.ib_checksum = NULL;
- if (!ws->cs_create(&dec->cs, sctx->ctx, ring, NULL, NULL, false)) {
+ if (!ws->cs_create(&dec->cs, sctx->ctx, ring, NULL, NULL)) {
RVID_ERR("Can't get command submission context.\n");
goto error;
}
goto err;
for (i = 0; i < dec->njctx; i++) {
/* Initialize the context handle and the command stream. */
- dec->jctx[i] = dec->ws->ctx_create(dec->ws, RADEON_CTX_PRIORITY_MEDIUM);
+ dec->jctx[i] = dec->ws->ctx_create(dec->ws, RADEON_CTX_PRIORITY_MEDIUM,
+ sctx->context_flags & PIPE_CONTEXT_LOSE_CONTEXT_ON_RESET);
if (!dec->jctx[i])
goto error;
- if (!dec->ws->cs_create(&dec->jcs[i], dec->jctx[i], ring, NULL, NULL, false)) {
+ if (!dec->ws->cs_create(&dec->jcs[i], dec->jctx[i], ring, NULL, NULL)) {
RVID_ERR("Can't get additional command submission context for mJPEG.\n");
goto error;
}
if (!ws->cs_create(&enc->cs,
(sctx->vcn_has_ctx) ? ((struct si_context *)enc->ectx)->ctx : sctx->ctx,
- AMD_IP_VCN_ENC, radeon_enc_cs_flush, enc, false)) {
+ AMD_IP_VCN_ENC, radeon_enc_cs_flush, enc)) {
RVID_ERR("Can't get command submission context.\n");
goto error;
}
priority = RADEON_CTX_PRIORITY_MEDIUM;
}
+ bool allow_context_lost = flags & PIPE_CONTEXT_LOSE_CONTEXT_ON_RESET;
+
/* Initialize the context handle and the command stream. */
- sctx->ctx = sctx->ws->ctx_create(sctx->ws, priority);
+ sctx->ctx = sctx->ws->ctx_create(sctx->ws, priority, allow_context_lost);
if (!sctx->ctx && priority != RADEON_CTX_PRIORITY_MEDIUM) {
/* Context priority should be treated as a hint. If context creation
 * fails with the requested priority, for example because the caller
 * lacks CAP_SYS_NICE capability or other system resource constraints,
 * fall back to normal priority.
 */
priority = RADEON_CTX_PRIORITY_MEDIUM;
- sctx->ctx = sctx->ws->ctx_create(sctx->ws, priority);
+ sctx->ctx = sctx->ws->ctx_create(sctx->ws, priority, allow_context_lost);
}
if (!sctx->ctx) {
fprintf(stderr, "radeonsi: can't create radeon_winsys_ctx\n");
}
ws->cs_create(&sctx->gfx_cs, sctx->ctx, sctx->has_graphics ? AMD_IP_GFX : AMD_IP_COMPUTE,
- (void *)si_flush_gfx_cs, sctx,
- flags & PIPE_CONTEXT_LOSE_CONTEXT_ON_RESET);
+ (void *)si_flush_gfx_cs, sctx);
/* Initialize private allocators. */
u_suballocator_init(&sctx->allocator_zeroed_memory, &sctx->b, 128 * 1024, 0,
struct pb_buffer *gds_bo[ARRAY_SIZE(cs)];
for (unsigned i = 0; i < ARRAY_SIZE(cs); i++) {
- ws->cs_create(&cs[i], sctx->ctx, AMD_IP_COMPUTE, NULL, NULL, false);
+ ws->cs_create(&cs[i], sctx->ctx, AMD_IP_COMPUTE, NULL, NULL);
gds_bo[i] = ws->buffer_create(ws, alloc_size, alignment, domain, 0);
assert(gds_bo[i]);
}
return false;
sctx->sdma_cs = CALLOC_STRUCT(radeon_cmdbuf);
- if (!ws->cs_create(sctx->sdma_cs, sctx->ctx, AMD_IP_SDMA,
-                    NULL, NULL, true))
+ if (!ws->cs_create(sctx->sdma_cs, sctx->ctx, AMD_IP_SDMA, NULL, NULL))
return false;
}
/* Thread trace start CS (only handles AMD_IP_GFX). */
sctx->sqtt->start_cs[AMD_IP_GFX] = CALLOC_STRUCT(radeon_cmdbuf);
if (!ws->cs_create(sctx->sqtt->start_cs[AMD_IP_GFX], sctx->ctx, AMD_IP_GFX,
- NULL, NULL, 0)) {
+ NULL, NULL)) {
free(sctx->sqtt->start_cs[AMD_IP_GFX]);
sctx->sqtt->start_cs[AMD_IP_GFX] = NULL;
return;
/* Thread trace stop CS. */
sctx->sqtt->stop_cs[AMD_IP_GFX] = CALLOC_STRUCT(radeon_cmdbuf);
if (!ws->cs_create(sctx->sqtt->stop_cs[AMD_IP_GFX], sctx->ctx, AMD_IP_GFX,
- NULL, NULL, 0)) {
+ NULL, NULL)) {
free(sctx->sqtt->start_cs[AMD_IP_GFX]);
sctx->sqtt->start_cs[AMD_IP_GFX] = NULL;
free(sctx->sqtt->stop_cs[AMD_IP_GFX]);
/**
* Create a command submission context.
* Various command streams can be submitted to the same context.
+ *
+ * \param allow_context_lost  If true, lost contexts skip command submission and report
+ *                            the reset status.
+ *                            If false, losing the context results in undefined behavior.
*/
struct radeon_winsys_ctx *(*ctx_create)(struct radeon_winsys *ws,
- enum radeon_ctx_priority priority);
+ enum radeon_ctx_priority priority,
+ bool allow_context_lost);
/**
* Destroy a context.
struct radeon_winsys_ctx *ctx, enum amd_ip_type amd_ip_type,
void (*flush)(void *ctx, unsigned flags,
struct pipe_fence_handle **fence),
- void *flush_ctx, bool allow_context_lost);
+ void *flush_ctx);
/**
* Set up and enable mid command buffer preemption for the command stream.
}
static struct radeon_winsys_ctx *amdgpu_ctx_create(struct radeon_winsys *ws,
- enum radeon_ctx_priority priority)
+ enum radeon_ctx_priority priority,
+ bool allow_context_lost)
{
struct amdgpu_ctx *ctx = CALLOC_STRUCT(amdgpu_ctx);
int r;
ctx->ws = amdgpu_winsys(ws);
ctx->refcount = 1;
ctx->initial_num_total_rejected_cs = ctx->ws->num_total_rejected_cs;
+ ctx->allow_context_lost = allow_context_lost;
r = amdgpu_cs_ctx_create2(ctx->ws->dev, amdgpu_priority, &ctx->ctx);
if (r) {
enum amd_ip_type ip_type,
void (*flush)(void *ctx, unsigned flags,
struct pipe_fence_handle **fence),
- void *flush_ctx,
- bool allow_context_lost)
+ void *flush_ctx)
{
struct amdgpu_ctx *ctx = (struct amdgpu_ctx*)rwctx;
struct amdgpu_cs *cs;
cs->flush_cs = flush;
cs->flush_data = flush_ctx;
cs->ip_type = ip_type;
- cs->allow_context_lost = allow_context_lost;
cs->noop = ctx->ws->noop_cs;
cs->has_chaining = ctx->ws->info.gfx_level >= GFX7 &&
(ip_type == AMD_IP_GFX || ip_type == AMD_IP_COMPUTE);
cleanup:
if (unlikely(r)) {
- if (!acs->allow_context_lost) {
+ if (!acs->ctx->allow_context_lost) {
/* Non-robust contexts are allowed to terminate the process. The only alternative is
* to skip command submission, which would look like a freeze because nothing is drawn,
* which is not a useful state to be in under any circumstances.
amdgpu_bo_handle user_fence_bo;
uint64_t *user_fence_cpu_address_base;
int refcount;
+
+ /* If true, report lost contexts and skip command submission.
+ * If false, terminate the process.
+ */
+ bool allow_context_lost;
+
unsigned initial_num_total_rejected_cs;
bool rejected_any_cs;
};
/* Flush CS. */
void (*flush_cs)(void *ctx, unsigned flags, struct pipe_fence_handle **fence);
void *flush_data;
- bool allow_context_lost;
bool noop;
bool has_chaining;
struct pipe_fence_handle *src);
static struct radeon_winsys_ctx *radeon_drm_ctx_create(struct radeon_winsys *ws,
- enum radeon_ctx_priority priority)
+ enum radeon_ctx_priority priority,
+ bool allow_context_lost)
{
struct radeon_ctx *ctx = CALLOC_STRUCT(radeon_ctx);
if (!ctx)
enum amd_ip_type ip_type,
void (*flush)(void *ctx, unsigned flags,
struct pipe_fence_handle **fence),
- void *flush_ctx,
- bool allow_context_lost)
+ void *flush_ctx)
{
struct radeon_drm_winsys *ws = ((struct radeon_ctx*)ctx)->ws;
struct radeon_drm_cs *cs;