goto free_chunk;
}
- mutex_lock(&p->ctx->lock);
-
/* skip guilty context job */
if (atomic_read(&p->ctx->guilty) == 1) {
	ret = -ECANCELED;
	goto free_chunk;
}
+ /* Move fence waiting until after the reservation lock of the
+ * PD root is taken. Then there is no need for a ctx mutex lock.
+ */
+ r = amdgpu_ctx_wait_prev_fence(p->ctx, p->entity);
+ if (unlikely(r != 0)) {
+ if (r != -ERESTARTSYS)
+ DRM_ERROR("amdgpu_ctx_wait_prev_fence failed.\n");
+ goto error_validate;
+ }
+
amdgpu_cs_get_threshold_for_moves(p->adev, &p->bytes_moved_threshold,
&p->bytes_moved_vis_threshold);
p->bytes_moved = 0;
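
The ordering the new comment describes is the usual dma-buf pattern: once a submitter holds the PD root BO's reservation lock, submissions against that VM are already serialized, so the wait on the previous fence can happen there and a separate per-context mutex becomes redundant. A minimal generic sketch of that pattern, using plain dma_resv/dma_fence calls (submit_after_prev is a made-up name, not amdgpu code):

#include <linux/dma-resv.h>
#include <linux/dma-fence.h>

/* Sketch: the reservation lock alone orders concurrent submitters,
 * so the previous fence can be waited on under it.
 */
static int submit_after_prev(struct dma_resv *resv, struct dma_fence *prev)
{
	long r;

	r = dma_resv_lock(resv, NULL);
	if (r)
		return r;

	if (prev) {
		/* Interruptible wait; returns a negative error on signal. */
		r = dma_fence_wait(prev, true);
		if (r < 0) {
			dma_resv_unlock(resv);
			return r;
		}
	}

	/* ... build and queue the new job, publish its fence ... */

	dma_resv_unlock(resv);
	return 0;
}
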
dma_fence_put(parser->fence);
if (parser->ctx) {
- mutex_unlock(&parser->ctx->lock);
amdgpu_ctx_put(parser->ctx);
}
if (parser->bo_list)
	amdgpu_bo_list_put(parser->bo_list);
if (parser->job->uf_addr && ring->funcs->no_user_fence)
return -EINVAL;
- return amdgpu_ctx_wait_prev_fence(parser->ctx, parser->entity);
+ return 0;
}
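
For reference, a condensed sketch of what the relocated amdgpu_ctx_wait_prev_fence() does; the body below is reconstructed and simplified, so treat the ring_lock/fences/sequence bookkeeping as assumed rather than quoted:

/* Simplified sketch of amdgpu_ctx_wait_prev_fence(); error reporting
 * and corner cases trimmed, field names assumed from amdgpu_ctx.c.
 */
int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx,
			       struct drm_sched_entity *entity)
{
	struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
	struct dma_fence *other;
	unsigned int idx;
	long r;

	/* Grab a reference to the previous submission's fence. */
	spin_lock(&ctx->ring_lock);
	idx = centity->sequence & (amdgpu_sched_jobs - 1);
	other = dma_fence_get(centity->fences[idx]);
	spin_unlock(&ctx->ring_lock);

	if (!other)
		return 0;

	/* Interruptible wait, now done under the PD root reservation. */
	r = dma_fence_wait(other, true);
	dma_fence_put(other);
	return r;
}

The fence ring itself is protected by the ring_lock spinlock; ctx->lock only ordered this wait against the rest of the submit path, and that ordering now comes from the reservation lock, which is why the ctx->lock lock/unlock sites and the mutex init/destroy in amdgpu_ctx.c can all go away.
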
static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p,
				       struct amdgpu_cs_chunk *chunk)
goto out;
r = amdgpu_cs_submit(&parser, cs);
-
out:
amdgpu_cs_parser_fini(&parser, r, reserved_buffers);
kref_init(&ctx->refcount);
spin_lock_init(&ctx->ring_lock);
- mutex_init(&ctx->lock);
ctx->reset_counter = atomic_read(&adev->gpu_reset_counter);
ctx->reset_counter_query = ctx->reset_counter;
}
}
amdgpu_ctx_set_stable_pstate(ctx, AMDGPU_CTX_STABLE_PSTATE_NONE);
- mutex_destroy(&ctx->lock);
kfree(ctx);
}