if (submit->out_fence) {
/* first remove from IDR, so the fence cannot be found anymore */
- mutex_lock(&submit->gpu->fence_lock);
+ mutex_lock(&submit->gpu->idr_lock);
idr_remove(&submit->gpu->fence_idr, submit->out_fence_id);
- mutex_unlock(&submit->gpu->fence_lock);
+ mutex_unlock(&submit->gpu->idr_lock);
dma_fence_put(submit->out_fence);
}
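
For context, this pairs with the lookup side: any path that resolves a fence id back to a dma_fence must take the same idr_lock, so removing the entry first guarantees no lookup can hand out a fence whose last reference is about to be dropped below. A minimal sketch of such a lookup under the new lock (a hypothetical helper, not necessarily the driver's actual lookup path, which may synchronize via RCU instead):

/*
 * Sketch: resolve a fence id to a referenced dma_fence, or NULL if
 * the submit was already torn down. Taking our own reference while
 * idr_lock is held is what makes the idr_remove-before-put ordering
 * above safe.
 */
static struct dma_fence *etnaviv_fence_lookup(struct etnaviv_gpu *gpu, u32 id)
{
	struct dma_fence *fence;

	mutex_lock(&gpu->idr_lock);
	fence = idr_find(&gpu->fence_idr, id);
	if (fence)
		dma_fence_get(fence);
	mutex_unlock(&gpu->idr_lock);

	return fence;
}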
gpu->dev = &pdev->dev;
mutex_init(&gpu->lock);
- mutex_init(&gpu->fence_lock);
+ mutex_init(&gpu->sched_lock);
+ mutex_init(&gpu->idr_lock);
/* Map registers: */
gpu->mmio = devm_platform_ioremap_resource(pdev, 0);

struct etnaviv_chip_identity identity;
enum etnaviv_sec_mode sec_mode;
struct workqueue_struct *wq;
+ struct mutex sched_lock;
struct drm_gpu_scheduler sched;
bool initialized;
bool fe_running;
u32 idle_mask;
/* Fencing support */
- struct mutex fence_lock;
+ struct mutex idr_lock;
struct idr fence_idr;
u32 next_fence;
u32 completed_fence;
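
Note that next_fence and completed_fence are plain u32 seqnos and wrap around, so ordering checks cannot use a naive '<'. A sketch of the wraparound-safe idiom, assuming helpers along the lines of what this header carries:

/*
 * Signed-difference comparison stays correct across u32 wraparound
 * as long as the two seqnos are less than 2^31 apart.
 */
static inline bool fence_after(u32 a, u32 b)
{
	return (s32)(a - b) > 0;
}

static inline bool fence_completed(struct etnaviv_gpu *gpu, u32 fence)
{
	return !fence_after(fence, gpu->completed_fence);
}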
int etnaviv_sched_push_job(struct etnaviv_gem_submit *submit)
{
+ struct etnaviv_gpu *gpu = submit->gpu;
int ret = 0;
/*
- * Hold the fence lock across the whole operation to avoid jobs being
+ * Hold the sched lock across the whole operation to avoid jobs being
* pushed out of order with regard to their sched fence seqnos as
* allocated in drm_sched_job_arm.
*/
- mutex_lock(&submit->gpu->fence_lock);
+ mutex_lock(&gpu->sched_lock);
drm_sched_job_arm(&submit->sched_job);
submit->out_fence = dma_fence_get(&submit->sched_job.s_fence->finished);
- submit->out_fence_id = idr_alloc_cyclic(&submit->gpu->fence_idr,
+ mutex_lock(&gpu->idr_lock);
+ submit->out_fence_id = idr_alloc_cyclic(&gpu->fence_idr,
submit->out_fence, 0,
INT_MAX, GFP_KERNEL);
+ mutex_unlock(&gpu->idr_lock);
if (submit->out_fence_id < 0) {
drm_sched_job_cleanup(&submit->sched_job);
ret = -ENOMEM;
goto out_unlock;
}

drm_sched_entity_push_job(&submit->sched_job);

out_unlock:
- mutex_unlock(&submit->gpu->fence_lock);
+ mutex_unlock(&gpu->sched_lock);
return ret;
}
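
The comment above is the point of the split: drm_sched_job_arm() assigns the scheduler fence its seqno and drm_sched_entity_push_job() queues the job, so the two must happen atomically per GPU. A hypothetical interleaving sketch (not driver code) of what could happen without sched_lock:

/*
 * Two submitters, no sched_lock held:
 *
 *   thread A: drm_sched_job_arm(&job_a);	// finished fence gets seqno N
 *   thread B: drm_sched_job_arm(&job_b);	// finished fence gets seqno N + 1
 *   thread B: drm_sched_entity_push_job(&job_b);
 *   thread A: drm_sched_entity_push_job(&job_a);
 *
 * job_b (seqno N + 1) is now queued ahead of job_a (seqno N), so the
 * entity's fences signal out of order with regard to their seqnos.
 * Holding gpu->sched_lock across arm + push keeps queue order equal
 * to seqno order, while the short idr_lock section only guards the
 * fence_idr itself.
 */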