@@ ... @@
 long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
 {
 	struct drm_gpu_scheduler *sched;
+	struct task_struct *last_user;
 	long ret = timeout;

 	sched = entity->rq->sched;
@@ ... @@
 	/* For killed process disable any more IBs enqueue right now */
-	if ((current->flags & PF_EXITING) && (current->exit_code == SIGKILL))
+	last_user = cmpxchg(&entity->last_user, current->group_leader, NULL);
+	if ((!last_user || last_user == current->group_leader) &&
+	    (current->flags & PF_EXITING) && (current->exit_code == SIGKILL))
 		drm_sched_entity_set_rq(entity, NULL);

 	return ret;
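
To make the new check concrete, here is a minimal userspace model of the three cases the flush path distinguishes. entity_model and flush_may_kill() are illustrative stand-ins, not scheduler API, and C11 atomic_compare_exchange_strong() stands in for the kernel's cmpxchg(); in both, the comparison logic carries over because the old value of last_user is available after the call.

/*
 * Minimal userspace model of the new check in drm_sched_entity_flush().
 * entity_model and flush_may_kill() are illustrative stand-ins, not
 * scheduler API; C11 atomics stand in for the kernel's cmpxchg().
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct entity_model {
	_Atomic(void *) last_user;	/* models entity->last_user */
};

static bool flush_may_kill(struct entity_model *e, void *group_leader)
{
	void *old = group_leader;

	/* kernel: last_user = cmpxchg(&entity->last_user, leader, NULL);
	 * on both success and failure, 'old' ends up holding the value
	 * last_user had before the call, like cmpxchg()'s return value. */
	atomic_compare_exchange_strong(&e->last_user, &old, NULL);

	return !old || old == group_leader;
}

int main(void)
{
	struct entity_model e = { .last_user = NULL };
	int me, other;			/* addresses model group leaders */

	/* 1: nobody ever pushed a job -> flush may kill the entity */
	printf("idle entity: %d\n", flush_may_kill(&e, &me));

	/* 2: our own process pushed last -> flush may kill it too */
	atomic_store(&e.last_user, &me);	/* models WRITE_ONCE() on push */
	printf("own job:     %d\n", flush_may_kill(&e, &me));

	/* 3: another process pushed last -> entity must stay alive */
	atomic_store(&e.last_user, &other);
	printf("foreign job: %d\n", flush_may_kill(&e, &me));
	return 0;
}

Only the first two cases allow the teardown: a foreign last_user means another process pushed a job after us and still needs the entity, so the SIGKILL'd process leaves it alone.
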
@@ ... @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
 	trace_drm_sched_job(sched_job, entity);

+	WRITE_ONCE(entity->last_user, current->group_leader);
 	first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);

 	/* first job wakes up scheduler */
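
This store pairs with the cmpxchg in drm_sched_entity_flush(): push publishes the pusher's group leader, flush tries to claim it back. A small pthread sketch of that handoff follows; pusher(), flusher() and entity_alive are stand-ins, not scheduler code (build with -pthread).

/*
 * Two-thread sketch of the publish/claim handoff; pusher(), flusher()
 * and entity_alive are stand-ins, not scheduler code.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static _Atomic(void *) last_user;	/* models entity->last_user */
static _Atomic int entity_alive = 1;	/* models entity->rq != NULL */

static void *pusher(void *self)
{
	/* models WRITE_ONCE(entity->last_user, current->group_leader)
	 * right before spsc_queue_push() */
	atomic_store(&last_user, self);
	return NULL;
}

static void *flusher(void *self)
{
	void *old = self;

	atomic_compare_exchange_strong(&last_user, &old, NULL);
	if (!old || old == self)		/* nobody else pushed since */
		atomic_store(&entity_alive, 0);	/* set_rq(entity, NULL) */
	return NULL;
}

int main(void)
{
	pthread_t push_thread, flush_thread;
	int p, f;		/* addresses model two group leaders */

	pthread_create(&push_thread, NULL, pusher, &p);
	pthread_create(&flush_thread, NULL, flusher, &f);
	pthread_join(push_thread, NULL);
	pthread_join(flush_thread, NULL);

	/* 1 if the pusher published before the flusher's claim, else 0 */
	printf("entity alive: %d\n", atomic_load(&entity_alive));
	return 0;
}

Whichever thread gets to last_user second decides the outcome: if the pusher published first, the flusher's claim sees a foreign pointer and leaves the entity alive for that process.
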
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ ... @@
  * @guilty: points to ctx's guilty.
  * @fini_status: contains the exit status in case the process was signalled.
  * @last_scheduled: points to the finished fence of the last scheduled job.
+ * @last_user: last group leader pushing a job into the entity.
  *
  * Entities will emit jobs in order to their corresponding hardware
  * ring, and the scheduler will alternate between entities based on
@@ ... @@ struct drm_sched_entity {
 	struct dma_fence_cb		cb;
 	atomic_t			*guilty;
 	struct dma_fence		*last_scheduled;
+	struct task_struct		*last_user;
 };

 /**
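
One more note on the header change: last_user stores current->group_leader rather than current, so all threads of a process count as a single user of the entity, and an exiting thread group only tears the entity down when no other process pushed work in the meantime. In the hunks above the pointer is only ever published and compared (WRITE_ONCE()/cmpxchg()), never dereferenced, which appears to be why no reference on the task is taken here.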