/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

/**
 * DOC: Overview
 *
 * The GPU scheduler provides entities which allow userspace to push jobs
 * into software queues which are then scheduled on a hardware run queue.
 * The software queues have a priority among them. The scheduler selects
 * entities from the run queue in FIFO order. The scheduler also provides
 * dependency handling between jobs. The driver is expected to provide
 * callback functions for backend operations, such as submitting a job to the
 * hardware run queue, returning the dependencies of a job, etc.
 *
 * The organisation of the scheduler is the following:
 *
 * 1. Each hw run queue has one scheduler
 * 2. Each scheduler has multiple run queues with different priorities
 *    (e.g., HIGH_HW, HIGH_SW, KERNEL, NORMAL)
 * 3. Each scheduler run queue has a queue of entities to schedule
 * 4. Entities themselves maintain a queue of jobs that will be scheduled on
 *    the hardware.
 *
 * The jobs in an entity are always scheduled in the order in which they were
 * pushed.
 */

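/*
 * Illustrative submission flow (not part of this file): a rough sketch of how
 * a driver typically feeds jobs to the scheduler. Everything prefixed with
 * "my_" is a hypothetical driver-side name; only the drm_sched_* calls are
 * real and are defined in this file or in the entity code.
 *
 *	struct my_job {
 *		struct drm_sched_job base;
 *		// driver specific payload ...
 *	};
 *
 *	int my_submit(struct my_job *job, struct drm_sched_entity *entity,
 *		      void *owner)
 *	{
 *		int r;
 *
 *		// Bind the job to an entity (and thus to a run queue).
 *		r = drm_sched_job_init(&job->base, entity, owner);
 *		if (r)
 *			return r;
 *
 *		// Hand the job to the scheduler; it is later picked from the
 *		// entity's queue and run_job() is called for it.
 *		drm_sched_entity_push_job(&job->base, entity);
 *		return 0;
 *	}
 */
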
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <uapi/linux/sched/types.h>

#include <drm/gpu_scheduler.h>
#include <drm/spsc_queue.h>

#define CREATE_TRACE_POINTS
#include "gpu_scheduler_trace.h"

#define to_drm_sched_job(sched_job)		\
		container_of((sched_job), struct drm_sched_job, queue_node)

static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb);

/**
 * drm_sched_rq_init - initialize a given run queue struct
 *
 * @sched: scheduler instance the run queue belongs to
 * @rq: scheduler run queue
 *
 * Initializes a scheduler runqueue.
 */
static void drm_sched_rq_init(struct drm_gpu_scheduler *sched,
			      struct drm_sched_rq *rq)
{
	spin_lock_init(&rq->lock);
	INIT_LIST_HEAD(&rq->entities);
	rq->current_entity = NULL;
	rq->sched = sched;
}

/**
 * drm_sched_rq_add_entity - add an entity
 *
 * @rq: scheduler run queue
 * @entity: scheduler entity
 *
 * Adds a scheduler entity to the run queue.
 */
void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
			     struct drm_sched_entity *entity)
{
	if (!list_empty(&entity->list))
		return;

	spin_lock(&rq->lock);
	list_add_tail(&entity->list, &rq->entities);
	spin_unlock(&rq->lock);
}

/**
 * drm_sched_rq_remove_entity - remove an entity
 *
 * @rq: scheduler run queue
 * @entity: scheduler entity
 *
 * Removes a scheduler entity from the run queue.
 */
void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
				struct drm_sched_entity *entity)
{
	if (list_empty(&entity->list))
		return;

	spin_lock(&rq->lock);
	list_del_init(&entity->list);
	if (rq->current_entity == entity)
		rq->current_entity = NULL;
	spin_unlock(&rq->lock);
}

/**
 * drm_sched_rq_select_entity - Select an entity which could provide a job to run
 *
 * @rq: scheduler run queue to check.
 *
 * Try to find a ready entity, returns NULL if none found.
 */
static struct drm_sched_entity *
drm_sched_rq_select_entity(struct drm_sched_rq *rq)
{
	struct drm_sched_entity *entity;

	spin_lock(&rq->lock);

	/* Continue after the entity picked last time, for round robin. */
	entity = rq->current_entity;
	if (entity) {
		list_for_each_entry_continue(entity, &rq->entities, list) {
			if (drm_sched_entity_is_ready(entity)) {
				rq->current_entity = entity;
				spin_unlock(&rq->lock);
				return entity;
			}
		}
	}

	/* Wrap around and scan the remaining entities. */
	list_for_each_entry(entity, &rq->entities, list) {
		if (drm_sched_entity_is_ready(entity)) {
			rq->current_entity = entity;
			spin_unlock(&rq->lock);
			return entity;
		}

		if (entity == rq->current_entity)
			break;
	}

	spin_unlock(&rq->lock);

	return NULL;
}

/**
 * drm_sched_dependency_optimized - check whether a dependency can be optimized
 *
 * @fence: the dependency fence
 * @entity: the entity which depends on the above fence
 *
 * Returns true if the dependency can be optimized and false otherwise.
 */
bool drm_sched_dependency_optimized(struct dma_fence *fence,
				    struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched = entity->rq->sched;
	struct drm_sched_fence *s_fence;

	if (!fence || dma_fence_is_signaled(fence))
		return false;
	if (fence->context == entity->fence_context)
		return true;
	s_fence = to_drm_sched_fence(fence);
	if (s_fence && s_fence->sched == sched)
		return true;

	return false;
}
EXPORT_SYMBOL(drm_sched_dependency_optimized);

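/*
 * Illustrative use (not part of this file): a driver's dependency handling can
 * skip an explicit synchronization step when the dependency is already ordered
 * by this scheduler. The surrounding context is hypothetical; only
 * drm_sched_dependency_optimized() is real.
 *
 *	// In a hypothetical driver dependency walk:
 *	if (drm_sched_dependency_optimized(fence, entity)) {
 *		// Same scheduler: jobs are already serialized, so no extra
 *		// synchronization is needed for this fence.
 *	} else {
 *		// Cross-ring dependency: insert an explicit wait/sync here.
 *	}
 */
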
/**
 * drm_sched_start_timeout - start timeout for reset worker
 *
 * @sched: scheduler instance to start the worker for
 *
 * Start the timeout for the given scheduler.
 */
static void drm_sched_start_timeout(struct drm_gpu_scheduler *sched)
{
	if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
	    !list_empty(&sched->ring_mirror_list))
		schedule_delayed_work(&sched->work_tdr, sched->timeout);
}

/**
 * drm_sched_fault - immediately start timeout handler
 *
 * @sched: scheduler where the timeout handling should be started.
 *
 * Start timeout handling immediately when the driver detects a hardware fault.
 */
void drm_sched_fault(struct drm_gpu_scheduler *sched)
{
	mod_delayed_work(system_wq, &sched->work_tdr, 0);
}
EXPORT_SYMBOL(drm_sched_fault);

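/*
 * Illustrative use (not part of this file): a driver can call drm_sched_fault()
 * from its fault/error interrupt handler so that timeout handling does not have
 * to wait for the full timeout period. The handler and device structure below
 * are hypothetical; only drm_sched_fault() is real.
 *
 *	static irqreturn_t my_fault_irq_handler(int irq, void *arg)
 *	{
 *		struct my_device *mydev = arg;
 *
 *		// Kick the TDR work immediately instead of waiting for the
 *		// scheduler timeout to expire.
 *		drm_sched_fault(&mydev->sched);
 *		return IRQ_HANDLED;
 *	}
 */
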
/**
 * drm_sched_suspend_timeout - Suspend scheduler job timeout
 *
 * @sched: scheduler instance for which to suspend the timeout
 *
 * Suspend the delayed work timeout for the scheduler. This is done by
 * modifying the delayed work timeout to an arbitrary large value,
 * MAX_SCHEDULE_TIMEOUT in this case. Note that this function can be
 * called from an IRQ context.
 *
 * Returns the timeout remaining.
 */
unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched)
{
	unsigned long sched_timeout, now = jiffies;

	sched_timeout = sched->work_tdr.timer.expires;

	/*
	 * Modify the timeout to an arbitrarily large value. This also prevents
	 * the timeout from being restarted when new submissions arrive.
	 */
	if (mod_delayed_work(system_wq, &sched->work_tdr, MAX_SCHEDULE_TIMEOUT)
			&& time_after(sched_timeout, now))
		return sched_timeout - now;
	else
		return sched->timeout;
}
EXPORT_SYMBOL(drm_sched_suspend_timeout);

/**
 * drm_sched_resume_timeout - Resume scheduler job timeout
 *
 * @sched: scheduler instance for which to resume the timeout
 * @remaining: remaining timeout
 *
 * Resume the delayed work timeout for the scheduler. Note that
 * this function can be called from an IRQ context.
 */
void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
		unsigned long remaining)
{
	unsigned long flags;

	spin_lock_irqsave(&sched->job_list_lock, flags);

	if (list_empty(&sched->ring_mirror_list))
		cancel_delayed_work(&sched->work_tdr);
	else
		mod_delayed_work(system_wq, &sched->work_tdr, remaining);

	spin_unlock_irqrestore(&sched->job_list_lock, flags);
}
EXPORT_SYMBOL(drm_sched_resume_timeout);

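/*
 * Illustrative use (not part of this file): the suspend/resume pair brackets a
 * driver operation that must not count against the job timeout, for example
 * while firmware is temporarily halted. Names other than the
 * drm_sched_*_timeout() calls are hypothetical.
 *
 *	unsigned long remaining;
 *
 *	// Stop the clock on the currently running job ...
 *	remaining = drm_sched_suspend_timeout(&mydev->sched);
 *
 *	my_device_do_slow_maintenance(mydev);
 *
 *	// ... and give it back whatever time it had left.
 *	drm_sched_resume_timeout(&mydev->sched, remaining);
 */
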
static void drm_sched_job_begin(struct drm_sched_job *s_job)
{
	struct drm_gpu_scheduler *sched = s_job->sched;
	unsigned long flags;

	spin_lock_irqsave(&sched->job_list_lock, flags);
	list_add_tail(&s_job->node, &sched->ring_mirror_list);
	drm_sched_start_timeout(sched);
	spin_unlock_irqrestore(&sched->job_list_lock, flags);
}

static void drm_sched_job_timedout(struct work_struct *work)
{
	struct drm_gpu_scheduler *sched;
	struct drm_sched_job *job;
	unsigned long flags;

	sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work);
	job = list_first_entry_or_null(&sched->ring_mirror_list,
				       struct drm_sched_job, node);

	if (job)
		job->sched->ops->timedout_job(job);

	/*
	 * Guilty job did complete and hence needs to be manually removed.
	 * See drm_sched_stop() doc.
	 */
	if (sched->free_guilty) {
		job->sched->ops->free_job(job);
		sched->free_guilty = false;
	}

	spin_lock_irqsave(&sched->job_list_lock, flags);
	drm_sched_start_timeout(sched);
	spin_unlock_irqrestore(&sched->job_list_lock, flags);
}

/**
 * drm_sched_increase_karma - Update sched_entity guilty flag
 *
 * @bad: The job guilty of time out
 *
 * Increment on every hang caused by the 'bad' job. If this exceeds the hang
 * limit of the scheduler then the respective sched entity is marked guilty and
 * jobs from it will not be scheduled further.
 */
void drm_sched_increase_karma(struct drm_sched_job *bad)
{
	int i;
	struct drm_sched_entity *tmp;
	struct drm_sched_entity *entity;
	struct drm_gpu_scheduler *sched = bad->sched;

	/* don't increase @bad's karma if it's from the KERNEL RQ,
	 * because a GPU hang can corrupt kernel jobs (like VM updating jobs),
	 * but keep in mind that kernel jobs are always considered good.
	 */
	if (bad->s_priority != DRM_SCHED_PRIORITY_KERNEL) {
		atomic_inc(&bad->karma);
		for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_KERNEL;
		     i++) {
			struct drm_sched_rq *rq = &sched->sched_rq[i];

			spin_lock(&rq->lock);
			list_for_each_entry_safe(entity, tmp, &rq->entities, list) {
				if (bad->s_fence->scheduled.context ==
				    entity->fence_context) {
					if (atomic_read(&bad->karma) >
					    bad->sched->hang_limit)
						if (entity->guilty)
							atomic_set(entity->guilty, 1);
					break;
				}
			}
			spin_unlock(&rq->lock);
			if (&entity->list != &rq->entities)
				break;
		}
	}
}
EXPORT_SYMBOL(drm_sched_increase_karma);

/**
 * drm_sched_stop - stop the scheduler
 *
 * @sched: scheduler instance
 * @bad: bad scheduler job
 *
 * Stop the scheduler and also remove and free all completed jobs.
 * Note: the bad job will not be freed as it might be used later, so it is the
 * caller's responsibility to release it manually if it is no longer part of
 * the mirror list.
 */
void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
{
	struct drm_sched_job *s_job, *tmp;
	unsigned long flags;

	kthread_park(sched->thread);

	/*
	 * Iterate the job list from the newest to the oldest entry and either
	 * deactivate their HW callbacks or remove them from the mirror list if
	 * they have already signaled.
	 * This iteration is thread safe as the sched thread is stopped.
	 */
	list_for_each_entry_safe_reverse(s_job, tmp, &sched->ring_mirror_list, node) {
		if (s_job->s_fence->parent &&
		    dma_fence_remove_callback(s_job->s_fence->parent,
					      &s_job->cb)) {
			atomic_dec(&sched->hw_rq_count);
		} else {
			/*
			 * remove job from ring_mirror_list.
			 * Locking here is for concurrent resume timeout
			 */
			spin_lock_irqsave(&sched->job_list_lock, flags);
			list_del_init(&s_job->node);
			spin_unlock_irqrestore(&sched->job_list_lock, flags);

			/*
			 * Wait for job's HW fence callback to finish using s_job
			 * before releasing it.
			 *
			 * Job is still alive so fence refcount at least 1
			 */
			dma_fence_wait(&s_job->s_fence->finished, false);

			/*
			 * We must keep bad job alive for later use during
			 * recovery by some of the drivers but leave a hint
			 * that the guilty job must be released.
			 */
			if (bad != s_job)
				sched->ops->free_job(s_job);
			else
				sched->free_guilty = true;
		}
	}

	/*
	 * Stop pending timer in flight as we rearm it in drm_sched_start. This
	 * avoids the pending timeout work in progress from firing right after
	 * this TDR finished and before the newly restarted jobs had a
	 * chance to complete.
	 */
	cancel_delayed_work(&sched->work_tdr);
}
EXPORT_SYMBOL(drm_sched_stop);

/**
 * drm_sched_start - recover jobs after a reset
 *
 * @sched: scheduler instance
 * @full_recovery: proceed with complete sched restart
 */
void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
{
	struct drm_sched_job *s_job, *tmp;
	unsigned long flags;
	int r;

	/*
	 * Locking the list is not required here as the sched thread is parked
	 * so no new jobs are being inserted or removed. Also concurrent
	 * GPU recovers can't run in parallel.
	 */
	list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
		struct dma_fence *fence = s_job->s_fence->parent;

		atomic_inc(&sched->hw_rq_count);

		if (!full_recovery)
			continue;

		if (fence) {
			r = dma_fence_add_callback(fence, &s_job->cb,
						   drm_sched_process_job);
			if (r == -ENOENT)
				drm_sched_process_job(fence, &s_job->cb);
			else if (r)
				DRM_ERROR("fence add callback failed (%d)\n",
					  r);
		} else
			drm_sched_process_job(NULL, &s_job->cb);
	}

	if (full_recovery) {
		spin_lock_irqsave(&sched->job_list_lock, flags);
		drm_sched_start_timeout(sched);
		spin_unlock_irqrestore(&sched->job_list_lock, flags);
	}

	kthread_unpark(sched->thread);
}
EXPORT_SYMBOL(drm_sched_start);

/**
 * drm_sched_resubmit_jobs - helper to relaunch jobs from the mirror list
 *
 * @sched: scheduler instance
 */
void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_job *s_job, *tmp;
	uint64_t guilty_context;
	bool found_guilty = false;

	list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
		struct drm_sched_fence *s_fence = s_job->s_fence;

		if (!found_guilty && atomic_read(&s_job->karma) > sched->hang_limit) {
			found_guilty = true;
			guilty_context = s_job->s_fence->scheduled.context;
		}

		if (found_guilty && s_job->s_fence->scheduled.context == guilty_context)
			dma_fence_set_error(&s_fence->finished, -ECANCELED);

		dma_fence_put(s_job->s_fence->parent);
		s_job->s_fence->parent = sched->ops->run_job(s_job);
	}
}
EXPORT_SYMBOL(drm_sched_resubmit_jobs);

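/*
 * Illustrative recovery sequence (not part of this file): a driver's timeout
 * handler typically combines the helpers above. This is only a sketch of the
 * common pattern; the device structure and reset helper are hypothetical,
 * while the drm_sched_* calls are the ones defined in this file.
 *
 *	static void my_gpu_recover(struct my_device *mydev,
 *				   struct drm_sched_job *bad)
 *	{
 *		// Park the scheduler thread and prune completed jobs.
 *		drm_sched_stop(&mydev->sched, bad);
 *
 *		// Mark the offending context guilty if it hangs repeatedly.
 *		drm_sched_increase_karma(bad);
 *
 *		my_hw_reset(mydev);
 *
 *		// Re-run everything still on the mirror list ...
 *		drm_sched_resubmit_jobs(&mydev->sched);
 *
 *		// ... and unpark the scheduler with full recovery.
 *		drm_sched_start(&mydev->sched, true);
 *	}
 */
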
/**
 * drm_sched_job_init - init a scheduler job
 *
 * @job: scheduler job to init
 * @entity: scheduler entity to use
 * @owner: job owner for debugging
 *
 * Refer to drm_sched_entity_push_job() documentation
 * for locking considerations.
 *
 * Returns 0 for success, negative error code otherwise.
 */
int drm_sched_job_init(struct drm_sched_job *job,
		       struct drm_sched_entity *entity,
		       void *owner)
{
	struct drm_gpu_scheduler *sched;

	drm_sched_entity_select_rq(entity);
	if (!entity->rq)
		return -ENOENT;

	sched = entity->rq->sched;

	job->sched = sched;
	job->entity = entity;
	job->s_priority = entity->rq - sched->sched_rq;
	job->s_fence = drm_sched_fence_create(entity, owner);
	if (!job->s_fence)
		return -ENOMEM;
	job->id = atomic64_inc_return(&sched->job_id_count);

	INIT_LIST_HEAD(&job->node);

	return 0;
}
EXPORT_SYMBOL(drm_sched_job_init);

/**
 * drm_sched_job_cleanup - clean up scheduler job resources
 *
 * @job: scheduler job to clean up
 */
void drm_sched_job_cleanup(struct drm_sched_job *job)
{
	dma_fence_put(&job->s_fence->finished);
	job->s_fence = NULL;
}
EXPORT_SYMBOL(drm_sched_job_cleanup);

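/*
 * Illustrative error path (not part of this file): after a successful
 * drm_sched_job_init(), a driver that fails before pushing the job should call
 * drm_sched_job_cleanup() to drop the scheduler fence again. Names other than
 * the drm_sched_* calls are hypothetical.
 *
 *	r = drm_sched_job_init(&job->base, entity, owner);
 *	if (r)
 *		return r;
 *
 *	r = my_driver_prepare_job(job);
 *	if (r) {
 *		// Never pushed: release the s_fence taken by job_init().
 *		drm_sched_job_cleanup(&job->base);
 *		return r;
 *	}
 *
 *	drm_sched_entity_push_job(&job->base, entity);
 */
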
/**
 * drm_sched_ready - is the scheduler ready
 *
 * @sched: scheduler instance
 *
 * Return true if we can push more jobs to the hw, otherwise false.
 */
static bool drm_sched_ready(struct drm_gpu_scheduler *sched)
{
	return atomic_read(&sched->hw_rq_count) <
		sched->hw_submission_limit;
}

/**
 * drm_sched_wakeup - Wake up the scheduler when it is ready
 *
 * @sched: scheduler instance
 */
void drm_sched_wakeup(struct drm_gpu_scheduler *sched)
{
	if (drm_sched_ready(sched))
		wake_up_interruptible(&sched->wake_up_worker);
}

/**
 * drm_sched_select_entity - Select next entity to process
 *
 * @sched: scheduler instance
 *
 * Returns the entity to process or NULL if none are found.
 */
static struct drm_sched_entity *
drm_sched_select_entity(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_entity *entity;
	int i;

	if (!drm_sched_ready(sched))
		return NULL;

	/* Kernel run queue has higher priority than normal run queue */
	for (i = DRM_SCHED_PRIORITY_MAX - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
		entity = drm_sched_rq_select_entity(&sched->sched_rq[i]);
		if (entity)
			break;
	}

	return entity;
}

/**
 * drm_sched_process_job - process a job
 *
 * @f: fence
 * @cb: fence callbacks
 *
 * Called after job has finished execution.
 */
static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
{
	struct drm_sched_job *s_job = container_of(cb, struct drm_sched_job, cb);
	struct drm_sched_fence *s_fence = s_job->s_fence;
	struct drm_gpu_scheduler *sched = s_fence->sched;

	atomic_dec(&sched->hw_rq_count);
	atomic_dec(&sched->num_jobs);

	trace_drm_sched_process_job(s_fence);

	drm_sched_fence_finished(s_fence);
	wake_up_interruptible(&sched->wake_up_worker);
}

/**
 * drm_sched_cleanup_jobs - destroy finished jobs
 *
 * @sched: scheduler instance
 *
 * Remove all finished jobs from the mirror list and destroy them.
 */
static void drm_sched_cleanup_jobs(struct drm_gpu_scheduler *sched)
{
	unsigned long flags;

	/* Don't destroy jobs while the timeout worker is running */
	if (!cancel_delayed_work(&sched->work_tdr))
		return;

	while (!list_empty(&sched->ring_mirror_list)) {
		struct drm_sched_job *job;

		job = list_first_entry(&sched->ring_mirror_list,
				       struct drm_sched_job, node);
		if (!dma_fence_is_signaled(&job->s_fence->finished))
			break;

		spin_lock_irqsave(&sched->job_list_lock, flags);
		/* remove job from ring_mirror_list */
		list_del_init(&job->node);
		spin_unlock_irqrestore(&sched->job_list_lock, flags);

		sched->ops->free_job(job);
	}

	/* queue timeout for next job */
	spin_lock_irqsave(&sched->job_list_lock, flags);
	drm_sched_start_timeout(sched);
	spin_unlock_irqrestore(&sched->job_list_lock, flags);
}

/**
 * drm_sched_blocked - check if the scheduler is blocked
 *
 * @sched: scheduler instance
 *
 * Returns true if blocked, otherwise false.
 */
static bool drm_sched_blocked(struct drm_gpu_scheduler *sched)
{
	if (kthread_should_park()) {
		kthread_parkme();
		return true;
	}

	return false;
}

/**
 * drm_sched_main - main scheduler thread
 *
 * @param: scheduler instance
 *
 * Returns 0.
 */
static int drm_sched_main(void *param)
{
	struct sched_param sparam = {.sched_priority = 1};
	struct drm_gpu_scheduler *sched = (struct drm_gpu_scheduler *)param;
	int r;

	sched_setscheduler(current, SCHED_FIFO, &sparam);

	while (!kthread_should_stop()) {
		struct drm_sched_entity *entity = NULL;
		struct drm_sched_fence *s_fence;
		struct drm_sched_job *sched_job;
		struct dma_fence *fence;

		wait_event_interruptible(sched->wake_up_worker,
					 (drm_sched_cleanup_jobs(sched),
					 (!drm_sched_blocked(sched) &&
					  (entity = drm_sched_select_entity(sched))) ||
					 kthread_should_stop()));

		if (!entity)
			continue;

		sched_job = drm_sched_entity_pop_job(entity);
		if (!sched_job)
			continue;

		s_fence = sched_job->s_fence;

		atomic_inc(&sched->hw_rq_count);
		drm_sched_job_begin(sched_job);

		fence = sched->ops->run_job(sched_job);
		drm_sched_fence_scheduled(s_fence);

		if (fence) {
			s_fence->parent = dma_fence_get(fence);
			r = dma_fence_add_callback(fence, &sched_job->cb,
						   drm_sched_process_job);
			if (r == -ENOENT)
				drm_sched_process_job(fence, &sched_job->cb);
			else if (r)
				DRM_ERROR("fence add callback failed (%d)\n",
					  r);
			dma_fence_put(fence);
		} else
			drm_sched_process_job(NULL, &sched_job->cb);

		wake_up(&sched->job_scheduled);
	}
	return 0;
}

/**
 * drm_sched_init - Init a gpu scheduler instance
 *
 * @sched: scheduler instance
 * @ops: backend operations for this scheduler
 * @hw_submission: number of hw submissions that can be in flight
 * @hang_limit: number of times to allow a job to hang before dropping it
 * @timeout: timeout value in jiffies for the scheduler
 * @name: name used for debugging
 *
 * Return 0 on success, otherwise error code.
 */
int drm_sched_init(struct drm_gpu_scheduler *sched,
		   const struct drm_sched_backend_ops *ops,
		   unsigned hw_submission,
		   unsigned hang_limit,
		   long timeout,
		   const char *name)
{
	int i, ret;

	sched->ops = ops;
	sched->hw_submission_limit = hw_submission;
	sched->name = name;
	sched->timeout = timeout;
	sched->hang_limit = hang_limit;
	for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_MAX; i++)
		drm_sched_rq_init(sched, &sched->sched_rq[i]);

	init_waitqueue_head(&sched->wake_up_worker);
	init_waitqueue_head(&sched->job_scheduled);
	INIT_LIST_HEAD(&sched->ring_mirror_list);
	spin_lock_init(&sched->job_list_lock);
	atomic_set(&sched->hw_rq_count, 0);
	INIT_DELAYED_WORK(&sched->work_tdr, drm_sched_job_timedout);
	atomic_set(&sched->num_jobs, 0);
	atomic64_set(&sched->job_id_count, 0);

	/* Each scheduler will run on a separate kernel thread */
	sched->thread = kthread_run(drm_sched_main, sched, sched->name);
	if (IS_ERR(sched->thread)) {
		ret = PTR_ERR(sched->thread);
		sched->thread = NULL;
		DRM_ERROR("Failed to create scheduler for %s.\n", name);
		return ret;
	}

	sched->ready = true;
	return 0;
}
EXPORT_SYMBOL(drm_sched_init);

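/*
 * Illustrative setup (not part of this file): what a driver's backend wiring
 * typically looks like. The my_* callbacks and the ring structure are
 * hypothetical; the drm_sched_backend_ops members and the drm_sched_init()
 * call follow the interface used by this file.
 *
 *	static const struct drm_sched_backend_ops my_sched_ops = {
 *		.dependency = my_job_dependency,	// optional dependency fences
 *		.run_job = my_job_run,			// push job to the hardware
 *		.timedout_job = my_job_timedout,	// TDR entry point
 *		.free_job = my_job_free,		// release job resources
 *	};
 *
 *	int my_ring_init(struct my_ring *ring)
 *	{
 *		// One scheduler per hardware run queue: 2 jobs in flight,
 *		// hang_limit of 0, 5 second timeout.
 *		return drm_sched_init(&ring->sched, &my_sched_ops,
 *				      2, 0, msecs_to_jiffies(5000),
 *				      ring->name);
 *	}
 */
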
/**
 * drm_sched_fini - Destroy a gpu scheduler
 *
 * @sched: scheduler instance
 *
 * Tears down and cleans up the scheduler.
 */
void drm_sched_fini(struct drm_gpu_scheduler *sched)
{
	if (sched->thread)
		kthread_stop(sched->thread);

	sched->ready = false;
}
EXPORT_SYMBOL(drm_sched_fini);