1 /*
2  * Copyright © 2008-2015 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  */
24
25 #include <linux/prefetch.h>
26 #include <linux/dma-fence-array.h>
27 #include <linux/sched.h>
28 #include <linux/sched/clock.h>
29 #include <linux/sched/signal.h>
30
31 #include "i915_drv.h"
32
33 static const char *i915_fence_get_driver_name(struct dma_fence *fence)
34 {
35         return "i915";
36 }
37
38 static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
39 {
40         /*
41          * The timeline struct (as part of the ppgtt underneath a context)
42          * may be freed when the request is no longer in use by the GPU.
43          * We could extend the life of a context to beyond that of all
44          * fences, possibly keeping the hw resource around indefinitely,
45          * or we can just give them a false name. Since
46          * dma_fence_ops.get_timeline_name is a debug feature, the occasional
47          * lie seems justifiable.
48          */
49         if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
50                 return "signaled";
51
52         return to_request(fence)->timeline->name;
53 }
54
55 static bool i915_fence_signaled(struct dma_fence *fence)
56 {
57         return i915_request_completed(to_request(fence));
58 }
59
60 static bool i915_fence_enable_signaling(struct dma_fence *fence)
61 {
62         return intel_engine_enable_signaling(to_request(fence), true);
63 }
64
65 static signed long i915_fence_wait(struct dma_fence *fence,
66                                    bool interruptible,
67                                    signed long timeout)
68 {
69         return i915_request_wait(to_request(fence), interruptible, timeout);
70 }
71
72 static void i915_fence_release(struct dma_fence *fence)
73 {
74         struct i915_request *rq = to_request(fence);
75
76         /*
77          * The request is put onto an RCU freelist (i.e. the address
78          * is immediately reused), so mark the fences as being freed now.
79          * Otherwise the debugobjects for the fences are only marked as
80          * freed when the slab cache itself is freed, and so we would get
81          * caught trying to reuse dead objects.
82          */
83         i915_sw_fence_fini(&rq->submit);
84
85         kmem_cache_free(rq->i915->requests, rq);
86 }
87
88 const struct dma_fence_ops i915_fence_ops = {
89         .get_driver_name = i915_fence_get_driver_name,
90         .get_timeline_name = i915_fence_get_timeline_name,
91         .enable_signaling = i915_fence_enable_signaling,
92         .signaled = i915_fence_signaled,
93         .wait = i915_fence_wait,
94         .release = i915_fence_release,
95 };
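/*
 * This vtable is handed to dma_fence_init() in i915_request_alloc() below,
 * so every i915_request is exposed as a struct dma_fence that other
 * drivers and sync_file can wait upon through the common dma-fence API.
 */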
96
97 static inline void
98 i915_request_remove_from_client(struct i915_request *request)
99 {
100         struct drm_i915_file_private *file_priv;
101
102         file_priv = request->file_priv;
103         if (!file_priv)
104                 return;
105
106         spin_lock(&file_priv->mm.lock);
107         if (request->file_priv) {
108                 list_del(&request->client_link);
109                 request->file_priv = NULL;
110         }
111         spin_unlock(&file_priv->mm.lock);
112 }
113
114 static struct i915_dependency *
115 i915_dependency_alloc(struct drm_i915_private *i915)
116 {
117         return kmem_cache_alloc(i915->dependencies, GFP_KERNEL);
118 }
119
120 static void
121 i915_dependency_free(struct drm_i915_private *i915,
122                      struct i915_dependency *dep)
123 {
124         kmem_cache_free(i915->dependencies, dep);
125 }
126
127 static void
128 __i915_sched_node_add_dependency(struct i915_sched_node *node,
129                                  struct i915_sched_node *signal,
130                                  struct i915_dependency *dep,
131                                  unsigned long flags)
132 {
133         INIT_LIST_HEAD(&dep->dfs_link);
134         list_add(&dep->wait_link, &signal->waiters_list);
135         list_add(&dep->signal_link, &node->signalers_list);
136         dep->signaler = signal;
137         dep->flags = flags;
138 }
139
140 static int
141 i915_sched_node_add_dependency(struct drm_i915_private *i915,
142                                struct i915_sched_node *node,
143                                struct i915_sched_node *signal)
144 {
145         struct i915_dependency *dep;
146
147         dep = i915_dependency_alloc(i915);
148         if (!dep)
149                 return -ENOMEM;
150
151         __i915_sched_node_add_dependency(node, signal, dep,
152                                          I915_DEPENDENCY_ALLOC);
153         return 0;
154 }
155
156 static void
157 i915_sched_node_fini(struct drm_i915_private *i915,
158                      struct i915_sched_node *node)
159 {
160         struct i915_dependency *dep, *tmp;
161
162         GEM_BUG_ON(!list_empty(&node->link));
163
164         /*
165          * Everyone we depended upon (the fences we wait to be signaled)
166          * should retire before us and remove themselves from our list.
167          * However, retirement is run independently on each timeline and
168          * so we may be called out-of-order.
169          */
170         list_for_each_entry_safe(dep, tmp, &node->signalers_list, signal_link) {
171                 GEM_BUG_ON(!i915_sched_node_signaled(dep->signaler));
172                 GEM_BUG_ON(!list_empty(&dep->dfs_link));
173
174                 list_del(&dep->wait_link);
175                 if (dep->flags & I915_DEPENDENCY_ALLOC)
176                         i915_dependency_free(i915, dep);
177         }
178
179         /* Remove ourselves from everyone who depends upon us */
180         list_for_each_entry_safe(dep, tmp, &node->waiters_list, wait_link) {
181                 GEM_BUG_ON(dep->signaler != node);
182                 GEM_BUG_ON(!list_empty(&dep->dfs_link));
183
184                 list_del(&dep->signal_link);
185                 if (dep->flags & I915_DEPENDENCY_ALLOC)
186                         i915_dependency_free(i915, dep);
187         }
188 }
189
190 static void
191 i915_sched_node_init(struct i915_sched_node *node)
192 {
193         INIT_LIST_HEAD(&node->signalers_list);
194         INIT_LIST_HEAD(&node->waiters_list);
195         INIT_LIST_HEAD(&node->link);
196         node->attr.priority = I915_PRIORITY_INVALID;
197 }
198
199 static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
200 {
201         struct intel_engine_cs *engine;
202         struct i915_timeline *timeline;
203         enum intel_engine_id id;
204         int ret;
205
206         /* Carefully retire all requests without writing to the rings */
207         ret = i915_gem_wait_for_idle(i915,
208                                      I915_WAIT_INTERRUPTIBLE |
209                                      I915_WAIT_LOCKED,
210                                      MAX_SCHEDULE_TIMEOUT);
211         if (ret)
212                 return ret;
213
214         GEM_BUG_ON(i915->gt.active_requests);
215
216         /* If the seqno wraps around, we need to clear the breadcrumb rbtree */
217         for_each_engine(engine, i915, id) {
218                 GEM_TRACE("%s seqno %d (current %d) -> %d\n",
219                           engine->name,
220                           engine->timeline.seqno,
221                           intel_engine_get_seqno(engine),
222                           seqno);
223
224                 if (!i915_seqno_passed(seqno, engine->timeline.seqno)) {
225                         /* Flush any waiters before we reuse the seqno */
226                         intel_engine_disarm_breadcrumbs(engine);
227                         intel_engine_init_hangcheck(engine);
228                         GEM_BUG_ON(!list_empty(&engine->breadcrumbs.signals));
229                 }
230
231                 /* Check we are idle before we fiddle with hw state! */
232                 GEM_BUG_ON(!intel_engine_is_idle(engine));
233                 GEM_BUG_ON(i915_gem_active_isset(&engine->timeline.last_request));
234
235                 /* Finally reset hw state */
236                 intel_engine_init_global_seqno(engine, seqno);
237                 engine->timeline.seqno = seqno;
238         }
239
240         list_for_each_entry(timeline, &i915->gt.timelines, link)
241                 memset(timeline->global_sync, 0, sizeof(timeline->global_sync));
242
243         i915->gt.request_serial = seqno;
244
245         return 0;
246 }
247
248 int i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno)
249 {
250         struct drm_i915_private *i915 = to_i915(dev);
251
252         lockdep_assert_held(&i915->drm.struct_mutex);
253
254         if (seqno == 0)
255                 return -EINVAL;
256
257         /* The HWS seqno must be set to one less than the value we will inject to the ring */
258         return reset_all_global_seqno(i915, seqno - 1);
259 }
260
261 static int reserve_gt(struct drm_i915_private *i915)
262 {
263         int ret;
264
265         /*
266          * Reservation is fine until we may need to wrap around
267          *
268          * By incrementing the serial for every request, we know that no
269          * individual engine may exceed that serial (as each is reset to 0
270          * on any wrap). This protects even the most pessimistic of migrations
271          * of every request from all engines onto just one.
272          */
273         while (unlikely(++i915->gt.request_serial == 0)) {
274                 ret = reset_all_global_seqno(i915, 0);
275                 if (ret) {
276                         i915->gt.request_serial--;
277                         return ret;
278                 }
279         }
280
281         if (!i915->gt.active_requests++)
282                 i915_gem_unpark(i915);
283
284         return 0;
285 }
286
287 static void unreserve_gt(struct drm_i915_private *i915)
288 {
289         GEM_BUG_ON(!i915->gt.active_requests);
290         if (!--i915->gt.active_requests)
291                 i915_gem_park(i915);
292 }
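/*
 * reserve_gt() and unreserve_gt() bracket the lifetime of every request:
 * the reservation is taken in i915_request_alloc() and dropped either on
 * retirement or on the allocation error path. The first reservation
 * unparks the GT and the last release parks it again.
 */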
293
294 void i915_gem_retire_noop(struct i915_gem_active *active,
295                           struct i915_request *request)
296 {
297         /* Space left intentionally blank */
298 }
299
300 static void advance_ring(struct i915_request *request)
301 {
302         struct intel_ring *ring = request->ring;
303         unsigned int tail;
304
305         /*
306          * We know the GPU must have read the request to have
307          * sent us the seqno + interrupt, so use the position
308          * of the tail of the request to update the last known position
309          * of the GPU head.
310          *
311          * Note this requires that we are always called in request
312          * completion order.
313          */
314         GEM_BUG_ON(!list_is_first(&request->ring_link, &ring->request_list));
315         if (list_is_last(&request->ring_link, &ring->request_list)) {
316                 /*
317                  * We may race here with execlists resubmitting this request
318                  * as we retire it. The resubmission will move the ring->tail
319                  * forwards (to request->wa_tail). We either read the
320                  * current value that was written to hw, or the value that
321                  * is just about to be. Either works, if we miss the last two
322                  * noops - they are safe to be replayed on a reset.
323                  */
324                 GEM_TRACE("marking %s as inactive\n", ring->timeline->name);
325                 tail = READ_ONCE(request->tail);
326                 list_del(&ring->active_link);
327         } else {
328                 tail = request->postfix;
329         }
330         list_del_init(&request->ring_link);
331
332         ring->head = tail;
333 }
334
335 static void free_capture_list(struct i915_request *request)
336 {
337         struct i915_capture_list *capture;
338
339         capture = request->capture_list;
340         while (capture) {
341                 struct i915_capture_list *next = capture->next;
342
343                 kfree(capture);
344                 capture = next;
345         }
346 }
347
348 static void __retire_engine_request(struct intel_engine_cs *engine,
349                                     struct i915_request *rq)
350 {
351         GEM_TRACE("%s(%s) fence %llx:%d, global=%d, current %d\n",
352                   __func__, engine->name,
353                   rq->fence.context, rq->fence.seqno,
354                   rq->global_seqno,
355                   intel_engine_get_seqno(engine));
356
357         GEM_BUG_ON(!i915_request_completed(rq));
358
359         local_irq_disable();
360
361         spin_lock(&engine->timeline.lock);
362         GEM_BUG_ON(!list_is_first(&rq->link, &engine->timeline.requests));
363         list_del_init(&rq->link);
364         spin_unlock(&engine->timeline.lock);
365
366         spin_lock(&rq->lock);
367         if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags))
368                 dma_fence_signal_locked(&rq->fence);
369         if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &rq->fence.flags))
370                 intel_engine_cancel_signaling(rq);
371         if (rq->waitboost) {
372                 GEM_BUG_ON(!atomic_read(&rq->i915->gt_pm.rps.num_waiters));
373                 atomic_dec(&rq->i915->gt_pm.rps.num_waiters);
374         }
375         spin_unlock(&rq->lock);
376
377         local_irq_enable();
378
379         /*
380          * The backing object for the context is done after switching to the
381          * *next* context. Therefore we cannot retire the previous context until
382          * the next context has already started running. However, since we
383          * cannot take the required locks at i915_request_submit() we
384          * defer the unpinning of the active context to now, retirement of
385          * the subsequent request.
386          */
387         if (engine->last_retired_context)
388                 intel_context_unpin(engine->last_retired_context);
389         engine->last_retired_context = rq->hw_context;
390 }
391
392 static void __retire_engine_upto(struct intel_engine_cs *engine,
393                                  struct i915_request *rq)
394 {
395         struct i915_request *tmp;
396
397         if (list_empty(&rq->link))
398                 return;
399
400         do {
401                 tmp = list_first_entry(&engine->timeline.requests,
402                                        typeof(*tmp), link);
403
404                 GEM_BUG_ON(tmp->engine != engine);
405                 __retire_engine_request(engine, tmp);
406         } while (tmp != rq);
407 }
408
409 static void i915_request_retire(struct i915_request *request)
410 {
411         struct i915_gem_active *active, *next;
412
413         GEM_TRACE("%s fence %llx:%d, global=%d, current %d\n",
414                   request->engine->name,
415                   request->fence.context, request->fence.seqno,
416                   request->global_seqno,
417                   intel_engine_get_seqno(request->engine));
418
419         lockdep_assert_held(&request->i915->drm.struct_mutex);
420         GEM_BUG_ON(!i915_sw_fence_signaled(&request->submit));
421         GEM_BUG_ON(!i915_request_completed(request));
422
423         trace_i915_request_retire(request);
424
425         advance_ring(request);
426         free_capture_list(request);
427
428         /*
429          * Walk through the active list, calling retire on each. This allows
430          * objects to track their GPU activity and mark themselves as idle
431          * when their *last* active request is completed (updating state
432          * tracking lists for eviction, active references for GEM, etc).
433          *
434          * As the ->retire() may free the node, we decouple it first and
435          * pass along the auxiliary information (to avoid dereferencing
436          * the node after the callback).
437          */
438         list_for_each_entry_safe(active, next, &request->active_list, link) {
439                 /*
440                  * In microbenchmarks, or when focusing upon time inside the kernel,
441                  * we may spend an inordinate amount of time simply handling
442                  * the retirement of requests and processing their callbacks.
443                  * Of which, this loop itself is particularly hot due to the
444                  * cache misses when jumping around the list of i915_gem_active.
445                  * So we try to keep this loop as streamlined as possible and
446                  * also prefetch the next i915_gem_active to try and hide
447                  * the likely cache miss.
448                  */
449                 prefetchw(next);
450
451                 INIT_LIST_HEAD(&active->link);
452                 RCU_INIT_POINTER(active->request, NULL);
453
454                 active->retire(active, request);
455         }
456
457         i915_request_remove_from_client(request);
458
459         /* Retirement decays the ban score as it is a sign of ctx progress */
460         atomic_dec_if_positive(&request->gem_context->ban_score);
461         intel_context_unpin(request->hw_context);
462
463         __retire_engine_upto(request->engine, request);
464
465         unreserve_gt(request->i915);
466
467         i915_sched_node_fini(request->i915, &request->sched);
468         i915_request_put(request);
469 }
470
471 void i915_request_retire_upto(struct i915_request *rq)
472 {
473         struct intel_ring *ring = rq->ring;
474         struct i915_request *tmp;
475
476         GEM_TRACE("%s fence %llx:%d, global=%d, current %d\n",
477                   rq->engine->name,
478                   rq->fence.context, rq->fence.seqno,
479                   rq->global_seqno,
480                   intel_engine_get_seqno(rq->engine));
481
482         lockdep_assert_held(&rq->i915->drm.struct_mutex);
483         GEM_BUG_ON(!i915_request_completed(rq));
484
485         if (list_empty(&rq->ring_link))
486                 return;
487
488         do {
489                 tmp = list_first_entry(&ring->request_list,
490                                        typeof(*tmp), ring_link);
491
492                 i915_request_retire(tmp);
493         } while (tmp != rq);
494 }
495
496 static u32 timeline_get_seqno(struct i915_timeline *tl)
497 {
498         return ++tl->seqno;
499 }
500
501 static void move_to_timeline(struct i915_request *request,
502                              struct i915_timeline *timeline)
503 {
504         GEM_BUG_ON(request->timeline == &request->engine->timeline);
505         lockdep_assert_held(&request->engine->timeline.lock);
506
507         spin_lock(&request->timeline->lock);
508         list_move_tail(&request->link, &timeline->requests);
509         spin_unlock(&request->timeline->lock);
510 }
511
512 void __i915_request_submit(struct i915_request *request)
513 {
514         struct intel_engine_cs *engine = request->engine;
515         u32 seqno;
516
517         GEM_TRACE("%s fence %llx:%d -> global=%d, current %d\n",
518                   engine->name,
519                   request->fence.context, request->fence.seqno,
520                   engine->timeline.seqno + 1,
521                   intel_engine_get_seqno(engine));
522
523         GEM_BUG_ON(!irqs_disabled());
524         lockdep_assert_held(&engine->timeline.lock);
525
526         GEM_BUG_ON(request->global_seqno);
527
528         seqno = timeline_get_seqno(&engine->timeline);
529         GEM_BUG_ON(!seqno);
530         GEM_BUG_ON(intel_engine_signaled(engine, seqno));
531
532         /* We may be recursing from the signal callback of another i915 fence */
533         spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
534         request->global_seqno = seqno;
535         if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
536                 intel_engine_enable_signaling(request, false);
537         spin_unlock(&request->lock);
538
539         engine->emit_breadcrumb(request,
540                                 request->ring->vaddr + request->postfix);
541
542         /* Transfer from per-context onto the global per-engine timeline */
543         move_to_timeline(request, &engine->timeline);
544
545         trace_i915_request_execute(request);
546
547         wake_up_all(&request->execute);
548 }
549
550 void i915_request_submit(struct i915_request *request)
551 {
552         struct intel_engine_cs *engine = request->engine;
553         unsigned long flags;
554
555         /* Will be called from irq-context when using foreign fences. */
556         spin_lock_irqsave(&engine->timeline.lock, flags);
557
558         __i915_request_submit(request);
559
560         spin_unlock_irqrestore(&engine->timeline.lock, flags);
561 }
562
563 void __i915_request_unsubmit(struct i915_request *request)
564 {
565         struct intel_engine_cs *engine = request->engine;
566
567         GEM_TRACE("%s fence %llx:%d <- global=%d, current %d\n",
568                   engine->name,
569                   request->fence.context, request->fence.seqno,
570                   request->global_seqno,
571                   intel_engine_get_seqno(engine));
572
573         GEM_BUG_ON(!irqs_disabled());
574         lockdep_assert_held(&engine->timeline.lock);
575
576         /*
577          * Only unwind in reverse order, required so that the per-context list
578          * is kept in seqno/ring order.
579          */
580         GEM_BUG_ON(!request->global_seqno);
581         GEM_BUG_ON(request->global_seqno != engine->timeline.seqno);
582         GEM_BUG_ON(intel_engine_has_completed(engine, request->global_seqno));
583         engine->timeline.seqno--;
584
585         /* We may be recursing from the signal callback of another i915 fence */
586         spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
587         request->global_seqno = 0;
588         if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
589                 intel_engine_cancel_signaling(request);
590         spin_unlock(&request->lock);
591
592         /* Transfer back from the global per-engine timeline to per-context */
593         move_to_timeline(request, request->timeline);
594
595         /*
596          * We don't need to wake_up any waiters on request->execute, they
597          * will get woken by any other event or us re-adding this request
598          * to the engine timeline (__i915_request_submit()). The waiters
599          * should be quite adept at finding that the request now has a new
600          * global_seqno compared to the one they went to sleep on.
601          */
602 }
603
604 void i915_request_unsubmit(struct i915_request *request)
605 {
606         struct intel_engine_cs *engine = request->engine;
607         unsigned long flags;
608
609         /* Will be called from irq-context when using foreign fences. */
610         spin_lock_irqsave(&engine->timeline.lock, flags);
611
612         __i915_request_unsubmit(request);
613
614         spin_unlock_irqrestore(&engine->timeline.lock, flags);
615 }
616
617 static int __i915_sw_fence_call
618 submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
619 {
620         struct i915_request *request =
621                 container_of(fence, typeof(*request), submit);
622
623         switch (state) {
624         case FENCE_COMPLETE:
625                 trace_i915_request_submit(request);
626                 /*
627                  * We need to serialize use of the submit_request() callback
628                  * with its hotplugging performed during an emergency
629                  * i915_gem_set_wedged().  We use the RCU mechanism to mark the
630                  * critical section in order to force i915_gem_set_wedged() to
631                  * wait until the submit_request() is completed before
632                  * proceeding.
633                  */
634                 rcu_read_lock();
635                 request->engine->submit_request(request);
636                 rcu_read_unlock();
637                 break;
638
639         case FENCE_FREE:
640                 i915_request_put(request);
641                 break;
642         }
643
644         return NOTIFY_DONE;
645 }
646
647 /**
648  * i915_request_alloc - allocate a request structure
649  *
650  * @engine: engine that we wish to issue the request on.
651  * @ctx: context that the request will be associated with.
652  *
653  * Returns a pointer to the allocated request if successful,
654  * or an error code if not.
655  */
656 struct i915_request *
657 i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
658 {
659         struct drm_i915_private *i915 = engine->i915;
660         struct i915_request *rq;
661         struct intel_context *ce;
662         int ret;
663
664         lockdep_assert_held(&i915->drm.struct_mutex);
665
666         /*
667          * Preempt contexts are reserved for exclusive use to inject a
668          * preemption context switch. They are never to be used for any trivial
669          * request!
670          */
671         GEM_BUG_ON(ctx == i915->preempt_context);
672
673         /*
674          * ABI: Before userspace accesses the GPU (e.g. execbuffer), report
675          * EIO if the GPU is already wedged.
676          */
677         if (i915_terminally_wedged(&i915->gpu_error))
678                 return ERR_PTR(-EIO);
679
680         /*
681          * Pinning the contexts may generate requests in order to acquire
682          * GGTT space, so do this first before we reserve a seqno for
683          * ourselves.
684          */
685         ce = intel_context_pin(ctx, engine);
686         if (IS_ERR(ce))
687                 return ERR_CAST(ce);
688
689         ret = reserve_gt(i915);
690         if (ret)
691                 goto err_unpin;
692
693         ret = intel_ring_wait_for_space(ce->ring, MIN_SPACE_FOR_ADD_REQUEST);
694         if (ret)
695                 goto err_unreserve;
696
697         /* Move our oldest request to the slab-cache (if not in use!) */
698         rq = list_first_entry(&ce->ring->request_list, typeof(*rq), ring_link);
699         if (!list_is_last(&rq->ring_link, &ce->ring->request_list) &&
700             i915_request_completed(rq))
701                 i915_request_retire(rq);
702
703         /*
704          * Beware: Dragons be flying overhead.
705          *
706          * We use RCU to look up requests in flight. The lookups may
707          * race with the request being allocated from the slab freelist.
708          * That is, the request we are writing to here may be in the process
709          * of being read by __i915_gem_active_get_rcu(). As such,
710          * we have to be very careful when overwriting the contents. During
711          * the RCU lookup, we chase the request->engine pointer,
712          * read the request->global_seqno and increment the reference count.
713          *
714          * The reference count is incremented atomically. If it is zero,
715          * the lookup knows the request is unallocated and complete. Otherwise,
716          * it is either still in use, or has been reallocated and reset
717          * with dma_fence_init(). This increment is safe for release as we
718          * check that the request we have a reference to matches the active
719          * request.
720          *
721          * Before we increment the refcount, we chase the request->engine
722          * pointer. We must not call kmem_cache_zalloc() or else we set
723          * that pointer to NULL and cause a crash during the lookup. If
724          * we see the request is completed (based on the value of the
725          * old engine and seqno), the lookup is complete and reports NULL.
726          * If we decide the request is not completed (new engine or seqno),
727          * then we grab a reference and double check that it is still the
728          * active request - which it won't be - and restart the lookup.
729          *
730          * Do not use kmem_cache_zalloc() here!
731          */
732         rq = kmem_cache_alloc(i915->requests,
733                               GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
734         if (unlikely(!rq)) {
735                 /* Ratelimit ourselves to prevent oom from malicious clients */
736                 ret = i915_gem_wait_for_idle(i915,
737                                              I915_WAIT_LOCKED |
738                                              I915_WAIT_INTERRUPTIBLE,
739                                              MAX_SCHEDULE_TIMEOUT);
740                 if (ret)
741                         goto err_unreserve;
742
743                 /*
744                  * We've forced the client to stall and catch up with whatever
745                  * backlog there might have been. As we are assuming that we
746                  * caused the mempressure, now is an opportune time to
747                  * recover as much memory from the request pool as is possible.
748                  * Having already penalized the client to stall, we spend
749                  * a little extra time to re-optimise page allocation.
750                  */
751                 kmem_cache_shrink(i915->requests);
752                 rcu_barrier(); /* Recover the TYPESAFE_BY_RCU pages */
753
754                 rq = kmem_cache_alloc(i915->requests, GFP_KERNEL);
755                 if (!rq) {
756                         ret = -ENOMEM;
757                         goto err_unreserve;
758                 }
759         }
760
761         INIT_LIST_HEAD(&rq->active_list);
762         rq->i915 = i915;
763         rq->engine = engine;
764         rq->gem_context = ctx;
765         rq->hw_context = ce;
766         rq->ring = ce->ring;
767         rq->timeline = ce->ring->timeline;
768         GEM_BUG_ON(rq->timeline == &engine->timeline);
769
770         spin_lock_init(&rq->lock);
771         dma_fence_init(&rq->fence,
772                        &i915_fence_ops,
773                        &rq->lock,
774                        rq->timeline->fence_context,
775                        timeline_get_seqno(rq->timeline));
776
777         /* We bump the ref for the fence chain */
778         i915_sw_fence_init(&i915_request_get(rq)->submit, submit_notify);
779         init_waitqueue_head(&rq->execute);
780
781         i915_sched_node_init(&rq->sched);
782
783         /* No zalloc, must clear what we need by hand */
784         rq->global_seqno = 0;
785         rq->signaling.wait.seqno = 0;
786         rq->file_priv = NULL;
787         rq->batch = NULL;
788         rq->capture_list = NULL;
789         rq->waitboost = false;
790
791         /*
792          * Reserve space in the ring buffer for all the commands required to
793          * eventually emit this request. This is to guarantee that the
794          * i915_request_add() call can't fail. Note that the reserve may need
795          * to be redone if the request is not actually submitted straight
796          * away, e.g. because a GPU scheduler has deferred it.
797          */
798         rq->reserved_space = MIN_SPACE_FOR_ADD_REQUEST;
799         GEM_BUG_ON(rq->reserved_space < engine->emit_breadcrumb_sz);
800
801         /*
802          * Record the position of the start of the request so that
803          * should we detect the updated seqno part-way through the
804          * GPU processing the request, we never over-estimate the
805          * position of the head.
806          */
807         rq->head = rq->ring->emit;
808
809         /* Unconditionally invalidate GPU caches and TLBs. */
810         ret = engine->emit_flush(rq, EMIT_INVALIDATE);
811         if (ret)
812                 goto err_unwind;
813
814         ret = engine->request_alloc(rq);
815         if (ret)
816                 goto err_unwind;
817
818         /* Keep a second pin for the dual retirement along engine and ring */
819         __intel_context_pin(ce);
820
821         rq->infix = rq->ring->emit; /* end of header; start of user payload */
822
823         /* Check that we didn't interrupt ourselves with a new request */
824         GEM_BUG_ON(rq->timeline->seqno != rq->fence.seqno);
825         return rq;
826
827 err_unwind:
828         ce->ring->emit = rq->head;
829
830         /* Make sure we didn't add ourselves to external state before freeing */
831         GEM_BUG_ON(!list_empty(&rq->active_list));
832         GEM_BUG_ON(!list_empty(&rq->sched.signalers_list));
833         GEM_BUG_ON(!list_empty(&rq->sched.waiters_list));
834
835         kmem_cache_free(i915->requests, rq);
836 err_unreserve:
837         unreserve_gt(i915);
838 err_unpin:
839         intel_context_unpin(ce);
840         return ERR_PTR(ret);
841 }
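/*
 * Illustrative usage sketch (an assumption, not lifted from any single
 * caller; real users such as execbuffer also install fence dependencies
 * via the await helpers below before emitting their commands):
 *
 *	rq = i915_request_alloc(engine, ctx);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *
 *	cs = intel_ring_begin(rq, 4);
 *	if (!IS_ERR(cs)) {
 *		... emit up to 4 dwords of commands into cs ...
 *		intel_ring_advance(rq, cs);
 *	}
 *
 *	i915_request_add(rq);
 *
 * i915_request_add() cannot fail, precisely because i915_request_alloc()
 * reserved the breadcrumb space up front.
 */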
842
843 static int
844 i915_request_await_request(struct i915_request *to, struct i915_request *from)
845 {
846         int ret;
847
848         GEM_BUG_ON(to == from);
849         GEM_BUG_ON(to->timeline == from->timeline);
850
851         if (i915_request_completed(from))
852                 return 0;
853
854         if (to->engine->schedule) {
855                 ret = i915_sched_node_add_dependency(to->i915,
856                                                      &to->sched,
857                                                      &from->sched);
858                 if (ret < 0)
859                         return ret;
860         }
861
862         if (to->engine == from->engine) {
863                 ret = i915_sw_fence_await_sw_fence_gfp(&to->submit,
864                                                        &from->submit,
865                                                        I915_FENCE_GFP);
866                 return ret < 0 ? ret : 0;
867         }
868
869         if (to->engine->semaphore.sync_to) {
870                 u32 seqno;
871
872                 GEM_BUG_ON(!from->engine->semaphore.signal);
873
874                 seqno = i915_request_global_seqno(from);
875                 if (!seqno)
876                         goto await_dma_fence;
877
878                 if (seqno <= to->timeline->global_sync[from->engine->id])
879                         return 0;
880
881                 trace_i915_gem_ring_sync_to(to, from);
882                 ret = to->engine->semaphore.sync_to(to, from);
883                 if (ret)
884                         return ret;
885
886                 to->timeline->global_sync[from->engine->id] = seqno;
887                 return 0;
888         }
889
890 await_dma_fence:
891         ret = i915_sw_fence_await_dma_fence(&to->submit,
892                                             &from->fence, 0,
893                                             I915_FENCE_GFP);
894         return ret < 0 ? ret : 0;
895 }
896
897 int
898 i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
899 {
900         struct dma_fence **child = &fence;
901         unsigned int nchild = 1;
902         int ret;
903
904         /*
905          * Note that if the fence-array was created in signal-on-any mode,
906          * we should *not* decompose it into its individual fences. However,
907          * we don't currently store which mode the fence-array is operating
908          * in. Fortunately, the only user of signal-on-any is private to
909          * amdgpu and we should not see any incoming fence-array from
910          * sync-file being in signal-on-any mode.
911          */
912         if (dma_fence_is_array(fence)) {
913                 struct dma_fence_array *array = to_dma_fence_array(fence);
914
915                 child = array->fences;
916                 nchild = array->num_fences;
917                 GEM_BUG_ON(!nchild);
918         }
919
920         do {
921                 fence = *child++;
922                 if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
923                         continue;
924
925                 /*
926                  * Requests on the same timeline are explicitly ordered, along
927                  * with their dependencies, by i915_request_add() which ensures
928                  * that requests are submitted in-order through each ring.
929                  */
930                 if (fence->context == rq->fence.context)
931                         continue;
932
933                 /* Squash repeated waits to the same timelines */
934                 if (fence->context != rq->i915->mm.unordered_timeline &&
935                     i915_timeline_sync_is_later(rq->timeline, fence))
936                         continue;
937
938                 if (dma_fence_is_i915(fence))
939                         ret = i915_request_await_request(rq, to_request(fence));
940                 else
941                         ret = i915_sw_fence_await_dma_fence(&rq->submit, fence,
942                                                             I915_FENCE_TIMEOUT,
943                                                             I915_FENCE_GFP);
944                 if (ret < 0)
945                         return ret;
946
947                 /* Record the latest fence used against each timeline */
948                 if (fence->context != rq->i915->mm.unordered_timeline)
949                         i915_timeline_sync_set(rq->timeline, fence);
950         } while (--nchild);
951
952         return 0;
953 }
954
955 /**
956  * i915_request_await_object - set this request to (async) wait upon a bo
957  * @to: request we are wishing to use
958  * @obj: object which may be in use on another ring.
959  * @write: whether the wait is on behalf of a writer
960  *
961  * This code is meant to abstract object synchronization with the GPU.
962  * Conceptually we serialise writes between engines inside the GPU.
963  * We only allow one engine to write into a buffer at any time, but
964  * multiple readers. To ensure each has a coherent view of memory, we must:
965  *
966  * - If there is an outstanding write request to the object, the new
967  *   request must wait for it to complete (either CPU or in hw, requests
968  *   on the same ring will be naturally ordered).
969  *
970  * - If we are a write request (pending_write_domain is set), the new
971  *   request must wait for outstanding read requests to complete.
972  *
973  * Returns 0 if successful, else propagates up the lower layer error.
974  */
975 int
976 i915_request_await_object(struct i915_request *to,
977                           struct drm_i915_gem_object *obj,
978                           bool write)
979 {
980         struct dma_fence *excl;
981         int ret = 0;
982
983         if (write) {
984                 struct dma_fence **shared;
985                 unsigned int count, i;
986
987                 ret = reservation_object_get_fences_rcu(obj->resv,
988                                                         &excl, &count, &shared);
989                 if (ret)
990                         return ret;
991
992                 for (i = 0; i < count; i++) {
993                         ret = i915_request_await_dma_fence(to, shared[i]);
994                         if (ret)
995                                 break;
996
997                         dma_fence_put(shared[i]);
998                 }
999
1000                 for (; i < count; i++)
1001                         dma_fence_put(shared[i]);
1002                 kfree(shared);
1003         } else {
1004                 excl = reservation_object_get_excl_rcu(obj->resv);
1005         }
1006
1007         if (excl) {
1008                 if (ret == 0)
1009                         ret = i915_request_await_dma_fence(to, excl);
1010
1011                 dma_fence_put(excl);
1012         }
1013
1014         return ret;
1015 }
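/*
 * Illustrative sketch of a caller (an assumption, not copied from the
 * execbuffer path): each object referenced by a batch is awaited before
 * the commands using it are emitted, with @write set when the GPU will
 * write to that object.
 *
 *	err = i915_request_await_object(rq, obj, obj_is_written_by_gpu);
 *	if (err)
 *		goto err_request;
 */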
1016
1017 void i915_request_skip(struct i915_request *rq, int error)
1018 {
1019         void *vaddr = rq->ring->vaddr;
1020         u32 head;
1021
1022         GEM_BUG_ON(!IS_ERR_VALUE((long)error));
1023         dma_fence_set_error(&rq->fence, error);
1024
1025         /*
1026          * As this request likely depends on state from the lost
1027          * context, clear out all the user operations leaving the
1028          * breadcrumb at the end (so we get the fence notifications).
1029          */
1030         head = rq->infix;
1031         if (rq->postfix < head) {
1032                 memset(vaddr + head, 0, rq->ring->size - head);
1033                 head = 0;
1034         }
1035         memset(vaddr + head, 0, rq->postfix - head);
1036 }
1037
1038 /*
1039  * NB: This function is not allowed to fail. Doing so would mean that the
1040  * request is not being tracked for completion but the work itself is
1041  * going to happen on the hardware. This would be a Bad Thing(tm).
1042  */
1043 void i915_request_add(struct i915_request *request)
1044 {
1045         struct intel_engine_cs *engine = request->engine;
1046         struct i915_timeline *timeline = request->timeline;
1047         struct intel_ring *ring = request->ring;
1048         struct i915_request *prev;
1049         u32 *cs;
1050
1051         GEM_TRACE("%s fence %llx:%d\n",
1052                   engine->name, request->fence.context, request->fence.seqno);
1053
1054         lockdep_assert_held(&request->i915->drm.struct_mutex);
1055         trace_i915_request_add(request);
1056
1057         /*
1058          * Make sure that no request gazumped us - if it was allocated after
1059          * our i915_request_alloc() and called __i915_request_add() before
1060          * us, the timeline will hold its seqno which is later than ours.
1061          */
1062         GEM_BUG_ON(timeline->seqno != request->fence.seqno);
1063
1064         /*
1065          * To ensure that this call will not fail, space for its emissions
1066          * should already have been reserved in the ring buffer. Let the ring
1067          * know that it is time to use that space up.
1068          */
1069         request->reserved_space = 0;
1070         engine->emit_flush(request, EMIT_FLUSH);
1071
1072         /*
1073          * Record the position of the start of the breadcrumb so that
1074          * should we detect the updated seqno part-way through the
1075          * GPU processing the request, we never over-estimate the
1076          * position of the ring's HEAD.
1077          */
1078         cs = intel_ring_begin(request, engine->emit_breadcrumb_sz);
1079         GEM_BUG_ON(IS_ERR(cs));
1080         request->postfix = intel_ring_offset(request, cs);
1081
1082         /*
1083          * Seal the request and mark it as pending execution. Note that
1084          * we may inspect this state, without holding any locks, during
1085          * hangcheck. Hence we apply the barrier to ensure that we do not
1086          * see a more recent value in the hws than we are tracking.
1087          */
1088
1089         prev = i915_gem_active_raw(&timeline->last_request,
1090                                    &request->i915->drm.struct_mutex);
1091         if (prev && !i915_request_completed(prev)) {
1092                 i915_sw_fence_await_sw_fence(&request->submit, &prev->submit,
1093                                              &request->submitq);
1094                 if (engine->schedule)
1095                         __i915_sched_node_add_dependency(&request->sched,
1096                                                          &prev->sched,
1097                                                          &request->dep,
1098                                                          0);
1099         }
1100
1101         spin_lock_irq(&timeline->lock);
1102         list_add_tail(&request->link, &timeline->requests);
1103         spin_unlock_irq(&timeline->lock);
1104
1105         GEM_BUG_ON(timeline->seqno != request->fence.seqno);
1106         i915_gem_active_set(&timeline->last_request, request);
1107
1108         list_add_tail(&request->ring_link, &ring->request_list);
1109         if (list_is_first(&request->ring_link, &ring->request_list)) {
1110                 GEM_TRACE("marking %s as active\n", ring->timeline->name);
1111                 list_add(&ring->active_link, &request->i915->gt.active_rings);
1112         }
1113         request->emitted_jiffies = jiffies;
1114
1115         /*
1116          * Let the backend know a new request has arrived that may need
1117          * to adjust the existing execution schedule due to a high priority
1118          * request - i.e. we may want to preempt the current request in order
1119          * to run a high priority dependency chain *before* we can execute this
1120          * request.
1121          *
1122          * This is called before the request is ready to run so that we can
1123          * decide whether to preempt the entire chain, making it ready to
1124          * run at the earliest possible convenience.
1125          */
1126         local_bh_disable();
1127         rcu_read_lock(); /* RCU serialisation for set-wedged protection */
1128         if (engine->schedule)
1129                 engine->schedule(request, &request->gem_context->sched);
1130         rcu_read_unlock();
1131         i915_sw_fence_commit(&request->submit);
1132         local_bh_enable(); /* Kick the execlists tasklet if just scheduled */
1133
1134         /*
1135          * In typical scenarios, we do not expect the previous request on
1136          * the timeline to be still tracked by timeline->last_request if it
1137          * has been completed. If the completed request is still here, that
1138          * implies that request retirement is a long way behind submission,
1139          * suggesting that we haven't been retiring frequently enough from
1140          * the combination of retire-before-alloc, waiters and the background
1141          * retirement worker. So if the last request on this timeline was
1142          * already completed, do a catch up pass, flushing the retirement queue
1143          * up to this client. Since we have now moved the heaviest operations
1144          * during retirement onto secondary workers, such as freeing objects
1145          * or contexts, retiring a bunch of requests is mostly list management
1146          * (and cache misses), and so we should not be overly penalizing this
1147          * client by performing excess work, though we may still performing
1148          * work on behalf of others -- but instead we should benefit from
1149          * improved resource management. (Well, that's the theory at least.)
1150          */
1151         if (prev && i915_request_completed(prev))
1152                 i915_request_retire_upto(prev);
1153 }
1154
1155 static unsigned long local_clock_us(unsigned int *cpu)
1156 {
1157         unsigned long t;
1158
1159         /*
1160          * Cheaply and approximately convert from nanoseconds to microseconds.
1161          * The result and subsequent calculations are also defined in the same
1162          * approximate microseconds units. The principal source of timing
1163          * error here is from the simple truncation.
1164          *
1165          * Note that local_clock() is only defined wrt the current CPU;
1166          * the comparisons are no longer valid if we switch CPUs. Instead of
1167          * blocking preemption for the entire busywait, we can detect the CPU
1168          * switch and use that as indicator of system load and a reason to
1169          * stop busywaiting, see busywait_stop().
1170          */
1171         *cpu = get_cpu();
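        /* ns -> ~us: shift by 10 (divide by 1024) instead of a true divide by 1000 */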
1172         t = local_clock() >> 10;
1173         put_cpu();
1174
1175         return t;
1176 }
1177
1178 static bool busywait_stop(unsigned long timeout, unsigned int cpu)
1179 {
1180         unsigned int this_cpu;
1181
1182         if (time_after(local_clock_us(&this_cpu), timeout))
1183                 return true;
1184
1185         return this_cpu != cpu;
1186 }
1187
1188 static bool __i915_spin_request(const struct i915_request *rq,
1189                                 u32 seqno, int state, unsigned long timeout_us)
1190 {
1191         struct intel_engine_cs *engine = rq->engine;
1192         unsigned int irq, cpu;
1193
1194         GEM_BUG_ON(!seqno);
1195
1196         /*
1197          * Only wait for the request if we know it is likely to complete.
1198          *
1199          * We don't track the timestamps around requests, nor the average
1200          * request length, so we do not have a good indicator that this
1201          * request will complete within the timeout. What we do know is the
1202          * order in which requests are executed by the engine and so we can
1203          * tell if the request has started. If the request hasn't started yet,
1204          * it is a fair assumption that it will not complete within our
1205          * relatively short timeout.
1206          */
1207         if (!intel_engine_has_started(engine, seqno))
1208                 return false;
1209
1210         /*
1211          * When waiting for high frequency requests, e.g. during synchronous
1212          * rendering split between the CPU and GPU, the finite amount of time
1213          * required to set up the irq and wait upon it limits the response
1214          * rate. By busywaiting on the request completion for a short while we
1215          * can service the high frequency waits as quick as possible. However,
1216          * if it is a slow request, we want to sleep as quickly as possible.
1217          * The tradeoff between waiting and sleeping is roughly the time it
1218          * takes to sleep on a request, on the order of a microsecond.
1219          */
1220
1221         irq = READ_ONCE(engine->breadcrumbs.irq_count);
1222         timeout_us += local_clock_us(&cpu);
1223         do {
1224                 if (intel_engine_has_completed(engine, seqno))
1225                         return seqno == i915_request_global_seqno(rq);
1226
1227                 /*
1228          * Seqnos are meant to be ordered *before* the interrupt. If
1229                  * we see an interrupt without a corresponding seqno advance,
1230                  * assume we won't see one in the near future but require
1231                  * the engine->seqno_barrier() to fixup coherency.
1232                  */
1233                 if (READ_ONCE(engine->breadcrumbs.irq_count) != irq)
1234                         break;
1235
1236                 if (signal_pending_state(state, current))
1237                         break;
1238
1239                 if (busywait_stop(timeout_us, cpu))
1240                         break;
1241
1242                 cpu_relax();
1243         } while (!need_resched());
1244
1245         return false;
1246 }
1247
1248 static bool __i915_wait_request_check_and_reset(struct i915_request *request)
1249 {
1250         struct i915_gpu_error *error = &request->i915->gpu_error;
1251
1252         if (likely(!i915_reset_handoff(error)))
1253                 return false;
1254
1255         __set_current_state(TASK_RUNNING);
1256         i915_reset(request->i915, error->stalled_mask, error->reason);
1257         return true;
1258 }
1259
1260 /**
1261  * i915_request_wait - wait until execution of request has finished
1262  * @rq: the request to wait upon
1263  * @flags: how to wait
1264  * @timeout: how long to wait in jiffies
1265  *
1266  * i915_request_wait() waits for the request to be completed, for a
1267  * maximum of @timeout jiffies (with MAX_SCHEDULE_TIMEOUT implying an
1268  * unbounded wait).
1269  *
1270  * If the caller holds the struct_mutex, the caller must pass I915_WAIT_LOCKED
1271  * in via the flags, and vice versa if the struct_mutex is not held, the caller
1272  * must not specify that the wait is locked.
1273  *
1274  * Returns the remaining time (in jiffies) if the request completed, which may
1275  * be zero, or -ETIME if the request is unfinished after the timeout expires.
1276  * May return -EINTR if called with I915_WAIT_INTERRUPTIBLE and a signal is
1277  * pending before the request completes.
1278  */
1279 long i915_request_wait(struct i915_request *rq,
1280                        unsigned int flags,
1281                        long timeout)
1282 {
1283         const int state = flags & I915_WAIT_INTERRUPTIBLE ?
1284                 TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
1285         wait_queue_head_t *errq = &rq->i915->gpu_error.wait_queue;
1286         DEFINE_WAIT_FUNC(reset, default_wake_function);
1287         DEFINE_WAIT_FUNC(exec, default_wake_function);
1288         struct intel_wait wait;
1289
1290         might_sleep();
1291 #if IS_ENABLED(CONFIG_LOCKDEP)
1292         GEM_BUG_ON(debug_locks &&
1293                    !!lockdep_is_held(&rq->i915->drm.struct_mutex) !=
1294                    !!(flags & I915_WAIT_LOCKED));
1295 #endif
1296         GEM_BUG_ON(timeout < 0);
1297
1298         if (i915_request_completed(rq))
1299                 return timeout;
1300
1301         if (!timeout)
1302                 return -ETIME;
1303
1304         trace_i915_request_wait_begin(rq, flags);
1305
1306         add_wait_queue(&rq->execute, &exec);
1307         if (flags & I915_WAIT_LOCKED)
1308                 add_wait_queue(errq, &reset);
1309
1310         intel_wait_init(&wait);
1311
1312 restart:
1313         do {
1314                 set_current_state(state);
1315                 if (intel_wait_update_request(&wait, rq))
1316                         break;
1317
1318                 if (flags & I915_WAIT_LOCKED &&
1319                     __i915_wait_request_check_and_reset(rq))
1320                         continue;
1321
1322                 if (signal_pending_state(state, current)) {
1323                         timeout = -ERESTARTSYS;
1324                         goto complete;
1325                 }
1326
1327                 if (!timeout) {
1328                         timeout = -ETIME;
1329                         goto complete;
1330                 }
1331
1332                 timeout = io_schedule_timeout(timeout);
1333         } while (1);
1334
1335         GEM_BUG_ON(!intel_wait_has_seqno(&wait));
1336         GEM_BUG_ON(!i915_sw_fence_signaled(&rq->submit));
1337
1338         /* Optimistic short spin before touching IRQs */
1339         if (__i915_spin_request(rq, wait.seqno, state, 5))
1340                 goto complete;
1341
1342         set_current_state(state);
1343         if (intel_engine_add_wait(rq->engine, &wait))
1344                 /*
1345                  * In order to check that we haven't missed the interrupt
1346                  * as we enabled it, we need to kick ourselves to do a
1347                  * coherent check on the seqno before we sleep.
1348                  */
1349                 goto wakeup;
1350
1351         if (flags & I915_WAIT_LOCKED)
1352                 __i915_wait_request_check_and_reset(rq);
1353
1354         for (;;) {
1355                 if (signal_pending_state(state, current)) {
1356                         timeout = -ERESTARTSYS;
1357                         break;
1358                 }
1359
1360                 if (!timeout) {
1361                         timeout = -ETIME;
1362                         break;
1363                 }
1364
1365                 timeout = io_schedule_timeout(timeout);
1366
1367                 if (intel_wait_complete(&wait) &&
1368                     intel_wait_check_request(&wait, rq))
1369                         break;
1370
1371                 set_current_state(state);
1372
1373 wakeup:
1374                 /*
1375                  * Carefully check if the request is complete, giving time
1376                  * for the seqno to be visible following the interrupt.
1377                  * We also have to check in case we are kicked by the GPU
1378                  * reset in order to drop the struct_mutex.
1379                  */
1380                 if (__i915_request_irq_complete(rq))
1381                         break;
1382
1383                 /*
1384                  * If the GPU is hung, and we hold the lock, reset the GPU
1385                  * and then check for completion. On a full reset, the engine's
1386          * HW seqno will be advanced past us and we are complete.
1387                  * If we do a partial reset, we have to wait for the GPU to
1388                  * resume and update the breadcrumb.
1389                  *
1390                  * If we don't hold the mutex, we can just wait for the worker
1391                  * to come along and update the breadcrumb (either directly
1392                  * itself, or indirectly by recovering the GPU).
1393                  */
1394                 if (flags & I915_WAIT_LOCKED &&
1395                     __i915_wait_request_check_and_reset(rq))
1396                         continue;
1397
1398                 /* Only spin if we know the GPU is processing this request */
1399                 if (__i915_spin_request(rq, wait.seqno, state, 2))
1400                         break;
1401
1402                 if (!intel_wait_check_request(&wait, rq)) {
1403                         intel_engine_remove_wait(rq->engine, &wait);
1404                         goto restart;
1405                 }
1406         }
1407
1408         intel_engine_remove_wait(rq->engine, &wait);
1409 complete:
1410         __set_current_state(TASK_RUNNING);
1411         if (flags & I915_WAIT_LOCKED)
1412                 remove_wait_queue(errq, &reset);
1413         remove_wait_queue(&rq->execute, &exec);
1414         trace_i915_request_wait_end(rq);
1415
1416         return timeout;
1417 }
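/*
 * Illustrative wait sketch (an assumption, not taken from a specific
 * caller): an unbounded, interruptible wait performed without holding
 * struct_mutex, so I915_WAIT_LOCKED must not be set in @flags.
 *
 *	ret = i915_request_wait(rq, I915_WAIT_INTERRUPTIBLE,
 *				MAX_SCHEDULE_TIMEOUT);
 *	if (ret < 0)
 *		return ret;
 *
 * A negative result here is -ERESTARTSYS (interrupted by a signal);
 * -ETIME is only possible when a finite @timeout is supplied.
 */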
1418
1419 static void ring_retire_requests(struct intel_ring *ring)
1420 {
1421         struct i915_request *request, *next;
1422
1423         list_for_each_entry_safe(request, next,
1424                                  &ring->request_list, ring_link) {
1425                 if (!i915_request_completed(request))
1426                         break;
1427
1428                 i915_request_retire(request);
1429         }
1430 }
1431
1432 void i915_retire_requests(struct drm_i915_private *i915)
1433 {
1434         struct intel_ring *ring, *tmp;
1435
1436         lockdep_assert_held(&i915->drm.struct_mutex);
1437
1438         if (!i915->gt.active_requests)
1439                 return;
1440
1441         list_for_each_entry_safe(ring, tmp, &i915->gt.active_rings, active_link)
1442                 ring_retire_requests(ring);
1443 }
1444
1445 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
1446 #include "selftests/mock_request.c"
1447 #include "selftests/i915_request.c"
1448 #endif