platform/kernel/linux-rpi.git: drivers/gpu/drm/i915/gt/intel_engine_pm.c (commit 73e96ca024dfd25fd703a3a5dac5d9461c37ca38)
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include "i915_drv.h"

#include "intel_breadcrumbs.h"
#include "intel_context.h"
#include "intel_engine.h"
#include "intel_engine_heartbeat.h"
#include "intel_engine_pm.h"
#include "intel_gt.h"
#include "intel_gt_pm.h"
#include "intel_rc6.h"
#include "intel_ring.h"
#include "shmem_utils.h"

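/*
 * Fill the context state image with CONTEXT_REDZONE poison so that any
 * stale contents are never trusted after our loss of control while the
 * engine was parked. Debug-only (CONFIG_DRM_I915_DEBUG_GEM) and skipped
 * if the backing object cannot be locked without blocking.
 */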
static void dbg_poison_ce(struct intel_context *ce)
{
        if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
                return;

        if (ce->state) {
                struct drm_i915_gem_object *obj = ce->state->obj;
                int type = i915_coherent_map_type(ce->engine->i915, obj, true);
                void *map;

                if (!i915_gem_object_trylock(obj))
                        return;

                map = i915_gem_object_pin_map(obj, type);
                if (!IS_ERR(map)) {
                        memset(map, CONTEXT_REDZONE, obj->base.size);
                        i915_gem_object_flush_map(obj);
                        i915_gem_object_unpin_map(obj);
                }
                i915_gem_object_unlock(obj);
        }
}

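/*
 * Bring the engine out of its parked (powered down) state: take a GT pm
 * reference, scrub the kernel context image we poisoned on parking, and
 * restart breadcrumb signaling and the heartbeat.
 */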
static int __engine_unpark(struct intel_wakeref *wf)
{
        struct intel_engine_cs *engine =
                container_of(wf, typeof(*engine), wakeref);
        struct intel_context *ce;

        ENGINE_TRACE(engine, "\n");

        intel_gt_pm_get(engine->gt);

        /* Discard stale context state from across idling */
        ce = engine->kernel_context;
        if (ce) {
                GEM_BUG_ON(test_bit(CONTEXT_VALID_BIT, &ce->flags));

                /* Flush all pending HW writes before we touch the context */
                while (unlikely(intel_context_inflight(ce)))
                        intel_engine_flush_submission(engine);

                /* First poison the image to verify we never fully trust it */
                dbg_poison_ce(ce);

                /* Scrub the context image after our loss of control */
                ce->ops->reset(ce);

                CE_TRACE(ce, "reset { seqno:%x, *hwsp:%x, ring:%x }\n",
                         ce->timeline->seqno,
                         READ_ONCE(*ce->timeline->hwsp_seqno),
                         ce->ring->emit);
                GEM_BUG_ON(ce->timeline->seqno !=
                           READ_ONCE(*ce->timeline->hwsp_seqno));
        }

        if (engine->unpark)
                engine->unpark(engine);

        intel_breadcrumbs_unpark(engine->breadcrumbs);
        intel_engine_unpark_heartbeat(engine);
        return 0;
}

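/*
 * Fence callback on the final parking request: sample how long the request
 * took from emission to completion and feed it into the engine's latency
 * estimate.
 */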
static void duration(struct dma_fence *fence, struct dma_fence_cb *cb)
{
        struct i915_request *rq = to_request(fence);

        ewma__engine_latency_add(&rq->engine->latency,
                                 ktime_us_delta(rq->fence.timestamp,
                                                rq->duration.emitted));
}

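/*
 * Queue the final request and release the engine-pm in one critical section
 * under the global timelines lock, so that neither retirement nor a new
 * submitter can race with us and underflow engine->wakeref.counter or
 * timeline->active_count.
 */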
static void
__queue_and_release_pm(struct i915_request *rq,
                       struct intel_timeline *tl,
                       struct intel_engine_cs *engine)
{
        struct intel_gt_timelines *timelines = &engine->gt->timelines;

        ENGINE_TRACE(engine, "parking\n");

        /*
         * We have to serialise all potential retirement paths with our
         * submission, as we don't want to underflow either the
         * engine->wakeref.counter or our timeline->active_count.
         *
         * Equally, we cannot allow a new submission to start until
         * after we finish queueing, nor could we allow that submitter
         * to retire us before we are ready!
         */
        spin_lock(&timelines->lock);

        /* Let intel_gt_retire_requests() retire us (acquired under lock) */
        if (!atomic_fetch_inc(&tl->active_count))
                list_add_tail(&tl->link, &timelines->active_list);

        /* Hand the request over to HW and so engine_retire() */
        __i915_request_queue_bh(rq);

        /* Let new submissions commence (and maybe retire this timeline) */
        __intel_wakeref_defer_park(&engine->wakeref);

        spin_unlock(&timelines->lock);
}

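/*
 * Before parking, emit one final request that switches the engine back to
 * the kernel context, so no user context state remains live on the HW while
 * it is powered down. Returns true if the engine may park immediately
 * (already idle in the kernel context, wedged, or the switch could not be
 * emitted); false if the barrier request was queued and the park is
 * deferred until it retires.
 */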
static bool switch_to_kernel_context(struct intel_engine_cs *engine)
{
        struct intel_context *ce = engine->kernel_context;
        struct i915_request *rq;
        bool result = true;

        /* GPU is pointing to the void, as good as in the kernel context. */
        if (intel_gt_is_wedged(engine->gt))
                return true;

        GEM_BUG_ON(!intel_context_is_barrier(ce));
        GEM_BUG_ON(ce->timeline->hwsp_ggtt != engine->status_page.vma);

        /* Already inside the kernel context, safe to power down. */
        if (engine->wakeref_serial == engine->serial)
                return true;

        /*
         * Note, we do this without taking the timeline->mutex. We cannot
         * as we may be called while retiring the kernel context and so
         * already underneath the timeline->mutex. Instead we rely on the
         * exclusive property of the __engine_park that prevents anyone
         * else from creating a request on this engine. This also requires
         * that the ring is empty and we avoid any waits while constructing
         * the context, as they assume protection by the timeline->mutex.
         * This should hold true as we can only park the engine after
         * retiring the last request, thus all rings should be empty and
         * all timelines idle.
         *
         * For unlocking, there are 2 other parties and the GPU who have a
         * stake here.
         *
         * A new gpu user will be waiting on the engine-pm to start their
         * engine_unpark. New waiters are predicated on engine->wakeref.count
         * and so intel_wakeref_defer_park() acts like a mutex_unlock of the
         * engine->wakeref.
         *
         * The other party is intel_gt_retire_requests(), which is walking the
         * list of active timelines looking for completions. Meanwhile as soon
         * as we call __i915_request_queue(), the GPU may complete our request.
         * Ergo, if we put ourselves on the timelines.active_list
         * (see intel_timeline_enter()) before we increment the
         * engine->wakeref.count, we may see the request completion and retire
         * it causing an underflow of the engine->wakeref.
         */
        set_bit(CONTEXT_IS_PARKED, &ce->flags);
        GEM_BUG_ON(atomic_read(&ce->timeline->active_count) < 0);

        rq = __i915_request_create(ce, GFP_NOWAIT);
        if (IS_ERR(rq))
                /* Context switch failed, hope for the best! Maybe reset? */
                goto out_unlock;

        /* Check again on the next retirement. */
        engine->wakeref_serial = engine->serial + 1;
        i915_request_add_active_barriers(rq);

        /* Install ourselves as a preemption barrier */
        rq->sched.attr.priority = I915_PRIORITY_BARRIER;
        if (likely(!__i915_request_commit(rq))) { /* engine should be idle! */
                /*
                 * Use an interrupt for precise measurement of duration,
                 * otherwise we rely on someone else retiring all the requests
                 * which may delay the signaling (i.e. we will likely wait
                 * until the background request retirement running every
                 * second or two).
                 */
                BUILD_BUG_ON(sizeof(rq->duration) > sizeof(rq->submitq));
                dma_fence_add_callback(&rq->fence, &rq->duration.cb, duration);
                rq->duration.emitted = ktime_get();
        }

        /* Expose ourselves to the world */
        __queue_and_release_pm(rq, ce->timeline, engine);

        result = false;
out_unlock:
        clear_bit(CONTEXT_IS_PARKED, &ce->flags);
        return result;
}

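/*
 * Flush any remaining idle barrier tasks (e.g. after wedging), signaling
 * each callback with -EAGAIN as there is no request left to carry them.
 */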
static void call_idle_barriers(struct intel_engine_cs *engine)
{
        struct llist_node *node, *next;

        llist_for_each_safe(node, next, llist_del_all(&engine->barrier_tasks)) {
                struct dma_fence_cb *cb =
                        container_of((struct list_head *)node,
                                     typeof(*cb), node);

                cb->func(ERR_PTR(-EAGAIN), cb);
        }
}

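/*
 * Power down the engine: switch back to the kernel context (deferring the
 * park if a final request had to be queued), flush idle barriers, quiesce
 * the heartbeat and breadcrumbs, and drop our GT pm reference.
 */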
static int __engine_park(struct intel_wakeref *wf)
{
        struct intel_engine_cs *engine =
                container_of(wf, typeof(*engine), wakeref);

        engine->saturated = 0;

        /*
         * If one and only one request is completed between pm events,
         * we know that we are inside the kernel context and it is
         * safe to power down. (We are paranoid in case that runtime
         * suspend causes corruption to the active context image, and
         * want to avoid that impacting userspace.)
         */
        if (!switch_to_kernel_context(engine))
                return -EBUSY;

        ENGINE_TRACE(engine, "parked\n");

        call_idle_barriers(engine); /* cleanup after wedging */

        intel_engine_park_heartbeat(engine);
        intel_breadcrumbs_park(engine->breadcrumbs);

        /* Must be reset upon idling, or we may miss the busy wakeup. */
        GEM_BUG_ON(engine->sched_engine->queue_priority_hint != INT_MIN);

        if (engine->park)
                engine->park(engine);

        /* While gt calls i915_vma_parked(), we have to break the lock cycle */
        intel_gt_pm_put_async(engine->gt);
        return 0;
}

static const struct intel_wakeref_ops wf_ops = {
        .get = __engine_unpark,
        .put = __engine_park,
};

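/* Set up the engine wakeref (unpark/park callbacks) and its heartbeat. */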
void intel_engine_init__pm(struct intel_engine_cs *engine)
{
        struct intel_runtime_pm *rpm = engine->uncore->rpm;

        intel_wakeref_init(&engine->wakeref, rpm, &wf_ops);
        intel_engine_init_heartbeat(engine);
}

/**
 * intel_engine_reset_pinned_contexts - Reset the pinned contexts of
 * an engine.
 * @engine: The engine whose pinned contexts we want to reset.
 *
 * Typically the pinned context LMEM images lose or get their content
 * corrupted on suspend. This function resets their images.
 */
void intel_engine_reset_pinned_contexts(struct intel_engine_cs *engine)
{
        struct intel_context *ce;

        list_for_each_entry(ce, &engine->pinned_contexts_list,
                            pinned_contexts_link) {
                /* kernel context gets reset at __engine_unpark() */
                if (ce == engine->kernel_context)
                        continue;

                dbg_poison_ce(ce);
                ce->ops->reset(ce);
        }
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_engine_pm.c"
#endif