/*
 * Copyright © 2012-2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eugeni Dodonov <eugeni.dodonov@intel.com>
 *    Daniel Vetter <daniel.vetter@ffwll.ch>
 *
 */

#include <linux/pm_runtime.h>

#include <drm/drm_print.h>

#include "i915_drv.h"
#include "i915_trace.h"

/**
 * DOC: runtime pm
 *
 * The i915 driver supports dynamic enabling and disabling of entire hardware
 * blocks at runtime. This is especially important on the display side where
 * software is supposed to control many power gates manually on recent hardware,
 * since on the GT side a lot of the power management is done by the hardware.
 * But even there some manual control at the device level is required.
 *
 * Since i915 supports a diverse set of platforms with a unified codebase and
 * hardware engineers just love to shuffle functionality around between power
 * domains there's a sizeable amount of indirection required. This file provides
 * generic functions to the driver for grabbing and releasing references for
 * abstract power domains. It then maps those to the actual power wells
 * present for a given platform.
 */
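
/*
 * Example (editorial sketch, not part of the driver): the canonical way to
 * bracket a hardware access with a device-level wakeref. The helper name
 * example_hw_access() is hypothetical; intel_runtime_pm_get()/put() are the
 * real entry points defined below.
 */
static __maybe_unused void example_hw_access(struct drm_i915_private *i915)
{
	struct intel_runtime_pm *rpm = &i915->runtime_pm;
	intel_wakeref_t wakeref;

	wakeref = intel_runtime_pm_get(rpm);	/* powers the device up */

	/* ... registers may be accessed safely here ... */

	intel_runtime_pm_put(rpm, wakeref);	/* may re-arm autosuspend */
}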

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)

#include <linux/sort.h>

#define STACKDEPTH 8

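/*
 * Capture the current call stack (skipping this helper's own frame) and
 * intern it in the stack depot; returns 0 if the trace could not be saved,
 * which callers treat as "tracking unavailable".
 */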
static noinline depot_stack_handle_t __save_depot_stack(void)
{
	unsigned long entries[STACKDEPTH];
	unsigned int n;

	n = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
	return stack_depot_save(entries, n, GFP_NOWAIT | __GFP_NOWARN);
}

static void init_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
{
	spin_lock_init(&rpm->debug.lock);
}

static noinline depot_stack_handle_t
track_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
{
	depot_stack_handle_t stack, *stacks;
	unsigned long flags;

	if (!rpm->available)
		return -1;

	stack = __save_depot_stack();
	if (!stack)
		return -1;

	spin_lock_irqsave(&rpm->debug.lock, flags);

	if (!rpm->debug.count)
		rpm->debug.last_acquire = stack;

	stacks = krealloc(rpm->debug.owners,
			  (rpm->debug.count + 1) * sizeof(*stacks),
			  GFP_NOWAIT | __GFP_NOWARN);
	if (stacks) {
		stacks[rpm->debug.count++] = stack;
		rpm->debug.owners = stacks;
	} else {
		stack = -1;
	}

	spin_unlock_irqrestore(&rpm->debug.lock, flags);

	return stack;
}

static void untrack_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm,
					     depot_stack_handle_t stack)
{
	struct drm_i915_private *i915 = container_of(rpm,
						     struct drm_i915_private,
						     runtime_pm);
	unsigned long flags, n;
	bool found = false;

	if (unlikely(stack == -1))
		return;

	spin_lock_irqsave(&rpm->debug.lock, flags);
	for (n = rpm->debug.count; n--; ) {
		if (rpm->debug.owners[n] == stack) {
			memmove(rpm->debug.owners + n,
				rpm->debug.owners + n + 1,
				(--rpm->debug.count - n) * sizeof(stack));
			found = true;
			break;
		}
	}
	spin_unlock_irqrestore(&rpm->debug.lock, flags);

	if (drm_WARN(&i915->drm, !found,
		     "Unmatched wakeref (tracking %lu), count %u\n",
		     rpm->debug.count, atomic_read(&rpm->wakeref_count))) {
		char *buf;

		buf = kmalloc(PAGE_SIZE, GFP_NOWAIT | __GFP_NOWARN);
		if (!buf)
			return;

		stack_depot_snprint(stack, buf, PAGE_SIZE, 2);
		DRM_DEBUG_DRIVER("wakeref %x from\n%s", stack, buf);

		stack = READ_ONCE(rpm->debug.last_release);
		if (stack) {
			stack_depot_snprint(stack, buf, PAGE_SIZE, 2);
			DRM_DEBUG_DRIVER("wakeref last released at\n%s", buf);
		}

		kfree(buf);
	}
}

static int cmphandle(const void *_a, const void *_b)
{
	const depot_stack_handle_t * const a = _a, * const b = _b;

	if (*a < *b)
		return -1;
	else if (*a > *b)
		return 1;
	else
		return 0;
}

static void
__print_intel_runtime_pm_wakeref(struct drm_printer *p,
				 const struct intel_runtime_pm_debug *dbg)
{
	unsigned long i;
	char *buf;

	buf = kmalloc(PAGE_SIZE, GFP_NOWAIT | __GFP_NOWARN);
	if (!buf)
		return;

	if (dbg->last_acquire) {
		stack_depot_snprint(dbg->last_acquire, buf, PAGE_SIZE, 2);
		drm_printf(p, "Wakeref last acquired:\n%s", buf);
	}

	if (dbg->last_release) {
		stack_depot_snprint(dbg->last_release, buf, PAGE_SIZE, 2);
		drm_printf(p, "Wakeref last released:\n%s", buf);
	}

	drm_printf(p, "Wakeref count: %lu\n", dbg->count);

	sort(dbg->owners, dbg->count, sizeof(*dbg->owners), cmphandle, NULL);

	for (i = 0; i < dbg->count; i++) {
		depot_stack_handle_t stack = dbg->owners[i];
		unsigned long rep;

		rep = 1;
		while (i + 1 < dbg->count && dbg->owners[i + 1] == stack)
			rep++, i++;
		stack_depot_snprint(stack, buf, PAGE_SIZE, 2);
		drm_printf(p, "Wakeref x%lu taken at:\n%s", rep, buf);
	}

	kfree(buf);
}

static noinline void
__untrack_all_wakerefs(struct intel_runtime_pm_debug *debug,
		       struct intel_runtime_pm_debug *saved)
{
	*saved = *debug;

	debug->owners = NULL;
	debug->count = 0;
	debug->last_release = __save_depot_stack();
}

static void
dump_and_free_wakeref_tracking(struct intel_runtime_pm_debug *debug)
{
	if (debug->count) {
		struct drm_printer p = drm_debug_printer("i915");

		__print_intel_runtime_pm_wakeref(&p, debug);
	}

	kfree(debug->owners);
}

static noinline void
__intel_wakeref_dec_and_check_tracking(struct intel_runtime_pm *rpm)
{
	struct intel_runtime_pm_debug dbg = {};
	unsigned long flags;

	if (!atomic_dec_and_lock_irqsave(&rpm->wakeref_count,
					 &rpm->debug.lock,
					 flags))
		return;

	__untrack_all_wakerefs(&rpm->debug, &dbg);
	spin_unlock_irqrestore(&rpm->debug.lock, flags);

	dump_and_free_wakeref_tracking(&dbg);
}

static noinline void
untrack_all_intel_runtime_pm_wakerefs(struct intel_runtime_pm *rpm)
{
	struct intel_runtime_pm_debug dbg = {};
	unsigned long flags;

	spin_lock_irqsave(&rpm->debug.lock, flags);
	__untrack_all_wakerefs(&rpm->debug, &dbg);
	spin_unlock_irqrestore(&rpm->debug.lock, flags);

	dump_and_free_wakeref_tracking(&dbg);
}

void print_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm,
				    struct drm_printer *p)
{
	struct intel_runtime_pm_debug dbg = {};

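	/*
	 * Snapshot the tracking state without holding the lock across an
	 * allocation: copy under the lock if the local buffer is already
	 * large enough, otherwise drop the lock, grow the buffer and retry.
	 */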
	do {
		unsigned long alloc = dbg.count;
		depot_stack_handle_t *s;

		spin_lock_irq(&rpm->debug.lock);
		dbg.count = rpm->debug.count;
		if (dbg.count <= alloc) {
			memcpy(dbg.owners,
			       rpm->debug.owners,
			       dbg.count * sizeof(*s));
		}
		dbg.last_acquire = rpm->debug.last_acquire;
		dbg.last_release = rpm->debug.last_release;
		spin_unlock_irq(&rpm->debug.lock);
		if (dbg.count <= alloc)
			break;

		s = krealloc(dbg.owners,
			     dbg.count * sizeof(*s),
			     GFP_NOWAIT | __GFP_NOWARN);
		if (!s)
			goto out;

		dbg.owners = s;
	} while (1);

	__print_intel_runtime_pm_wakeref(p, &dbg);

out:
	kfree(dbg.owners);
}

#else

static void init_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
{
}

static depot_stack_handle_t
track_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
{
	return -1;
}

static void untrack_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm,
					     intel_wakeref_t wref)
{
}

static void
__intel_wakeref_dec_and_check_tracking(struct intel_runtime_pm *rpm)
{
	atomic_dec(&rpm->wakeref_count);
}

static void
untrack_all_intel_runtime_pm_wakerefs(struct intel_runtime_pm *rpm)
{
}

#endif

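/*
 * rpm->wakeref_count packs two counters into one atomic: raw wakerefs in
 * the low bits and wakelock references in multiples of
 * INTEL_RPM_WAKELOCK_BIAS in the high bits; a wakelock acquisition also
 * counts as a raw wakeref, hence the "1 +" below. See
 * intel_rpm_raw_wakeref_count() and intel_rpm_wakelock_count().
 */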
static void
intel_runtime_pm_acquire(struct intel_runtime_pm *rpm, bool wakelock)
{
	if (wakelock) {
		atomic_add(1 + INTEL_RPM_WAKELOCK_BIAS, &rpm->wakeref_count);
		assert_rpm_wakelock_held(rpm);
	} else {
		atomic_inc(&rpm->wakeref_count);
		assert_rpm_raw_wakeref_held(rpm);
	}
}

static void
intel_runtime_pm_release(struct intel_runtime_pm *rpm, int wakelock)
{
	if (wakelock) {
		assert_rpm_wakelock_held(rpm);
		atomic_sub(INTEL_RPM_WAKELOCK_BIAS, &rpm->wakeref_count);
	} else {
		assert_rpm_raw_wakeref_held(rpm);
	}

	__intel_wakeref_dec_and_check_tracking(rpm);
}

static intel_wakeref_t __intel_runtime_pm_get(struct intel_runtime_pm *rpm,
					      bool wakelock)
{
	struct drm_i915_private *i915 = container_of(rpm,
						     struct drm_i915_private,
						     runtime_pm);
	int ret;

	ret = pm_runtime_get_sync(rpm->kdev);
	drm_WARN_ONCE(&i915->drm, ret < 0,
		      "pm_runtime_get_sync() failed: %d\n", ret);

	intel_runtime_pm_acquire(rpm, wakelock);

	return track_intel_runtime_pm_wakeref(rpm);
}

/**
 * intel_runtime_pm_get_raw - grab a raw runtime pm reference
 * @rpm: the intel_runtime_pm structure
 *
 * This function grabs a device-level runtime pm reference (mostly used for
 * asynchronous PM management from display code) and ensures that it is powered
 * up. Raw references are not considered during wakelock assert checks.
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put_raw() to release the reference again.
 *
 * Returns: the wakeref cookie to pass to intel_runtime_pm_put_raw(), evaluates
 * as True if the wakeref was acquired, or False otherwise.
 */
intel_wakeref_t intel_runtime_pm_get_raw(struct intel_runtime_pm *rpm)
{
	return __intel_runtime_pm_get(rpm, false);
}
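
/*
 * Example (editorial sketch, not part of the driver): pairing a raw
 * reference. The helper name example_raw_section() is hypothetical; raw
 * references keep the device awake but are ignored by the wakelock asserts.
 */
static __maybe_unused void example_raw_section(struct intel_runtime_pm *rpm)
{
	intel_wakeref_t wakeref;

	wakeref = intel_runtime_pm_get_raw(rpm);

	/* ... work that must not be counted as a wakelock ... */

	intel_runtime_pm_put_raw(rpm, wakeref);
}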

/**
 * intel_runtime_pm_get - grab a runtime pm reference
 * @rpm: the intel_runtime_pm structure
 *
 * This function grabs a device-level runtime pm reference (mostly used for GEM
 * code to ensure the GTT or GT is on) and ensures that it is powered up.
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put() to release the reference again.
 *
 * Returns: the wakeref cookie to pass to intel_runtime_pm_put()
 */
intel_wakeref_t intel_runtime_pm_get(struct intel_runtime_pm *rpm)
{
	return __intel_runtime_pm_get(rpm, true);
}

/**
 * __intel_runtime_pm_get_if_active - grab a runtime pm reference if device is active
 * @rpm: the intel_runtime_pm structure
 * @ignore_usecount: get a ref even if dev->power.usage_count is 0
 *
 * This function grabs a device-level runtime pm reference if the device is
 * already active and ensures that it is powered up. It is illegal to access
 * the HW should intel_runtime_pm_get_if_active() report failure.
 *
 * If @ignore_usecount is true, a reference will be acquired even if there is no
 * user requiring the device to be powered up (dev->power.usage_count == 0).
 * If the function returns false in this case then it's guaranteed that the
 * device's runtime suspend hook has been called already or that it will be
 * called (and hence it's also guaranteed that the device's runtime resume
 * hook will be called eventually).
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put() to release the reference again.
 *
 * Returns: the wakeref cookie to pass to intel_runtime_pm_put(), evaluates
 * as True if the wakeref was acquired, or False otherwise.
 */
static intel_wakeref_t __intel_runtime_pm_get_if_active(struct intel_runtime_pm *rpm,
							bool ignore_usecount)
{
	if (IS_ENABLED(CONFIG_PM)) {
		/*
		 * In cases where runtime PM is disabled by the RPM core and
		 * we get an -EINVAL return value, we are not supposed to call
		 * this function, since the power state is undefined. This
		 * applies atm to the late/early system suspend/resume
		 * handlers.
		 */
		if (pm_runtime_get_if_active(rpm->kdev, ignore_usecount) <= 0)
			return 0;
	}

	intel_runtime_pm_acquire(rpm, true);

	return track_intel_runtime_pm_wakeref(rpm);
}

intel_wakeref_t intel_runtime_pm_get_if_in_use(struct intel_runtime_pm *rpm)
{
	return __intel_runtime_pm_get_if_active(rpm, false);
}

intel_wakeref_t intel_runtime_pm_get_if_active(struct intel_runtime_pm *rpm)
{
	return __intel_runtime_pm_get_if_active(rpm, true);
}
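
/*
 * Example (editorial sketch, not part of the driver): opportunistic work
 * that must not wake a suspended device. The helper name
 * example_if_in_use() is hypothetical; the cookie evaluates as false when
 * no reference was acquired.
 */
static __maybe_unused void example_if_in_use(struct intel_runtime_pm *rpm)
{
	intel_wakeref_t wakeref;

	wakeref = intel_runtime_pm_get_if_in_use(rpm);
	if (!wakeref)
		return;	/* device is suspended, skip the hardware access */

	/* ... touch the hardware ... */

	intel_runtime_pm_put(rpm, wakeref);
}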

/**
 * intel_runtime_pm_get_noresume - grab a runtime pm reference
 * @rpm: the intel_runtime_pm structure
 *
 * This function grabs a device-level runtime pm reference (mostly used for GEM
 * code to ensure the GTT or GT is on).
 *
 * It will _not_ power up the device but instead only check that it's powered
 * on.  Therefore it is only valid to call this function from contexts where
 * the device is known to be powered up and where trying to power it up would
 * result in hilarity and deadlocks. That pretty much means only the system
 * suspend/resume code where this is used to grab runtime pm references for
 * delayed setup down in work items.
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put() to release the reference again.
 *
 * Returns: the wakeref cookie to pass to intel_runtime_pm_put()
 */
intel_wakeref_t intel_runtime_pm_get_noresume(struct intel_runtime_pm *rpm)
{
	assert_rpm_wakelock_held(rpm);
	pm_runtime_get_noresume(rpm->kdev);

	intel_runtime_pm_acquire(rpm, true);

	return track_intel_runtime_pm_wakeref(rpm);
}
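
/*
 * Example (editorial sketch, not part of the driver): taking a reference
 * from a context where the device is known to be awake, e.g. to hand off
 * to a work item during system suspend/resume. The helper name
 * example_noresume_handoff() is hypothetical.
 */
static __maybe_unused intel_wakeref_t
example_noresume_handoff(struct intel_runtime_pm *rpm)
{
	/*
	 * Does not resume the device; only legal while it is powered up.
	 * The caller must balance this with intel_runtime_pm_put() later.
	 */
	return intel_runtime_pm_get_noresume(rpm);
}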

static void __intel_runtime_pm_put(struct intel_runtime_pm *rpm,
				   intel_wakeref_t wref,
				   bool wakelock)
{
	struct device *kdev = rpm->kdev;

	untrack_intel_runtime_pm_wakeref(rpm, wref);

	intel_runtime_pm_release(rpm, wakelock);

	pm_runtime_mark_last_busy(kdev);
	pm_runtime_put_autosuspend(kdev);
}

/**
 * intel_runtime_pm_put_raw - release a raw runtime pm reference
 * @rpm: the intel_runtime_pm structure
 * @wref: wakeref acquired for the reference that is being released
 *
 * This function drops the device-level runtime pm reference obtained by
 * intel_runtime_pm_get_raw() and might power down the corresponding
 * hardware block right away if this is the last reference.
 */
void
intel_runtime_pm_put_raw(struct intel_runtime_pm *rpm, intel_wakeref_t wref)
{
	__intel_runtime_pm_put(rpm, wref, false);
}

/**
 * intel_runtime_pm_put_unchecked - release an unchecked runtime pm reference
 * @rpm: the intel_runtime_pm structure
 *
 * This function drops the device-level runtime pm reference obtained by
 * intel_runtime_pm_get() and might power down the corresponding
 * hardware block right away if this is the last reference.
 *
 * This function exists only for historical reasons and should be avoided in
 * new code, as the correctness of its use cannot be checked. Always use
 * intel_runtime_pm_put() instead.
 */
void intel_runtime_pm_put_unchecked(struct intel_runtime_pm *rpm)
{
	__intel_runtime_pm_put(rpm, -1, true);
}

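/*
 * The checked intel_runtime_pm_put() only needs an out-of-line definition
 * when wakeref tracking is compiled in; otherwise the header provides a
 * static inline that forwards to intel_runtime_pm_put_unchecked().
 */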
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
/**
 * intel_runtime_pm_put - release a runtime pm reference
 * @rpm: the intel_runtime_pm structure
 * @wref: wakeref acquired for the reference that is being released
 *
 * This function drops the device-level runtime pm reference obtained by
 * intel_runtime_pm_get() and might power down the corresponding
 * hardware block right away if this is the last reference.
 */
void intel_runtime_pm_put(struct intel_runtime_pm *rpm, intel_wakeref_t wref)
{
	__intel_runtime_pm_put(rpm, wref, true);
}
#endif

/**
 * intel_runtime_pm_enable - enable runtime pm
 * @rpm: the intel_runtime_pm structure
 *
 * This function enables runtime pm at the end of the driver load sequence.
 *
 * Note that this function does not currently enable runtime pm for the
 * subordinate display power domains. That is done by
 * intel_power_domains_enable().
 */
void intel_runtime_pm_enable(struct intel_runtime_pm *rpm)
{
	struct drm_i915_private *i915 = container_of(rpm,
						     struct drm_i915_private,
						     runtime_pm);
	struct device *kdev = rpm->kdev;

	/*
	 * Disable the system suspend direct complete optimization, which can
	 * leave the device suspended skipping the driver's suspend handlers
	 * if the device was already runtime suspended. This is needed due to
	 * the difference in our runtime and system suspend sequence and
	 * because the HDA driver may require us to enable the audio power
	 * domain during system suspend.
	 */
	dev_pm_set_driver_flags(kdev, DPM_FLAG_NO_DIRECT_COMPLETE);

	pm_runtime_set_autosuspend_delay(kdev, 10000); /* 10s */
	pm_runtime_mark_last_busy(kdev);

	/*
	 * Take a permanent reference to disable the RPM functionality and drop
	 * it only when unloading the driver. Use the low level get/put helpers,
	 * so the driver's own RPM reference tracking asserts also work on
	 * platforms without RPM support.
	 */
	if (!rpm->available) {
		int ret;

		pm_runtime_dont_use_autosuspend(kdev);
		ret = pm_runtime_get_sync(kdev);
		drm_WARN(&i915->drm, ret < 0,
			 "pm_runtime_get_sync() failed: %d\n", ret);
	} else {
		pm_runtime_use_autosuspend(kdev);
	}

	/*
	 * The core calls the driver load handler with an RPM reference held.
	 * We drop that here and will reacquire it during unloading in
	 * intel_power_domains_fini().
	 */
	pm_runtime_put_autosuspend(kdev);
}

void intel_runtime_pm_disable(struct intel_runtime_pm *rpm)
{
	struct drm_i915_private *i915 = container_of(rpm,
						     struct drm_i915_private,
						     runtime_pm);
	struct device *kdev = rpm->kdev;

	/* Transfer rpm ownership back to core */
	drm_WARN(&i915->drm, pm_runtime_get_sync(kdev) < 0,
		 "Failed to pass rpm ownership back to core\n");

	pm_runtime_dont_use_autosuspend(kdev);

	if (!rpm->available)
		pm_runtime_put(kdev);
}

void intel_runtime_pm_driver_release(struct intel_runtime_pm *rpm)
{
	struct drm_i915_private *i915 = container_of(rpm,
						     struct drm_i915_private,
						     runtime_pm);
	int count = atomic_read(&rpm->wakeref_count);

	drm_WARN(&i915->drm, count,
		 "i915 raw-wakerefs=%d wakelocks=%d on cleanup\n",
		 intel_rpm_raw_wakeref_count(count),
		 intel_rpm_wakelock_count(count));

	untrack_all_intel_runtime_pm_wakerefs(rpm);
}

void intel_runtime_pm_init_early(struct intel_runtime_pm *rpm)
{
	struct drm_i915_private *i915 =
			container_of(rpm, struct drm_i915_private, runtime_pm);
	struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
	struct device *kdev = &pdev->dev;

	rpm->kdev = kdev;
	rpm->available = HAS_RUNTIME_PM(i915);

	init_intel_runtime_pm_wakeref(rpm);
}