Merge tag 'v4.4-rc2' into drm-intel-next-queued
[platform/kernel/linux-rpi.git] / drivers / gpu / drm / i915 / intel_runtime_pm.c
1 /*
2  * Copyright © 2012-2014 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eugeni Dodonov <eugeni.dodonov@intel.com>
25  *    Daniel Vetter <daniel.vetter@ffwll.ch>
26  *
27  */
28
29 #include <linux/pm_runtime.h>
30 #include <linux/vgaarb.h>
31
32 #include "i915_drv.h"
33 #include "intel_drv.h"
34
35 /**
36  * DOC: runtime pm
37  *
38  * The i915 driver supports dynamic enabling and disabling of entire hardware
39  * blocks at runtime. This is especially important on the display side where
40  * software is supposed to control many power gates manually on recent hardware,
41  * since on the GT side a lot of the power management is done by the hardware.
42  * But even there some manual control at the device level is required.
43  *
44  * Since i915 supports a diverse set of platforms with a unified codebase and
45  * hardware engineers just love to shuffle functionality around between power
46  * domains there's a sizeable amount of indirection required. This file provides
47  * generic functions to the driver for grabbing and releasing references for
48  * abstract power domains. It then maps those to the actual power wells
49  * present for a given platform.
50  */
51
/*
 * Iterate forward (index 0 upwards) over all power wells in @power_domains,
 * executing the loop body only for wells whose ->domains mask intersects
 * @domain_mask. @i and @power_well are caller-provided iteration variables.
 */
#define for_each_power_well(i, power_well, domain_mask, power_domains)	\
	for (i = 0;							\
	     i < (power_domains)->power_well_count &&			\
		 ((power_well) = &(power_domains)->power_wells[i]);	\
	     i++)							\
		if ((power_well)->domains & (domain_mask))
58
/*
 * Same as for_each_power_well(), but iterating in reverse (highest index
 * first) — used where wells must be checked/released top-down.
 */
#define for_each_power_well_rev(i, power_well, domain_mask, power_domains) \
	for (i = (power_domains)->power_well_count - 1;			 \
	     i >= 0 && ((power_well) = &(power_domains)->power_wells[i]);\
	     i--)							 \
		if ((power_well)->domains & (domain_mask))
64
/* Forward declaration: looks up a power well by id and reports its state. */
bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
				    int power_well_id);
67
/* Enable @power_well via its platform ops, then mark it enabled in SW. */
static void intel_power_well_enable(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well)
{
	DRM_DEBUG_KMS("enabling %s\n", power_well->name);
	power_well->ops->enable(dev_priv, power_well);
	/* Flag is set only after the hw enable has been issued. */
	power_well->hw_enabled = true;
}
75
/* Mark @power_well disabled in SW, then disable it via its platform ops. */
static void intel_power_well_disable(struct drm_i915_private *dev_priv,
				     struct i915_power_well *power_well)
{
	DRM_DEBUG_KMS("disabling %s\n", power_well->name);
	/* Flag is cleared before the hw disable — mirror of the enable path. */
	power_well->hw_enabled = false;
	power_well->ops->disable(dev_priv, power_well);
}
83
84 /*
85  * We should only use the power well if we explicitly asked the hardware to
86  * enable it, so check if it's enabled and also check if we've requested it to
87  * be enabled.
88  */
89 static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
90                                    struct i915_power_well *power_well)
91 {
92         return I915_READ(HSW_PWR_WELL_DRIVER) ==
93                      (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
94 }
95
96 /**
97  * __intel_display_power_is_enabled - unlocked check for a power domain
98  * @dev_priv: i915 device instance
99  * @domain: power domain to check
100  *
101  * This is the unlocked version of intel_display_power_is_enabled() and should
102  * only be used from error capture and recovery code where deadlocks are
103  * possible.
104  *
105  * Returns:
106  * True when the power domain is enabled, false otherwise.
107  */
108 bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
109                                       enum intel_display_power_domain domain)
110 {
111         struct i915_power_domains *power_domains;
112         struct i915_power_well *power_well;
113         bool is_enabled;
114         int i;
115
116         if (dev_priv->pm.suspended)
117                 return false;
118
119         power_domains = &dev_priv->power_domains;
120
121         is_enabled = true;
122
123         for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
124                 if (power_well->always_on)
125                         continue;
126
127                 if (!power_well->hw_enabled) {
128                         is_enabled = false;
129                         break;
130                 }
131         }
132
133         return is_enabled;
134 }
135
136 /**
137  * intel_display_power_is_enabled - check for a power domain
138  * @dev_priv: i915 device instance
139  * @domain: power domain to check
140  *
141  * This function can be used to check the hw power domain state. It is mostly
142  * used in hardware state readout functions. Everywhere else code should rely
143  * upon explicit power domain reference counting to ensure that the hardware
144  * block is powered up before accessing it.
145  *
146  * Callers must hold the relevant modesetting locks to ensure that concurrent
147  * threads can't disable the power well while the caller tries to read a few
148  * registers.
149  *
150  * Returns:
151  * True when the power domain is enabled, false otherwise.
152  */
153 bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
154                                     enum intel_display_power_domain domain)
155 {
156         struct i915_power_domains *power_domains;
157         bool ret;
158
159         power_domains = &dev_priv->power_domains;
160
161         mutex_lock(&power_domains->lock);
162         ret = __intel_display_power_is_enabled(dev_priv, domain);
163         mutex_unlock(&power_domains->lock);
164
165         return ret;
166 }
167
/**
 * intel_display_set_init_power - set the initial power domain state
 * @dev_priv: i915 device instance
 * @enable: whether to enable or disable the initial power domain state
 *
 * For simplicity our driver load/unload and system suspend/resume code assumes
 * that all power domains are always enabled. This functions controls the state
 * of this little hack. While the initial power domain state is enabled runtime
 * pm is effectively disabled.
 */
void intel_display_set_init_power(struct drm_i915_private *dev_priv,
				  bool enable)
{
	/* No-op if the requested state is already in effect. */
	if (dev_priv->power_domains.init_power_on == enable)
		return;

	if (enable)
		intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
	else
		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);

	dev_priv->power_domains.init_power_on = enable;
}
191
/*
 * Starting with Haswell, we have a "Power Down Well" that can be turned off
 * when not needed anymore. We have 4 registers that can request the power well
 * to be enabled, and it will only be disabled if none of the registers is
 * requesting it to be enabled.
 */
/* Fixups that must run right after the HSW/BDW power well comes back up. */
static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	/*
	 * After we re-enable the power well, if we touch VGA register 0x3d5
	 * we'll get unclaimed register interrupts. This stops after we write
	 * anything to the VGA MSR register. The vgacon module uses this
	 * register all the time, so if we unbind our driver and, as a
	 * consequence, bind vgacon, we'll get stuck in an infinite loop at
	 * console_unlock(). So make sure here we touch the VGA MSR register,
	 * making sure vgacon can keep working normally without triggering
	 * interrupts and error messages.
	 */
	vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
	outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
	vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);

	/* Pipe B/C interrupts live in the well; re-init them after power-up. */
	if (IS_BROADWELL(dev))
		gen8_irq_power_well_post_enable(dev_priv,
						1 << PIPE_C | 1 << PIPE_B);
}
220
/* SKL variant of the post-enable fixups; only needed for power well 2. */
static void skl_power_well_post_enable(struct drm_i915_private *dev_priv,
				       struct i915_power_well *power_well)
{
	struct drm_device *dev = dev_priv->dev;

	/*
	 * After we re-enable the power well, if we touch VGA register 0x3d5
	 * we'll get unclaimed register interrupts. This stops after we write
	 * anything to the VGA MSR register. The vgacon module uses this
	 * register all the time, so if we unbind our driver and, as a
	 * consequence, bind vgacon, we'll get stuck in an infinite loop at
	 * console_unlock(). So make sure here we touch the VGA MSR register,
	 * making sure vgacon can keep working normally without triggering
	 * interrupts and error messages.
	 */
	if (power_well->data == SKL_DISP_PW_2) {
		vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
		outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
		vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);

		/* Pipe B/C interrupt registers live in PG2. */
		gen8_irq_power_well_post_enable(dev_priv,
						1 << PIPE_C | 1 << PIPE_B);
	}
}
245
/*
 * Request the HSW display power well on or off via the driver request
 * register. Enabling waits for the hardware to report the well up and then
 * runs the post-enable fixups; disabling only posts the request (the well
 * goes down once no requester needs it).
 */
static void hsw_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	bool is_enabled, enable_requested;
	uint32_t tmp;

	tmp = I915_READ(HSW_PWR_WELL_DRIVER);
	is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED;
	enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST;

	if (enable) {
		if (!enable_requested)
			I915_WRITE(HSW_PWR_WELL_DRIVER,
				   HSW_PWR_WELL_ENABLE_REQUEST);

		if (!is_enabled) {
			DRM_DEBUG_KMS("Enabling power well\n");
			/* 20 is the wait_for timeout argument. */
			if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
				      HSW_PWR_WELL_STATE_ENABLED), 20))
				DRM_ERROR("Timeout enabling power well\n");
			hsw_power_well_post_enable(dev_priv);
		}

	} else {
		if (enable_requested) {
			I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
			POSTING_READ(HSW_PWR_WELL_DRIVER);
			DRM_DEBUG_KMS("Requesting to disable the power well\n");
		}
	}
}
277
/* Domains serviced by SKL power well 2 (PG2). */
#define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT(POWER_DOMAIN_PIPE_B) |			\
	BIT(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT(POWER_DOMAIN_PIPE_C) |			\
	BIT(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_E_LANES) |		\
	BIT(POWER_DOMAIN_AUX_B) |			\
	BIT(POWER_DOMAIN_AUX_C) |			\
	BIT(POWER_DOMAIN_AUX_D) |			\
	BIT(POWER_DOMAIN_AUDIO) |			\
	BIT(POWER_DOMAIN_VGA) |				\
	BIT(POWER_DOMAIN_INIT))
/* Domains serviced by the individual SKL DDI IO power wells. */
#define SKL_DISPLAY_DDI_A_E_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_A_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_E_LANES) |		\
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_B_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_C_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_D_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
	BIT(POWER_DOMAIN_INIT))
/* Domains that keep the DC-off "well" referenced (i.e. block DC5/DC6). */
#define SKL_DISPLAY_DC_OFF_POWER_DOMAINS (		\
	SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	BIT(POWER_DOMAIN_MODESET) |			\
	BIT(POWER_DOMAIN_AUX_A) |			\
	BIT(POWER_DOMAIN_INIT))
/* Everything not covered above sits in the always-on well. */
#define SKL_DISPLAY_ALWAYS_ON_POWER_DOMAINS (		\
	(POWER_DOMAIN_MASK & ~(				\
	SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	SKL_DISPLAY_DC_OFF_POWER_DOMAINS)) |		\
	BIT(POWER_DOMAIN_INIT))
319
/* Domains serviced by BXT power well 2 (PG2). */
#define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT(POWER_DOMAIN_PIPE_B) |			\
	BIT(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT(POWER_DOMAIN_PIPE_C) |			\
	BIT(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT(POWER_DOMAIN_AUX_B) |			\
	BIT(POWER_DOMAIN_AUX_C) |			\
	BIT(POWER_DOMAIN_AUDIO) |			\
	BIT(POWER_DOMAIN_VGA) |				\
	BIT(POWER_DOMAIN_GMBUS) |			\
	BIT(POWER_DOMAIN_INIT))
/* BXT power well 1 (PG1) is a superset of PG2 plus pipe A/eDP/PLLs. */
#define BXT_DISPLAY_POWERWELL_1_POWER_DOMAINS (		\
	BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	BIT(POWER_DOMAIN_PIPE_A) |			\
	BIT(POWER_DOMAIN_TRANSCODER_EDP) |		\
	BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PORT_DDI_A_LANES) |		\
	BIT(POWER_DOMAIN_AUX_A) |			\
	BIT(POWER_DOMAIN_PLLS) |			\
	BIT(POWER_DOMAIN_INIT))
/* Domains that keep the DC-off "well" referenced (i.e. block DC states). */
#define BXT_DISPLAY_DC_OFF_POWER_DOMAINS (		\
	BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	BIT(POWER_DOMAIN_MODESET) |			\
	BIT(POWER_DOMAIN_AUX_A) |			\
	BIT(POWER_DOMAIN_INIT))
/* Everything not covered above sits in the always-on well. */
#define BXT_DISPLAY_ALWAYS_ON_POWER_DOMAINS (		\
	(POWER_DOMAIN_MASK & ~(BXT_DISPLAY_POWERWELL_1_POWER_DOMAINS |	\
	BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS)) |	\
	BIT(POWER_DOMAIN_INIT))
354
/* Sanity-check the preconditions for entering DC9 (warnings only). */
static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	WARN(!IS_BROXTON(dev), "Platform doesn't support DC9.\n");
	WARN((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
		"DC9 already programmed to be enabled.\n");
	WARN(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
		"DC5 still not disabled to enable DC9.\n");
	WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on.\n");
	WARN(intel_irqs_enabled(dev_priv), "Interrupts not disabled yet.\n");

	 /*
	  * TODO: check for the following to verify the conditions to enter DC9
	  * state are satisfied:
	  * 1] Check relevant display engine registers to verify if mode set
	  * disable sequence was followed.
	  * 2] Check if display uninitialize sequence is initialized.
	  */
}
375
/* Sanity-check the preconditions for leaving DC9 (warnings only). */
static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
{
	WARN(intel_irqs_enabled(dev_priv), "Interrupts not disabled yet.\n");
	WARN(!(I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
		"DC9 already programmed to be disabled.\n");
	WARN(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
		"DC5 still not disabled.\n");

	 /*
	  * TODO: check for the following to verify DC9 state was indeed
	  * entered before programming to disable it:
	  * 1] Check relevant display engine registers to verify if mode
	  *  set disable sequence was followed.
	  * 2] Check if display uninitialize sequence is initialized.
	  */
}
392
393 static void gen9_set_dc_state_debugmask_memory_up(
394                         struct drm_i915_private *dev_priv)
395 {
396         uint32_t val;
397
398         /* The below bit doesn't need to be cleared ever afterwards */
399         val = I915_READ(DC_STATE_DEBUG);
400         if (!(val & DC_STATE_DEBUG_MASK_MEMORY_UP)) {
401                 val |= DC_STATE_DEBUG_MASK_MEMORY_UP;
402                 I915_WRITE(DC_STATE_DEBUG, val);
403                 POSTING_READ(DC_STATE_DEBUG);
404         }
405 }
406
/*
 * Program the requested DC (display C-state) enable bits into DC_STATE_EN,
 * restricted to the states this platform has (DC9 on BXT, DC6 otherwise)
 * and clamped by the i915.enable_dc module parameter.
 */
static void gen9_set_dc_state(struct drm_i915_private *dev_priv, uint32_t state)
{
	uint32_t val;
	uint32_t mask;

	/* Only ever touch the DC bits that exist on this platform. */
	mask = DC_STATE_EN_UPTO_DC5;
	if (IS_BROXTON(dev_priv))
		mask |= DC_STATE_EN_DC9;
	else
		mask |= DC_STATE_EN_UPTO_DC6;

	WARN_ON_ONCE(state & ~mask);

	/* enable_dc modparam: 0 disables DC states, 1 caps them at DC5. */
	if (i915.enable_dc == 0)
		state = DC_STATE_DISABLE;
	else if (i915.enable_dc == 1 && state > DC_STATE_EN_UPTO_DC5)
		state = DC_STATE_EN_UPTO_DC5;

	/* Debug mask must be in place before enabling DC5/DC6. */
	if (state & DC_STATE_EN_UPTO_DC5_DC6_MASK)
		gen9_set_dc_state_debugmask_memory_up(dev_priv);

	val = I915_READ(DC_STATE_EN);
	DRM_DEBUG_KMS("Setting DC state from %02x to %02x\n",
		      val & mask, state);
	val &= ~mask;
	val |= state;
	I915_WRITE(DC_STATE_EN, val);
	POSTING_READ(DC_STATE_EN);
}
436
/* Enter DC9 on Broxton; preconditions are asserted first. */
void bxt_enable_dc9(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc9(dev_priv);

	DRM_DEBUG_KMS("Enabling DC9\n");

	gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
}
445
/* Leave DC9 on Broxton; preconditions are asserted first. */
void bxt_disable_dc9(struct drm_i915_private *dev_priv)
{
	assert_can_disable_dc9(dev_priv);

	DRM_DEBUG_KMS("Disabling DC9\n");

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
}
454
/* Warn (once) if the CSR/DMC firmware doesn't appear to be programmed. */
static void assert_csr_loaded(struct drm_i915_private *dev_priv)
{
	WARN_ONCE(!I915_READ(CSR_PROGRAM(0)),
		  "CSR program storage start is NULL\n");
	WARN_ONCE(!I915_READ(CSR_SSP_BASE), "CSR SSP Base Not fine\n");
	WARN_ONCE(!I915_READ(CSR_HTP_SKL), "CSR HTP Not fine\n");
}
462
/* Sanity-check the preconditions for entering DC5 (warnings only). */
static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
					SKL_DISP_PW_2);

	WARN_ONCE(!IS_SKYLAKE(dev), "Platform doesn't support DC5.\n");
	WARN_ONCE(!HAS_RUNTIME_PM(dev), "Runtime PM not enabled.\n");
	/* PG2 must be down before DC5 can be entered. */
	WARN_ONCE(pg2_enabled, "PG2 not disabled to enable DC5.\n");

	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5),
		  "DC5 already programmed to be enabled.\n");
	WARN_ONCE(dev_priv->pm.suspended,
		  "DC5 cannot be enabled, if platform is runtime-suspended.\n");

	assert_csr_loaded(dev_priv);
}
480
/* Sanity-check the preconditions for leaving DC5 (warnings only). */
static void assert_can_disable_dc5(struct drm_i915_private *dev_priv)
{
	/*
	 * During initialization, the firmware may not be loaded yet.
	 * We still want to make sure that the DC enabling flag is cleared.
	 */
	if (dev_priv->power_domains.initializing)
		return;

	WARN_ONCE(dev_priv->pm.suspended,
		"Disabling of DC5 while platform is runtime-suspended should never happen.\n");
}
493
/* Enter DC5; preconditions are asserted first. */
static void gen9_enable_dc5(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc5(dev_priv);

	DRM_DEBUG_KMS("Enabling DC5\n");

	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
}
502
/* Sanity-check the preconditions for entering DC6 (warnings only). */
static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	WARN_ONCE(!IS_SKYLAKE(dev), "Platform doesn't support DC6.\n");
	WARN_ONCE(!HAS_RUNTIME_PM(dev), "Runtime PM not enabled.\n");
	/* Utility-pin backlight must be off before entering DC6. */
	WARN_ONCE(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
		  "Backlight is not disabled.\n");
	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
		  "DC6 already programmed to be enabled.\n");

	assert_csr_loaded(dev_priv);
}
516
/* Sanity-check the preconditions for leaving DC6 (warnings only). */
static void assert_can_disable_dc6(struct drm_i915_private *dev_priv)
{
	/*
	 * During initialization, the firmware may not be loaded yet.
	 * We still want to make sure that the DC enabling flag is cleared.
	 */
	if (dev_priv->power_domains.initializing)
		return;

	WARN_ONCE(!(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
		  "DC6 already programmed to be disabled.\n");
}
529
/* Disable whichever of DC5/DC6 is active; DC6 is only checked on SKL
 * when the enable_dc modparam permits it (matching the enable path). */
static void gen9_disable_dc5_dc6(struct drm_i915_private *dev_priv)
{
	assert_can_disable_dc5(dev_priv);

	if (IS_SKYLAKE(dev_priv) && i915.enable_dc != 0 && i915.enable_dc != 1)
		assert_can_disable_dc6(dev_priv);

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
}
539
540 void skl_enable_dc6(struct drm_i915_private *dev_priv)
541 {
542         assert_can_enable_dc6(dev_priv);
543
544         DRM_DEBUG_KMS("Enabling DC6\n");
545
546         gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
547
548 }
549
/* Leave DC6 on Skylake; preconditions are asserted first. */
void skl_disable_dc6(struct drm_i915_private *dev_priv)
{
	assert_can_disable_dc6(dev_priv);

	DRM_DEBUG_KMS("Disabling DC6\n");

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
}
558
559 static void skl_set_power_well(struct drm_i915_private *dev_priv,
560                         struct i915_power_well *power_well, bool enable)
561 {
562         struct drm_device *dev = dev_priv->dev;
563         uint32_t tmp, fuse_status;
564         uint32_t req_mask, state_mask;
565         bool is_enabled, enable_requested, check_fuse_status = false;
566
567         tmp = I915_READ(HSW_PWR_WELL_DRIVER);
568         fuse_status = I915_READ(SKL_FUSE_STATUS);
569
570         switch (power_well->data) {
571         case SKL_DISP_PW_1:
572                 if (wait_for((I915_READ(SKL_FUSE_STATUS) &
573                         SKL_FUSE_PG0_DIST_STATUS), 1)) {
574                         DRM_ERROR("PG0 not enabled\n");
575                         return;
576                 }
577                 break;
578         case SKL_DISP_PW_2:
579                 if (!(fuse_status & SKL_FUSE_PG1_DIST_STATUS)) {
580                         DRM_ERROR("PG1 in disabled state\n");
581                         return;
582                 }
583                 break;
584         case SKL_DISP_PW_DDI_A_E:
585         case SKL_DISP_PW_DDI_B:
586         case SKL_DISP_PW_DDI_C:
587         case SKL_DISP_PW_DDI_D:
588         case SKL_DISP_PW_MISC_IO:
589                 break;
590         default:
591                 WARN(1, "Unknown power well %lu\n", power_well->data);
592                 return;
593         }
594
595         req_mask = SKL_POWER_WELL_REQ(power_well->data);
596         enable_requested = tmp & req_mask;
597         state_mask = SKL_POWER_WELL_STATE(power_well->data);
598         is_enabled = tmp & state_mask;
599
600         if (enable) {
601                 if (!enable_requested) {
602                         WARN((tmp & state_mask) &&
603                                 !I915_READ(HSW_PWR_WELL_BIOS),
604                                 "Invalid for power well status to be enabled, unless done by the BIOS, \
605                                 when request is to disable!\n");
606                         if (power_well->data == SKL_DISP_PW_2) {
607                                 /*
608                                  * DDI buffer programming unnecessary during
609                                  * driver-load/resume as it's already done
610                                  * during modeset initialization then. It's
611                                  * also invalid here as encoder list is still
612                                  * uninitialized.
613                                  */
614                                 if (!dev_priv->power_domains.initializing)
615                                         intel_prepare_ddi(dev);
616                         }
617                         I915_WRITE(HSW_PWR_WELL_DRIVER, tmp | req_mask);
618                 }
619
620                 if (!is_enabled) {
621                         DRM_DEBUG_KMS("Enabling %s\n", power_well->name);
622                         if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
623                                 state_mask), 1))
624                                 DRM_ERROR("%s enable timeout\n",
625                                         power_well->name);
626                         check_fuse_status = true;
627                 }
628         } else {
629                 if (enable_requested) {
630                         I915_WRITE(HSW_PWR_WELL_DRIVER, tmp & ~req_mask);
631                         POSTING_READ(HSW_PWR_WELL_DRIVER);
632                         DRM_DEBUG_KMS("Disabling %s\n", power_well->name);
633                 }
634         }
635
636         if (check_fuse_status) {
637                 if (power_well->data == SKL_DISP_PW_1) {
638                         if (wait_for((I915_READ(SKL_FUSE_STATUS) &
639                                 SKL_FUSE_PG1_DIST_STATUS), 1))
640                                 DRM_ERROR("PG1 distributing status timeout\n");
641                 } else if (power_well->data == SKL_DISP_PW_2) {
642                         if (wait_for((I915_READ(SKL_FUSE_STATUS) &
643                                 SKL_FUSE_PG2_DIST_STATUS), 1))
644                                 DRM_ERROR("PG2 distributing status timeout\n");
645                 }
646         }
647
648         if (enable && !is_enabled)
649                 skl_power_well_post_enable(dev_priv, power_well);
650 }
651
/* Sync hw state to the SW refcount and drop any BIOS enable request. */
static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, power_well->count > 0);

	/*
	 * We're taking over the BIOS, so clear any requests made by it since
	 * the driver is in charge now.
	 */
	if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST)
		I915_WRITE(HSW_PWR_WELL_BIOS, 0);
}
664
/* Power well ops: enable entry point for HSW wells. */
static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, true);
}
670
/* Power well ops: disable entry point for HSW wells. */
static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, false);
}
676
677 static bool skl_power_well_enabled(struct drm_i915_private *dev_priv,
678                                         struct i915_power_well *power_well)
679 {
680         uint32_t mask = SKL_POWER_WELL_REQ(power_well->data) |
681                 SKL_POWER_WELL_STATE(power_well->data);
682
683         return (I915_READ(HSW_PWR_WELL_DRIVER) & mask) == mask;
684 }
685
/* Sync hw state to the SW refcount and drop any BIOS enable request. */
static void skl_power_well_sync_hw(struct drm_i915_private *dev_priv,
				struct i915_power_well *power_well)
{
	skl_set_power_well(dev_priv, power_well, power_well->count > 0);

	/* Clear any request made by BIOS as driver is taking over */
	I915_WRITE(HSW_PWR_WELL_BIOS, 0);
}
694
/* Power well ops: enable entry point for SKL wells. */
static void skl_power_well_enable(struct drm_i915_private *dev_priv,
				struct i915_power_well *power_well)
{
	skl_set_power_well(dev_priv, power_well, true);
}
700
/* Power well ops: disable entry point for SKL wells. */
static void skl_power_well_disable(struct drm_i915_private *dev_priv,
				struct i915_power_well *power_well)
{
	skl_set_power_well(dev_priv, power_well, false);
}
706
707 static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
708                                            struct i915_power_well *power_well)
709 {
710         return (I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0;
711 }
712
/* "Enabling" the DC-off well means forcing DC5/DC6 off. */
static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	gen9_disable_dc5_dc6(dev_priv);
}
718
719 static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
720                                            struct i915_power_well *power_well)
721 {
722         if (IS_SKYLAKE(dev_priv) && i915.enable_dc != 0 && i915.enable_dc != 1)
723                 skl_enable_dc6(dev_priv);
724         else
725                 gen9_enable_dc5(dev_priv);
726 }
727
/* Sync the DC state with the DC-off well's refcount: held references force
 * DC states off, otherwise program the deepest allowed state. */
static void gen9_dc_off_power_well_sync_hw(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	if (power_well->count > 0) {
		gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
	} else {
		if (IS_SKYLAKE(dev_priv) && i915.enable_dc != 0 &&
		    i915.enable_dc != 1)
			gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
		else
			gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
	}
}
741
/* No-op hook shared by sync_hw/enable/disable of always-on wells. */
static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
}
746
/* Always-on wells are, by definition, always reported enabled. */
static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
					     struct i915_power_well *power_well)
{
	return true;
}
752
/*
 * Set a VLV/CHV Punit-controlled power well to @enable.
 *
 * The request is written to PUNIT_REG_PWRGT_CTRL and completion is
 * confirmed by polling PUNIT_REG_PWRGT_STATUS; all Punit accesses here
 * are serialized under rps.hw_lock.
 */
static void vlv_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	enum punit_power_well power_well_id = power_well->data;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(power_well_id);
	state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
			 PUNIT_PWRGT_PWR_GATE(power_well_id);

	mutex_lock(&dev_priv->rps.hw_lock);

/* True once the status register reflects the requested state. */
#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)

	/* Skip the request if the well is already in the desired state. */
	if (COND)
		goto out;

	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
	ctrl &= ~mask;
	ctrl |= state;
	vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);

	if (wait_for(COND, 100))
		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
			  state,
			  vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));

#undef COND

out:
	mutex_unlock(&dev_priv->rps.hw_lock);
}
788
/* Sync the HW well state with the SW refcount taken over from BIOS. */
static void vlv_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, power_well->count > 0);
}
794
/* VLV power well .enable hook: request the well powered on. */
static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, true);
}
800
/* VLV power well .disable hook: request the well powered off. */
static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, false);
}
806
807 static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
808                                    struct i915_power_well *power_well)
809 {
810         int power_well_id = power_well->data;
811         bool enabled = false;
812         u32 mask;
813         u32 state;
814         u32 ctrl;
815
816         mask = PUNIT_PWRGT_MASK(power_well_id);
817         ctrl = PUNIT_PWRGT_PWR_ON(power_well_id);
818
819         mutex_lock(&dev_priv->rps.hw_lock);
820
821         state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
822         /*
823          * We only ever set the power-on and power-gate states, anything
824          * else is unexpected.
825          */
826         WARN_ON(state != PUNIT_PWRGT_PWR_ON(power_well_id) &&
827                 state != PUNIT_PWRGT_PWR_GATE(power_well_id));
828         if (state == ctrl)
829                 enabled = true;
830
831         /*
832          * A transient state at this point would mean some unexpected party
833          * is poking at the power controls too.
834          */
835         ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
836         WARN_ON(ctrl != state);
837
838         mutex_unlock(&dev_priv->rps.hw_lock);
839
840         return enabled;
841 }
842
843 static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
844 {
845         enum pipe pipe;
846
847         /*
848          * Enable the CRI clock source so we can get at the
849          * display and the reference clock for VGA
850          * hotplug / manual detection. Supposedly DSI also
851          * needs the ref clock up and running.
852          *
853          * CHV DPLL B/C have some issues if VGA mode is enabled.
854          */
855         for_each_pipe(dev_priv->dev, pipe) {
856                 u32 val = I915_READ(DPLL(pipe));
857
858                 val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
859                 if (pipe != PIPE_A)
860                         val |= DPLL_INTEGRATED_CRI_CLK_VLV;
861
862                 I915_WRITE(DPLL(pipe), val);
863         }
864
865         spin_lock_irq(&dev_priv->irq_lock);
866         valleyview_enable_display_irqs(dev_priv);
867         spin_unlock_irq(&dev_priv->irq_lock);
868
869         /*
870          * During driver initialization/resume we can avoid restoring the
871          * part of the HW/SW state that will be inited anyway explicitly.
872          */
873         if (dev_priv->power_domains.initializing)
874                 return;
875
876         intel_hpd_init(dev_priv);
877
878         i915_redisable_vga_power_on(dev_priv->dev);
879 }
880
/*
 * Display-side teardown done before the VLV disp2d / CHV pipe-A power
 * well goes down: mask display interrupts and reset the power sequencer
 * bookkeeping.
 */
static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_disable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/* make sure we're done processing display irqs */
	vlv_power_sequencer_reset(dev_priv);
}
889
/* Power on the disp2d well, then redo the display-side init it gates. */
static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);

	vlv_set_power_well(dev_priv, power_well, true);

	vlv_display_power_well_init(dev_priv);
}
899
/* Tear down the display side first, then power off the disp2d well. */
static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);

	vlv_display_power_well_deinit(dev_priv);

	vlv_set_power_well(dev_priv, power_well, false);
}
909
/*
 * Power on the VLV DPIO common BC well and de-assert the PHY common
 * lane reset afterwards, per the ordering in the VBIOS notes quoted
 * below.
 */
static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);

	/* since ref/cri clock was enabled */
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */

	vlv_set_power_well(dev_priv, power_well, true);

	/*
	 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
	 *  6.  De-assert cmn_reset/side_reset. Same as VLV X0.
	 *   a. GUnit 0x2110 bit[0] set to 1 (def 0)
	 *   b. The other bits such as sfr settings / modesel may all
	 *      be set to 0.
	 *
	 * This should only be done on init and resume from S3 with
	 * both PLLs disabled, or we risk losing DPIO and PLL
	 * synchronization.
	 */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
}
933
/*
 * Assert the PHY common lane reset and power off the DPIO common BC
 * well. All PLLs must already be disabled (asserted below).
 */
static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum pipe pipe;

	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);

	for_each_pipe(dev_priv, pipe)
		assert_pll_disabled(dev_priv, pipe);

	/* Assert common reset */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);

	vlv_set_power_well(dev_priv, power_well, false);
}
949
950 #define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)
951
952 static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv,
953                                                  int power_well_id)
954 {
955         struct i915_power_domains *power_domains = &dev_priv->power_domains;
956         int i;
957
958         for (i = 0; i < power_domains->power_well_count; i++) {
959                 struct i915_power_well *power_well;
960
961                 power_well = &power_domains->power_wells[i];
962                 if (power_well->data == power_well_id)
963                         return power_well;
964         }
965
966         return NULL;
967 }
968
969 #define BITS_SET(val, bits) (((val) & (bits)) == (bits))
970
/*
 * Sanity check: derive the expected DISPLAY_PHY_STATUS value from the
 * cached chv_phy_control and the current state of the two common lane
 * power wells, then compare it against what the hardware reports,
 * warning on any mismatch.
 */
static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn_bc =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
	struct i915_power_well *cmn_d =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_D);
	u32 phy_control = dev_priv->chv_phy_control;
	u32 phy_status = 0;
	u32 phy_status_mask = 0xffffffff;
	u32 tmp;

	/*
	 * The BIOS can leave the PHY is some weird state
	 * where it doesn't fully power down some parts.
	 * Disable the asserts until the PHY has been fully
	 * reset (ie. the power well has been disabled at
	 * least once).
	 */
	if (!dev_priv->chv_phy_assert[DPIO_PHY0])
		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) |
				     PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1));

	if (!dev_priv->chv_phy_assert[DPIO_PHY1])
		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));

	if (cmn_bc->ops->is_enabled(dev_priv, cmn_bc)) {
		phy_status |= PHY_POWERGOOD(DPIO_PHY0);

		/* this assumes override is only used to enable lanes */
		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0);

		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1);

		/* CL1 is on whenever anything is on in either channel */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) |
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0);

		/*
		 * The DPLLB check accounts for the pipe B + port A usage
		 * with CL2 powered up but all the lanes in the second channel
		 * powered down.
		 */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) &&
		    (I915_READ(DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0)
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1);

		/* Spline LDOs: 0x3 covers lanes 0/1, 0xc covers lanes 2/3. */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1);
	}

	if (cmn_d->ops->is_enabled(dev_priv, cmn_d)) {
		phy_status |= PHY_POWERGOOD(DPIO_PHY1);

		/* this assumes override is only used to enable lanes */
		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1);
	}

	phy_status &= phy_status_mask;

	/*
	 * The PHY may be busy with some initial calibration and whatnot,
	 * so the power state can take a while to actually change.
	 */
	if (wait_for((tmp = I915_READ(DISPLAY_PHY_STATUS) & phy_status_mask) == phy_status, 10))
		WARN(phy_status != tmp,
		     "Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
		     tmp, phy_status, dev_priv->chv_phy_control);
}
1073
1074 #undef BITS_SET
1075
/*
 * Power on a CHV DPIO common lane well (BC -> PHY0, D -> PHY1), wait
 * for the PHY powergood signal, program the dynamic power down bits in
 * the PHY and finally de-assert the common lane reset in
 * DISPLAY_PHY_CONTROL. The sideband access to pick the PHY is done via
 * the pipe associated with it (PHY0 via pipe A, PHY1 via pipe C).
 */
static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	enum dpio_phy phy;
	enum pipe pipe;
	uint32_t tmp;

	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
		     power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);

	if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
		pipe = PIPE_A;
		phy = DPIO_PHY0;
	} else {
		pipe = PIPE_C;
		phy = DPIO_PHY1;
	}

	/* since ref/cri clock was enabled */
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
	vlv_set_power_well(dev_priv, power_well, true);

	/* Poll for phypwrgood signal */
	if (wait_for(I915_READ(DISPLAY_PHY_STATUS) & PHY_POWERGOOD(phy), 1))
		DRM_ERROR("Display PHY %d is not power up\n", phy);

	mutex_lock(&dev_priv->sb_lock);

	/* Enable dynamic power down */
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28);
	tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
		DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);

	if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
		tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
		tmp |= DPIO_DYNPWRDOWNEN_CH1;
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
	} else {
		/*
		 * Force the non-existing CL2 off. BXT does this
		 * too, so maybe it saves some power even though
		 * CL2 doesn't exist?
		 */
		tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
		tmp |= DPIO_CL2_LDOFUSE_PWRENB;
		vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp);
	}

	mutex_unlock(&dev_priv->sb_lock);

	dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
		      phy, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);
}
1135
/*
 * Assert the common lane reset for the PHY behind this well and power
 * the well off. The PLLs feeding the PHY must already be disabled.
 */
static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum dpio_phy phy;

	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
		     power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);

	if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
		phy = DPIO_PHY0;
		assert_pll_disabled(dev_priv, PIPE_A);
		assert_pll_disabled(dev_priv, PIPE_B);
	} else {
		phy = DPIO_PHY1;
		assert_pll_disabled(dev_priv, PIPE_C);
	}

	dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	vlv_set_power_well(dev_priv, power_well, false);

	DRM_DEBUG_KMS("Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
		      phy, dev_priv->chv_phy_control);

	/* PHY is fully reset now, so we can enable the PHY state asserts */
	dev_priv->chv_phy_assert[phy] = true;

	assert_chv_phy_status(dev_priv);
}
1166
/*
 * Sanity check: read back the per-channel lane power down status from
 * the PHY common lane registers and warn if it doesn't match what the
 * override state and lane @mask imply.
 */
static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy,
				     enum dpio_channel ch, bool override, unsigned int mask)
{
	enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C;
	u32 reg, val, expected, actual;

	/*
	 * The BIOS can leave the PHY is some weird state
	 * where it doesn't fully power down some parts.
	 * Disable the asserts until the PHY has been fully
	 * reset (ie. the power well has been disabled at
	 * least once).
	 */
	if (!dev_priv->chv_phy_assert[phy])
		return;

	if (ch == DPIO_CH0)
		reg = _CHV_CMN_DW0_CH0;
	else
		reg = _CHV_CMN_DW6_CH1;

	mutex_lock(&dev_priv->sb_lock);
	val = vlv_dpio_read(dev_priv, pipe, reg);
	mutex_unlock(&dev_priv->sb_lock);

	/*
	 * This assumes !override is only used when the port is disabled.
	 * All lanes should power down even without the override when
	 * the port is disabled.
	 */
	if (!override || mask == 0xf) {
		expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
		/*
		 * If CH1 common lane is not active anymore
		 * (eg. for pipe B DPLL) the entire channel will
		 * shut down, which causes the common lane registers
		 * to read as 0. That means we can't actually check
		 * the lane power down status bits, but as the entire
		 * register reads as 0 it's a good indication that the
		 * channel is indeed entirely powered down.
		 */
		if (ch == DPIO_CH1 && val == 0)
			expected = 0;
	} else if (mask != 0x0) {
		/* Some lanes enabled: only "any lane" powerdown expected. */
		expected = DPIO_ANYDL_POWERDOWN;
	} else {
		expected = 0;
	}

	/* Shift the channel's powerdown status bits down for comparison. */
	if (ch == DPIO_CH0)
		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0;
	else
		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1;
	actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;

	WARN(actual != expected,
	     "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
	     !!(actual & DPIO_ALLDL_POWERDOWN), !!(actual & DPIO_ANYDL_POWERDOWN),
	     !!(expected & DPIO_ALLDL_POWERDOWN), !!(expected & DPIO_ANYDL_POWERDOWN),
	     reg, val);
}
1228
/*
 * Set or clear the power down override enable for one PHY channel in
 * DISPLAY_PHY_CONTROL, under the power domains lock. Returns the
 * previous override state so callers can restore it.
 */
bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
			  enum dpio_channel ch, bool override)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	bool was_override;

	mutex_lock(&power_domains->lock);

	was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	/* No register write needed if the override state doesn't change. */
	if (override == was_override)
		goto out;

	if (override)
		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
	else
		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
		      phy, ch, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);

out:
	mutex_unlock(&power_domains->lock);

	return was_override;
}
1259
/*
 * Program the per-lane power down override @mask (and its enable bit
 * per @override) for the encoder's PHY channel in DISPLAY_PHY_CONTROL,
 * then cross-check both the PHY status and the lane powergate state.
 */
void chv_phy_powergate_lanes(struct intel_encoder *encoder,
			     bool override, unsigned int mask)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	enum dpio_phy phy = vlv_dport_to_phy(enc_to_dig_port(&encoder->base));
	enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));

	mutex_lock(&power_domains->lock);

	/* Replace the channel's old lane mask with the new one. */
	dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
	dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);

	if (override)
		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
	else
		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
		      phy, ch, mask, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);

	assert_chv_phy_powergate(dev_priv, phy, ch, override, mask);

	mutex_unlock(&power_domains->lock);
}
1289
/*
 * Report whether the CHV per-pipe power well is on, cross-checking
 * that the Punit status (SSS) and control (SSC) fields agree.
 */
static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	enum pipe pipe = power_well->data;
	bool enabled;
	u32 state, ctrl;

	mutex_lock(&dev_priv->rps.hw_lock);

	state = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe);
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe));
	enabled = state == DP_SSS_PWR_ON(pipe);

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSC_MASK(pipe);
	/* SSC sits 16 bits below SSS in PUNIT_REG_DSPFREQ. */
	WARN_ON(ctrl << 16 != state);

	mutex_unlock(&dev_priv->rps.hw_lock);

	return enabled;
}
1318
/*
 * Set a CHV per-pipe power well to @enable by writing the SSC request
 * field in PUNIT_REG_DSPFREQ and polling the SSS status field, all
 * under rps.hw_lock which serializes Punit access.
 */
static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well,
				    bool enable)
{
	enum pipe pipe = power_well->data;
	u32 state;
	u32 ctrl;

	state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);

	mutex_lock(&dev_priv->rps.hw_lock);

/* True once the status field reflects the requested state. */
#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe)) == state)

	/* Skip the request if the well is already in the desired state. */
	if (COND)
		goto out;

	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
	ctrl &= ~DP_SSC_MASK(pipe);
	ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, ctrl);

	if (wait_for(COND, 100))
		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
			  state,
			  vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ));

#undef COND

out:
	mutex_unlock(&dev_priv->rps.hw_lock);
}
1352
/* Sync the pipe-A well's HW state with the SW refcount (CHV only uses pipe A). */
static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PIPE_A);

	chv_set_pipe_power_well(dev_priv, power_well, power_well->count > 0);
}
1360
/* Power on the pipe-A well, then redo the display-side init it gates. */
static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
				       struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PIPE_A);

	chv_set_pipe_power_well(dev_priv, power_well, true);

	vlv_display_power_well_init(dev_priv);
}
1370
/* Tear down the display side first, then power off the pipe-A well. */
static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PIPE_A);

	vlv_display_power_well_deinit(dev_priv);

	chv_set_pipe_power_well(dev_priv, power_well, false);
}
1380
1381 /**
1382  * intel_display_power_get - grab a power domain reference
1383  * @dev_priv: i915 device instance
1384  * @domain: power domain to reference
1385  *
1386  * This function grabs a power domain reference for @domain and ensures that the
1387  * power domain and all its parents are powered up. Therefore users should only
1388  * grab a reference to the innermost power domain they need.
1389  *
1390  * Any power domain reference obtained by this function must have a symmetric
1391  * call to intel_display_power_put() to release the reference again.
1392  */
1393 void intel_display_power_get(struct drm_i915_private *dev_priv,
1394                              enum intel_display_power_domain domain)
1395 {
1396         struct i915_power_domains *power_domains;
1397         struct i915_power_well *power_well;
1398         int i;
1399
1400         intel_runtime_pm_get(dev_priv);
1401
1402         power_domains = &dev_priv->power_domains;
1403
1404         mutex_lock(&power_domains->lock);
1405
1406         for_each_power_well(i, power_well, BIT(domain), power_domains) {
1407                 if (!power_well->count++)
1408                         intel_power_well_enable(dev_priv, power_well);
1409         }
1410
1411         power_domains->domain_use_count[domain]++;
1412
1413         mutex_unlock(&power_domains->lock);
1414 }
1415
1416 /**
1417  * intel_display_power_put - release a power domain reference
1418  * @dev_priv: i915 device instance
1419  * @domain: power domain to reference
1420  *
1421  * This function drops the power domain reference obtained by
1422  * intel_display_power_get() and might power down the corresponding hardware
1423  * block right away if this is the last reference.
1424  */
1425 void intel_display_power_put(struct drm_i915_private *dev_priv,
1426                              enum intel_display_power_domain domain)
1427 {
1428         struct i915_power_domains *power_domains;
1429         struct i915_power_well *power_well;
1430         int i;
1431
1432         power_domains = &dev_priv->power_domains;
1433
1434         mutex_lock(&power_domains->lock);
1435
1436         WARN_ON(!power_domains->domain_use_count[domain]);
1437         power_domains->domain_use_count[domain]--;
1438
1439         for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
1440                 WARN_ON(!power_well->count);
1441
1442                 if (!--power_well->count)
1443                         intel_power_well_disable(dev_priv, power_well);
1444         }
1445
1446         mutex_unlock(&power_domains->lock);
1447
1448         intel_runtime_pm_put(dev_priv);
1449 }
1450
1451 #define HSW_ALWAYS_ON_POWER_DOMAINS (                   \
1452         BIT(POWER_DOMAIN_PIPE_A) |                      \
1453         BIT(POWER_DOMAIN_TRANSCODER_EDP) |              \
1454         BIT(POWER_DOMAIN_PORT_DDI_A_LANES) |            \
1455         BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |            \
1456         BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |            \
1457         BIT(POWER_DOMAIN_PORT_DDI_D_LANES) |            \
1458         BIT(POWER_DOMAIN_PORT_CRT) |                    \
1459         BIT(POWER_DOMAIN_PLLS) |                        \
1460         BIT(POWER_DOMAIN_AUX_A) |                       \
1461         BIT(POWER_DOMAIN_AUX_B) |                       \
1462         BIT(POWER_DOMAIN_AUX_C) |                       \
1463         BIT(POWER_DOMAIN_AUX_D) |                       \
1464         BIT(POWER_DOMAIN_GMBUS) |                       \
1465         BIT(POWER_DOMAIN_INIT))
/*
 * HSW/BDW have a single "display" power well; every domain that is not in
 * the always-on set is routed through it.
 */
#define HSW_DISPLAY_POWER_DOMAINS (				\
	(POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS) |	\
	BIT(POWER_DOMAIN_INIT))

/* BDW additionally keeps the pipe A panel fitter always on. */
#define BDW_ALWAYS_ON_POWER_DOMAINS (			\
	HSW_ALWAYS_ON_POWER_DOMAINS |			\
	BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER))
#define BDW_DISPLAY_POWER_DOMAINS (				\
	(POWER_DOMAIN_MASK & ~BDW_ALWAYS_ON_POWER_DOMAINS) |	\
	BIT(POWER_DOMAIN_INIT))

/* VLV/CHV: only POWER_DOMAIN_INIT is always on; the display well covers all. */
#define VLV_ALWAYS_ON_POWER_DOMAINS	BIT(POWER_DOMAIN_INIT)
#define VLV_DISPLAY_POWER_DOMAINS	POWER_DOMAIN_MASK

/* Domains served by the VLV B/C common lane well (DDI B/C, CRT, AUX B/C). */
#define VLV_DPIO_CMN_BC_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT(POWER_DOMAIN_PORT_CRT) |		\
	BIT(POWER_DOMAIN_AUX_B) |		\
	BIT(POWER_DOMAIN_AUX_C) |		\
	BIT(POWER_DOMAIN_INIT))

/* Per-TX-lane-pair domain sets; lanes 01 and 23 share the same domains. */
#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT(POWER_DOMAIN_AUX_B) |		\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT(POWER_DOMAIN_AUX_B) |		\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT(POWER_DOMAIN_AUX_C) |		\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT(POWER_DOMAIN_AUX_C) |		\
	BIT(POWER_DOMAIN_INIT))

/* CHV splits the common lanes into a B/C well and a separate D well. */
#define CHV_DPIO_CMN_BC_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT(POWER_DOMAIN_AUX_B) |		\
	BIT(POWER_DOMAIN_AUX_C) |		\
	BIT(POWER_DOMAIN_INIT))

#define CHV_DPIO_CMN_D_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT(POWER_DOMAIN_AUX_D) |		\
	BIT(POWER_DOMAIN_INIT))
/* Ops for the software-only "always-on" well: everything is a no-op. */
static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
	.sync_hw = i9xx_always_on_power_well_noop,
	.enable = i9xx_always_on_power_well_noop,
	.disable = i9xx_always_on_power_well_noop,
	.is_enabled = i9xx_always_on_power_well_enabled,
};

/* CHV pipe power well ops. */
static const struct i915_power_well_ops chv_pipe_power_well_ops = {
	.sync_hw = chv_pipe_power_well_sync_hw,
	.enable = chv_pipe_power_well_enable,
	.disable = chv_pipe_power_well_disable,
	.is_enabled = chv_pipe_power_well_enabled,
};

/*
 * CHV DPIO common lane well ops; sync_hw/is_enabled reuse the generic
 * VLV punit helpers, only enable/disable are CHV specific.
 */
static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = chv_dpio_cmn_power_well_enable,
	.disable = chv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};
1540
/*
 * Platforms without controllable display power wells get a single
 * software "always-on" well covering every power domain.
 */
static struct i915_power_well i9xx_always_on_power_well[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
	},
};
1549
/* HSW/BDW display power well ops. */
static const struct i915_power_well_ops hsw_power_well_ops = {
	.sync_hw = hsw_power_well_sync_hw,
	.enable = hsw_power_well_enable,
	.disable = hsw_power_well_disable,
	.is_enabled = hsw_power_well_enabled,
};

/* SKL-style (gen9+) power well ops. */
static const struct i915_power_well_ops skl_power_well_ops = {
	.sync_hw = skl_power_well_sync_hw,
	.enable = skl_power_well_enable,
	.disable = skl_power_well_disable,
	.is_enabled = skl_power_well_enabled,
};

/* Ops for the virtual "DC off" well that blocks DC5/DC6 entry. */
static const struct i915_power_well_ops gen9_dc_off_power_well_ops = {
	.sync_hw = gen9_dc_off_power_well_sync_hw,
	.enable = gen9_dc_off_power_well_enable,
	.disable = gen9_dc_off_power_well_disable,
	.is_enabled = gen9_dc_off_power_well_enabled,
};
1570
/*
 * Power well lists. Entry order matters: wells are enabled from lower to
 * higher index and disabled in reverse (see intel_power_domains_init()).
 */
static struct i915_power_well hsw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = HSW_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		.domains = HSW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
	},
};

static struct i915_power_well bdw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = BDW_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		.domains = BDW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
	},
};
1598
/* VLV display (disp2d) power well ops. */
static const struct i915_power_well_ops vlv_display_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = vlv_display_power_well_enable,
	.disable = vlv_display_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

/* VLV DPIO common lane well ops; enable/disable carry extra PHY handling. */
static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = vlv_dpio_cmn_power_well_enable,
	.disable = vlv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

/* Generic punit-controlled well ops used for the VLV DPIO TX lane wells. */
static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = vlv_power_well_enable,
	.disable = vlv_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};
1619
/*
 * VLV power well list. Order matters: wells are enabled from lower to
 * higher index and disabled in reverse (see intel_power_domains_init()).
 * Note that every dpio-tx well advertises the union of all four TX lane
 * domain sets, so any lane domain reference powers up all TX wells.
 */
static struct i915_power_well vlv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = VLV_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
		.data = PUNIT_POWER_WELL_ALWAYS_ON,
	},
	{
		.name = "display",
		.domains = VLV_DISPLAY_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DISP2D,
		.ops = &vlv_display_power_well_ops,
	},
	{
		.name = "dpio-tx-b-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
	},
	{
		.name = "dpio-tx-b-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
	},
	{
		.name = "dpio-tx-c-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
	},
	{
		.name = "dpio-tx-c-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
	},
	{
		.name = "dpio-common",
		.domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DPIO_CMN_BC,
		.ops = &vlv_dpio_cmn_power_well_ops,
	},
};
1677
/*
 * CHV power well list; shares the VLV always-on/display domain masks.
 * Order matters: wells are enabled from lower to higher index and
 * disabled in reverse (see intel_power_domains_init()).
 */
static struct i915_power_well chv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = VLV_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		/*
		 * Pipe A power well is the new disp2d well. Pipe B and C
		 * power wells don't actually exist. Pipe A power well is
		 * required for any pipe to work.
		 */
		.domains = VLV_DISPLAY_POWER_DOMAINS,
		.data = PIPE_A,
		.ops = &chv_pipe_power_well_ops,
	},
	{
		.name = "dpio-common-bc",
		.domains = CHV_DPIO_CMN_BC_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DPIO_CMN_BC,
		.ops = &chv_dpio_cmn_power_well_ops,
	},
	{
		.name = "dpio-common-d",
		.domains = CHV_DPIO_CMN_D_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DPIO_CMN_D,
		.ops = &chv_dpio_cmn_power_well_ops,
	},
};
1709
1710 bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
1711                                     int power_well_id)
1712 {
1713         struct i915_power_well *power_well;
1714         bool ret;
1715
1716         power_well = lookup_power_well(dev_priv, power_well_id);
1717         ret = power_well->ops->is_enabled(dev_priv, power_well);
1718
1719         return ret;
1720 }
1721
/*
 * SKL power well list. Order matters: wells are enabled from lower to
 * higher index and disabled in reverse (see intel_power_domains_init()).
 */
static struct i915_power_well skl_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = SKL_DISPLAY_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
		.data = SKL_DISP_PW_ALWAYS_ON,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.domains = 0,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_1,
	},
	{
		.name = "MISC IO power well",
		/* Handled by the DMC firmware */
		.domains = 0,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_MISC_IO,
	},
	{
		/* Virtual well: holding it blocks DC state entry. */
		.name = "DC off",
		.domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.data = SKL_DISP_PW_DC_OFF,
	},
	{
		.name = "power well 2",
		.domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_2,
	},
	{
		.name = "DDI A/E power well",
		.domains = SKL_DISPLAY_DDI_A_E_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_DDI_A_E,
	},
	{
		.name = "DDI B power well",
		.domains = SKL_DISPLAY_DDI_B_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_DDI_B,
	},
	{
		.name = "DDI C power well",
		.domains = SKL_DISPLAY_DDI_C_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_DDI_C,
	},
	{
		.name = "DDI D power well",
		.domains = SKL_DISPLAY_DDI_D_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_DDI_D,
	},
};
1781
1782 void skl_pw1_misc_io_init(struct drm_i915_private *dev_priv)
1783 {
1784         struct i915_power_well *well;
1785
1786         if (!IS_SKYLAKE(dev_priv))
1787                 return;
1788
1789         well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
1790         intel_power_well_enable(dev_priv, well);
1791
1792         well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
1793         intel_power_well_enable(dev_priv, well);
1794 }
1795
1796 void skl_pw1_misc_io_fini(struct drm_i915_private *dev_priv)
1797 {
1798         struct i915_power_well *well;
1799
1800         if (!IS_SKYLAKE(dev_priv))
1801                 return;
1802
1803         well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
1804         intel_power_well_disable(dev_priv, well);
1805
1806         well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
1807         intel_power_well_disable(dev_priv, well);
1808 }
1809
/*
 * BXT power well list. Order matters: wells are enabled from lower to
 * higher index and disabled in reverse (see intel_power_domains_init()).
 */
static struct i915_power_well bxt_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = BXT_DISPLAY_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "power well 1",
		.domains = BXT_DISPLAY_POWERWELL_1_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_1,
	},
	{
		/* Virtual well: holding it blocks DC state entry. */
		.name = "DC off",
		.domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.data = SKL_DISP_PW_DC_OFF,
	},
	{
		.name = "power well 2",
		.domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_2,
	},
};
1836
/*
 * Resolve the i915.disable_power_well module option: non-negative values
 * are taken as given (normalized to 0/1), while a negative value selects
 * the platform default, which disables power well support on Skylake.
 */
static int
sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
				   int disable_power_well)
{
	if (disable_power_well < 0) {
		if (IS_SKYLAKE(dev_priv)) {
			DRM_DEBUG_KMS("Disabling display power well support\n");
			return 0;
		}

		return 1;
	}

	return disable_power_well != 0;
}
1851
/* Install a platform's power well table into @power_domains. */
#define set_power_wells(power_domains, __power_wells) ({		\
	(power_domains)->power_wells = (__power_wells);			\
	(power_domains)->power_well_count = ARRAY_SIZE(__power_wells);	\
})
1856
/**
 * intel_power_domains_init - initializes the power domain structures
 * @dev_priv: i915 device instance
 *
 * Initializes the power domain structures for @dev_priv depending upon the
 * supported platform.
 *
 * Returns: 0 (there are currently no failure paths).
 */
int intel_power_domains_init(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	/* A negative module option value means "pick the platform default". */
	i915.disable_power_well = sanitize_disable_power_well_option(dev_priv,
						     i915.disable_power_well);

	/* Domain masks are built with BIT() and must fit in 32 bits. */
	BUILD_BUG_ON(POWER_DOMAIN_NUM > 31);

	mutex_init(&power_domains->lock);

	/*
	 * The enabling order will be from lower to higher indexed wells,
	 * the disabling order is reversed.
	 */
	if (IS_HASWELL(dev_priv->dev)) {
		set_power_wells(power_domains, hsw_power_wells);
	} else if (IS_BROADWELL(dev_priv->dev)) {
		set_power_wells(power_domains, bdw_power_wells);
	} else if (IS_SKYLAKE(dev_priv->dev) || IS_KABYLAKE(dev_priv->dev)) {
		set_power_wells(power_domains, skl_power_wells);
	} else if (IS_BROXTON(dev_priv->dev)) {
		set_power_wells(power_domains, bxt_power_wells);
	} else if (IS_CHERRYVIEW(dev_priv->dev)) {
		/*
		 * NOTE(review): keep the CHV check ahead of the VLV one —
		 * IS_VALLEYVIEW() may also match CHV on this kernel; confirm.
		 */
		set_power_wells(power_domains, chv_power_wells);
	} else if (IS_VALLEYVIEW(dev_priv->dev)) {
		set_power_wells(power_domains, vlv_power_wells);
	} else {
		set_power_wells(power_domains, i9xx_always_on_power_well);
	}

	return 0;
}
1897
/**
 * intel_power_domains_fini - finalizes the power domain structures
 * @dev_priv: i915 device instance
 *
 * Finalizes the power domain structures for @dev_priv depending upon the
 * supported platform. This function also ensures that the device stays
 * powered up so that the driver can be reloaded.
 */
void intel_power_domains_fini(struct drm_i915_private *dev_priv)
{
	/* The i915.ko module is still not prepared to be loaded when
	 * the power well is not enabled, so just enable it in case
	 * we're going to unload/reload. */
	intel_display_set_init_power(dev_priv, true);

	/* Remove the refcount we took to keep power well support disabled. */
	if (!i915.disable_power_well)
		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
}
1917
/*
 * Synchronize each power well's software state with the hardware and
 * cache the result of is_enabled() in ->hw_enabled, under the power
 * domains lock.
 */
static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;
	int i;

	mutex_lock(&power_domains->lock);
	for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
		power_well->ops->sync_hw(dev_priv, power_well);
		power_well->hw_enabled = power_well->ops->is_enabled(dev_priv,
								     power_well);
	}
	mutex_unlock(&power_domains->lock);
}
1932
/*
 * Bring up the SKL display core: force DC states off, enable the PCH
 * reset handshake and power up PG1/Misc I/O. On the resume path also
 * restore cdclk and re-program the DMC firmware.
 */
static void skl_display_core_init(struct drm_i915_private *dev_priv,
				  bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	uint32_t val;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* enable PCH reset handshake */
	val = I915_READ(HSW_NDE_RSTWRN_OPT);
	I915_WRITE(HSW_NDE_RSTWRN_OPT, val | RESET_PCH_HANDSHAKE_ENABLE);

	/* enable PG1 and Misc I/O */
	mutex_lock(&power_domains->lock);
	skl_pw1_misc_io_init(dev_priv);
	mutex_unlock(&power_domains->lock);

	/* cdclk setup and DMC reload are only needed when resuming */
	if (!resume)
		return;

	skl_init_cdclk(dev_priv);

	/* re-program the DMC firmware if a payload was loaded earlier */
	if (dev_priv->csr.dmc_payload)
		intel_csr_load_program(dev_priv);
}
1958
/*
 * Tear down the SKL display core: force DC states off, shut down cdclk
 * and power down PG1/Misc I/O (the reverse of skl_display_core_init()).
 */
static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	skl_uninit_cdclk(dev_priv);

	/* The spec doesn't call for removing the reset handshake flag */
	/* disable PG1 and Misc I/O */
	mutex_lock(&power_domains->lock);
	skl_pw1_misc_io_fini(dev_priv);
	mutex_unlock(&power_domains->lock);
}
1973
/*
 * Reconstruct the shadow copy of DISPLAY_PHY_CONTROL from the current
 * common lane power well and lane status, then write it to the hardware.
 */
static void chv_phy_control_init(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn_bc =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
	struct i915_power_well *cmn_d =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_D);

	/*
	 * DISPLAY_PHY_CONTROL can get corrupted if read. As a
	 * workaround never ever read DISPLAY_PHY_CONTROL, and
	 * instead maintain a shadow copy ourselves. Use the actual
	 * power well state and lane status to reconstruct the
	 * expected initial value.
	 */
	dev_priv->chv_phy_control =
		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0);

	/*
	 * If all lanes are disabled we leave the override disabled
	 * with all power down bits cleared to match the state we
	 * would use after disabling the port. Otherwise enable the
	 * override and set the lane powerdown bits according to the
	 * current lane status.
	 */
	if (cmn_bc->ops->is_enabled(dev_priv, cmn_bc)) {
		uint32_t status = I915_READ(DPLL(PIPE_A));
		unsigned int mask;

		mask = status & DPLL_PORTB_READY_MASK;
		/* all four lanes ready (0xf): leave the override off */
		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);

		mask = (status & DPLL_PORTC_READY_MASK) >> 4;
		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);

		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);

		dev_priv->chv_phy_assert[DPIO_PHY0] = false;
	} else {
		dev_priv->chv_phy_assert[DPIO_PHY0] = true;
	}

	if (cmn_d->ops->is_enabled(dev_priv, cmn_d)) {
		uint32_t status = I915_READ(DPIO_PHY_STATUS);
		unsigned int mask;

		mask = status & DPLL_PORTD_READY_MASK;

		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);

		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);

		dev_priv->chv_phy_assert[DPIO_PHY1] = false;
	} else {
		dev_priv->chv_phy_assert[DPIO_PHY1] = true;
	}

	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Initial PHY_CONTROL=0x%08x\n",
		      dev_priv->chv_phy_control);
}
2060
/*
 * Toggle the display PHY side reset on VLV unless the display is already
 * fully active (both wells on and DPIO common reset deasserted).
 */
static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
	struct i915_power_well *disp2d =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DISP2D);

	/* If the display might be already active skip this */
	if (cmn->ops->is_enabled(dev_priv, cmn) &&
	    disp2d->ops->is_enabled(dev_priv, disp2d) &&
	    I915_READ(DPIO_CTL) & DPIO_CMNRST)
		return;

	DRM_DEBUG_KMS("toggling display PHY side reset\n");

	/* cmnlane needs DPLL registers */
	disp2d->ops->enable(dev_priv, disp2d);

	/*
	 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
	 * Need to assert and de-assert PHY SB reset by gating the
	 * common lane power, then un-gating it.
	 * Simply ungating isn't enough to reset the PHY enough to get
	 * ports and lanes running.
	 */
	cmn->ops->disable(dev_priv, cmn);
}
2088
/**
 * intel_power_domains_init_hw - initialize hardware power domain state
 * @dev_priv: i915 device instance
 * @resume: whether we are resuming (passed through to the SKL/KBL display
 *          core init; ignored on other platforms)
 *
 * This function initializes the hardware power domain state and enables all
 * power domains using intel_display_set_init_power().
 */
void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)
{
	struct drm_device *dev = dev_priv->dev;
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	power_domains->initializing = true;

	/* Platform-specific hardware bring-up / workarounds. */
	if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
		skl_display_core_init(dev_priv, resume);
	} else if (IS_CHERRYVIEW(dev)) {
		mutex_lock(&power_domains->lock);
		chv_phy_control_init(dev_priv);
		mutex_unlock(&power_domains->lock);
	} else if (IS_VALLEYVIEW(dev)) {
		mutex_lock(&power_domains->lock);
		vlv_cmnlane_wa(dev_priv);
		mutex_unlock(&power_domains->lock);
	}

	/* For now, we need the power well to be always enabled. */
	intel_display_set_init_power(dev_priv, true);
	/* Disable power support if the user asked so. */
	if (!i915.disable_power_well)
		intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
	intel_power_domains_sync_hw(dev_priv);
	power_domains->initializing = false;
}
2123
/**
 * intel_power_domains_suspend - suspend power domain state
 * @dev_priv: i915 device instance
 *
 * This function prepares the hardware power domain state before entering
 * system suspend. It must be paired with intel_power_domains_init_hw().
 */
void intel_power_domains_suspend(struct drm_i915_private *dev_priv)
{
	/* SKL/KBL additionally tear down the display core. */
	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
		skl_display_core_uninit(dev_priv);

	/*
	 * Even if power well support was disabled we still want to disable
	 * power wells while we are system suspended.
	 */
	if (!i915.disable_power_well)
		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
}
2143
2144 /**
2145  * intel_runtime_pm_get - grab a runtime pm reference
2146  * @dev_priv: i915 device instance
2147  *
2148  * This function grabs a device-level runtime pm reference (mostly used for GEM
2149  * code to ensure the GTT or GT is on) and ensures that it is powered up.
2150  *
2151  * Any runtime pm reference obtained by this function must have a symmetric
2152  * call to intel_runtime_pm_put() to release the reference again.
2153  */
2154 void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
2155 {
2156         struct drm_device *dev = dev_priv->dev;
2157         struct device *device = &dev->pdev->dev;
2158
2159         if (!HAS_RUNTIME_PM(dev))
2160                 return;
2161
2162         pm_runtime_get_sync(device);
2163         WARN(dev_priv->pm.suspended, "Device still suspended.\n");
2164 }
2165
2166 /**
2167  * intel_runtime_pm_get_noresume - grab a runtime pm reference
2168  * @dev_priv: i915 device instance
2169  *
2170  * This function grabs a device-level runtime pm reference (mostly used for GEM
2171  * code to ensure the GTT or GT is on).
2172  *
2173  * It will _not_ power up the device but instead only check that it's powered
2174  * on.  Therefore it is only valid to call this functions from contexts where
2175  * the device is known to be powered up and where trying to power it up would
2176  * result in hilarity and deadlocks. That pretty much means only the system
2177  * suspend/resume code where this is used to grab runtime pm references for
2178  * delayed setup down in work items.
2179  *
2180  * Any runtime pm reference obtained by this function must have a symmetric
2181  * call to intel_runtime_pm_put() to release the reference again.
2182  */
2183 void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
2184 {
2185         struct drm_device *dev = dev_priv->dev;
2186         struct device *device = &dev->pdev->dev;
2187
2188         if (!HAS_RUNTIME_PM(dev))
2189                 return;
2190
2191         WARN(dev_priv->pm.suspended, "Getting nosync-ref while suspended.\n");
2192         pm_runtime_get_noresume(device);
2193 }
2194
2195 /**
2196  * intel_runtime_pm_put - release a runtime pm reference
2197  * @dev_priv: i915 device instance
2198  *
2199  * This function drops the device-level runtime pm reference obtained by
2200  * intel_runtime_pm_get() and might power down the corresponding
2201  * hardware block right away if this is the last reference.
2202  */
2203 void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
2204 {
2205         struct drm_device *dev = dev_priv->dev;
2206         struct device *device = &dev->pdev->dev;
2207
2208         if (!HAS_RUNTIME_PM(dev))
2209                 return;
2210
2211         pm_runtime_mark_last_busy(device);
2212         pm_runtime_put_autosuspend(device);
2213 }
2214
2215 /**
2216  * intel_runtime_pm_enable - enable runtime pm
2217  * @dev_priv: i915 device instance
2218  *
2219  * This function enables runtime pm at the end of the driver load sequence.
2220  *
2221  * Note that this function does currently not enable runtime pm for the
2222  * subordinate display power domains. That is only done on the first modeset
2223  * using intel_display_set_init_power().
2224  */
2225 void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
2226 {
2227         struct drm_device *dev = dev_priv->dev;
2228         struct device *device = &dev->pdev->dev;
2229
2230         if (!HAS_RUNTIME_PM(dev))
2231                 return;
2232
2233         /*
2234          * RPM depends on RC6 to save restore the GT HW context, so make RC6 a
2235          * requirement.
2236          */
2237         if (!intel_enable_rc6(dev)) {
2238                 DRM_INFO("RC6 disabled, disabling runtime PM support\n");
2239                 return;
2240         }
2241
2242         pm_runtime_set_autosuspend_delay(device, 10000); /* 10s */
2243         pm_runtime_mark_last_busy(device);
2244         pm_runtime_use_autosuspend(device);
2245
2246         pm_runtime_put_autosuspend(device);
2247 }
2248