Merge tag 'drm-intel-next-2021-01-04' of git://anongit.freedesktop.org/drm/drm-intel...
[platform/kernel/linux-starfive.git] / drivers / gpu / drm / i915 / display / intel_dp.c
1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Keith Packard <keithp@keithp.com>
25  *
26  */
27
28 #include <linux/export.h>
29 #include <linux/i2c.h>
30 #include <linux/notifier.h>
31 #include <linux/slab.h>
32 #include <linux/types.h>
33
34 #include <asm/byteorder.h>
35
36 #include <drm/drm_atomic_helper.h>
37 #include <drm/drm_crtc.h>
38 #include <drm/drm_dp_helper.h>
39 #include <drm/drm_edid.h>
40 #include <drm/drm_probe_helper.h>
41
42 #include "i915_debugfs.h"
43 #include "i915_drv.h"
44 #include "i915_trace.h"
45 #include "intel_atomic.h"
46 #include "intel_audio.h"
47 #include "intel_connector.h"
48 #include "intel_ddi.h"
49 #include "intel_display_types.h"
50 #include "intel_dp.h"
51 #include "intel_dp_link_training.h"
52 #include "intel_dp_mst.h"
53 #include "intel_dpio_phy.h"
54 #include "intel_fifo_underrun.h"
55 #include "intel_hdcp.h"
56 #include "intel_hdmi.h"
57 #include "intel_hotplug.h"
58 #include "intel_lspcon.h"
59 #include "intel_lvds.h"
60 #include "intel_panel.h"
61 #include "intel_psr.h"
62 #include "intel_sideband.h"
63 #include "intel_tc.h"
64 #include "intel_vdsc.h"
65
66 #define DP_DPRX_ESI_LEN 14
67
68 /* DP DSC throughput values used for slice count calculations KPixels/s */
69 #define DP_DSC_PEAK_PIXEL_RATE                  2720000
70 #define DP_DSC_MAX_ENC_THROUGHPUT_0             340000
71 #define DP_DSC_MAX_ENC_THROUGHPUT_1             400000
72
73 /* DP DSC FEC Overhead factor = 1/(0.972261) */
74 #define DP_DSC_FEC_OVERHEAD_FACTOR              972261
75
76 /* Compliance test status bits  */
77 #define INTEL_DP_RESOLUTION_SHIFT_MASK  0
78 #define INTEL_DP_RESOLUTION_PREFERRED   (1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
79 #define INTEL_DP_RESOLUTION_STANDARD    (2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
80 #define INTEL_DP_RESOLUTION_FAILSAFE    (3 << INTEL_DP_RESOLUTION_SHIFT_MASK)
81
/* Pairs a DP link rate with the DPLL divider settings that produce it. */
struct dp_link_dpll {
	int clock;		/* link rate in kHz (e.g. 162000, 270000) */
	struct dpll dpll;	/* divider values programmed into the DPLL */
};
86
/* DPLL settings for the two DP link rates supported on G4x platforms */
static const struct dp_link_dpll g4x_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};
93
/* DPLL settings for DP link rates on PCH (ILK-era) ports */
static const struct dp_link_dpll pch_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};
100
/* DPLL settings for DP link rates on Valleyview */
static const struct dp_link_dpll vlv_dpll[] = {
	{ 162000,
		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
	{ 270000,
		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};
107
/*
 * CHV supports eDP 1.4 that have  more link rates.
 * Below only provides the fixed rate but exclude variable rate.
 */
static const struct dp_link_dpll chv_dpll[] = {
	/*
	 * CHV requires to program fractional division for m2.
	 * m2 is stored in fixed point format using formula below
	 * (m2_int << 22) | m2_fraction
	 */
	{ 162000,	/* m2_int = 32, m2_fraction = 1677722 */
		{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
	{ 270000,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
};
123
/* Constants for DP DSC configurations */
/* Ascending list of compressed bpp values supported (per VESA DSC). */
static const u8 valid_dsc_bpp[] = {6, 8, 10, 12, 15};
126
/* With Single pipe configuration, HW is capable of supporting maximum
 * of 4 slices per line.
 */
static const u8 valid_dsc_slicecount[] = {1, 2, 4};
131
132 /**
133  * intel_dp_is_edp - is the given port attached to an eDP panel (either CPU or PCH)
134  * @intel_dp: DP struct
135  *
136  * If a CPU or PCH DP output is attached to an eDP panel, this function
137  * will return true, and false otherwise.
138  */
139 bool intel_dp_is_edp(struct intel_dp *intel_dp)
140 {
141         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
142
143         return dig_port->base.type == INTEL_OUTPUT_EDP;
144 }
145
146 static void intel_dp_link_down(struct intel_encoder *encoder,
147                                const struct intel_crtc_state *old_crtc_state);
148 static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
149 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
150 static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
151                                            const struct intel_crtc_state *crtc_state);
152 static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
153                                       enum pipe pipe);
154 static void intel_dp_unset_edid(struct intel_dp *intel_dp);
155
156 /* update sink rates from dpcd */
157 static void intel_dp_set_sink_rates(struct intel_dp *intel_dp)
158 {
159         static const int dp_rates[] = {
160                 162000, 270000, 540000, 810000
161         };
162         int i, max_rate;
163         int max_lttpr_rate;
164
165         if (drm_dp_has_quirk(&intel_dp->desc, 0,
166                              DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS)) {
167                 /* Needed, e.g., for Apple MBP 2017, 15 inch eDP Retina panel */
168                 static const int quirk_rates[] = { 162000, 270000, 324000 };
169
170                 memcpy(intel_dp->sink_rates, quirk_rates, sizeof(quirk_rates));
171                 intel_dp->num_sink_rates = ARRAY_SIZE(quirk_rates);
172
173                 return;
174         }
175
176         max_rate = drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]);
177         max_lttpr_rate = drm_dp_lttpr_max_link_rate(intel_dp->lttpr_common_caps);
178         if (max_lttpr_rate)
179                 max_rate = min(max_rate, max_lttpr_rate);
180
181         for (i = 0; i < ARRAY_SIZE(dp_rates); i++) {
182                 if (dp_rates[i] > max_rate)
183                         break;
184                 intel_dp->sink_rates[i] = dp_rates[i];
185         }
186
187         intel_dp->num_sink_rates = i;
188 }
189
/* Get length of rates array potentially limited by max_rate. */
static int intel_dp_rate_limit_len(const int *rates, int len, int max_rate)
{
	int i;

	/*
	 * Scan the ascending rate list from the top; the first entry not
	 * exceeding max_rate marks the usable prefix length.
	 */
	for (i = len - 1; i >= 0; i--) {
		if (rates[i] <= max_rate)
			return i + 1;
	}

	return 0;
}
203
/* Get length of common rates array potentially limited by max_rate. */
/* common_rates ascends (built from the ascending source/sink tables),
 * so this simply clips the tail above max_rate.
 */
static int intel_dp_common_len_rate_limit(const struct intel_dp *intel_dp,
					  int max_rate)
{
	return intel_dp_rate_limit_len(intel_dp->common_rates,
				       intel_dp->num_common_rates, max_rate);
}
211
212 /* Theoretical max between source and sink */
213 static int intel_dp_max_common_rate(struct intel_dp *intel_dp)
214 {
215         return intel_dp->common_rates[intel_dp->num_common_rates - 1];
216 }
217
218 /* Theoretical max between source and sink */
219 static int intel_dp_max_common_lane_count(struct intel_dp *intel_dp)
220 {
221         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
222         int source_max = dig_port->max_lanes;
223         int sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
224         int fia_max = intel_tc_port_fia_max_lane_count(dig_port);
225         int lttpr_max = drm_dp_lttpr_max_lane_count(intel_dp->lttpr_common_caps);
226
227         if (lttpr_max)
228                 sink_max = min(sink_max, lttpr_max);
229
230         return min3(source_max, sink_max, fia_max);
231 }
232
/* Current max lane count for this port (may have been reduced by link
 * training fallback, see intel_dp_get_link_train_fallback_values()).
 */
int intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
	return intel_dp->max_link_lane_count;
}
237
int
intel_dp_link_required(int pixel_clock, int bpp)
{
	/*
	 * pixel_clock is in kHz; convert the bit rate (clock * bpp) to a
	 * byte rate, rounding up (open-coded DIV_ROUND_UP by 8).
	 */
	return (pixel_clock * bpp + 7) / 8;
}
244
int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	/*
	 * max_link_clock is the link symbol clock (LS_Clk) in kHz, not the
	 * link rate usually quoted in Gbps. Each lane carries 8 data bits
	 * per LS_Clk, so channel-coding overhead (handled in the PHY) needs
	 * no accounting here: bandwidth is simply clock * lanes.
	 */
	int total = max_link_clock * max_lanes;

	return total;
}
256
257 bool intel_dp_can_bigjoiner(struct intel_dp *intel_dp)
258 {
259         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
260         struct intel_encoder *encoder = &intel_dig_port->base;
261         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
262
263         return INTEL_GEN(dev_priv) >= 12 ||
264                 (INTEL_GEN(dev_priv) == 11 &&
265                  encoder->port != PORT_A);
266 }
267
268 static int cnl_max_source_rate(struct intel_dp *intel_dp)
269 {
270         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
271         struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
272         enum port port = dig_port->base.port;
273
274         u32 voltage = intel_de_read(dev_priv, CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;
275
276         /* Low voltage SKUs are limited to max of 5.4G */
277         if (voltage == VOLTAGE_INFO_0_85V)
278                 return 540000;
279
280         /* For this SKU 8.1G is supported in all ports */
281         if (IS_CNL_WITH_PORT_F(dev_priv))
282                 return 810000;
283
284         /* For other SKUs, max rate on ports A and D is 5.4G */
285         if (port == PORT_A || port == PORT_D)
286                 return 540000;
287
288         return 810000;
289 }
290
291 static int icl_max_source_rate(struct intel_dp *intel_dp)
292 {
293         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
294         struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
295         enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
296
297         if (intel_phy_is_combo(dev_priv, phy) &&
298             !intel_dp_is_edp(intel_dp))
299                 return 540000;
300
301         return 810000;
302 }
303
static int ehl_max_source_rate(struct intel_dp *intel_dp)
{
	/* eDP on EHL/JSL tops out at 5.4G; everything else can do 8.1G. */
	return intel_dp_is_edp(intel_dp) ? 540000 : 810000;
}
311
/*
 * Pick the platform-specific table of supported source link rates for this
 * port and clamp it by the SKU/port max rate (gen10+) and the VBT-provided
 * max rate. Stores the result in intel_dp->source_rates/num_source_rates;
 * must run exactly once per encoder.
 */
static void
intel_dp_set_source_rates(struct intel_dp *intel_dp)
{
	/* The values must be in increasing order */
	static const int cnl_rates[] = {
		162000, 216000, 270000, 324000, 432000, 540000, 648000, 810000
	};
	static const int bxt_rates[] = {
		162000, 216000, 243000, 270000, 324000, 432000, 540000
	};
	static const int skl_rates[] = {
		162000, 216000, 270000, 324000, 432000, 540000
	};
	static const int hsw_rates[] = {
		162000, 270000, 540000
	};
	static const int g4x_rates[] = {
		162000, 270000
	};
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &dig_port->base;
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	const int *source_rates;
	int size, max_rate = 0, vbt_max_rate;

	/* This should only be done once */
	drm_WARN_ON(&dev_priv->drm,
		    intel_dp->source_rates || intel_dp->num_source_rates);

	/*
	 * Select the rate table by platform; only gen10+ additionally
	 * compute a SKU/port-dependent max rate to clamp the table with.
	 * Note the branch order matters: GEN9_LP/GEN9_BC must be checked
	 * after the >= 10 test.
	 */
	if (INTEL_GEN(dev_priv) >= 10) {
		source_rates = cnl_rates;
		size = ARRAY_SIZE(cnl_rates);
		if (IS_GEN(dev_priv, 10))
			max_rate = cnl_max_source_rate(intel_dp);
		else if (IS_JSL_EHL(dev_priv))
			max_rate = ehl_max_source_rate(intel_dp);
		else
			max_rate = icl_max_source_rate(intel_dp);
	} else if (IS_GEN9_LP(dev_priv)) {
		source_rates = bxt_rates;
		size = ARRAY_SIZE(bxt_rates);
	} else if (IS_GEN9_BC(dev_priv)) {
		source_rates = skl_rates;
		size = ARRAY_SIZE(skl_rates);
	} else if ((IS_HASWELL(dev_priv) && !IS_HSW_ULX(dev_priv)) ||
		   IS_BROADWELL(dev_priv)) {
		source_rates = hsw_rates;
		size = ARRAY_SIZE(hsw_rates);
	} else {
		source_rates = g4x_rates;
		size = ARRAY_SIZE(g4x_rates);
	}

	/* The VBT may limit the rate further; take the minimum of the two. */
	vbt_max_rate = intel_bios_dp_max_link_rate(encoder);
	if (max_rate && vbt_max_rate)
		max_rate = min(max_rate, vbt_max_rate);
	else if (vbt_max_rate)
		max_rate = vbt_max_rate;

	/* Truncate the table at the first entry above max_rate, if any. */
	if (max_rate)
		size = intel_dp_rate_limit_len(source_rates, size, max_rate);

	intel_dp->source_rates = source_rates;
	intel_dp->num_source_rates = size;
}
377
378 static int intersect_rates(const int *source_rates, int source_len,
379                            const int *sink_rates, int sink_len,
380                            int *common_rates)
381 {
382         int i = 0, j = 0, k = 0;
383
384         while (i < source_len && j < sink_len) {
385                 if (source_rates[i] == sink_rates[j]) {
386                         if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
387                                 return k;
388                         common_rates[k] = source_rates[i];
389                         ++k;
390                         ++i;
391                         ++j;
392                 } else if (source_rates[i] < sink_rates[j]) {
393                         ++i;
394                 } else {
395                         ++j;
396                 }
397         }
398         return k;
399 }
400
/* return index of rate in rates array, or -1 if not found */
static int intel_dp_rate_index(const int *rates, int len, int rate)
{
	int idx = 0;

	while (idx < len) {
		if (rates[idx] == rate)
			return idx;
		idx++;
	}

	return -1;
}
412
413 static void intel_dp_set_common_rates(struct intel_dp *intel_dp)
414 {
415         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
416
417         drm_WARN_ON(&i915->drm,
418                     !intel_dp->num_source_rates || !intel_dp->num_sink_rates);
419
420         intel_dp->num_common_rates = intersect_rates(intel_dp->source_rates,
421                                                      intel_dp->num_source_rates,
422                                                      intel_dp->sink_rates,
423                                                      intel_dp->num_sink_rates,
424                                                      intel_dp->common_rates);
425
426         /* Paranoia, there should always be something in common. */
427         if (drm_WARN_ON(&i915->drm, intel_dp->num_common_rates == 0)) {
428                 intel_dp->common_rates[0] = 162000;
429                 intel_dp->num_common_rates = 1;
430         }
431 }
432
433 static bool intel_dp_link_params_valid(struct intel_dp *intel_dp, int link_rate,
434                                        u8 lane_count)
435 {
436         /*
437          * FIXME: we need to synchronize the current link parameters with
438          * hardware readout. Currently fast link training doesn't work on
439          * boot-up.
440          */
441         if (link_rate == 0 ||
442             link_rate > intel_dp->max_link_rate)
443                 return false;
444
445         if (lane_count == 0 ||
446             lane_count > intel_dp_max_lane_count(intel_dp))
447                 return false;
448
449         return true;
450 }
451
452 static bool intel_dp_can_link_train_fallback_for_edp(struct intel_dp *intel_dp,
453                                                      int link_rate,
454                                                      u8 lane_count)
455 {
456         const struct drm_display_mode *fixed_mode =
457                 intel_dp->attached_connector->panel.fixed_mode;
458         int mode_rate, max_rate;
459
460         mode_rate = intel_dp_link_required(fixed_mode->clock, 18);
461         max_rate = intel_dp_max_data_rate(link_rate, lane_count);
462         if (mode_rate > max_rate)
463                 return false;
464
465         return true;
466 }
467
/*
 * Reduce the link parameters after a failed link training: first step down
 * to the next lower common link rate at the same lane count; once at the
 * lowest rate, halve the lane count at the max common rate. For eDP, a
 * reduction that could no longer carry the panel's fixed mode is refused
 * and training is retried with unchanged parameters instead.
 *
 * Returns 0 on success (parameters updated or deliberately kept), -1 when
 * no further fallback is possible (or on MST, where fallback is disabled).
 */
int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
					    int link_rate, u8 lane_count)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int index;

	/*
	 * TODO: Enable fallback on MST links once MST link compute can handle
	 * the fallback params.
	 */
	if (intel_dp->is_mst) {
		drm_err(&i915->drm, "Link Training Unsuccessful\n");
		return -1;
	}

	index = intel_dp_rate_index(intel_dp->common_rates,
				    intel_dp->num_common_rates,
				    link_rate);
	if (index > 0) {
		/* Not at the lowest rate yet: try one rate step down. */
		if (intel_dp_is_edp(intel_dp) &&
		    !intel_dp_can_link_train_fallback_for_edp(intel_dp,
							      intel_dp->common_rates[index - 1],
							      lane_count)) {
			drm_dbg_kms(&i915->drm,
				    "Retrying Link training for eDP with same parameters\n");
			return 0;
		}
		intel_dp->max_link_rate = intel_dp->common_rates[index - 1];
		intel_dp->max_link_lane_count = lane_count;
	} else if (lane_count > 1) {
		/* Lowest rate reached: halve the lane count at max rate. */
		if (intel_dp_is_edp(intel_dp) &&
		    !intel_dp_can_link_train_fallback_for_edp(intel_dp,
							      intel_dp_max_common_rate(intel_dp),
							      lane_count >> 1)) {
			drm_dbg_kms(&i915->drm,
				    "Retrying Link training for eDP with same parameters\n");
			return 0;
		}
		intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);
		intel_dp->max_link_lane_count = lane_count >> 1;
	} else {
		/* Lowest rate and a single lane: nothing left to try. */
		drm_err(&i915->drm, "Link Training Unsuccessful\n");
		return -1;
	}

	return 0;
}
515
516 u32 intel_dp_mode_to_fec_clock(u32 mode_clock)
517 {
518         return div_u64(mul_u32_u32(mode_clock, 1000000U),
519                        DP_DSC_FEC_OVERHEAD_FACTOR);
520 }
521
/* Size of the small joiner RAM in bits: 7680 bytes on gen11+, else 6144. */
static int
small_joiner_ram_size_bits(struct drm_i915_private *i915)
{
	return (INTEL_GEN(i915) >= 11 ? 7680 : 6144) * 8;
}
530
531 static u16 intel_dp_dsc_get_output_bpp(struct drm_i915_private *i915,
532                                        u32 link_clock, u32 lane_count,
533                                        u32 mode_clock, u32 mode_hdisplay,
534                                        bool bigjoiner)
535 {
536         u32 bits_per_pixel, max_bpp_small_joiner_ram;
537         int i;
538
539         /*
540          * Available Link Bandwidth(Kbits/sec) = (NumberOfLanes)*
541          * (LinkSymbolClock)* 8 * (TimeSlotsPerMTP)
542          * for SST -> TimeSlotsPerMTP is 1,
543          * for MST -> TimeSlotsPerMTP has to be calculated
544          */
545         bits_per_pixel = (link_clock * lane_count * 8) /
546                          intel_dp_mode_to_fec_clock(mode_clock);
547         drm_dbg_kms(&i915->drm, "Max link bpp: %u\n", bits_per_pixel);
548
549         /* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. width */
550         max_bpp_small_joiner_ram = small_joiner_ram_size_bits(i915) /
551                 mode_hdisplay;
552
553         if (bigjoiner)
554                 max_bpp_small_joiner_ram *= 2;
555
556         drm_dbg_kms(&i915->drm, "Max small joiner bpp: %u\n",
557                     max_bpp_small_joiner_ram);
558
559         /*
560          * Greatest allowed DSC BPP = MIN (output BPP from available Link BW
561          * check, output bpp from small joiner RAM check)
562          */
563         bits_per_pixel = min(bits_per_pixel, max_bpp_small_joiner_ram);
564
565         if (bigjoiner) {
566                 u32 max_bpp_bigjoiner =
567                         i915->max_cdclk_freq * 48 /
568                         intel_dp_mode_to_fec_clock(mode_clock);
569
570                 DRM_DEBUG_KMS("Max big joiner bpp: %u\n", max_bpp_bigjoiner);
571                 bits_per_pixel = min(bits_per_pixel, max_bpp_bigjoiner);
572         }
573
574         /* Error out if the max bpp is less than smallest allowed valid bpp */
575         if (bits_per_pixel < valid_dsc_bpp[0]) {
576                 drm_dbg_kms(&i915->drm, "Unsupported BPP %u, min %u\n",
577                             bits_per_pixel, valid_dsc_bpp[0]);
578                 return 0;
579         }
580
581         /* Find the nearest match in the array of known BPPs from VESA */
582         for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp) - 1; i++) {
583                 if (bits_per_pixel < valid_dsc_bpp[i + 1])
584                         break;
585         }
586         bits_per_pixel = valid_dsc_bpp[i];
587
588         /*
589          * Compressed BPP in U6.4 format so multiply by 16, for Gen 11,
590          * fractional part is 0
591          */
592         return bits_per_pixel << 4;
593 }
594
/*
 * Pick the smallest valid DSC slice count satisfying both the encoder
 * throughput limits and the sink's max slice width/count as reported in
 * its DSC DPCD; returns 0 when no supported slice count fits.
 */
static u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
				       int mode_clock, int mode_hdisplay,
				       bool bigjoiner)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 min_slice_count, i;
	int max_slice_width;

	/* Slices needed to stay within the per-slice encoder throughput cap. */
	if (mode_clock <= DP_DSC_PEAK_PIXEL_RATE)
		min_slice_count = DIV_ROUND_UP(mode_clock,
					       DP_DSC_MAX_ENC_THROUGHPUT_0);
	else
		min_slice_count = DIV_ROUND_UP(mode_clock,
					       DP_DSC_MAX_ENC_THROUGHPUT_1);

	max_slice_width = drm_dp_dsc_sink_max_slice_width(intel_dp->dsc_dpcd);
	if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) {
		drm_dbg_kms(&i915->drm,
			    "Unsupported slice width %d by DP DSC Sink device\n",
			    max_slice_width);
		return 0;
	}
	/* Also take into account max slice width */
	min_slice_count = max_t(u8, min_slice_count,
				DIV_ROUND_UP(mode_hdisplay,
					     max_slice_width));

	/* Find the closest match to the valid slice count values */
	for (i = 0; i < ARRAY_SIZE(valid_dsc_slicecount); i++) {
		/* Big joiner doubles the slice count (shift by bool == *2). */
		u8 test_slice_count = valid_dsc_slicecount[i] << bigjoiner;

		if (test_slice_count >
		    drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd, false))
			break;

		/* big joiner needs small joiner to be enabled */
		if (bigjoiner && test_slice_count < 4)
			continue;

		if (min_slice_count <= test_slice_count)
			return test_slice_count;
	}

	drm_dbg_kms(&i915->drm, "Unsupported Slice Count %d\n",
		    min_slice_count);
	return 0;
}
642
643 static enum intel_output_format
644 intel_dp_output_format(struct drm_connector *connector,
645                        const struct drm_display_mode *mode)
646 {
647         struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
648         const struct drm_display_info *info = &connector->display_info;
649
650         if (!connector->ycbcr_420_allowed ||
651             !drm_mode_is_420_only(info, mode))
652                 return INTEL_OUTPUT_FORMAT_RGB;
653
654         if (intel_dp->dfp.rgb_to_ycbcr &&
655             intel_dp->dfp.ycbcr_444_to_420)
656                 return INTEL_OUTPUT_FORMAT_RGB;
657
658         if (intel_dp->dfp.ycbcr_444_to_420)
659                 return INTEL_OUTPUT_FORMAT_YCBCR444;
660         else
661                 return INTEL_OUTPUT_FORMAT_YCBCR420;
662 }
663
664 int intel_dp_min_bpp(enum intel_output_format output_format)
665 {
666         if (output_format == INTEL_OUTPUT_FORMAT_RGB)
667                 return 6 * 3;
668         else
669                 return 8 * 3;
670 }
671
672 static int intel_dp_output_bpp(enum intel_output_format output_format, int bpp)
673 {
674         /*
675          * bpp value was assumed to RGB format. And YCbCr 4:2:0 output
676          * format of the number of bytes per pixel will be half the number
677          * of bytes of RGB pixel.
678          */
679         if (output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
680                 bpp /= 2;
681
682         return bpp;
683 }
684
685 static int
686 intel_dp_mode_min_output_bpp(struct drm_connector *connector,
687                              const struct drm_display_mode *mode)
688 {
689         enum intel_output_format output_format =
690                 intel_dp_output_format(connector, mode);
691
692         return intel_dp_output_bpp(output_format, intel_dp_min_bpp(output_format));
693 }
694
695 static bool intel_dp_hdisplay_bad(struct drm_i915_private *dev_priv,
696                                   int hdisplay)
697 {
698         /*
699          * Older platforms don't like hdisplay==4096 with DP.
700          *
701          * On ILK/SNB/IVB the pipe seems to be somewhat running (scanline
702          * and frame counter increment), but we don't get vblank interrupts,
703          * and the pipe underruns immediately. The link also doesn't seem
704          * to get trained properly.
705          *
706          * On CHV the vblank interrupts don't seem to disappear but
707          * otherwise the symptoms are similar.
708          *
709          * TODO: confirm the behaviour on HSW+
710          */
711         return hdisplay == 4096 && !HAS_DDI(dev_priv);
712 }
713
714 static enum drm_mode_status
715 intel_dp_mode_valid_downstream(struct intel_connector *connector,
716                                const struct drm_display_mode *mode,
717                                int target_clock)
718 {
719         struct intel_dp *intel_dp = intel_attached_dp(connector);
720         const struct drm_display_info *info = &connector->base.display_info;
721         int tmds_clock;
722
723         /* If PCON supports FRL MODE, check FRL bandwidth constraints */
724         if (intel_dp->dfp.pcon_max_frl_bw) {
725                 int target_bw;
726                 int max_frl_bw;
727                 int bpp = intel_dp_mode_min_output_bpp(&connector->base, mode);
728
729                 target_bw = bpp * target_clock;
730
731                 max_frl_bw = intel_dp->dfp.pcon_max_frl_bw;
732
733                 /* converting bw from Gbps to Kbps*/
734                 max_frl_bw = max_frl_bw * 1000000;
735
736                 if (target_bw > max_frl_bw)
737                         return MODE_CLOCK_HIGH;
738
739                 return MODE_OK;
740         }
741
742         if (intel_dp->dfp.max_dotclock &&
743             target_clock > intel_dp->dfp.max_dotclock)
744                 return MODE_CLOCK_HIGH;
745
746         /* Assume 8bpc for the DP++/HDMI/DVI TMDS clock check */
747         tmds_clock = target_clock;
748         if (drm_mode_is_420_only(info, mode))
749                 tmds_clock /= 2;
750
751         if (intel_dp->dfp.min_tmds_clock &&
752             tmds_clock < intel_dp->dfp.min_tmds_clock)
753                 return MODE_CLOCK_LOW;
754         if (intel_dp->dfp.max_tmds_clock &&
755             tmds_clock > intel_dp->dfp.max_tmds_clock)
756                 return MODE_CLOCK_HIGH;
757
758         return MODE_OK;
759 }
760
/*
 * intel_dp_mode_valid - filter display modes this DP connector cannot drive.
 *
 * Rejects modes that exceed the eDP panel's fixed mode, the platform
 * dotclock limit (doubled when the big joiner is usable), the link
 * bandwidth (relaxed when DSC is available), or the downstream facing
 * port's limits. Returns MODE_OK or the specific rejection reason.
 */
static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
		    struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
	struct drm_i915_private *dev_priv = to_i915(connector->dev);
	int target_clock = mode->clock;
	int max_rate, mode_rate, max_lanes, max_link_clock;
	int max_dotclk = dev_priv->max_dotclk_freq;
	u16 dsc_max_output_bpp = 0;
	u8 dsc_slice_count = 0;
	enum drm_mode_status status;
	bool dsc = false, bigjoiner = false;

	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
		return MODE_NO_DBLESCAN;

	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		return MODE_H_ILLEGAL;

	/* eDP panels cannot exceed their native fixed mode timings. */
	if (intel_dp_is_edp(intel_dp) && fixed_mode) {
		if (mode->hdisplay > fixed_mode->hdisplay)
			return MODE_PANEL;

		if (mode->vdisplay > fixed_mode->vdisplay)
			return MODE_PANEL;

		/* The panel is always driven at its native clock. */
		target_clock = fixed_mode->clock;
	}

	if (mode->clock < 10000)
		return MODE_CLOCK_LOW;

	/*
	 * Modes too fast or too wide for a single pipe may still be
	 * drivable by splitting them across two pipes (big joiner),
	 * which doubles the effective dotclock limit.
	 */
	if ((target_clock > max_dotclk || mode->hdisplay > 5120) &&
	    intel_dp_can_bigjoiner(intel_dp)) {
		bigjoiner = true;
		max_dotclk *= 2;
	}
	if (target_clock > max_dotclk)
		return MODE_CLOCK_HIGH;

	max_link_clock = intel_dp_max_link_rate(intel_dp);
	max_lanes = intel_dp_max_lane_count(intel_dp);

	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
	mode_rate = intel_dp_link_required(target_clock,
					   intel_dp_mode_min_output_bpp(connector, mode));

	if (intel_dp_hdisplay_bad(dev_priv, mode->hdisplay))
		return MODE_H_ILLEGAL;

	/*
	 * Output bpp is stored in 6.4 format so right shift by 4 to get the
	 * integer value since we support only integer values of bpp.
	 */
	if ((INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) &&
	    drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)) {
		if (intel_dp_is_edp(intel_dp)) {
			dsc_max_output_bpp =
				drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4;
			dsc_slice_count =
				drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
								true);
		} else if (drm_dp_sink_supports_fec(intel_dp->fec_capable)) {
			dsc_max_output_bpp =
				intel_dp_dsc_get_output_bpp(dev_priv,
							    max_link_clock,
							    max_lanes,
							    target_clock,
							    mode->hdisplay,
							    bigjoiner) >> 4;
			dsc_slice_count =
				intel_dp_dsc_get_slice_count(intel_dp,
							     target_clock,
							     mode->hdisplay,
							     bigjoiner);
		}

		/* DSC is only usable if both a bpp and a slice count were found. */
		dsc = dsc_max_output_bpp && dsc_slice_count;
	}

	/* big joiner configuration needs DSC */
	if (bigjoiner && !dsc)
		return MODE_CLOCK_HIGH;

	if (mode_rate > max_rate && !dsc)
		return MODE_CLOCK_HIGH;

	status = intel_dp_mode_valid_downstream(intel_connector,
						mode, target_clock);
	if (status != MODE_OK)
		return status;

	return intel_mode_valid_max_plane_size(dev_priv, mode, bigjoiner);
}
858
859 u32 intel_dp_pack_aux(const u8 *src, int src_bytes)
860 {
861         int i;
862         u32 v = 0;
863
864         if (src_bytes > 4)
865                 src_bytes = 4;
866         for (i = 0; i < src_bytes; i++)
867                 v |= ((u32)src[i]) << ((3 - i) * 8);
868         return v;
869 }
870
871 static void intel_dp_unpack_aux(u32 src, u8 *dst, int dst_bytes)
872 {
873         int i;
874         if (dst_bytes > 4)
875                 dst_bytes = 4;
876         for (i = 0; i < dst_bytes; i++)
877                 dst[i] = src >> ((3-i) * 8);
878 }
879
880 static void
881 intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp);
882 static void
883 intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
884                                               bool force_disable_vdd);
885 static void
886 intel_dp_pps_init(struct intel_dp *intel_dp);
887
/*
 * Take the pps_mutex while also holding a reference on the AUX power
 * domain. The power reference must be acquired *before* the mutex (and
 * released after) to avoid deadlocking against power-domain code that
 * is called with pps_mutex held. Returns the wakeref to pass back to
 * pps_unlock().
 */
static intel_wakeref_t
pps_lock(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	intel_wakeref_t wakeref;

	/*
	 * See intel_power_sequencer_reset() why we need
	 * a power domain reference here.
	 */
	wakeref = intel_display_power_get(dev_priv,
					  intel_aux_power_domain(dp_to_dig_port(intel_dp)));

	mutex_lock(&dev_priv->pps_mutex);

	return wakeref;
}
905
/*
 * Counterpart of pps_lock(): drop the pps_mutex, then release the AUX
 * power domain reference. Always returns 0 so that the with_pps_lock()
 * for-loop terminates after a single iteration.
 */
static intel_wakeref_t
pps_unlock(struct intel_dp *intel_dp, intel_wakeref_t wakeref)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	mutex_unlock(&dev_priv->pps_mutex);
	intel_display_power_put(dev_priv,
				intel_aux_power_domain(dp_to_dig_port(intel_dp)),
				wakeref);
	return 0;
}
917
/*
 * Execute the following statement/block once with the pps_mutex held and
 * an AUX power reference active. The loop runs while the wakeref is
 * non-zero and pps_unlock() returns 0, so the body executes exactly once.
 */
#define with_pps_lock(dp, wf) \
	for ((wf) = pps_lock(dp); (wf); (wf) = pps_unlock((dp), (wf)))
920
/*
 * Make the freshly assigned power sequencer lock onto this port on
 * VLV/CHV by briefly enabling and disabling the port with a minimal
 * one-lane configuration. The pipe's DPLL is forced on temporarily if
 * needed. Without this kick even the VDD force bit has no effect.
 */
static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum pipe pipe = intel_dp->pps_pipe;
	bool pll_enabled, release_cl_override = false;
	enum dpio_phy phy = DPIO_PHY(pipe);
	enum dpio_channel ch = vlv_pipe_to_channel(pipe);
	u32 DP;

	/* Never kick while the port is actively driving a display. */
	if (drm_WARN(&dev_priv->drm,
		     intel_de_read(dev_priv, intel_dp->output_reg) & DP_PORT_EN,
		     "skipping pipe %c power sequencer kick due to [ENCODER:%d:%s] being active\n",
		     pipe_name(pipe), dig_port->base.base.base.id,
		     dig_port->base.base.name))
		return;

	drm_dbg_kms(&dev_priv->drm,
		    "kicking pipe %c power sequencer for [ENCODER:%d:%s]\n",
		    pipe_name(pipe), dig_port->base.base.base.id,
		    dig_port->base.base.name);

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	DP = intel_de_read(dev_priv, intel_dp->output_reg) & DP_DETECTED;
	DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	DP |= DP_PORT_WIDTH(1);
	DP |= DP_LINK_TRAIN_PAT_1;

	if (IS_CHERRYVIEW(dev_priv))
		DP |= DP_PIPE_SEL_CHV(pipe);
	else
		DP |= DP_PIPE_SEL(pipe);

	pll_enabled = intel_de_read(dev_priv, DPLL(pipe)) & DPLL_VCO_ENABLE;

	/*
	 * The DPLL for the pipe must be enabled for this to work.
	 * So enable it temporarily if it's not already enabled.
	 */
	if (!pll_enabled) {
		release_cl_override = IS_CHERRYVIEW(dev_priv) &&
			!chv_phy_powergate_ch(dev_priv, phy, ch, true);

		if (vlv_force_pll_on(dev_priv, pipe, IS_CHERRYVIEW(dev_priv) ?
				     &chv_dpll[0].dpll : &vlv_dpll[0].dpll)) {
			drm_err(&dev_priv->drm,
				"Failed to force on pll for pipe %c!\n",
				pipe_name(pipe));
			return;
		}
	}

	/*
	 * Similar magic as in intel_dp_enable_port().
	 * We _must_ do this port enable + disable trick
	 * to make this power sequencer lock onto the port.
	 * Otherwise even VDD force bit won't work.
	 */
	intel_de_write(dev_priv, intel_dp->output_reg, DP);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);

	intel_de_write(dev_priv, intel_dp->output_reg, DP | DP_PORT_EN);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);

	intel_de_write(dev_priv, intel_dp->output_reg, DP & ~DP_PORT_EN);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);

	/* Undo the temporary PLL / powergate changes. */
	if (!pll_enabled) {
		vlv_force_pll_off(dev_priv, pipe);

		if (release_cl_override)
			chv_phy_powergate_ch(dev_priv, phy, ch, false);
	}
}
998
999 static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv)
1000 {
1001         struct intel_encoder *encoder;
1002         unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
1003
1004         /*
1005          * We don't have power sequencer currently.
1006          * Pick one that's not used by other ports.
1007          */
1008         for_each_intel_dp(&dev_priv->drm, encoder) {
1009                 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1010
1011                 if (encoder->type == INTEL_OUTPUT_EDP) {
1012                         drm_WARN_ON(&dev_priv->drm,
1013                                     intel_dp->active_pipe != INVALID_PIPE &&
1014                                     intel_dp->active_pipe !=
1015                                     intel_dp->pps_pipe);
1016
1017                         if (intel_dp->pps_pipe != INVALID_PIPE)
1018                                 pipes &= ~(1 << intel_dp->pps_pipe);
1019                 } else {
1020                         drm_WARN_ON(&dev_priv->drm,
1021                                     intel_dp->pps_pipe != INVALID_PIPE);
1022
1023                         if (intel_dp->active_pipe != INVALID_PIPE)
1024                                 pipes &= ~(1 << intel_dp->active_pipe);
1025                 }
1026         }
1027
1028         if (pipes == 0)
1029                 return INVALID_PIPE;
1030
1031         return ffs(pipes) - 1;
1032 }
1033
/*
 * Pick (and if necessary initialize) the power sequencer pipe for an
 * eDP port on VLV/CHV. Reuses the already assigned PPS if there is one;
 * otherwise finds or steals a pipe, programs its PPS for this port and
 * kicks it so it locks onto the port. Caller must hold pps_mutex.
 */
static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum pipe pipe;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp));

	drm_WARN_ON(&dev_priv->drm, intel_dp->active_pipe != INVALID_PIPE &&
		    intel_dp->active_pipe != intel_dp->pps_pipe);

	if (intel_dp->pps_pipe != INVALID_PIPE)
		return intel_dp->pps_pipe;

	pipe = vlv_find_free_pps(dev_priv);

	/*
	 * Didn't find one. This should not happen since there
	 * are two power sequencers and up to two eDP ports.
	 */
	if (drm_WARN_ON(&dev_priv->drm, pipe == INVALID_PIPE))
		pipe = PIPE_A;

	vlv_steal_power_sequencer(dev_priv, pipe);
	intel_dp->pps_pipe = pipe;

	drm_dbg_kms(&dev_priv->drm,
		    "picked pipe %c power sequencer for [ENCODER:%d:%s]\n",
		    pipe_name(intel_dp->pps_pipe),
		    dig_port->base.base.base.id,
		    dig_port->base.base.name);

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(intel_dp, true);

	/*
	 * Even vdd force doesn't work until we've made
	 * the power sequencer lock in on the port.
	 */
	vlv_power_sequencer_kick(intel_dp);

	return intel_dp->pps_pipe;
}
1082
1083 static int
1084 bxt_power_sequencer_idx(struct intel_dp *intel_dp)
1085 {
1086         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1087         int backlight_controller = dev_priv->vbt.backlight.controller;
1088
1089         lockdep_assert_held(&dev_priv->pps_mutex);
1090
1091         /* We should never land here with regular DP ports */
1092         drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp));
1093
1094         if (!intel_dp->pps_reset)
1095                 return backlight_controller;
1096
1097         intel_dp->pps_reset = false;
1098
1099         /*
1100          * Only the HW needs to be reprogrammed, the SW state is fixed and
1101          * has been setup during connector init.
1102          */
1103         intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
1104
1105         return backlight_controller;
1106 }
1107
/* Predicate used by vlv_initial_pps_pipe() to qualify a candidate PPS pipe. */
typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
			       enum pipe pipe);
1110
1111 static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
1112                                enum pipe pipe)
1113 {
1114         return intel_de_read(dev_priv, PP_STATUS(pipe)) & PP_ON;
1115 }
1116
1117 static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
1118                                 enum pipe pipe)
1119 {
1120         return intel_de_read(dev_priv, PP_CONTROL(pipe)) & EDP_FORCE_VDD;
1121 }
1122
/* Wildcard check: accept any pipe regardless of its PPS state. */
static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
			 enum pipe pipe)
{
	return true;
}
1128
1129 static enum pipe
1130 vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
1131                      enum port port,
1132                      vlv_pipe_check pipe_check)
1133 {
1134         enum pipe pipe;
1135
1136         for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
1137                 u32 port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(pipe)) &
1138                         PANEL_PORT_SELECT_MASK;
1139
1140                 if (port_sel != PANEL_PORT_SELECT_VLV(port))
1141                         continue;
1142
1143                 if (!pipe_check(dev_priv, pipe))
1144                         continue;
1145
1146                 return pipe;
1147         }
1148
1149         return INVALID_PIPE;
1150 }
1151
/*
 * Figure out which PPS pipe (if any) the BIOS left assigned to this
 * eDP port on VLV/CHV, preferring in order: a pipe with panel power on,
 * one with VDD forced on, then any pipe selecting this port. If one is
 * found, (re)initialize the panel power sequencer state for it. Caller
 * must hold pps_mutex.
 */
static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum port port = dig_port->base.port;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* try to find a pipe with this port selected */
	/* first pick one where the panel is on */
	intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
						  vlv_pipe_has_pp_on);
	/* didn't find one? pick one where vdd is on */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_has_vdd_on);
	/* didn't find one? pick one with just the correct port */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_any);

	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
	if (intel_dp->pps_pipe == INVALID_PIPE) {
		drm_dbg_kms(&dev_priv->drm,
			    "no initial power sequencer for [ENCODER:%d:%s]\n",
			    dig_port->base.base.base.id,
			    dig_port->base.base.name);
		return;
	}

	drm_dbg_kms(&dev_priv->drm,
		    "initial power sequencer for [ENCODER:%d:%s]: pipe %c\n",
		    dig_port->base.base.base.id,
		    dig_port->base.base.name,
		    pipe_name(intel_dp->pps_pipe));

	intel_dp_init_panel_power_sequencer(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
}
1192
/*
 * intel_power_sequencer_reset - invalidate the cached PPS assignments
 * @dev_priv: i915 device
 *
 * Marks every eDP encoder's PPS state as stale: on BXT/GLK the PPS
 * registers are flagged for reprogramming, on VLV/CHV the pipe
 * assignment is dropped entirely. NOTE(review): presumably called when
 * the power domain backing the PPS has been cycled — confirm at callers.
 */
void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;

	/* Only VLV/CHV and BXT/GLK have re-assignable power sequencers. */
	if (drm_WARN_ON(&dev_priv->drm,
			!(IS_VALLEYVIEW(dev_priv) ||
			  IS_CHERRYVIEW(dev_priv) ||
			  IS_GEN9_LP(dev_priv))))
		return;

	/*
	 * We can't grab pps_mutex here due to deadlock with power_domain
	 * mutex when power_domain functions are called while holding pps_mutex.
	 * That also means that in order to use pps_pipe the code needs to
	 * hold both a power domain reference and pps_mutex, and the power domain
	 * reference get/put must be done while _not_ holding pps_mutex.
	 * pps_{lock,unlock}() do these steps in the correct order, so one
	 * should use them always.
	 */

	for_each_intel_dp(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		drm_WARN_ON(&dev_priv->drm,
			    intel_dp->active_pipe != INVALID_PIPE);

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		if (IS_GEN9_LP(dev_priv))
			intel_dp->pps_reset = true;
		else
			intel_dp->pps_pipe = INVALID_PIPE;
	}
}
1228
/* MMIO register set of one panel power sequencer instance. */
struct pps_registers {
	i915_reg_t pp_ctrl;	/* PP_CONTROL */
	i915_reg_t pp_stat;	/* PP_STATUS */
	i915_reg_t pp_on;	/* PP_ON_DELAYS */
	i915_reg_t pp_off;	/* PP_OFF_DELAYS */
	i915_reg_t pp_div;	/* PP_DIVISOR; INVALID_MMIO_REG where cycle delay lives in PP_CONTROL */
};
1236
1237 static void intel_pps_get_registers(struct intel_dp *intel_dp,
1238                                     struct pps_registers *regs)
1239 {
1240         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1241         int pps_idx = 0;
1242
1243         memset(regs, 0, sizeof(*regs));
1244
1245         if (IS_GEN9_LP(dev_priv))
1246                 pps_idx = bxt_power_sequencer_idx(intel_dp);
1247         else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1248                 pps_idx = vlv_power_sequencer_pipe(intel_dp);
1249
1250         regs->pp_ctrl = PP_CONTROL(pps_idx);
1251         regs->pp_stat = PP_STATUS(pps_idx);
1252         regs->pp_on = PP_ON_DELAYS(pps_idx);
1253         regs->pp_off = PP_OFF_DELAYS(pps_idx);
1254
1255         /* Cycle delay moved from PP_DIVISOR to PP_CONTROL */
1256         if (IS_GEN9_LP(dev_priv) || INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
1257                 regs->pp_div = INVALID_MMIO_REG;
1258         else
1259                 regs->pp_div = PP_DIVISOR(pps_idx);
1260 }
1261
/* Resolve the PP_CONTROL register for this port's PPS instance. */
static i915_reg_t
_pp_ctrl_reg(struct intel_dp *intel_dp)
{
	struct pps_registers regs;

	intel_pps_get_registers(intel_dp, &regs);

	return regs.pp_ctrl;
}
1271
/* Resolve the PP_STATUS register for this port's PPS instance. */
static i915_reg_t
_pp_stat_reg(struct intel_dp *intel_dp)
{
	struct pps_registers regs;

	intel_pps_get_registers(intel_dp, &regs);

	return regs.pp_stat;
}
1281
1282 static bool edp_have_panel_power(struct intel_dp *intel_dp)
1283 {
1284         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1285
1286         lockdep_assert_held(&dev_priv->pps_mutex);
1287
1288         if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
1289             intel_dp->pps_pipe == INVALID_PIPE)
1290                 return false;
1291
1292         return (intel_de_read(dev_priv, _pp_stat_reg(intel_dp)) & PP_ON) != 0;
1293 }
1294
1295 static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
1296 {
1297         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1298
1299         lockdep_assert_held(&dev_priv->pps_mutex);
1300
1301         if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
1302             intel_dp->pps_pipe == INVALID_PIPE)
1303                 return false;
1304
1305         return intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
1306 }
1307
1308 static void
1309 intel_dp_check_edp(struct intel_dp *intel_dp)
1310 {
1311         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1312
1313         if (!intel_dp_is_edp(intel_dp))
1314                 return;
1315
1316         if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
1317                 drm_WARN(&dev_priv->drm, 1,
1318                          "eDP powered off while attempting aux channel communication.\n");
1319                 drm_dbg_kms(&dev_priv->drm, "Status 0x%08x Control 0x%08x\n",
1320                             intel_de_read(dev_priv, _pp_stat_reg(intel_dp)),
1321                             intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)));
1322         }
1323 }
1324
/*
 * Wait (sleeping on the gmbus wait queue, up to 10 ms) for the AUX
 * channel to clear DP_AUX_CH_CTL_SEND_BUSY. Returns the last status
 * value read from the AUX_CH_CTL register; logs an error if the
 * transfer did not complete within the timeout.
 */
static u32
intel_dp_aux_wait_done(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
	const unsigned int timeout_ms = 10;
	u32 status;
	bool done;

	/* C re-reads the register each evaluation, updating 'status'. */
#define C (((status = intel_uncore_read_notrace(&i915->uncore, ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	done = wait_event_timeout(i915->gmbus_wait_queue, C,
				  msecs_to_jiffies_timeout(timeout_ms));

	/* just trace the final value */
	trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);

	if (!done)
		drm_err(&i915->drm,
			"%s: did not complete or timeout within %ums (status 0x%08x)\n",
			intel_dp->aux.name, timeout_ms, status);
#undef C

	return status;
}
1349
1350 static u32 g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
1351 {
1352         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1353
1354         if (index)
1355                 return 0;
1356
1357         /*
1358          * The clock divider is based off the hrawclk, and would like to run at
1359          * 2MHz.  So, take the hrawclk value and divide by 2000 and use that
1360          */
1361         return DIV_ROUND_CLOSEST(RUNTIME_INFO(dev_priv)->rawclk_freq, 2000);
1362 }
1363
1364 static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
1365 {
1366         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1367         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1368         u32 freq;
1369
1370         if (index)
1371                 return 0;
1372
1373         /*
1374          * The clock divider is based off the cdclk or PCH rawclk, and would
1375          * like to run at 2MHz.  So, take the cdclk or PCH rawclk value and
1376          * divide by 2000 and use that
1377          */
1378         if (dig_port->aux_ch == AUX_CH_A)
1379                 freq = dev_priv->cdclk.hw.cdclk;
1380         else
1381                 freq = RUNTIME_INFO(dev_priv)->rawclk_freq;
1382         return DIV_ROUND_CLOSEST(freq, 2000);
1383 }
1384
1385 static u32 hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
1386 {
1387         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1388         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1389
1390         if (dig_port->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(dev_priv)) {
1391                 /* Workaround for non-ULT HSW */
1392                 switch (index) {
1393                 case 0: return 63;
1394                 case 1: return 72;
1395                 default: return 0;
1396                 }
1397         }
1398
1399         return ilk_get_aux_clock_divider(intel_dp, index);
1400 }
1401
1402 static u32 skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
1403 {
1404         /*
1405          * SKL doesn't need us to program the AUX clock divider (Hardware will
1406          * derive the clock from CDCLK automatically). We still implement the
1407          * get_aux_clock_divider vfunc to plug-in into the existing code.
1408          */
1409         return index ? 0 : 1;
1410 }
1411
1412 static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
1413                                 int send_bytes,
1414                                 u32 aux_clock_divider)
1415 {
1416         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1417         struct drm_i915_private *dev_priv =
1418                         to_i915(dig_port->base.base.dev);
1419         u32 precharge, timeout;
1420
1421         if (IS_GEN(dev_priv, 6))
1422                 precharge = 3;
1423         else
1424                 precharge = 5;
1425
1426         if (IS_BROADWELL(dev_priv))
1427                 timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
1428         else
1429                 timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
1430
1431         return DP_AUX_CH_CTL_SEND_BUSY |
1432                DP_AUX_CH_CTL_DONE |
1433                DP_AUX_CH_CTL_INTERRUPT |
1434                DP_AUX_CH_CTL_TIME_OUT_ERROR |
1435                timeout |
1436                DP_AUX_CH_CTL_RECEIVE_ERROR |
1437                (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
1438                (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
1439                (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
1440 }
1441
1442 static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
1443                                 int send_bytes,
1444                                 u32 unused)
1445 {
1446         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1447         struct drm_i915_private *i915 =
1448                         to_i915(dig_port->base.base.dev);
1449         enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
1450         u32 ret;
1451
1452         ret = DP_AUX_CH_CTL_SEND_BUSY |
1453               DP_AUX_CH_CTL_DONE |
1454               DP_AUX_CH_CTL_INTERRUPT |
1455               DP_AUX_CH_CTL_TIME_OUT_ERROR |
1456               DP_AUX_CH_CTL_TIME_OUT_MAX |
1457               DP_AUX_CH_CTL_RECEIVE_ERROR |
1458               (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
1459               DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) |
1460               DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
1461
1462         if (intel_phy_is_tc(i915, phy) &&
1463             dig_port->tc_mode == TC_PORT_TBT_ALT)
1464                 ret |= DP_AUX_CH_CTL_TBT_IO;
1465
1466         return ret;
1467 }
1468
1469 static int
1470 intel_dp_aux_xfer(struct intel_dp *intel_dp,
1471                   const u8 *send, int send_bytes,
1472                   u8 *recv, int recv_size,
1473                   u32 aux_send_ctl_flags)
1474 {
1475         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1476         struct drm_i915_private *i915 =
1477                         to_i915(dig_port->base.base.dev);
1478         struct intel_uncore *uncore = &i915->uncore;
1479         enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
1480         bool is_tc_port = intel_phy_is_tc(i915, phy);
1481         i915_reg_t ch_ctl, ch_data[5];
1482         u32 aux_clock_divider;
1483         enum intel_display_power_domain aux_domain;
1484         intel_wakeref_t aux_wakeref;
1485         intel_wakeref_t pps_wakeref;
1486         int i, ret, recv_bytes;
1487         int try, clock = 0;
1488         u32 status;
1489         bool vdd;
1490
1491         ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
1492         for (i = 0; i < ARRAY_SIZE(ch_data); i++)
1493                 ch_data[i] = intel_dp->aux_ch_data_reg(intel_dp, i);
1494
1495         if (is_tc_port)
1496                 intel_tc_port_lock(dig_port);
1497
1498         aux_domain = intel_aux_power_domain(dig_port);
1499
1500         aux_wakeref = intel_display_power_get(i915, aux_domain);
1501         pps_wakeref = pps_lock(intel_dp);
1502
1503         /*
1504          * We will be called with VDD already enabled for dpcd/edid/oui reads.
1505          * In such cases we want to leave VDD enabled and it's up to upper layers
1506          * to turn it off. But for eg. i2c-dev access we need to turn it on/off
1507          * ourselves.
1508          */
1509         vdd = edp_panel_vdd_on(intel_dp);
1510
1511         /* dp aux is extremely sensitive to irq latency, hence request the
1512          * lowest possible wakeup latency and so prevent the cpu from going into
1513          * deep sleep states.
1514          */
1515         cpu_latency_qos_update_request(&intel_dp->pm_qos, 0);
1516
1517         intel_dp_check_edp(intel_dp);
1518
1519         /* Try to wait for any previous AUX channel activity */
1520         for (try = 0; try < 3; try++) {
1521                 status = intel_uncore_read_notrace(uncore, ch_ctl);
1522                 if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
1523                         break;
1524                 msleep(1);
1525         }
1526         /* just trace the final value */
1527         trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);
1528
1529         if (try == 3) {
1530                 const u32 status = intel_uncore_read(uncore, ch_ctl);
1531
1532                 if (status != intel_dp->aux_busy_last_status) {
1533                         drm_WARN(&i915->drm, 1,
1534                                  "%s: not started (status 0x%08x)\n",
1535                                  intel_dp->aux.name, status);
1536                         intel_dp->aux_busy_last_status = status;
1537                 }
1538
1539                 ret = -EBUSY;
1540                 goto out;
1541         }
1542
1543         /* Only 5 data registers! */
1544         if (drm_WARN_ON(&i915->drm, send_bytes > 20 || recv_size > 20)) {
1545                 ret = -E2BIG;
1546                 goto out;
1547         }
1548
1549         while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
1550                 u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
1551                                                           send_bytes,
1552                                                           aux_clock_divider);
1553
1554                 send_ctl |= aux_send_ctl_flags;
1555
1556                 /* Must try at least 3 times according to DP spec */
1557                 for (try = 0; try < 5; try++) {
1558                         /* Load the send data into the aux channel data registers */
1559                         for (i = 0; i < send_bytes; i += 4)
1560                                 intel_uncore_write(uncore,
1561                                                    ch_data[i >> 2],
1562                                                    intel_dp_pack_aux(send + i,
1563                                                                      send_bytes - i));
1564
1565                         /* Send the command and wait for it to complete */
1566                         intel_uncore_write(uncore, ch_ctl, send_ctl);
1567
1568                         status = intel_dp_aux_wait_done(intel_dp);
1569
1570                         /* Clear done status and any errors */
1571                         intel_uncore_write(uncore,
1572                                            ch_ctl,
1573                                            status |
1574                                            DP_AUX_CH_CTL_DONE |
1575                                            DP_AUX_CH_CTL_TIME_OUT_ERROR |
1576                                            DP_AUX_CH_CTL_RECEIVE_ERROR);
1577
1578                         /* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
1579                          *   400us delay required for errors and timeouts
1580                          *   Timeout errors from the HW already meet this
1581                          *   requirement so skip to next iteration
1582                          */
1583                         if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
1584                                 continue;
1585
1586                         if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
1587                                 usleep_range(400, 500);
1588                                 continue;
1589                         }
1590                         if (status & DP_AUX_CH_CTL_DONE)
1591                                 goto done;
1592                 }
1593         }
1594
1595         if ((status & DP_AUX_CH_CTL_DONE) == 0) {
1596                 drm_err(&i915->drm, "%s: not done (status 0x%08x)\n",
1597                         intel_dp->aux.name, status);
1598                 ret = -EBUSY;
1599                 goto out;
1600         }
1601
1602 done:
1603         /* Check for timeout or receive error.
1604          * Timeouts occur when the sink is not connected
1605          */
1606         if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
1607                 drm_err(&i915->drm, "%s: receive error (status 0x%08x)\n",
1608                         intel_dp->aux.name, status);
1609                 ret = -EIO;
1610                 goto out;
1611         }
1612
1613         /* Timeouts occur when the device isn't connected, so they're
1614          * "normal" -- don't fill the kernel log with these */
1615         if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
1616                 drm_dbg_kms(&i915->drm, "%s: timeout (status 0x%08x)\n",
1617                             intel_dp->aux.name, status);
1618                 ret = -ETIMEDOUT;
1619                 goto out;
1620         }
1621
1622         /* Unload any bytes sent back from the other side */
1623         recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
1624                       DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
1625
1626         /*
1627          * By BSpec: "Message sizes of 0 or >20 are not allowed."
1628          * We have no idea of what happened so we return -EBUSY so
1629          * drm layer takes care for the necessary retries.
1630          */
1631         if (recv_bytes == 0 || recv_bytes > 20) {
1632                 drm_dbg_kms(&i915->drm,
1633                             "%s: Forbidden recv_bytes = %d on aux transaction\n",
1634                             intel_dp->aux.name, recv_bytes);
1635                 ret = -EBUSY;
1636                 goto out;
1637         }
1638
1639         if (recv_bytes > recv_size)
1640                 recv_bytes = recv_size;
1641
1642         for (i = 0; i < recv_bytes; i += 4)
1643                 intel_dp_unpack_aux(intel_uncore_read(uncore, ch_data[i >> 2]),
1644                                     recv + i, recv_bytes - i);
1645
1646         ret = recv_bytes;
1647 out:
1648         cpu_latency_qos_update_request(&intel_dp->pm_qos, PM_QOS_DEFAULT_VALUE);
1649
1650         if (vdd)
1651                 edp_panel_vdd_off(intel_dp, false);
1652
1653         pps_unlock(intel_dp, pps_wakeref);
1654         intel_display_power_put_async(i915, aux_domain, aux_wakeref);
1655
1656         if (is_tc_port)
1657                 intel_tc_port_unlock(dig_port);
1658
1659         return ret;
1660 }
1661
1662 #define BARE_ADDRESS_SIZE       3
1663 #define HEADER_SIZE             (BARE_ADDRESS_SIZE + 1)
1664
1665 static void
1666 intel_dp_aux_header(u8 txbuf[HEADER_SIZE],
1667                     const struct drm_dp_aux_msg *msg)
1668 {
1669         txbuf[0] = (msg->request << 4) | ((msg->address >> 16) & 0xf);
1670         txbuf[1] = (msg->address >> 8) & 0xff;
1671         txbuf[2] = msg->address & 0xff;
1672         txbuf[3] = msg->size - 1;
1673 }
1674
1675 static u32 intel_dp_aux_xfer_flags(const struct drm_dp_aux_msg *msg)
1676 {
1677         /*
1678          * If we're trying to send the HDCP Aksv, we need to set a the Aksv
1679          * select bit to inform the hardware to send the Aksv after our header
1680          * since we can't access that data from software.
1681          */
1682         if ((msg->request & ~DP_AUX_I2C_MOT) == DP_AUX_NATIVE_WRITE &&
1683             msg->address == DP_AUX_HDCP_AKSV)
1684                 return DP_AUX_CH_CTL_AUX_AKSV_SELECT;
1685
1686         return 0;
1687 }
1688
/*
 * drm_dp_aux.transfer() hook: marshal a drm_dp_aux_msg into the raw byte
 * format used by the AUX channel hardware, run the transfer via
 * intel_dp_aux_xfer() and decode the reply.
 *
 * Returns the payload size actually transferred (possibly shorter than
 * requested for short writes), or a negative error code.
 */
static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	/* 20 bytes: 4-byte header + up to 16 data bytes (checked below) */
	u8 txbuf[20], rxbuf[20];
	size_t txsize, rxsize;
	u32 flags = intel_dp_aux_xfer_flags(msg);
	int ret;

	intel_dp_aux_header(txbuf, msg);

	switch (msg->request & ~DP_AUX_I2C_MOT) {
	case DP_AUX_NATIVE_WRITE:
	case DP_AUX_I2C_WRITE:
	case DP_AUX_I2C_WRITE_STATUS_UPDATE:
		/* A zero-sized write is an address-only transaction */
		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
		rxsize = 2; /* 0 or 1 data bytes */

		if (drm_WARN_ON(&i915->drm, txsize > 20))
			return -E2BIG;

		/* Either both buffer and size are set, or neither */
		drm_WARN_ON(&i915->drm, !msg->buffer != !msg->size);

		if (msg->buffer)
			memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);

		ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
					rxbuf, rxsize, flags);
		if (ret > 0) {
			/* First reply byte carries the AUX/I2C reply field */
			msg->reply = rxbuf[0] >> 4;

			if (ret > 1) {
				/* Number of bytes written in a short write. */
				ret = clamp_t(int, rxbuf[1], 0, msg->size);
			} else {
				/* Return payload size. */
				ret = msg->size;
			}
		}
		break;

	case DP_AUX_NATIVE_READ:
	case DP_AUX_I2C_READ:
		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
		/* One extra byte for the reply field */
		rxsize = msg->size + 1;

		if (drm_WARN_ON(&i915->drm, rxsize > 20))
			return -E2BIG;

		ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
					rxbuf, rxsize, flags);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;
			/*
			 * Assume happy day, and copy the data. The caller is
			 * expected to check msg->reply before touching it.
			 *
			 * Return payload size.
			 */
			ret--;
			memcpy(msg->buffer, rxbuf + 1, ret);
		}
		break;

	default:
		/* Unknown request type */
		ret = -EINVAL;
		break;
	}

	return ret;
}
1761
1762
/*
 * Look up the AUX channel control register for this port on G4x.
 *
 * NOTE(review): dev_priv looks unused, but the DP_AUX_CH_CTL() register
 * macro expands to code referencing a local named 'dev_priv' (display
 * MMIO base offset) -- do not remove it; confirm against i915_reg.h.
 */
static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return DP_AUX_CH_CTL(aux_ch);
	default:
		/* Unexpected channel: warn and fall back to channel B */
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_B);
	}
}
1779
/*
 * Look up the index'th AUX channel data register for this port on G4x.
 *
 * NOTE(review): dev_priv looks unused, but the DP_AUX_CH_DATA() register
 * macro expands to code referencing a local named 'dev_priv' -- do not
 * remove it; confirm against i915_reg.h.
 */
static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return DP_AUX_CH_DATA(aux_ch, index);
	default:
		/* Unexpected channel: warn and fall back to channel B */
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_B, index);
	}
}
1796
/*
 * Look up the AUX channel control register on ILK-class (PCH split)
 * platforms: channel A lives in the CPU/north display, B-D in the PCH.
 *
 * NOTE(review): dev_priv looks unused, but the DP_AUX_CH_CTL()/
 * PCH_DP_AUX_CH_CTL() macros expand to code referencing a local named
 * 'dev_priv' -- do not remove it; confirm against i915_reg.h.
 */
static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
		return DP_AUX_CH_CTL(aux_ch);
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return PCH_DP_AUX_CH_CTL(aux_ch);
	default:
		/* Unexpected channel: warn and fall back to channel A */
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_A);
	}
}
1815
/*
 * Look up the index'th AUX channel data register on ILK-class (PCH
 * split) platforms: channel A is CPU-side, B-D are PCH-side.
 *
 * NOTE(review): dev_priv looks unused, but the DP_AUX_CH_DATA()/
 * PCH_DP_AUX_CH_DATA() macros expand to code referencing a local named
 * 'dev_priv' -- do not remove it; confirm against i915_reg.h.
 */
static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
		return DP_AUX_CH_DATA(aux_ch, index);
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return PCH_DP_AUX_CH_DATA(aux_ch, index);
	default:
		/* Unexpected channel: warn and fall back to channel A */
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_A, index);
	}
}
1834
/*
 * Look up the AUX channel control register on SKL+ (gen9-11); all
 * channels A-F use the unified DP_AUX_CH_CTL layout.
 *
 * NOTE(review): dev_priv looks unused, but the DP_AUX_CH_CTL() macro
 * expands to code referencing a local named 'dev_priv' -- do not remove
 * it; confirm against i915_reg.h.
 */
static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
	case AUX_CH_E:
	case AUX_CH_F:
		return DP_AUX_CH_CTL(aux_ch);
	default:
		/* Unexpected channel: warn and fall back to channel A */
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_A);
	}
}
1854
/*
 * Look up the index'th AUX channel data register on SKL+ (gen9-11).
 *
 * NOTE(review): dev_priv looks unused, but the DP_AUX_CH_DATA() macro
 * expands to code referencing a local named 'dev_priv' -- do not remove
 * it; confirm against i915_reg.h.
 */
static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
	case AUX_CH_E:
	case AUX_CH_F:
		return DP_AUX_CH_DATA(aux_ch, index);
	default:
		/* Unexpected channel: warn and fall back to channel A */
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_A, index);
	}
}
1874
/*
 * Look up the AUX channel control register on TGL+ (gen12), which adds
 * the USBC1-6 type-C channels alongside A-C.
 *
 * NOTE(review): dev_priv looks unused, but the DP_AUX_CH_CTL() macro
 * expands to code referencing a local named 'dev_priv' -- do not remove
 * it; confirm against i915_reg.h.
 */
static i915_reg_t tgl_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_USBC1:
	case AUX_CH_USBC2:
	case AUX_CH_USBC3:
	case AUX_CH_USBC4:
	case AUX_CH_USBC5:
	case AUX_CH_USBC6:
		return DP_AUX_CH_CTL(aux_ch);
	default:
		/* Unexpected channel: warn and fall back to channel A */
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_A);
	}
}
1897
/*
 * Look up the index'th AUX channel data register on TGL+ (gen12),
 * covering channels A-C and USBC1-6.
 *
 * NOTE(review): dev_priv looks unused, but the DP_AUX_CH_DATA() macro
 * expands to code referencing a local named 'dev_priv' -- do not remove
 * it; confirm against i915_reg.h.
 */
static i915_reg_t tgl_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_USBC1:
	case AUX_CH_USBC2:
	case AUX_CH_USBC3:
	case AUX_CH_USBC4:
	case AUX_CH_USBC5:
	case AUX_CH_USBC6:
		return DP_AUX_CH_DATA(aux_ch, index);
	default:
		/* Unexpected channel: warn and fall back to channel A */
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_A, index);
	}
}
1920
1921 static void
1922 intel_dp_aux_fini(struct intel_dp *intel_dp)
1923 {
1924         if (cpu_latency_qos_request_active(&intel_dp->pm_qos))
1925                 cpu_latency_qos_remove_request(&intel_dp->pm_qos);
1926
1927         kfree(intel_dp->aux.name);
1928 }
1929
/*
 * One-time AUX channel setup: select the per-platform register lookup
 * and clock-divider/send-ctl callbacks, initialize the drm_dp_aux
 * helper, allocate a human-readable channel name, and register the PM
 * QoS request used to cap CPU latency during AUX transfers.
 */
static void
intel_dp_aux_init(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &dig_port->base;
	enum aux_ch aux_ch = dig_port->aux_ch;

	/* Register layout differs per generation; most specific first */
	if (INTEL_GEN(dev_priv) >= 12) {
		intel_dp->aux_ch_ctl_reg = tgl_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = tgl_aux_data_reg;
	} else if (INTEL_GEN(dev_priv) >= 9) {
		intel_dp->aux_ch_ctl_reg = skl_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = skl_aux_data_reg;
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		intel_dp->aux_ch_ctl_reg = ilk_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = ilk_aux_data_reg;
	} else {
		intel_dp->aux_ch_ctl_reg = g4x_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = g4x_aux_data_reg;
	}

	/* AUX clock divider derivation also varies per platform */
	if (INTEL_GEN(dev_priv) >= 9)
		intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
	else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
		intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
	else if (HAS_PCH_SPLIT(dev_priv))
		intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
	else
		intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider;

	/* SEND_CTL encoding changed on gen9+ */
	if (INTEL_GEN(dev_priv) >= 9)
		intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
	else
		intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl;

	drm_dp_aux_init(&intel_dp->aux);

	/* Failure to allocate our preferred name is not critical */
	/* TGL+ type-C channels are named "AUX USBC<n>", others "AUX <ch>" */
	if (INTEL_GEN(dev_priv) >= 12 && aux_ch >= AUX_CH_USBC1)
		intel_dp->aux.name = kasprintf(GFP_KERNEL, "AUX USBC%c/%s",
					       aux_ch - AUX_CH_USBC1 + '1',
					       encoder->base.name);
	else
		intel_dp->aux.name = kasprintf(GFP_KERNEL, "AUX %c/%s",
					       aux_ch_name(aux_ch),
					       encoder->base.name);

	intel_dp->aux.transfer = intel_dp_aux_transfer;
	cpu_latency_qos_add_request(&intel_dp->pm_qos, PM_QOS_DEFAULT_VALUE);
}
1981
1982 bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
1983 {
1984         int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];
1985
1986         return max_rate >= 540000;
1987 }
1988
1989 bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp)
1990 {
1991         int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];
1992
1993         return max_rate >= 810000;
1994 }
1995
1996 static void
1997 intel_dp_set_clock(struct intel_encoder *encoder,
1998                    struct intel_crtc_state *pipe_config)
1999 {
2000         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2001         const struct dp_link_dpll *divisor = NULL;
2002         int i, count = 0;
2003
2004         if (IS_G4X(dev_priv)) {
2005                 divisor = g4x_dpll;
2006                 count = ARRAY_SIZE(g4x_dpll);
2007         } else if (HAS_PCH_SPLIT(dev_priv)) {
2008                 divisor = pch_dpll;
2009                 count = ARRAY_SIZE(pch_dpll);
2010         } else if (IS_CHERRYVIEW(dev_priv)) {
2011                 divisor = chv_dpll;
2012                 count = ARRAY_SIZE(chv_dpll);
2013         } else if (IS_VALLEYVIEW(dev_priv)) {
2014                 divisor = vlv_dpll;
2015                 count = ARRAY_SIZE(vlv_dpll);
2016         }
2017
2018         if (divisor && count) {
2019                 for (i = 0; i < count; i++) {
2020                         if (pipe_config->port_clock == divisor[i].clock) {
2021                                 pipe_config->dpll = divisor[i].dpll;
2022                                 pipe_config->clock_set = true;
2023                                 break;
2024                         }
2025                 }
2026         }
2027 }
2028
/*
 * Format @nelem integers from @array into @str as a comma-separated
 * list, never writing more than @len bytes. On truncation the buffer
 * keeps whatever snprintf() managed to fit (always NUL-terminated).
 */
static void snprintf_int_array(char *str, size_t len,
			       const int *array, int nelem)
{
	size_t pos = 0;
	int i;

	str[0] = '\0';

	for (i = 0; i < nelem; i++) {
		int written = snprintf(str + pos, len - pos, "%s%d",
				       i == 0 ? "" : ", ", array[i]);

		/* snprintf() returns the would-be length: >= space left means truncated */
		if (written >= len - pos)
			return;
		pos += written;
	}
}
2044
/* Debug dump of the source, sink and common link rate tables (kHz). */
static void intel_dp_print_rates(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	char str[128]; /* FIXME: too big for stack? */

	/* Skip the string formatting entirely unless KMS debugging is on */
	if (!drm_debug_enabled(DRM_UT_KMS))
		return;

	snprintf_int_array(str, sizeof(str),
			   intel_dp->source_rates, intel_dp->num_source_rates);
	drm_dbg_kms(&i915->drm, "source rates: %s\n", str);

	snprintf_int_array(str, sizeof(str),
			   intel_dp->sink_rates, intel_dp->num_sink_rates);
	drm_dbg_kms(&i915->drm, "sink rates: %s\n", str);

	snprintf_int_array(str, sizeof(str),
			   intel_dp->common_rates, intel_dp->num_common_rates);
	drm_dbg_kms(&i915->drm, "common rates: %s\n", str);
}
2065
2066 int
2067 intel_dp_max_link_rate(struct intel_dp *intel_dp)
2068 {
2069         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
2070         int len;
2071
2072         len = intel_dp_common_len_rate_limit(intel_dp, intel_dp->max_link_rate);
2073         if (drm_WARN_ON(&i915->drm, len <= 0))
2074                 return 162000;
2075
2076         return intel_dp->common_rates[len - 1];
2077 }
2078
2079 int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
2080 {
2081         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
2082         int i = intel_dp_rate_index(intel_dp->sink_rates,
2083                                     intel_dp->num_sink_rates, rate);
2084
2085         if (drm_WARN_ON(&i915->drm, i < 0))
2086                 i = 0;
2087
2088         return i;
2089 }
2090
2091 void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
2092                            u8 *link_bw, u8 *rate_select)
2093 {
2094         /* eDP 1.4 rate select method. */
2095         if (intel_dp->use_rate_select) {
2096                 *link_bw = 0;
2097                 *rate_select =
2098                         intel_dp_rate_select(intel_dp, port_clock);
2099         } else {
2100                 *link_bw = drm_dp_link_rate_to_bw_code(port_clock);
2101                 *rate_select = 0;
2102         }
2103 }
2104
2105 static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp,
2106                                          const struct intel_crtc_state *pipe_config)
2107 {
2108         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2109
2110         /* On TGL, FEC is supported on all Pipes */
2111         if (INTEL_GEN(dev_priv) >= 12)
2112                 return true;
2113
2114         if (IS_GEN(dev_priv, 11) && pipe_config->cpu_transcoder != TRANSCODER_A)
2115                 return true;
2116
2117         return false;
2118 }
2119
2120 static bool intel_dp_supports_fec(struct intel_dp *intel_dp,
2121                                   const struct intel_crtc_state *pipe_config)
2122 {
2123         return intel_dp_source_supports_fec(intel_dp, pipe_config) &&
2124                 drm_dp_sink_supports_fec(intel_dp->fec_capable);
2125 }
2126
2127 static bool intel_dp_supports_dsc(struct intel_dp *intel_dp,
2128                                   const struct intel_crtc_state *crtc_state)
2129 {
2130         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP) && !crtc_state->fec_enable)
2131                 return false;
2132
2133         return intel_dsc_source_support(crtc_state) &&
2134                 drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd);
2135 }
2136
2137 static bool intel_dp_hdmi_ycbcr420(struct intel_dp *intel_dp,
2138                                    const struct intel_crtc_state *crtc_state)
2139 {
2140         return crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
2141                 (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444 &&
2142                  intel_dp->dfp.ycbcr_444_to_420);
2143 }
2144
2145 static int intel_dp_hdmi_tmds_clock(struct intel_dp *intel_dp,
2146                                     const struct intel_crtc_state *crtc_state, int bpc)
2147 {
2148         int clock = crtc_state->hw.adjusted_mode.crtc_clock * bpc / 8;
2149
2150         if (intel_dp_hdmi_ycbcr420(intel_dp, crtc_state))
2151                 clock /= 2;
2152
2153         return clock;
2154 }
2155
2156 static bool intel_dp_hdmi_tmds_clock_valid(struct intel_dp *intel_dp,
2157                                            const struct intel_crtc_state *crtc_state, int bpc)
2158 {
2159         int tmds_clock = intel_dp_hdmi_tmds_clock(intel_dp, crtc_state, bpc);
2160
2161         if (intel_dp->dfp.min_tmds_clock &&
2162             tmds_clock < intel_dp->dfp.min_tmds_clock)
2163                 return false;
2164
2165         if (intel_dp->dfp.max_tmds_clock &&
2166             tmds_clock > intel_dp->dfp.max_tmds_clock)
2167                 return false;
2168
2169         return true;
2170 }
2171
2172 static bool intel_dp_hdmi_deep_color_possible(struct intel_dp *intel_dp,
2173                                               const struct intel_crtc_state *crtc_state,
2174                                               int bpc)
2175 {
2176
2177         return intel_hdmi_deep_color_possible(crtc_state, bpc,
2178                                               intel_dp->has_hdmi_sink,
2179                                               intel_dp_hdmi_ycbcr420(intel_dp, crtc_state)) &&
2180                 intel_dp_hdmi_tmds_clock_valid(intel_dp, crtc_state, bpc);
2181 }
2182
/*
 * Maximum pipe bpp usable for this state, starting from the requested
 * pipe_bpp and clamped by the DFP's max bpc, its TMDS clock limits and
 * (for eDP) a BIOS/VBT-provided bpp.
 */
static int intel_dp_max_bpp(struct intel_dp *intel_dp,
			    const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	int bpp, bpc;

	bpc = crtc_state->pipe_bpp / 3;

	if (intel_dp->dfp.max_bpc)
		bpc = min_t(int, bpc, intel_dp->dfp.max_bpc);

	if (intel_dp->dfp.min_tmds_clock) {
		/*
		 * Step deep color bpc down (e.g. 12 -> 10) until the DFP's
		 * TMDS clock limits are met; if no deep color bpc fits, the
		 * loop exits with bpc at 8 (or whatever was below 10).
		 */
		for (; bpc >= 10; bpc -= 2) {
			if (intel_dp_hdmi_deep_color_possible(intel_dp, crtc_state, bpc))
				break;
		}
	}

	bpp = bpc * 3;
	if (intel_dp_is_edp(intel_dp)) {
		/* Get bpp from vbt only for panels that don't have bpp in edid */
		if (intel_connector->base.display_info.bpc == 0 &&
		    dev_priv->vbt.edp.bpp && dev_priv->vbt.edp.bpp < bpp) {
			drm_dbg_kms(&dev_priv->drm,
				    "clamping bpp for eDP panel to BIOS-provided %i\n",
				    dev_priv->vbt.edp.bpp);
			bpp = dev_priv->vbt.edp.bpp;
		}
	}

	return bpp;
}
2216
/* Adjust link config limits based on compliance test requests. */
void
intel_dp_adjust_compliance_config(struct intel_dp *intel_dp,
				  struct intel_crtc_state *pipe_config,
				  struct link_config_limits *limits)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	/* For DP Compliance we override the computed bpp for the pipe */
	if (intel_dp->compliance.test_data.bpc != 0) {
		int bpp = 3 * intel_dp->compliance.test_data.bpc;

		/* Pin bpp exactly to the requested value */
		limits->min_bpp = limits->max_bpp = bpp;
		/* Force dithering off for the 6 bpc (18 bpp) case */
		pipe_config->dither_force_disable = bpp == 6 * 3;

		drm_dbg_kms(&i915->drm, "Setting pipe_bpp to %d\n", bpp);
	}

	/* Use values requested by Compliance Test Request */
	if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
		int index;

		/* Validate the compliance test data since max values
		 * might have changed due to link train fallback.
		 */
		if (intel_dp_link_params_valid(intel_dp, intel_dp->compliance.test_link_rate,
					       intel_dp->compliance.test_lane_count)) {
			index = intel_dp_rate_index(intel_dp->common_rates,
						    intel_dp->num_common_rates,
						    intel_dp->compliance.test_link_rate);
			/* Pin both the link rate index and the lane count */
			if (index >= 0)
				limits->min_clock = limits->max_clock = index;
			limits->min_lane_count = limits->max_lane_count =
				intel_dp->compliance.test_lane_count;
		}
	}
}
2254
/* Optimize link config in order: max bpp, min clock, min lanes */
static int
intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
				  struct intel_crtc_state *pipe_config,
				  const struct link_config_limits *limits)
{
	struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
	int bpp, clock, lane_count;
	int mode_rate, link_clock, link_avail;

	/* Prefer the highest bpp: walk downwards in 2-bits-per-channel steps */
	for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
		int output_bpp = intel_dp_output_bpp(pipe_config->output_format, bpp);

		/* Bandwidth the mode needs at this bpp */
		mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
						   output_bpp);

		/* Then prefer the lowest link clock ... */
		for (clock = limits->min_clock; clock <= limits->max_clock; clock++) {
			/* ... and the fewest lanes (lane counts double each step) */
			for (lane_count = limits->min_lane_count;
			     lane_count <= limits->max_lane_count;
			     lane_count <<= 1) {
				link_clock = intel_dp->common_rates[clock];
				link_avail = intel_dp_max_data_rate(link_clock,
								    lane_count);

				/* First configuration that fits wins */
				if (mode_rate <= link_avail) {
					pipe_config->lane_count = lane_count;
					pipe_config->pipe_bpp = bpp;
					pipe_config->port_clock = link_clock;

					return 0;
				}
			}
		}
	}

	/* No combination within the limits can carry the mode */
	return -EINVAL;
}
2292
2293 static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc)
2294 {
2295         int i, num_bpc;
2296         u8 dsc_bpc[3] = {0};
2297
2298         num_bpc = drm_dp_dsc_sink_supported_input_bpcs(intel_dp->dsc_dpcd,
2299                                                        dsc_bpc);
2300         for (i = 0; i < num_bpc; i++) {
2301                 if (dsc_max_bpc >= dsc_bpc[i])
2302                         return dsc_bpc[i] * 3;
2303         }
2304
2305         return 0;
2306 }
2307
2308 #define DSC_SUPPORTED_VERSION_MIN               1
2309
/*
 * Fill out the DSC configuration in @crtc_state from the source's
 * computed parameters and the sink's DSC DPCD capabilities, then derive
 * the rate-control parameters.
 *
 * Returns 0 on success or a negative error code.
 */
static int intel_dp_dsc_compute_params(struct intel_encoder *encoder,
				       struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
	u8 line_buf_depth;
	int ret;

	/*
	 * RC_MODEL_SIZE is currently a constant across all configurations.
	 *
	 * FIXME: Look into using sink defined DPCD DP_DSC_RC_BUF_BLK_SIZE and
	 * DP_DSC_RC_BUF_SIZE for this.
	 */
	vdsc_cfg->rc_model_size = DSC_RC_MODEL_SIZE_CONST;

	ret = intel_dsc_compute_params(encoder, crtc_state);
	if (ret)
		return ret;

	/*
	 * Slice Height of 8 works for all currently available panels. So start
	 * with that if pic_height is an integral multiple of 8. Eventually add
	 * logic to try multiple slice heights.
	 */
	if (vdsc_cfg->pic_height % 8 == 0)
		vdsc_cfg->slice_height = 8;
	else if (vdsc_cfg->pic_height % 4 == 0)
		vdsc_cfg->slice_height = 4;
	else
		vdsc_cfg->slice_height = 2;

	/* DSC version from the sink's DPCD; minor capped at what we support */
	vdsc_cfg->dsc_version_major =
		(intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] &
		 DP_DSC_MAJOR_MASK) >> DP_DSC_MAJOR_SHIFT;
	vdsc_cfg->dsc_version_minor =
		min(DSC_SUPPORTED_VERSION_MIN,
		    (intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] &
		     DP_DSC_MINOR_MASK) >> DP_DSC_MINOR_SHIFT);

	/* Whether the sink decoder accepts RGB input */
	vdsc_cfg->convert_rgb = intel_dp->dsc_dpcd[DP_DSC_DEC_COLOR_FORMAT_CAP - DP_DSC_SUPPORT] &
		DP_DSC_RGB;

	line_buf_depth = drm_dp_dsc_sink_line_buf_depth(intel_dp->dsc_dpcd);
	if (!line_buf_depth) {
		drm_dbg_kms(&i915->drm,
			    "DSC Sink Line Buffer Depth invalid\n");
		return -EINVAL;
	}

	/* Encode/clamp line buffer depth per the negotiated DSC minor version */
	if (vdsc_cfg->dsc_version_minor == 2)
		vdsc_cfg->line_buf_depth = (line_buf_depth == DSC_1_2_MAX_LINEBUF_DEPTH_BITS) ?
			DSC_1_2_MAX_LINEBUF_DEPTH_VAL : line_buf_depth;
	else
		vdsc_cfg->line_buf_depth = (line_buf_depth > DSC_1_1_MAX_LINEBUF_DEPTH_BITS) ?
			DSC_1_1_MAX_LINEBUF_DEPTH_BITS : line_buf_depth;

	/* Block prediction only if the sink advertises support for it */
	vdsc_cfg->block_pred_enable =
		intel_dp->dsc_dpcd[DP_DSC_BLK_PREDICTION_SUPPORT - DP_DSC_SUPPORT] &
		DP_DSC_BLK_PREDICTION_IS_SUPPORTED;

	return drm_dsc_compute_rc_parameters(vdsc_cfg);
}
2374
/*
 * Compute a DSC (Display Stream Compression) enabled link configuration.
 *
 * Fills in pipe bpp, port clock, lane count, compressed bpp, slice count
 * and (if needed) the VDSC engine split in @pipe_config, then computes the
 * full set of DSC parameters via intel_dp_dsc_compute_params().
 *
 * Returns 0 on success, or a negative error code when DSC cannot be used
 * for this mode/sink combination.
 */
static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
				       struct intel_crtc_state *pipe_config,
				       struct drm_connector_state *conn_state,
				       struct link_config_limits *limits)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	const struct drm_display_mode *adjusted_mode =
		&pipe_config->hw.adjusted_mode;
	u8 dsc_max_bpc;
	int pipe_bpp;
	int ret;

	/* FEC is enabled together with DSC on external DP sinks only; eDP
	 * does not use FEC here. */
	pipe_config->fec_enable = !intel_dp_is_edp(intel_dp) &&
		intel_dp_supports_fec(intel_dp, pipe_config);

	if (!intel_dp_supports_dsc(intel_dp, pipe_config))
		return -EINVAL;

	/* Max DSC Input BPC for ICL is 10 and for TGL+ is 12 */
	if (INTEL_GEN(dev_priv) >= 12)
		dsc_max_bpc = min_t(u8, 12, conn_state->max_requested_bpc);
	else
		dsc_max_bpc = min_t(u8, 10,
				    conn_state->max_requested_bpc);

	pipe_bpp = intel_dp_dsc_compute_bpp(intel_dp, dsc_max_bpc);

	/* Min Input BPC for ICL+ is 8 (i.e. 8 bpc * 3 components) */
	if (pipe_bpp < 8 * 3) {
		drm_dbg_kms(&dev_priv->drm,
			    "No DSC support for less than 8bpc\n");
		return -EINVAL;
	}

	/*
	 * For now enable DSC for max bpp, max link rate, max lane count.
	 * Optimize this later for the minimum possible link rate/lane count
	 * with DSC enabled for the requested mode.
	 */
	pipe_config->pipe_bpp = pipe_bpp;
	pipe_config->port_clock = intel_dp->common_rates[limits->max_clock];
	pipe_config->lane_count = limits->max_lane_count;

	if (intel_dp_is_edp(intel_dp)) {
		/*
		 * eDP: take the compressed bpp advertised by the sink's DPCD.
		 * The >> 4 drops the fractional part of the fixed-point value
		 * (presumably U6.4 format — see drm_edp_dsc_sink_output_bpp()).
		 */
		pipe_config->dsc.compressed_bpp =
			min_t(u16, drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4,
			      pipe_config->pipe_bpp);
		pipe_config->dsc.slice_count =
			drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
							true);
	} else {
		u16 dsc_max_output_bpp;
		u8 dsc_dp_slice_count;

		dsc_max_output_bpp =
			intel_dp_dsc_get_output_bpp(dev_priv,
						    pipe_config->port_clock,
						    pipe_config->lane_count,
						    adjusted_mode->crtc_clock,
						    adjusted_mode->crtc_hdisplay,
						    pipe_config->bigjoiner);
		dsc_dp_slice_count =
			intel_dp_dsc_get_slice_count(intel_dp,
						     adjusted_mode->crtc_clock,
						     adjusted_mode->crtc_hdisplay,
						     pipe_config->bigjoiner);
		if (!dsc_max_output_bpp || !dsc_dp_slice_count) {
			drm_dbg_kms(&dev_priv->drm,
				    "Compressed BPP/Slice Count not supported\n");
			return -EINVAL;
		}
		/* Same fixed-point >> 4 conversion as the eDP path above. */
		pipe_config->dsc.compressed_bpp = min_t(u16,
							       dsc_max_output_bpp >> 4,
							       pipe_config->pipe_bpp);
		pipe_config->dsc.slice_count = dsc_dp_slice_count;
	}
	/*
	 * The VDSC engine operates at 1 pixel per clock, so if the peak pixel
	 * rate exceeds the maximum cdclk, or big joiner is in use, the stream
	 * must be split across 2 VDSC instances — which requires the sink to
	 * support at least 2 slices.
	 */
	if (adjusted_mode->crtc_clock > dev_priv->max_cdclk_freq ||
	    pipe_config->bigjoiner) {
		if (pipe_config->dsc.slice_count < 2) {
			drm_dbg_kms(&dev_priv->drm,
				    "Cannot split stream to use 2 VDSC instances\n");
			return -EINVAL;
		}

		pipe_config->dsc.dsc_split = true;
	}

	ret = intel_dp_dsc_compute_params(&dig_port->base, pipe_config);
	if (ret < 0) {
		drm_dbg_kms(&dev_priv->drm,
			    "Cannot compute valid DSC parameters for Input Bpp = %d "
			    "Compressed BPP = %d\n",
			    pipe_config->pipe_bpp,
			    pipe_config->dsc.compressed_bpp);
		return ret;
	}

	pipe_config->dsc.compression_enable = true;
	drm_dbg_kms(&dev_priv->drm, "DP DSC computed with Input Bpp = %d "
		    "Compressed Bpp = %d Slice Count = %d\n",
		    pipe_config->pipe_bpp,
		    pipe_config->dsc.compressed_bpp,
		    pipe_config->dsc.slice_count);

	return 0;
}
2487
/*
 * Compute the DP link configuration (lane count, link rate, pipe bpp) for
 * @pipe_config. Tries an uncompressed "slow and wide" configuration first,
 * and falls back to DSC when the mode doesn't fit the available bandwidth,
 * when DSC is forced via debugfs, or when big joiner is needed.
 *
 * Returns 0 on success or a negative error code.
 */
static int
intel_dp_compute_link_config(struct intel_encoder *encoder,
			     struct intel_crtc_state *pipe_config,
			     struct drm_connector_state *conn_state)
{
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
	const struct drm_display_mode *adjusted_mode =
		&pipe_config->hw.adjusted_mode;
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct link_config_limits limits;
	int common_len;
	int ret;

	common_len = intel_dp_common_len_rate_limit(intel_dp,
						    intel_dp->max_link_rate);

	/* No common link rates between source and sink */
	drm_WARN_ON(encoder->base.dev, common_len <= 0);

	/* Search bounds: clock is an index into intel_dp->common_rates. */
	limits.min_clock = 0;
	limits.max_clock = common_len - 1;

	limits.min_lane_count = 1;
	limits.max_lane_count = intel_dp_max_lane_count(intel_dp);

	limits.min_bpp = intel_dp_min_bpp(pipe_config->output_format);
	limits.max_bpp = intel_dp_max_bpp(intel_dp, pipe_config);

	if (intel_dp_is_edp(intel_dp)) {
		/*
		 * Use the maximum clock and number of lanes the eDP panel
		 * advertizes being capable of. The panels are generally
		 * designed to support only a single clock and lane
		 * configuration, and typically these values correspond to the
		 * native resolution of the panel.
		 */
		limits.min_lane_count = limits.max_lane_count;
		limits.min_clock = limits.max_clock;
	}

	intel_dp_adjust_compliance_config(intel_dp, pipe_config, &limits);

	drm_dbg_kms(&i915->drm, "DP link computation with max lane count %i "
		    "max rate %d max bpp %d pixel clock %iKHz\n",
		    limits.max_lane_count,
		    intel_dp->common_rates[limits.max_clock],
		    limits.max_bpp, adjusted_mode->crtc_clock);

	/*
	 * Big joiner when the mode exceeds the max dotclock or is wider than
	 * 5120 pixels — presumably the single-pipe hdisplay limit; confirm
	 * against the platform docs.
	 */
	if ((adjusted_mode->crtc_clock > i915->max_dotclk_freq ||
	     adjusted_mode->crtc_hdisplay > 5120) &&
	    intel_dp_can_bigjoiner(intel_dp))
		pipe_config->bigjoiner = true;

	/*
	 * Optimize for slow and wide. This is the place to add alternative
	 * optimization policy.
	 */
	ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits);

	/* enable compression if the mode doesn't fit available BW */
	drm_dbg_kms(&i915->drm, "Force DSC en = %d\n", intel_dp->force_dsc_en);
	if (ret || intel_dp->force_dsc_en || pipe_config->bigjoiner) {
		ret = intel_dp_dsc_compute_config(intel_dp, pipe_config,
						  conn_state, &limits);
		if (ret < 0)
			return ret;
	}

	if (pipe_config->dsc.compression_enable) {
		drm_dbg_kms(&i915->drm,
			    "DP lane count %d clock %d Input bpp %d Compressed bpp %d\n",
			    pipe_config->lane_count, pipe_config->port_clock,
			    pipe_config->pipe_bpp,
			    pipe_config->dsc.compressed_bpp);

		drm_dbg_kms(&i915->drm,
			    "DP link rate required %i available %i\n",
			    intel_dp_link_required(adjusted_mode->crtc_clock,
						   pipe_config->dsc.compressed_bpp),
			    intel_dp_max_data_rate(pipe_config->port_clock,
						   pipe_config->lane_count));
	} else {
		drm_dbg_kms(&i915->drm, "DP lane count %d clock %d bpp %d\n",
			    pipe_config->lane_count, pipe_config->port_clock,
			    pipe_config->pipe_bpp);

		drm_dbg_kms(&i915->drm,
			    "DP link rate required %i available %i\n",
			    intel_dp_link_required(adjusted_mode->crtc_clock,
						   pipe_config->pipe_bpp),
			    intel_dp_max_data_rate(pipe_config->port_clock,
						   pipe_config->lane_count));
	}
	return 0;
}
2583
2584 bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state,
2585                                   const struct drm_connector_state *conn_state)
2586 {
2587         const struct intel_digital_connector_state *intel_conn_state =
2588                 to_intel_digital_connector_state(conn_state);
2589         const struct drm_display_mode *adjusted_mode =
2590                 &crtc_state->hw.adjusted_mode;
2591
2592         /*
2593          * Our YCbCr output is always limited range.
2594          * crtc_state->limited_color_range only applies to RGB,
2595          * and it must never be set for YCbCr or we risk setting
2596          * some conflicting bits in PIPECONF which will mess up
2597          * the colors on the monitor.
2598          */
2599         if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
2600                 return false;
2601
2602         if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) {
2603                 /*
2604                  * See:
2605                  * CEA-861-E - 5.1 Default Encoding Parameters
2606                  * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
2607                  */
2608                 return crtc_state->pipe_bpp != 18 &&
2609                         drm_default_rgb_quant_range(adjusted_mode) ==
2610                         HDMI_QUANTIZATION_RANGE_LIMITED;
2611         } else {
2612                 return intel_conn_state->broadcast_rgb ==
2613                         INTEL_BROADCAST_RGB_LIMITED;
2614         }
2615 }
2616
2617 static bool intel_dp_port_has_audio(struct drm_i915_private *dev_priv,
2618                                     enum port port)
2619 {
2620         if (IS_G4X(dev_priv))
2621                 return false;
2622         if (INTEL_GEN(dev_priv) < 12 && port == PORT_A)
2623                 return false;
2624
2625         return true;
2626 }
2627
/*
 * Fill in the pixel format, colorimetry, bpc and dynamic range fields of a
 * revision 0x5 VSC SDP from the crtc/connector state, per the DP 1.4a
 * colorimetry tables.
 */
static void intel_dp_compute_vsc_colorimetry(const struct intel_crtc_state *crtc_state,
					     const struct drm_connector_state *conn_state,
					     struct drm_dp_vsc_sdp *vsc)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/*
	 * Prepare VSC Header for SU as per DP 1.4 spec, Table 2-118
	 * VSC SDP supporting 3D stereo, PSR2, and Pixel Encoding/
	 * Colorimetry Format indication.
	 */
	vsc->revision = 0x5;
	vsc->length = 0x13;

	/* DP 1.4a spec, Table 2-120 */
	switch (crtc_state->output_format) {
	case INTEL_OUTPUT_FORMAT_YCBCR444:
		vsc->pixelformat = DP_PIXELFORMAT_YUV444;
		break;
	case INTEL_OUTPUT_FORMAT_YCBCR420:
		vsc->pixelformat = DP_PIXELFORMAT_YUV420;
		break;
	case INTEL_OUTPUT_FORMAT_RGB:
	default:
		vsc->pixelformat = DP_PIXELFORMAT_RGB;
	}

	/* Map the connector's colorspace property to DP colorimetry. */
	switch (conn_state->colorspace) {
	case DRM_MODE_COLORIMETRY_BT709_YCC:
		vsc->colorimetry = DP_COLORIMETRY_BT709_YCC;
		break;
	case DRM_MODE_COLORIMETRY_XVYCC_601:
		vsc->colorimetry = DP_COLORIMETRY_XVYCC_601;
		break;
	case DRM_MODE_COLORIMETRY_XVYCC_709:
		vsc->colorimetry = DP_COLORIMETRY_XVYCC_709;
		break;
	case DRM_MODE_COLORIMETRY_SYCC_601:
		vsc->colorimetry = DP_COLORIMETRY_SYCC_601;
		break;
	case DRM_MODE_COLORIMETRY_OPYCC_601:
		vsc->colorimetry = DP_COLORIMETRY_OPYCC_601;
		break;
	case DRM_MODE_COLORIMETRY_BT2020_CYCC:
		vsc->colorimetry = DP_COLORIMETRY_BT2020_CYCC;
		break;
	case DRM_MODE_COLORIMETRY_BT2020_RGB:
		vsc->colorimetry = DP_COLORIMETRY_BT2020_RGB;
		break;
	case DRM_MODE_COLORIMETRY_BT2020_YCC:
		vsc->colorimetry = DP_COLORIMETRY_BT2020_YCC;
		break;
	case DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65:
	case DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER:
		vsc->colorimetry = DP_COLORIMETRY_DCI_P3_RGB;
		break;
	default:
		/*
		 * RGB->YCBCR color conversion uses the BT.709
		 * color space.
		 */
		if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
			vsc->colorimetry = DP_COLORIMETRY_BT709_YCC;
		else
			vsc->colorimetry = DP_COLORIMETRY_DEFAULT;
		break;
	}

	/* pipe_bpp is the total over 3 components. */
	vsc->bpc = crtc_state->pipe_bpp / 3;

	/* only RGB pixelformat supports 6 bpc */
	drm_WARN_ON(&dev_priv->drm,
		    vsc->bpc == 6 && vsc->pixelformat != DP_PIXELFORMAT_RGB);

	/* all YCbCr are always limited range */
	vsc->dynamic_range = DP_DYNAMIC_RANGE_CTA;
	vsc->content_type = DP_CONTENT_TYPE_NOT_DEFINED;
}
2707
2708 static void intel_dp_compute_vsc_sdp(struct intel_dp *intel_dp,
2709                                      struct intel_crtc_state *crtc_state,
2710                                      const struct drm_connector_state *conn_state)
2711 {
2712         struct drm_dp_vsc_sdp *vsc = &crtc_state->infoframes.vsc;
2713
2714         /* When a crtc state has PSR, VSC SDP will be handled by PSR routine */
2715         if (crtc_state->has_psr)
2716                 return;
2717
2718         if (!intel_dp_needs_vsc_sdp(crtc_state, conn_state))
2719                 return;
2720
2721         crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
2722         vsc->sdp_type = DP_SDP_VSC;
2723         intel_dp_compute_vsc_colorimetry(crtc_state, conn_state,
2724                                          &crtc_state->infoframes.vsc);
2725 }
2726
2727 void intel_dp_compute_psr_vsc_sdp(struct intel_dp *intel_dp,
2728                                   const struct intel_crtc_state *crtc_state,
2729                                   const struct drm_connector_state *conn_state,
2730                                   struct drm_dp_vsc_sdp *vsc)
2731 {
2732         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2733
2734         vsc->sdp_type = DP_SDP_VSC;
2735
2736         if (dev_priv->psr.psr2_enabled) {
2737                 if (dev_priv->psr.colorimetry_support &&
2738                     intel_dp_needs_vsc_sdp(crtc_state, conn_state)) {
2739                         /* [PSR2, +Colorimetry] */
2740                         intel_dp_compute_vsc_colorimetry(crtc_state, conn_state,
2741                                                          vsc);
2742                 } else {
2743                         /*
2744                          * [PSR2, -Colorimetry]
2745                          * Prepare VSC Header for SU as per eDP 1.4 spec, Table 6-11
2746                          * 3D stereo + PSR/PSR2 + Y-coordinate.
2747                          */
2748                         vsc->revision = 0x4;
2749                         vsc->length = 0xe;
2750                 }
2751         } else {
2752                 /*
2753                  * [PSR1]
2754                  * Prepare VSC Header for SU as per DP 1.4 spec, Table 2-118
2755                  * VSC SDP supporting 3D stereo + PSR (applies to eDP v1.3 or
2756                  * higher).
2757                  */
2758                 vsc->revision = 0x2;
2759                 vsc->length = 0x8;
2760         }
2761 }
2762
2763 static void
2764 intel_dp_compute_hdr_metadata_infoframe_sdp(struct intel_dp *intel_dp,
2765                                             struct intel_crtc_state *crtc_state,
2766                                             const struct drm_connector_state *conn_state)
2767 {
2768         int ret;
2769         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2770         struct hdmi_drm_infoframe *drm_infoframe = &crtc_state->infoframes.drm.drm;
2771
2772         if (!conn_state->hdr_output_metadata)
2773                 return;
2774
2775         ret = drm_hdmi_infoframe_set_hdr_metadata(drm_infoframe, conn_state);
2776
2777         if (ret) {
2778                 drm_dbg_kms(&dev_priv->drm, "couldn't set HDR metadata in infoframe\n");
2779                 return;
2780         }
2781
2782         crtc_state->infoframes.enable |=
2783                 intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA);
2784 }
2785
2786 static void
2787 intel_dp_drrs_compute_config(struct intel_dp *intel_dp,
2788                              struct intel_crtc_state *pipe_config,
2789                              int output_bpp, bool constant_n)
2790 {
2791         struct intel_connector *intel_connector = intel_dp->attached_connector;
2792         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2793
2794         /*
2795          * DRRS and PSR can't be enable together, so giving preference to PSR
2796          * as it allows more power-savings by complete shutting down display,
2797          * so to guarantee this, intel_dp_drrs_compute_config() must be called
2798          * after intel_psr_compute_config().
2799          */
2800         if (pipe_config->has_psr)
2801                 return;
2802
2803         if (!intel_connector->panel.downclock_mode ||
2804             dev_priv->drrs.type != SEAMLESS_DRRS_SUPPORT)
2805                 return;
2806
2807         pipe_config->has_drrs = true;
2808         intel_link_compute_m_n(output_bpp, pipe_config->lane_count,
2809                                intel_connector->panel.downclock_mode->clock,
2810                                pipe_config->port_clock, &pipe_config->dp_m2_n2,
2811                                constant_n, pipe_config->fec_enable);
2812 }
2813
/*
 * Main DP encoder .compute_config() hook: validates the requested mode,
 * computes the link configuration (possibly with DSC), the m/n link values,
 * PSR/DRRS state and the VSC/HDR SDP infoframes for @pipe_config.
 *
 * Returns 0 on success or a negative error code if the mode cannot be
 * supported.
 */
int
intel_dp_compute_config(struct intel_encoder *encoder,
			struct intel_crtc_state *pipe_config,
			struct drm_connector_state *conn_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	enum port port = encoder->port;
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	struct intel_digital_connector_state *intel_conn_state =
		to_intel_digital_connector_state(conn_state);
	bool constant_n = drm_dp_has_quirk(&intel_dp->desc, 0,
					   DP_DPCD_QUIRK_CONSTANT_N);
	int ret = 0, output_bpp;

	/* Pre-DDI PCH platforms route everything but port A through the PCH. */
	if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && port != PORT_A)
		pipe_config->has_pch_encoder = true;

	pipe_config->output_format = intel_dp_output_format(&intel_connector->base,
							    adjusted_mode);

	/* YCbCr 4:2:0 output needs the panel fitter configured. */
	if (pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) {
		ret = intel_pch_panel_fitting(pipe_config, conn_state);
		if (ret)
			return ret;
	}

	/* Resolve audio: hardware capability first, then the user property. */
	if (!intel_dp_port_has_audio(dev_priv, port))
		pipe_config->has_audio = false;
	else if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO)
		pipe_config->has_audio = intel_dp->has_audio;
	else
		pipe_config->has_audio = intel_conn_state->force_audio == HDMI_AUDIO_ON;

	/* eDP: replace the requested mode timings with the panel's fixed mode. */
	if (intel_dp_is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
		intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
				       adjusted_mode);

		if (HAS_GMCH(dev_priv))
			ret = intel_gmch_panel_fitting(pipe_config, conn_state);
		else
			ret = intel_pch_panel_fitting(pipe_config, conn_state);
		if (ret)
			return ret;
	}

	/* Reject mode flags DP output doesn't support. */
	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
		return -EINVAL;

	if (HAS_GMCH(dev_priv) &&
	    adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
		return -EINVAL;

	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
		return -EINVAL;

	if (intel_dp_hdisplay_bad(dev_priv, adjusted_mode->crtc_hdisplay))
		return -EINVAL;

	ret = intel_dp_compute_link_config(encoder, pipe_config, conn_state);
	if (ret < 0)
		return ret;

	pipe_config->limited_color_range =
		intel_dp_limited_color_range(pipe_config, conn_state);

	/* With DSC the link carries the compressed bpp, not the pipe bpp. */
	if (pipe_config->dsc.compression_enable)
		output_bpp = pipe_config->dsc.compressed_bpp;
	else
		output_bpp = intel_dp_output_bpp(pipe_config->output_format,
						 pipe_config->pipe_bpp);

	intel_link_compute_m_n(output_bpp,
			       pipe_config->lane_count,
			       adjusted_mode->crtc_clock,
			       pipe_config->port_clock,
			       &pipe_config->dp_m_n,
			       constant_n, pipe_config->fec_enable);

	if (!HAS_DDI(dev_priv))
		intel_dp_set_clock(encoder, pipe_config);

	/* PSR must be computed before DRRS (DRRS defers to PSR). */
	intel_psr_compute_config(intel_dp, pipe_config);
	intel_dp_drrs_compute_config(intel_dp, pipe_config, output_bpp,
				     constant_n);
	intel_dp_compute_vsc_sdp(intel_dp, pipe_config, conn_state);
	intel_dp_compute_hdr_metadata_infoframe_sdp(intel_dp, pipe_config, conn_state);

	return 0;
}
2905
2906 void intel_dp_set_link_params(struct intel_dp *intel_dp,
2907                               int link_rate, int lane_count)
2908 {
2909         intel_dp->link_trained = false;
2910         intel_dp->link_rate = link_rate;
2911         intel_dp->lane_count = lane_count;
2912 }
2913
/*
 * Compose the DP port register value into intel_dp->DP for the pre-DDI
 * platforms (the register itself is written later); for CPT PCH ports this
 * also updates TRANS_DP_CTL directly.
 */
static void intel_dp_prepare(struct intel_encoder *encoder,
			     const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	enum port port = encoder->port;
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
	const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;

	intel_dp_set_link_params(intel_dp,
				 pipe_config->port_clock,
				 pipe_config->lane_count);

	/*
	 * There are four kinds of DP registers:
	 *
	 *	IBX PCH
	 *	SNB CPU
	 *	IVB CPU
	 *	CPT PCH
	 *
	 * IBX PCH and CPU are the same for almost everything,
	 * except that the CPU DP PLL is configured in this
	 * register
	 *
	 * CPT PCH is quite different, having many bits moved
	 * to the TRANS_DP_CTL register instead. That
	 * configuration happens (oddly) in ilk_pch_enable
	 */

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg) & DP_DETECTED;

	/* Handle DP bits in common between all three register formats */
	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	intel_dp->DP |= DP_PORT_WIDTH(pipe_config->lane_count);

	/* Split out the IBX/CPU vs CPT settings */

	if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) {
		/* IVB CPU port A: sync polarity lives in the DP register. */
		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		intel_dp->DP |= DP_PIPE_SEL_IVB(crtc->pipe);
	} else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
		/* CPT PCH: enhanced framing is controlled via TRANS_DP_CTL. */
		u32 trans_dp;

		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		trans_dp = intel_de_read(dev_priv, TRANS_DP_CTL(crtc->pipe));
		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			trans_dp |= TRANS_DP_ENH_FRAMING;
		else
			trans_dp &= ~TRANS_DP_ENH_FRAMING;
		intel_de_write(dev_priv, TRANS_DP_CTL(crtc->pipe), trans_dp);
	} else {
		/* IBX PCH / CPU: everything is in the DP register itself. */
		if (IS_G4X(dev_priv) && pipe_config->limited_color_range)
			intel_dp->DP |= DP_COLOR_RANGE_16_235;

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		if (IS_CHERRYVIEW(dev_priv))
			intel_dp->DP |= DP_PIPE_SEL_CHV(crtc->pipe);
		else
			intel_dp->DP |= DP_PIPE_SEL(crtc->pipe);
	}
}
2996
/*
 * Panel power sequencer status mask/value pairs used by wait_panel_status().
 * The literal "0" terms keep the columns aligned across the three states for
 * the bits a given state doesn't care about.
 */
#define IDLE_ON_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
#define IDLE_ON_VALUE		(PP_ON | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)

#define IDLE_OFF_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | 0)
#define IDLE_OFF_VALUE		(0     | PP_SEQUENCE_NONE | 0                     | 0)

#define IDLE_CYCLE_MASK		(PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
#define IDLE_CYCLE_VALUE	(0     | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)

static void intel_pps_verify_state(struct intel_dp *intel_dp);
3007
/*
 * Poll the panel power status register until (status & mask) == value, with
 * a 5 second timeout. Only logs an error on timeout — callers proceed
 * regardless. Must be called with pps_mutex held.
 */
static void wait_panel_status(struct intel_dp *intel_dp,
				       u32 mask,
				       u32 value)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	i915_reg_t pp_stat_reg, pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	intel_pps_verify_state(intel_dp);

	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	drm_dbg_kms(&dev_priv->drm,
		    "mask %08x value %08x status %08x control %08x\n",
		    mask, value,
		    intel_de_read(dev_priv, pp_stat_reg),
		    intel_de_read(dev_priv, pp_ctrl_reg));

	/* 5000 ms timeout; a non-zero return means the wait timed out. */
	if (intel_de_wait_for_register(dev_priv, pp_stat_reg,
				       mask, value, 5000))
		drm_err(&dev_priv->drm,
			"Panel status timeout: status %08x control %08x\n",
			intel_de_read(dev_priv, pp_stat_reg),
			intel_de_read(dev_priv, pp_ctrl_reg));

	drm_dbg_kms(&dev_priv->drm, "Wait complete\n");
}
3037
3038 static void wait_panel_on(struct intel_dp *intel_dp)
3039 {
3040         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
3041
3042         drm_dbg_kms(&i915->drm, "Wait for panel power on\n");
3043         wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
3044 }
3045
3046 static void wait_panel_off(struct intel_dp *intel_dp)
3047 {
3048         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
3049
3050         drm_dbg_kms(&i915->drm, "Wait for panel power off time\n");
3051         wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
3052 }
3053
/*
 * Enforce the panel power-cycle delay: make sure enough time has elapsed
 * since the panel was last powered off before powering it on again.
 */
static void wait_panel_power_cycle(struct intel_dp *intel_dp)
{
        struct drm_i915_private *i915 = dp_to_i915(intel_dp);
        ktime_t panel_power_on_time;
        s64 panel_power_off_duration;

        drm_dbg_kms(&i915->drm, "Wait for panel power cycle\n");

        /* take the difference of current time and panel power off time
         * and then make panel wait for t11_t12 if needed. */
        panel_power_on_time = ktime_get_boottime();
        panel_power_off_duration = ktime_ms_delta(panel_power_on_time, intel_dp->panel_power_off_time);

        /* When we disable the VDD override bit last we have to do the manual
         * wait. */
        if (panel_power_off_duration < (s64)intel_dp->panel_power_cycle_delay)
                wait_remaining_ms_from_jiffies(jiffies,
                                       intel_dp->panel_power_cycle_delay - panel_power_off_duration);

        /* Finally wait for the sequencer itself to report idle */
        wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}
3075
/* Wait out backlight_on_delay, measured from the last panel power on. */
static void wait_backlight_on(struct intel_dp *intel_dp)
{
        wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
                                       intel_dp->backlight_on_delay);
}
3081
/* Wait out backlight_off_delay, measured from the last backlight off. */
static void edp_wait_backlight_off(struct intel_dp *intel_dp)
{
        wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
                                       intel_dp->backlight_off_delay);
}
3087
3088 /* Read the current pp_control value, unlocking the register if it
3089  * is locked
3090  */
3091
3092 static  u32 ilk_get_pp_control(struct intel_dp *intel_dp)
3093 {
3094         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3095         u32 control;
3096
3097         lockdep_assert_held(&dev_priv->pps_mutex);
3098
3099         control = intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp));
3100         if (drm_WARN_ON(&dev_priv->drm, !HAS_DDI(dev_priv) &&
3101                         (control & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS)) {
3102                 control &= ~PANEL_UNLOCK_MASK;
3103                 control |= PANEL_UNLOCK_REGS;
3104         }
3105         return control;
3106 }
3107
3108 /*
3109  * Must be paired with edp_panel_vdd_off().
3110  * Must hold pps_mutex around the whole on/off sequence.
3111  * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
3112  */
3113 static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
3114 {
3115         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3116         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3117         u32 pp;
3118         i915_reg_t pp_stat_reg, pp_ctrl_reg;
3119         bool need_to_disable = !intel_dp->want_panel_vdd;
3120
3121         lockdep_assert_held(&dev_priv->pps_mutex);
3122
3123         if (!intel_dp_is_edp(intel_dp))
3124                 return false;
3125
3126         cancel_delayed_work(&intel_dp->panel_vdd_work);
3127         intel_dp->want_panel_vdd = true;
3128
3129         if (edp_have_panel_vdd(intel_dp))
3130                 return need_to_disable;
3131
3132         drm_WARN_ON(&dev_priv->drm, intel_dp->vdd_wakeref);
3133         intel_dp->vdd_wakeref = intel_display_power_get(dev_priv,
3134                                                         intel_aux_power_domain(dig_port));
3135
3136         drm_dbg_kms(&dev_priv->drm, "Turning [ENCODER:%d:%s] VDD on\n",
3137                     dig_port->base.base.base.id,
3138                     dig_port->base.base.name);
3139
3140         if (!edp_have_panel_power(intel_dp))
3141                 wait_panel_power_cycle(intel_dp);
3142
3143         pp = ilk_get_pp_control(intel_dp);
3144         pp |= EDP_FORCE_VDD;
3145
3146         pp_stat_reg = _pp_stat_reg(intel_dp);
3147         pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
3148
3149         intel_de_write(dev_priv, pp_ctrl_reg, pp);
3150         intel_de_posting_read(dev_priv, pp_ctrl_reg);
3151         drm_dbg_kms(&dev_priv->drm, "PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
3152                     intel_de_read(dev_priv, pp_stat_reg),
3153                     intel_de_read(dev_priv, pp_ctrl_reg));
3154         /*
3155          * If the panel wasn't on, delay before accessing aux channel
3156          */
3157         if (!edp_have_panel_power(intel_dp)) {
3158                 drm_dbg_kms(&dev_priv->drm,
3159                             "[ENCODER:%d:%s] panel power wasn't enabled\n",
3160                             dig_port->base.base.base.id,
3161                             dig_port->base.base.name);
3162                 msleep(intel_dp->panel_power_up_delay);
3163         }
3164
3165         return need_to_disable;
3166 }
3167
3168 /*
3169  * Must be paired with intel_edp_panel_vdd_off() or
3170  * intel_edp_panel_off().
3171  * Nested calls to these functions are not allowed since
3172  * we drop the lock. Caller must use some higher level
3173  * locking to prevent nested calls from other threads.
3174  */
3175 void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
3176 {
3177         intel_wakeref_t wakeref;
3178         bool vdd;
3179
3180         if (!intel_dp_is_edp(intel_dp))
3181                 return;
3182
3183         vdd = false;
3184         with_pps_lock(intel_dp, wakeref)
3185                 vdd = edp_panel_vdd_on(intel_dp);
3186         I915_STATE_WARN(!vdd, "[ENCODER:%d:%s] VDD already requested on\n",
3187                         dp_to_dig_port(intel_dp)->base.base.base.id,
3188                         dp_to_dig_port(intel_dp)->base.base.name);
3189 }
3190
/*
 * Immediately clear the VDD override in the panel power sequencer and
 * release the AUX power domain reference taken by edp_panel_vdd_on().
 * Caller must hold pps_mutex and must have cleared want_panel_vdd first.
 */
static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
{
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        struct intel_digital_port *dig_port =
                dp_to_dig_port(intel_dp);
        u32 pp;
        i915_reg_t pp_stat_reg, pp_ctrl_reg;

        lockdep_assert_held(&dev_priv->pps_mutex);

        drm_WARN_ON(&dev_priv->drm, intel_dp->want_panel_vdd);

        /* Nothing to do if VDD is already off in hardware */
        if (!edp_have_panel_vdd(intel_dp))
                return;

        drm_dbg_kms(&dev_priv->drm, "Turning [ENCODER:%d:%s] VDD off\n",
                    dig_port->base.base.base.id,
                    dig_port->base.base.name);

        pp = ilk_get_pp_control(intel_dp);
        pp &= ~EDP_FORCE_VDD;

        pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
        pp_stat_reg = _pp_stat_reg(intel_dp);

        intel_de_write(dev_priv, pp_ctrl_reg, pp);
        intel_de_posting_read(dev_priv, pp_ctrl_reg);

        /* Make sure sequencer is idle before allowing subsequent activity */
        drm_dbg_kms(&dev_priv->drm, "PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
                    intel_de_read(dev_priv, pp_stat_reg),
                    intel_de_read(dev_priv, pp_ctrl_reg));

        /* Dropping VDD with panel power already off starts a power cycle */
        if ((pp & PANEL_POWER_ON) == 0)
                intel_dp->panel_power_off_time = ktime_get_boottime();

        intel_display_power_put(dev_priv,
                                intel_aux_power_domain(dig_port),
                                fetch_and_zero(&intel_dp->vdd_wakeref));
}
3231
3232 static void edp_panel_vdd_work(struct work_struct *__work)
3233 {
3234         struct intel_dp *intel_dp =
3235                 container_of(to_delayed_work(__work),
3236                              struct intel_dp, panel_vdd_work);
3237         intel_wakeref_t wakeref;
3238
3239         with_pps_lock(intel_dp, wakeref) {
3240                 if (!intel_dp->want_panel_vdd)
3241                         edp_panel_vdd_off_sync(intel_dp);
3242         }
3243 }
3244
3245 static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
3246 {
3247         unsigned long delay;
3248
3249         /*
3250          * Queue the timer to fire a long time from now (relative to the power
3251          * down delay) to keep the panel power up across a sequence of
3252          * operations.
3253          */
3254         delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
3255         schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
3256 }
3257
3258 /*
3259  * Must be paired with edp_panel_vdd_on().
3260  * Must hold pps_mutex around the whole on/off sequence.
3261  * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
3262  */
3263 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
3264 {
3265         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3266
3267         lockdep_assert_held(&dev_priv->pps_mutex);
3268
3269         if (!intel_dp_is_edp(intel_dp))
3270                 return;
3271
3272         I915_STATE_WARN(!intel_dp->want_panel_vdd, "[ENCODER:%d:%s] VDD not forced on",
3273                         dp_to_dig_port(intel_dp)->base.base.base.id,
3274                         dp_to_dig_port(intel_dp)->base.base.name);
3275
3276         intel_dp->want_panel_vdd = false;
3277
3278         if (sync)
3279                 edp_panel_vdd_off_sync(intel_dp);
3280         else
3281                 edp_panel_vdd_schedule_off(intel_dp);
3282 }
3283
/*
 * Power the eDP panel on via the panel power sequencer, honouring the
 * power-cycle delay and the ILK-specific panel-reset workaround.
 * Caller must hold pps_mutex.
 */
static void edp_panel_on(struct intel_dp *intel_dp)
{
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        u32 pp;
        i915_reg_t pp_ctrl_reg;

        lockdep_assert_held(&dev_priv->pps_mutex);

        if (!intel_dp_is_edp(intel_dp))
                return;

        drm_dbg_kms(&dev_priv->drm, "Turn [ENCODER:%d:%s] panel power on\n",
                    dp_to_dig_port(intel_dp)->base.base.base.id,
                    dp_to_dig_port(intel_dp)->base.base.name);

        if (drm_WARN(&dev_priv->drm, edp_have_panel_power(intel_dp),
                     "[ENCODER:%d:%s] panel power already on\n",
                     dp_to_dig_port(intel_dp)->base.base.base.id,
                     dp_to_dig_port(intel_dp)->base.base.name))
                return;

        wait_panel_power_cycle(intel_dp);

        pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
        pp = ilk_get_pp_control(intel_dp);
        if (IS_GEN(dev_priv, 5)) {
                /* ILK workaround: disable reset around power sequence */
                pp &= ~PANEL_POWER_RESET;
                intel_de_write(dev_priv, pp_ctrl_reg, pp);
                intel_de_posting_read(dev_priv, pp_ctrl_reg);
        }

        pp |= PANEL_POWER_ON;
        if (!IS_GEN(dev_priv, 5))
                pp |= PANEL_POWER_RESET;

        intel_de_write(dev_priv, pp_ctrl_reg, pp);
        intel_de_posting_read(dev_priv, pp_ctrl_reg);

        /* Record when power came up for later backlight-on delay handling */
        wait_panel_on(intel_dp);
        intel_dp->last_power_on = jiffies;

        if (IS_GEN(dev_priv, 5)) {
                pp |= PANEL_POWER_RESET; /* restore panel reset bit */
                intel_de_write(dev_priv, pp_ctrl_reg, pp);
                intel_de_posting_read(dev_priv, pp_ctrl_reg);
        }
}
3332
3333 void intel_edp_panel_on(struct intel_dp *intel_dp)
3334 {
3335         intel_wakeref_t wakeref;
3336
3337         if (!intel_dp_is_edp(intel_dp))
3338                 return;
3339
3340         with_pps_lock(intel_dp, wakeref)
3341                 edp_panel_on(intel_dp);
3342 }
3343
3344
/*
 * Power the eDP panel off via the panel power sequencer. VDD must still
 * be forced on when this is called; the VDD override is cleared here as
 * part of the power-down write. Caller must hold pps_mutex.
 */
static void edp_panel_off(struct intel_dp *intel_dp)
{
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        u32 pp;
        i915_reg_t pp_ctrl_reg;

        lockdep_assert_held(&dev_priv->pps_mutex);

        if (!intel_dp_is_edp(intel_dp))
                return;

        drm_dbg_kms(&dev_priv->drm, "Turn [ENCODER:%d:%s] panel power off\n",
                    dig_port->base.base.base.id, dig_port->base.base.name);

        drm_WARN(&dev_priv->drm, !intel_dp->want_panel_vdd,
                 "Need [ENCODER:%d:%s] VDD to turn off panel\n",
                 dig_port->base.base.base.id, dig_port->base.base.name);

        pp = ilk_get_pp_control(intel_dp);
        /* We need to switch off panel power _and_ force vdd, for otherwise some
         * panels get very unhappy and cease to work. */
        pp &= ~(PANEL_POWER_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
                EDP_BLC_ENABLE);

        pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

        intel_dp->want_panel_vdd = false;

        intel_de_write(dev_priv, pp_ctrl_reg, pp);
        intel_de_posting_read(dev_priv, pp_ctrl_reg);

        /* Record power-off time for the power-cycle delay bookkeeping */
        wait_panel_off(intel_dp);
        intel_dp->panel_power_off_time = ktime_get_boottime();

        /* We got a reference when we enabled the VDD. */
        intel_display_power_put(dev_priv,
                                intel_aux_power_domain(dig_port),
                                fetch_and_zero(&intel_dp->vdd_wakeref));
}
3385
3386 void intel_edp_panel_off(struct intel_dp *intel_dp)
3387 {
3388         intel_wakeref_t wakeref;
3389
3390         if (!intel_dp_is_edp(intel_dp))
3391                 return;
3392
3393         with_pps_lock(intel_dp, wakeref)
3394                 edp_panel_off(intel_dp);
3395 }
3396
3397 /* Enable backlight in the panel power control. */
3398 static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
3399 {
3400         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3401         intel_wakeref_t wakeref;
3402
3403         /*
3404          * If we enable the backlight right away following a panel power
3405          * on, we may see slight flicker as the panel syncs with the eDP
3406          * link.  So delay a bit to make sure the image is solid before
3407          * allowing it to appear.
3408          */
3409         wait_backlight_on(intel_dp);
3410
3411         with_pps_lock(intel_dp, wakeref) {
3412                 i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
3413                 u32 pp;
3414
3415                 pp = ilk_get_pp_control(intel_dp);
3416                 pp |= EDP_BLC_ENABLE;
3417
3418                 intel_de_write(dev_priv, pp_ctrl_reg, pp);
3419                 intel_de_posting_read(dev_priv, pp_ctrl_reg);
3420         }
3421 }
3422
3423 /* Enable backlight PWM and backlight PP control. */
3424 void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
3425                             const struct drm_connector_state *conn_state)
3426 {
3427         struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(conn_state->best_encoder));
3428         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
3429
3430         if (!intel_dp_is_edp(intel_dp))
3431                 return;
3432
3433         drm_dbg_kms(&i915->drm, "\n");
3434
3435         intel_panel_enable_backlight(crtc_state, conn_state);
3436         _intel_edp_backlight_on(intel_dp);
3437 }
3438
3439 /* Disable backlight in the panel power control. */
3440 static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
3441 {
3442         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3443         intel_wakeref_t wakeref;
3444
3445         if (!intel_dp_is_edp(intel_dp))
3446                 return;
3447
3448         with_pps_lock(intel_dp, wakeref) {
3449                 i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
3450                 u32 pp;
3451
3452                 pp = ilk_get_pp_control(intel_dp);
3453                 pp &= ~EDP_BLC_ENABLE;
3454
3455                 intel_de_write(dev_priv, pp_ctrl_reg, pp);
3456                 intel_de_posting_read(dev_priv, pp_ctrl_reg);
3457         }
3458
3459         intel_dp->last_backlight_off = jiffies;
3460         edp_wait_backlight_off(intel_dp);
3461 }
3462
3463 /* Disable backlight PP control and backlight PWM. */
3464 void intel_edp_backlight_off(const struct drm_connector_state *old_conn_state)
3465 {
3466         struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(old_conn_state->best_encoder));
3467         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
3468
3469         if (!intel_dp_is_edp(intel_dp))
3470                 return;
3471
3472         drm_dbg_kms(&i915->drm, "\n");
3473
3474         _intel_edp_backlight_off(intel_dp);
3475         intel_panel_disable_backlight(old_conn_state);
3476 }
3477
3478 /*
3479  * Hook for controlling the panel power control backlight through the bl_power
3480  * sysfs attribute. Take care to handle multiple calls.
3481  */
3482 static void intel_edp_backlight_power(struct intel_connector *connector,
3483                                       bool enable)
3484 {
3485         struct drm_i915_private *i915 = to_i915(connector->base.dev);
3486         struct intel_dp *intel_dp = intel_attached_dp(connector);
3487         intel_wakeref_t wakeref;
3488         bool is_enabled;
3489
3490         is_enabled = false;
3491         with_pps_lock(intel_dp, wakeref)
3492                 is_enabled = ilk_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
3493         if (is_enabled == enable)
3494                 return;
3495
3496         drm_dbg_kms(&i915->drm, "panel power control backlight %s\n",
3497                     enable ? "enable" : "disable");
3498
3499         if (enable)
3500                 _intel_edp_backlight_on(intel_dp);
3501         else
3502                 _intel_edp_backlight_off(intel_dp);
3503 }
3504
3505 static void assert_dp_port(struct intel_dp *intel_dp, bool state)
3506 {
3507         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3508         struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
3509         bool cur_state = intel_de_read(dev_priv, intel_dp->output_reg) & DP_PORT_EN;
3510
3511         I915_STATE_WARN(cur_state != state,
3512                         "[ENCODER:%d:%s] state assertion failure (expected %s, current %s)\n",
3513                         dig_port->base.base.base.id, dig_port->base.base.name,
3514                         onoff(state), onoff(cur_state));
3515 }
3516 #define assert_dp_port_disabled(d) assert_dp_port((d), false)
3517
3518 static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
3519 {
3520         bool cur_state = intel_de_read(dev_priv, DP_A) & DP_PLL_ENABLE;
3521
3522         I915_STATE_WARN(cur_state != state,
3523                         "eDP PLL state assertion failure (expected %s, current %s)\n",
3524                         onoff(state), onoff(cur_state));
3525 }
3526 #define assert_edp_pll_enabled(d) assert_edp_pll((d), true)
3527 #define assert_edp_pll_disabled(d) assert_edp_pll((d), false)
3528
/*
 * Enable the eDP PLL on port A, programming the PLL frequency from the
 * crtc state first and applying the ILK workaround for enabling the PLL
 * while another pipe is driving FDI. Pipe, port and PLL must all be
 * disabled on entry (asserted below).
 */
static void ilk_edp_pll_on(struct intel_dp *intel_dp,
                           const struct intel_crtc_state *pipe_config)
{
        struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

        assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder);
        assert_dp_port_disabled(intel_dp);
        assert_edp_pll_disabled(dev_priv);

        drm_dbg_kms(&dev_priv->drm, "enabling eDP PLL for clock %d\n",
                    pipe_config->port_clock);

        /* Select the PLL frequency before enabling the PLL itself */
        intel_dp->DP &= ~DP_PLL_FREQ_MASK;

        if (pipe_config->port_clock == 162000)
                intel_dp->DP |= DP_PLL_FREQ_162MHZ;
        else
                intel_dp->DP |= DP_PLL_FREQ_270MHZ;

        intel_de_write(dev_priv, DP_A, intel_dp->DP);
        intel_de_posting_read(dev_priv, DP_A);
        udelay(500);

        /*
         * [DevILK] Work around required when enabling DP PLL
         * while a pipe is enabled going to FDI:
         * 1. Wait for the start of vertical blank on the enabled pipe going to FDI
         * 2. Program DP PLL enable
         */
        if (IS_GEN(dev_priv, 5))
                intel_wait_for_vblank_if_active(dev_priv, !crtc->pipe);

        intel_dp->DP |= DP_PLL_ENABLE;

        intel_de_write(dev_priv, DP_A, intel_dp->DP);
        intel_de_posting_read(dev_priv, DP_A);
        udelay(200);
}
3568
/*
 * Disable the eDP PLL on port A. Pipe and port must already be disabled
 * and the PLL still enabled on entry (asserted below).
 */
static void ilk_edp_pll_off(struct intel_dp *intel_dp,
                            const struct intel_crtc_state *old_crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

        assert_pipe_disabled(dev_priv, old_crtc_state->cpu_transcoder);
        assert_dp_port_disabled(intel_dp);
        assert_edp_pll_enabled(dev_priv);

        drm_dbg_kms(&dev_priv->drm, "disabling eDP PLL\n");

        intel_dp->DP &= ~DP_PLL_ENABLE;

        intel_de_write(dev_priv, DP_A, intel_dp->DP);
        intel_de_posting_read(dev_priv, DP_A);
        udelay(200);
}
3587
3588 static bool downstream_hpd_needs_d0(struct intel_dp *intel_dp)
3589 {
3590         /*
3591          * DPCD 1.2+ should support BRANCH_DEVICE_CTRL, and thus
3592          * be capable of signalling downstream hpd with a long pulse.
3593          * Whether or not that means D3 is safe to use is not clear,
3594          * but let's assume so until proven otherwise.
3595          *
3596          * FIXME should really check all downstream ports...
3597          */
3598         return intel_dp->dpcd[DP_DPCD_REV] == 0x11 &&
3599                 drm_dp_is_branch(intel_dp->dpcd) &&
3600                 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD;
3601 }
3602
3603 void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
3604                                            const struct intel_crtc_state *crtc_state,
3605                                            bool enable)
3606 {
3607         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
3608         int ret;
3609
3610         if (!crtc_state->dsc.compression_enable)
3611                 return;
3612
3613         ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_DSC_ENABLE,
3614                                  enable ? DP_DECOMPRESSION_EN : 0);
3615         if (ret < 0)
3616                 drm_dbg_kms(&i915->drm,
3617                             "Failed to %s sink decompression state\n",
3618                             enable ? "enable" : "disable");
3619 }
3620
3621 static void
3622 intel_edp_init_source_oui(struct intel_dp *intel_dp, bool careful)
3623 {
3624         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
3625         u8 oui[] = { 0x00, 0xaa, 0x01 };
3626         u8 buf[3] = { 0 };
3627
3628         /*
3629          * During driver init, we want to be careful and avoid changing the source OUI if it's
3630          * already set to what we want, so as to avoid clearing any state by accident
3631          */
3632         if (careful) {
3633                 if (drm_dp_dpcd_read(&intel_dp->aux, DP_SOURCE_OUI, buf, sizeof(buf)) < 0)
3634                         drm_err(&i915->drm, "Failed to read source OUI\n");
3635
3636                 if (memcmp(oui, buf, sizeof(oui)) == 0)
3637                         return;
3638         }
3639
3640         if (drm_dp_dpcd_write(&intel_dp->aux, DP_SOURCE_OUI, oui, sizeof(oui)) < 0)
3641                 drm_err(&i915->drm, "Failed to write source OUI\n");
3642 }
3643
/*
 * If the device supports it, try to set the sink power state via the
 * DPCD DP_SET_POWER register. Going to D0 additionally resumes the
 * LSPCON (if any), reprograms the source OUI on eDP, and retries the
 * write a few times while the sink wakes up.
 */
void intel_dp_set_power(struct intel_dp *intel_dp, u8 mode)
{
        struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
        struct drm_i915_private *i915 = to_i915(encoder->base.dev);
        int ret, i;

        /* Should have a valid DPCD by this point */
        if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
                return;

        if (mode != DP_SET_POWER_D0) {
                /* Keep the sink in D0 if we rely on it for downstream HPD */
                if (downstream_hpd_needs_d0(intel_dp))
                        return;

                ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, mode);
        } else {
                struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);

                lspcon_resume(dp_to_dig_port(intel_dp));

                /* Write the source OUI as early as possible */
                if (intel_dp_is_edp(intel_dp))
                        intel_edp_init_source_oui(intel_dp, false);

                /*
                 * When turning on, we need to retry for 1ms to give the sink
                 * time to wake up.
                 */
                for (i = 0; i < 3; i++) {
                        ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, mode);
                        if (ret == 1)
                                break;
                        msleep(1);
                }

                if (ret == 1 && lspcon->active)
                        lspcon_wait_pcon_mode(lspcon);
        }

        /* ret == 1 means exactly one byte was written successfully */
        if (ret != 1)
                drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] Set power to %s failed\n",
                            encoder->base.base.id, encoder->base.name,
                            mode == DP_SET_POWER_D0 ? "D0" : "D3");
}
3689
3690 static bool cpt_dp_port_selected(struct drm_i915_private *dev_priv,
3691                                  enum port port, enum pipe *pipe)
3692 {
3693         enum pipe p;
3694
3695         for_each_pipe(dev_priv, p) {
3696                 u32 val = intel_de_read(dev_priv, TRANS_DP_CTL(p));
3697
3698                 if ((val & TRANS_DP_PORT_SEL_MASK) == TRANS_DP_PORT_SEL(port)) {
3699                         *pipe = p;
3700                         return true;
3701                 }
3702         }
3703
3704         drm_dbg_kms(&dev_priv->drm, "No pipe for DP port %c found\n",
3705                     port_name(port));
3706
3707         /* must initialize pipe to something for the asserts */
3708         *pipe = PIPE_A;
3709
3710         return false;
3711 }
3712
3713 bool intel_dp_port_enabled(struct drm_i915_private *dev_priv,
3714                            i915_reg_t dp_reg, enum port port,
3715                            enum pipe *pipe)
3716 {
3717         bool ret;
3718         u32 val;
3719
3720         val = intel_de_read(dev_priv, dp_reg);
3721
3722         ret = val & DP_PORT_EN;
3723
3724         /* asserts want to know the pipe even if the port is disabled */
3725         if (IS_IVYBRIDGE(dev_priv) && port == PORT_A)
3726                 *pipe = (val & DP_PIPE_SEL_MASK_IVB) >> DP_PIPE_SEL_SHIFT_IVB;
3727         else if (HAS_PCH_CPT(dev_priv) && port != PORT_A)
3728                 ret &= cpt_dp_port_selected(dev_priv, port, pipe);
3729         else if (IS_CHERRYVIEW(dev_priv))
3730                 *pipe = (val & DP_PIPE_SEL_MASK_CHV) >> DP_PIPE_SEL_SHIFT_CHV;
3731         else
3732                 *pipe = (val & DP_PIPE_SEL_MASK) >> DP_PIPE_SEL_SHIFT;
3733
3734         return ret;
3735 }
3736
3737 static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
3738                                   enum pipe *pipe)
3739 {
3740         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3741         struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3742         intel_wakeref_t wakeref;
3743         bool ret;
3744
3745         wakeref = intel_display_power_get_if_enabled(dev_priv,
3746                                                      encoder->power_domain);
3747         if (!wakeref)
3748                 return false;
3749
3750         ret = intel_dp_port_enabled(dev_priv, intel_dp->output_reg,
3751                                     encoder->port, pipe);
3752
3753         intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
3754
3755         return ret;
3756 }
3757
/*
 * Encoder ->get_config hook: read the current DP/eDP configuration out
 * of the hardware into @pipe_config (sync polarity, audio, lane count,
 * M/N values, port clock), including the VBT max-bpp override hack.
 */
static void intel_dp_get_config(struct intel_encoder *encoder,
                                struct intel_crtc_state *pipe_config)
{
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
        u32 tmp, flags = 0;
        enum port port = encoder->port;
        struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);

        if (encoder->type == INTEL_OUTPUT_EDP)
                pipe_config->output_types |= BIT(INTEL_OUTPUT_EDP);
        else
                pipe_config->output_types |= BIT(INTEL_OUTPUT_DP);

        tmp = intel_de_read(dev_priv, intel_dp->output_reg);

        pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;

        /* On CPT the sync polarity lives in the transcoder DP register */
        if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
                u32 trans_dp = intel_de_read(dev_priv,
                                             TRANS_DP_CTL(crtc->pipe));

                if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
                        flags |= DRM_MODE_FLAG_PHSYNC;
                else
                        flags |= DRM_MODE_FLAG_NHSYNC;

                if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH)
                        flags |= DRM_MODE_FLAG_PVSYNC;
                else
                        flags |= DRM_MODE_FLAG_NVSYNC;
        } else {
                if (tmp & DP_SYNC_HS_HIGH)
                        flags |= DRM_MODE_FLAG_PHSYNC;
                else
                        flags |= DRM_MODE_FLAG_NHSYNC;

                if (tmp & DP_SYNC_VS_HIGH)
                        flags |= DRM_MODE_FLAG_PVSYNC;
                else
                        flags |= DRM_MODE_FLAG_NVSYNC;
        }

        pipe_config->hw.adjusted_mode.flags |= flags;

        if (IS_G4X(dev_priv) && tmp & DP_COLOR_RANGE_16_235)
                pipe_config->limited_color_range = true;

        pipe_config->lane_count =
                ((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;

        intel_dp_get_m_n(crtc, pipe_config);

        /* Port A's clock comes from the eDP PLL frequency selection */
        if (port == PORT_A) {
                if ((intel_de_read(dev_priv, DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ)
                        pipe_config->port_clock = 162000;
                else
                        pipe_config->port_clock = 270000;
        }

        pipe_config->hw.adjusted_mode.crtc_clock =
                intel_dotclock_calculate(pipe_config->port_clock,
                                         &pipe_config->dp_m_n);

        if (intel_dp_is_edp(intel_dp) && dev_priv->vbt.edp.bpp &&
            pipe_config->pipe_bpp > dev_priv->vbt.edp.bpp) {
                /*
                 * This is a big fat ugly hack.
                 *
                 * Some machines in UEFI boot mode provide us a VBT that has 18
                 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
                 * unknown we fail to light up. Yet the same BIOS boots up with
                 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
                 * max, not what it tells us to use.
                 *
                 * Note: This will still be broken if the eDP panel is not lit
                 * up by the BIOS, and thus we can't get the mode at module
                 * load.
                 */
                drm_dbg_kms(&dev_priv->drm,
                            "pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
                            pipe_config->pipe_bpp, dev_priv->vbt.edp.bpp);
                dev_priv->vbt.edp.bpp = pipe_config->pipe_bpp;
        }
}
3843
3844 static bool
3845 intel_dp_get_dpcd(struct intel_dp *intel_dp);
3846
3847 /**
3848  * intel_dp_sync_state - sync the encoder state during init/resume
3849  * @encoder: intel encoder to sync
3850  * @crtc_state: state for the CRTC connected to the encoder
3851  *
3852  * Sync any state stored in the encoder wrt. HW state during driver init
3853  * and system resume.
3854  */
3855 void intel_dp_sync_state(struct intel_encoder *encoder,
3856                          const struct intel_crtc_state *crtc_state)
3857 {
3858         struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3859
3860         /*
3861          * Don't clobber DPCD if it's been already read out during output
3862          * setup (eDP) or detect.
3863          */
3864         if (intel_dp->dpcd[DP_DPCD_REV] == 0)
3865                 intel_dp_get_dpcd(intel_dp);
3866
3867         intel_dp->max_link_lane_count = intel_dp_max_common_lane_count(intel_dp);
3868         intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);
3869 }
3870
/*
 * Decide whether the BIOS-programmed state can be taken over with a fastset.
 * Returns true if fastset may proceed; returns false after flagging the
 * appropriate uapi bits so that a full recompute/modeset happens instead.
 */
bool intel_dp_initial_fastset_check(struct intel_encoder *encoder,
				    struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

	/*
	 * If BIOS has set an unsupported or non-standard link rate for some
	 * reason force an encoder recompute and full modeset.
	 */
	if (intel_dp_rate_index(intel_dp->source_rates, intel_dp->num_source_rates,
				crtc_state->port_clock) < 0) {
		drm_dbg_kms(&i915->drm, "Forcing full modeset due to unsupported link rate\n");
		crtc_state->uapi.connectors_changed = true;
		return false;
	}

	/*
	 * FIXME hack to force full modeset when DSC is being used.
	 *
	 * As long as we do not have full state readout and config comparison
	 * of crtc_state->dsc, we have no way to ensure reliable fastset.
	 * Remove once we have readout for DSC.
	 */
	if (crtc_state->dsc.compression_enable) {
		drm_dbg_kms(&i915->drm, "Forcing full modeset due to DSC being enabled\n");
		crtc_state->uapi.mode_changed = true;
		return false;
	}

	/* PSR state has no readout either, so recompute it on PSR-capable eDP. */
	if (CAN_PSR(i915) && intel_dp_is_edp(intel_dp)) {
		drm_dbg_kms(&i915->drm, "Forcing full modeset to compute PSR state\n");
		crtc_state->uapi.mode_changed = true;
		return false;
	}

	return true;
}
3909
/* Common DP disable path: audio off, then panel/backlight/link power-down. */
static void intel_disable_dp(struct intel_atomic_state *state,
			     struct intel_encoder *encoder,
			     const struct intel_crtc_state *old_crtc_state,
			     const struct drm_connector_state *old_conn_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

	/* The link is going down; any future enable must retrain it. */
	intel_dp->link_trained = false;

	if (old_crtc_state->has_audio)
		intel_audio_codec_disable(encoder,
					  old_crtc_state, old_conn_state);

	/* Make sure the panel is off before trying to change the mode. But also
	 * ensure that we have vdd while we switch off the panel. */
	intel_edp_panel_vdd_on(intel_dp);
	intel_edp_backlight_off(old_conn_state);
	/* Put the sink into D3 (power down) before turning the panel off. */
	intel_dp_set_power(intel_dp, DP_SET_POWER_D3);
	intel_edp_panel_off(intel_dp);
	/* Any PCON FRL training result is void once the link goes down. */
	intel_dp->frl.is_trained = false;
	intel_dp->frl.trained_rate_gbps = 0;
}
3932
/* g4x encoder disable hook: the common DP disable sequence is all we need. */
static void g4x_disable_dp(struct intel_atomic_state *state,
			   struct intel_encoder *encoder,
			   const struct intel_crtc_state *old_crtc_state,
			   const struct drm_connector_state *old_conn_state)
{
	intel_disable_dp(state, encoder, old_crtc_state, old_conn_state);
}
3940
/* VLV encoder disable hook: same common DP disable sequence as g4x. */
static void vlv_disable_dp(struct intel_atomic_state *state,
			   struct intel_encoder *encoder,
			   const struct intel_crtc_state *old_crtc_state,
			   const struct drm_connector_state *old_conn_state)
{
	intel_disable_dp(state, encoder, old_crtc_state, old_conn_state);
}
3948
/* Post-disable for g4x: take the link down, then stop the eDP PLL (port A). */
static void g4x_post_disable_dp(struct intel_atomic_state *state,
				struct intel_encoder *encoder,
				const struct intel_crtc_state *old_crtc_state,
				const struct drm_connector_state *old_conn_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	enum port port = encoder->port;

	/*
	 * Bspec does not list a specific disable sequence for g4x DP.
	 * Follow the ilk+ sequence (disable pipe before the port) for
	 * g4x DP as it does not suffer from underruns like the normal
	 * g4x modeset sequence (disable pipe after the port).
	 */
	intel_dp_link_down(encoder, old_crtc_state);

	/* Only ilk+ has port A */
	if (port == PORT_A)
		ilk_edp_pll_off(intel_dp, old_crtc_state);
}
3969
/* Post-disable for VLV: nothing platform-specific beyond the link down. */
static void vlv_post_disable_dp(struct intel_atomic_state *state,
				struct intel_encoder *encoder,
				const struct intel_crtc_state *old_crtc_state,
				const struct drm_connector_state *old_conn_state)
{
	intel_dp_link_down(encoder, old_crtc_state);
}
3977
/* Post-disable for CHV: link down, then reset the PHY data lanes via DPIO. */
static void chv_post_disable_dp(struct intel_atomic_state *state,
				struct intel_encoder *encoder,
				const struct intel_crtc_state *old_crtc_state,
				const struct drm_connector_state *old_conn_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);

	intel_dp_link_down(encoder, old_crtc_state);

	/* DPIO (PHY sideband) access is bracketed by vlv_dpio_get()/put(). */
	vlv_dpio_get(dev_priv);

	/* Assert data lane reset */
	chv_data_lane_soft_reset(encoder, old_crtc_state, true);

	vlv_dpio_put(dev_priv);
}
3994
3995 static void
3996 cpt_set_link_train(struct intel_dp *intel_dp,
3997                    const struct intel_crtc_state *crtc_state,
3998                    u8 dp_train_pat)
3999 {
4000         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
4001         u32 *DP = &intel_dp->DP;
4002
4003         *DP &= ~DP_LINK_TRAIN_MASK_CPT;
4004
4005         switch (intel_dp_training_pattern_symbol(dp_train_pat)) {
4006         case DP_TRAINING_PATTERN_DISABLE:
4007                 *DP |= DP_LINK_TRAIN_OFF_CPT;
4008                 break;
4009         case DP_TRAINING_PATTERN_1:
4010                 *DP |= DP_LINK_TRAIN_PAT_1_CPT;
4011                 break;
4012         case DP_TRAINING_PATTERN_2:
4013                 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
4014                 break;
4015         case DP_TRAINING_PATTERN_3:
4016                 drm_dbg_kms(&dev_priv->drm,
4017                             "TPS3 not supported, using TPS2 instead\n");
4018                 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
4019                 break;
4020         }
4021
4022         intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
4023         intel_de_posting_read(dev_priv, intel_dp->output_reg);
4024 }
4025
/*
 * Read and cache the PCON's DSC encoder capability block from DPCD.
 * The cache is cleared first so that a failed read leaves zeroes, not
 * stale values; a read failure is only logged.
 */
static void intel_dp_get_pcon_dsc_cap(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	/* Clear the cached register set to avoid using stale values */

	memset(intel_dp->pcon_dsc_dpcd, 0, sizeof(intel_dp->pcon_dsc_dpcd));

	if (drm_dp_dpcd_read(&intel_dp->aux, DP_PCON_DSC_ENCODER,
			     intel_dp->pcon_dsc_dpcd,
			     sizeof(intel_dp->pcon_dsc_dpcd)) < 0)
		drm_err(&i915->drm, "Failed to read DPCD register 0x%x\n",
			DP_PCON_DSC_ENCODER);

	drm_dbg_kms(&i915->drm, "PCON ENCODER DSC DPCD: %*ph\n",
		    (int)sizeof(intel_dp->pcon_dsc_dpcd), intel_dp->pcon_dsc_dpcd);
}
4043
4044 static int intel_dp_pcon_get_frl_mask(u8 frl_bw_mask)
4045 {
4046         int bw_gbps[] = {9, 18, 24, 32, 40, 48};
4047         int i;
4048
4049         for (i = ARRAY_SIZE(bw_gbps) - 1; i >= 0; i--) {
4050                 if (frl_bw_mask & (1 << i))
4051                         return bw_gbps[i];
4052         }
4053         return 0;
4054 }
4055
4056 static int intel_dp_pcon_set_frl_mask(int max_frl)
4057 {
4058         switch (max_frl) {
4059         case 48:
4060                 return DP_PCON_FRL_BW_MASK_48GBPS;
4061         case 40:
4062                 return DP_PCON_FRL_BW_MASK_40GBPS;
4063         case 32:
4064                 return DP_PCON_FRL_BW_MASK_32GBPS;
4065         case 24:
4066                 return DP_PCON_FRL_BW_MASK_24GBPS;
4067         case 18:
4068                 return DP_PCON_FRL_BW_MASK_18GBPS;
4069         case 9:
4070                 return DP_PCON_FRL_BW_MASK_9GBPS;
4071         }
4072
4073         return 0;
4074 }
4075
4076 static int intel_dp_hdmi_sink_max_frl(struct intel_dp *intel_dp)
4077 {
4078         struct intel_connector *intel_connector = intel_dp->attached_connector;
4079         struct drm_connector *connector = &intel_connector->base;
4080         int max_frl_rate;
4081         int max_lanes, rate_per_lane;
4082         int max_dsc_lanes, dsc_rate_per_lane;
4083
4084         max_lanes = connector->display_info.hdmi.max_lanes;
4085         rate_per_lane = connector->display_info.hdmi.max_frl_rate_per_lane;
4086         max_frl_rate = max_lanes * rate_per_lane;
4087
4088         if (connector->display_info.hdmi.dsc_cap.v_1p2) {
4089                 max_dsc_lanes = connector->display_info.hdmi.dsc_cap.max_lanes;
4090                 dsc_rate_per_lane = connector->display_info.hdmi.dsc_cap.max_frl_rate_per_lane;
4091                 if (max_dsc_lanes && dsc_rate_per_lane)
4092                         max_frl_rate = min(max_frl_rate, max_dsc_lanes * dsc_rate_per_lane);
4093         }
4094
4095         return max_frl_rate;
4096 }
4097
/*
 * Train the PCON's downstream HDMI link in FRL mode at the highest
 * bandwidth both the PCON and the sink support. On success caches the
 * trained rate in intel_dp->frl and returns 0; returns a negative errno
 * on AUX failure, timeout, or if the link ends up in non-FRL mode.
 */
static int intel_dp_pcon_start_frl_training(struct intel_dp *intel_dp)
{
/*
 * NOTE(review): these "mode" macros expand to plain booleans
 * ((1 > 0) == 1, so SEQUENTIAL and NORMAL evaluate to 0) -- confirm this
 * is the intended encoding for the drm_dp_pcon_frl_configure_*() args.
 */
#define PCON_EXTENDED_TRAIN_MODE (1 > 0)
#define PCON_CONCURRENT_MODE (1 > 0)
#define PCON_SEQUENTIAL_MODE !PCON_CONCURRENT_MODE
#define PCON_NORMAL_TRAIN_MODE !PCON_EXTENDED_TRAIN_MODE
#define TIMEOUT_FRL_READY_MS 500
#define TIMEOUT_HDMI_LINK_ACTIVE_MS 1000

	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int max_frl_bw, max_pcon_frl_bw, max_edid_frl_bw, ret;
	u8 max_frl_bw_mask = 0, frl_trained_mask;
	bool is_active;

	/* Start from a clean FRL configuration on the PCON. */
	ret = drm_dp_pcon_reset_frl_config(&intel_dp->aux);
	if (ret < 0)
		return ret;

	max_pcon_frl_bw = intel_dp->dfp.pcon_max_frl_bw;
	drm_dbg(&i915->drm, "PCON max rate = %d Gbps\n", max_pcon_frl_bw);

	max_edid_frl_bw = intel_dp_hdmi_sink_max_frl(intel_dp);
	drm_dbg(&i915->drm, "Sink max rate from EDID = %d Gbps\n", max_edid_frl_bw);

	/* Train at the highest bandwidth both sides can handle. */
	max_frl_bw = min(max_edid_frl_bw, max_pcon_frl_bw);

	if (max_frl_bw <= 0)
		return -EINVAL;

	ret = drm_dp_pcon_frl_prepare(&intel_dp->aux, false);
	if (ret < 0)
		return ret;
	/* Wait for PCON to be FRL Ready */
	wait_for(is_active = drm_dp_pcon_is_frl_ready(&intel_dp->aux) == true, TIMEOUT_FRL_READY_MS);

	if (!is_active)
		return -ETIMEDOUT;

	/* Program the target bandwidth and kick off FRL training. */
	max_frl_bw_mask = intel_dp_pcon_set_frl_mask(max_frl_bw);
	ret = drm_dp_pcon_frl_configure_1(&intel_dp->aux, max_frl_bw, PCON_SEQUENTIAL_MODE);
	if (ret < 0)
		return ret;
	ret = drm_dp_pcon_frl_configure_2(&intel_dp->aux, max_frl_bw_mask, PCON_NORMAL_TRAIN_MODE);
	if (ret < 0)
		return ret;
	ret = drm_dp_pcon_frl_enable(&intel_dp->aux);
	if (ret < 0)
		return ret;
	/*
	 * Wait for FRL to be completed
	 * Check if the HDMI Link is up and active.
	 */
	wait_for(is_active = drm_dp_pcon_hdmi_link_active(&intel_dp->aux) == true, TIMEOUT_HDMI_LINK_ACTIVE_MS);

	if (!is_active)
		return -ETIMEDOUT;

	/* Verify HDMI Link configuration shows FRL Mode */
	if (drm_dp_pcon_hdmi_link_mode(&intel_dp->aux, &frl_trained_mask) !=
	    DP_PCON_HDMI_MODE_FRL) {
		drm_dbg(&i915->drm, "HDMI couldn't be trained in FRL Mode\n");
		return -EINVAL;
	}
	drm_dbg(&i915->drm, "MAX_FRL_MASK = %u, FRL_TRAINED_MASK = %u\n", max_frl_bw_mask, frl_trained_mask);

	/* Cache the result so we don't retrain needlessly. */
	intel_dp->frl.trained_rate_gbps = intel_dp_pcon_get_frl_mask(frl_trained_mask);
	intel_dp->frl.is_trained = true;
	drm_dbg(&i915->drm, "FRL trained with : %d Gbps\n", intel_dp->frl.trained_rate_gbps);

	return 0;
}
4169
4170 static bool intel_dp_is_hdmi_2_1_sink(struct intel_dp *intel_dp)
4171 {
4172         if (drm_dp_is_branch(intel_dp->dpcd) &&
4173             intel_dp->has_hdmi_sink &&
4174             intel_dp_hdmi_sink_max_frl(intel_dp) > 0)
4175                 return true;
4176
4177         return false;
4178 }
4179
4180 void intel_dp_check_frl_training(struct intel_dp *intel_dp)
4181 {
4182         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
4183
4184         /* Always go for FRL training if supported */
4185         if (!intel_dp_is_hdmi_2_1_sink(intel_dp) ||
4186             intel_dp->frl.is_trained)
4187                 return;
4188
4189         if (intel_dp_pcon_start_frl_training(intel_dp) < 0) {
4190                 int ret, mode;
4191
4192                 drm_dbg(&dev_priv->drm, "Couldnt set FRL mode, continuing with TMDS mode\n");
4193                 ret = drm_dp_pcon_reset_frl_config(&intel_dp->aux);
4194                 mode = drm_dp_pcon_hdmi_link_mode(&intel_dp->aux, NULL);
4195
4196                 if (ret < 0 || mode != DP_PCON_HDMI_MODE_TMDS)
4197                         drm_dbg(&dev_priv->drm, "Issue with PCON, cannot set TMDS mode\n");
4198         } else {
4199                 drm_dbg(&dev_priv->drm, "FRL training Completed\n");
4200         }
4201 }
4202
4203 static int
4204 intel_dp_pcon_dsc_enc_slice_height(const struct intel_crtc_state *crtc_state)
4205 {
4206         int vactive = crtc_state->hw.adjusted_mode.vdisplay;
4207
4208         return intel_hdmi_dsc_get_slice_height(vactive);
4209 }
4210
4211 static int
4212 intel_dp_pcon_dsc_enc_slices(struct intel_dp *intel_dp,
4213                              const struct intel_crtc_state *crtc_state)
4214 {
4215         struct intel_connector *intel_connector = intel_dp->attached_connector;
4216         struct drm_connector *connector = &intel_connector->base;
4217         int hdmi_throughput = connector->display_info.hdmi.dsc_cap.clk_per_slice;
4218         int hdmi_max_slices = connector->display_info.hdmi.dsc_cap.max_slices;
4219         int pcon_max_slices = drm_dp_pcon_dsc_max_slices(intel_dp->pcon_dsc_dpcd);
4220         int pcon_max_slice_width = drm_dp_pcon_dsc_max_slice_width(intel_dp->pcon_dsc_dpcd);
4221
4222         return intel_hdmi_dsc_get_num_slices(crtc_state, pcon_max_slices,
4223                                              pcon_max_slice_width,
4224                                              hdmi_max_slices, hdmi_throughput);
4225 }
4226
4227 static int
4228 intel_dp_pcon_dsc_enc_bpp(struct intel_dp *intel_dp,
4229                           const struct intel_crtc_state *crtc_state,
4230                           int num_slices, int slice_width)
4231 {
4232         struct intel_connector *intel_connector = intel_dp->attached_connector;
4233         struct drm_connector *connector = &intel_connector->base;
4234         int output_format = crtc_state->output_format;
4235         bool hdmi_all_bpp = connector->display_info.hdmi.dsc_cap.all_bpp;
4236         int pcon_fractional_bpp = drm_dp_pcon_dsc_bpp_incr(intel_dp->pcon_dsc_dpcd);
4237         int hdmi_max_chunk_bytes =
4238                 connector->display_info.hdmi.dsc_cap.total_chunk_kbytes * 1024;
4239
4240         return intel_hdmi_dsc_get_bpp(pcon_fractional_bpp, slice_width,
4241                                       num_slices, output_format, hdmi_all_bpp,
4242                                       hdmi_max_chunk_bytes);
4243 }
4244
/*
 * intel_dp_pcon_dsc_configure - push DSC PPS overrides to the PCON
 * @intel_dp: DP port driving the PCON
 * @crtc_state: CRTC state providing the mode to compress
 *
 * For an HDMI 2.1 PCON with a DSC 1.2 capable sink, compute slice
 * height/width/count and bits-per-pixel and program them into the PCON
 * as PPS override parameters. Returns silently if any prerequisite is
 * missing or any computed parameter is zero; an AUX write failure is
 * only logged.
 */
void
intel_dp_pcon_dsc_configure(struct intel_dp *intel_dp,
			    const struct intel_crtc_state *crtc_state)
{
	u8 pps_param[6];
	int slice_height;
	int slice_width;
	int num_slices;
	int bits_per_pixel;
	int ret;
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct drm_connector *connector;
	bool hdmi_is_dsc_1_2;

	if (!intel_dp_is_hdmi_2_1_sink(intel_dp))
		return;

	if (!intel_connector)
		return;
	connector = &intel_connector->base;
	hdmi_is_dsc_1_2 = connector->display_info.hdmi.dsc_cap.v_1p2;

	/* Both the PCON encoder and the HDMI sink must support DSC 1.2. */
	if (!drm_dp_pcon_enc_is_dsc_1_2(intel_dp->pcon_dsc_dpcd) ||
	    !hdmi_is_dsc_1_2)
		return;

	slice_height = intel_dp_pcon_dsc_enc_slice_height(crtc_state);
	if (!slice_height)
		return;

	num_slices = intel_dp_pcon_dsc_enc_slices(intel_dp, crtc_state);
	if (!num_slices)
		return;

	slice_width = DIV_ROUND_UP(crtc_state->hw.adjusted_mode.hdisplay,
				   num_slices);

	bits_per_pixel = intel_dp_pcon_dsc_enc_bpp(intel_dp, crtc_state,
						   num_slices, slice_width);
	if (!bits_per_pixel)
		return;

	/*
	 * Pack little-endian: 16-bit slice height, 16-bit slice width,
	 * 10-bit bits-per-pixel (high bits masked to 2).
	 */
	pps_param[0] = slice_height & 0xFF;
	pps_param[1] = slice_height >> 8;
	pps_param[2] = slice_width & 0xFF;
	pps_param[3] = slice_width >> 8;
	pps_param[4] = bits_per_pixel & 0xFF;
	pps_param[5] = (bits_per_pixel >> 8) & 0x3;

	ret = drm_dp_pcon_pps_override_param(&intel_dp->aux, pps_param);
	if (ret < 0)
		drm_dbg_kms(&i915->drm, "Failed to set pcon DSC\n");
}
4299
4300 static void
4301 g4x_set_link_train(struct intel_dp *intel_dp,
4302                    const struct intel_crtc_state *crtc_state,
4303                    u8 dp_train_pat)
4304 {
4305         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
4306         u32 *DP = &intel_dp->DP;
4307
4308         *DP &= ~DP_LINK_TRAIN_MASK;
4309
4310         switch (intel_dp_training_pattern_symbol(dp_train_pat)) {
4311         case DP_TRAINING_PATTERN_DISABLE:
4312                 *DP |= DP_LINK_TRAIN_OFF;
4313                 break;
4314         case DP_TRAINING_PATTERN_1:
4315                 *DP |= DP_LINK_TRAIN_PAT_1;
4316                 break;
4317         case DP_TRAINING_PATTERN_2:
4318                 *DP |= DP_LINK_TRAIN_PAT_2;
4319                 break;
4320         case DP_TRAINING_PATTERN_3:
4321                 drm_dbg_kms(&dev_priv->drm,
4322                             "TPS3 not supported, using TPS2 instead\n");
4323                 *DP |= DP_LINK_TRAIN_PAT_2;
4324                 break;
4325         }
4326
4327         intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
4328         intel_de_posting_read(dev_priv, intel_dp->output_reg);
4329 }
4330
/*
 * Enable the DP port with training pattern 1 selected, following the
 * VLV/CHV-safe double-write sequence described below.
 */
static void intel_dp_enable_port(struct intel_dp *intel_dp,
				 const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	/* enable with pattern 1 (as per spec) */

	intel_dp_program_link_training_pattern(intel_dp, crtc_state,
					       DP_TRAINING_PATTERN_1);

	/*
	 * Magic for VLV/CHV. We _must_ first set up the register
	 * without actually enabling the port, and then do another
	 * write to enable the port. Otherwise link training will
	 * fail when the power sequencer is freshly used for this port.
	 */
	intel_dp->DP |= DP_PORT_EN;
	/* Audio enable lives in the same port register on these platforms. */
	if (crtc_state->has_audio)
		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;

	intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);
}
4354
/*
 * intel_dp_configure_protocol_converter - program a DP branch device (PCON)
 * @intel_dp: DP port driving the branch device
 * @crtc_state: CRTC state, used to pick the RGB->YCbCr colorimetry
 *
 * Configure a DPCD rev 1.3+ branch device's HDMI/DVI output mode,
 * YCbCr 4:4:4 -> 4:2:0 conversion, and RGB -> YCbCr conversion via the
 * DP_PROTOCOL_CONVERTER_CONTROL_* registers. Write failures are only
 * logged.
 */
void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp,
					   const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 tmp;

	/* The protocol converter registers exist from DPCD rev 1.3 onwards. */
	if (intel_dp->dpcd[DP_DPCD_REV] < 0x13)
		return;

	if (!drm_dp_is_branch(intel_dp->dpcd))
		return;

	tmp = intel_dp->has_hdmi_sink ?
		DP_HDMI_DVI_OUTPUT_CONFIG : 0;

	if (drm_dp_dpcd_writeb(&intel_dp->aux,
			       DP_PROTOCOL_CONVERTER_CONTROL_0, tmp) != 1)
		drm_dbg_kms(&i915->drm, "Failed to set protocol converter HDMI mode to %s\n",
			    enableddisabled(intel_dp->has_hdmi_sink));

	tmp = intel_dp->dfp.ycbcr_444_to_420 ?
		DP_CONVERSION_TO_YCBCR420_ENABLE : 0;

	if (drm_dp_dpcd_writeb(&intel_dp->aux,
			       DP_PROTOCOL_CONVERTER_CONTROL_1, tmp) != 1)
		drm_dbg_kms(&i915->drm,
			    "Failed to set protocol converter YCbCr 4:2:0 conversion mode to %s\n",
			    enableddisabled(intel_dp->dfp.ycbcr_444_to_420));

	tmp = 0;
	if (intel_dp->dfp.rgb_to_ycbcr) {
		bool bt2020, bt709;

		/*
		 * FIXME: Currently if userspace selects BT2020 or BT709, but PCON supports only
		 * RGB->YCbCr for BT601 colorspace, we go ahead with BT601, as default.
		 *
		 */
		tmp = DP_CONVERSION_BT601_RGB_YCBCR_ENABLE;

		bt2020 = drm_dp_downstream_rgb_to_ycbcr_conversion(intel_dp->dpcd,
								   intel_dp->downstream_ports,
								   DP_DS_HDMI_BT2020_RGB_YCBCR_CONV);
		bt709 = drm_dp_downstream_rgb_to_ycbcr_conversion(intel_dp->dpcd,
								  intel_dp->downstream_ports,
								  DP_DS_HDMI_BT709_RGB_YCBCR_CONV);
		/* Match the VSC SDP colorimetry to a conversion the PCON supports. */
		switch (crtc_state->infoframes.vsc.colorimetry) {
		case DP_COLORIMETRY_BT2020_RGB:
		case DP_COLORIMETRY_BT2020_YCC:
			if (bt2020)
				tmp = DP_CONVERSION_BT2020_RGB_YCBCR_ENABLE;
			break;
		case DP_COLORIMETRY_BT709_YCC:
		case DP_COLORIMETRY_XVYCC_709:
			if (bt709)
				tmp = DP_CONVERSION_BT709_RGB_YCBCR_ENABLE;
			break;
		default:
			break;
		}
	}

	if (drm_dp_pcon_convert_rgb_to_ycbcr(&intel_dp->aux, tmp) < 0)
		drm_dbg_kms(&i915->drm,
			   "Failed to set protocol converter RGB->YCbCr conversion mode to %s\n",
			   enableddisabled(tmp ? true : false));
}
4422
/*
 * Common DP enable path: bring up the port and panel under the PPS
 * lock, wake the sink, configure any PCON, train the link, then enable
 * audio. Bails out with a WARN if the port is already enabled.
 */
static void intel_enable_dp(struct intel_atomic_state *state,
			    struct intel_encoder *encoder,
			    const struct intel_crtc_state *pipe_config,
			    const struct drm_connector_state *conn_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
	u32 dp_reg = intel_de_read(dev_priv, intel_dp->output_reg);
	enum pipe pipe = crtc->pipe;
	intel_wakeref_t wakeref;

	if (drm_WARN_ON(&dev_priv->drm, dp_reg & DP_PORT_EN))
		return;

	with_pps_lock(intel_dp, wakeref) {
		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
			vlv_init_panel_power_sequencer(encoder, pipe_config);

		intel_dp_enable_port(intel_dp, pipe_config);

		/* Hold VDD across panel power-on, then release it. */
		edp_panel_vdd_on(intel_dp);
		edp_panel_on(intel_dp);
		edp_panel_vdd_off(intel_dp, true);
	}

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		unsigned int lane_mask = 0x0;

		/* CHV additionally reports which lanes this config leaves unused. */
		if (IS_CHERRYVIEW(dev_priv))
			lane_mask = intel_dp_unused_lane_mask(pipe_config->lane_count);

		vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
				    lane_mask);
	}

	/* Wake the sink, set up the protocol converter, then train the link. */
	intel_dp_set_power(intel_dp, DP_SET_POWER_D0);
	intel_dp_configure_protocol_converter(intel_dp, pipe_config);
	intel_dp_check_frl_training(intel_dp);
	intel_dp_pcon_dsc_configure(intel_dp, pipe_config);
	intel_dp_start_link_train(intel_dp, pipe_config);
	intel_dp_stop_link_train(intel_dp, pipe_config);

	if (pipe_config->has_audio) {
		drm_dbg(&dev_priv->drm, "Enabling DP audio on pipe %c\n",
			pipe_name(pipe));
		intel_audio_codec_enable(encoder, pipe_config, conn_state);
	}
}
4472
/* g4x enable hook: bring up the port/link, then the eDP backlight. */
static void g4x_enable_dp(struct intel_atomic_state *state,
			  struct intel_encoder *encoder,
			  const struct intel_crtc_state *pipe_config,
			  const struct drm_connector_state *conn_state)
{
	intel_enable_dp(state, encoder, pipe_config, conn_state);
	intel_edp_backlight_on(pipe_config, conn_state);
}
4481
/*
 * VLV enable hook: the port was already enabled from the pre_enable
 * hook (vlv_pre_enable_dp), so only the backlight remains.
 */
static void vlv_enable_dp(struct intel_atomic_state *state,
			  struct intel_encoder *encoder,
			  const struct intel_crtc_state *pipe_config,
			  const struct drm_connector_state *conn_state)
{
	intel_edp_backlight_on(pipe_config, conn_state);
}
4489
/* g4x pre-enable: prepare the port state and start the eDP PLL (port A). */
static void g4x_pre_enable_dp(struct intel_atomic_state *state,
			      struct intel_encoder *encoder,
			      const struct intel_crtc_state *pipe_config,
			      const struct drm_connector_state *conn_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	enum port port = encoder->port;

	intel_dp_prepare(encoder, pipe_config);

	/* Only ilk+ has port A */
	if (port == PORT_A)
		ilk_edp_pll_on(intel_dp, pipe_config);
}
4504
/*
 * Detach the power sequencer currently assigned to this port: flush any
 * pending VDD off, clear the PPS port select, and invalidate pps_pipe.
 */
static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	enum pipe pipe = intel_dp->pps_pipe;
	i915_reg_t pp_on_reg = PP_ON_DELAYS(pipe);

	/* Detaching while the port is still driving a pipe would be a bug. */
	drm_WARN_ON(&dev_priv->drm, intel_dp->active_pipe != INVALID_PIPE);

	if (drm_WARN_ON(&dev_priv->drm, pipe != PIPE_A && pipe != PIPE_B))
		return;

	edp_panel_vdd_off_sync(intel_dp);

	/*
	 * VLV seems to get confused when multiple power sequencers
	 * have the same port selected (even if only one has power/vdd
	 * enabled). The failure manifests as vlv_wait_port_ready() failing
	 * CHV on the other hand doesn't seem to mind having the same port
	 * selected in multiple power sequencers, but let's clear the
	 * port select always when logically disconnecting a power sequencer
	 * from a port.
	 */
	drm_dbg_kms(&dev_priv->drm,
		    "detaching pipe %c power sequencer from [ENCODER:%d:%s]\n",
		    pipe_name(pipe), dig_port->base.base.base.id,
		    dig_port->base.base.name);
	intel_de_write(dev_priv, pp_on_reg, 0);
	intel_de_posting_read(dev_priv, pp_on_reg);

	intel_dp->pps_pipe = INVALID_PIPE;
}
4537
/*
 * Make sure no other DP port still claims the given pipe's power
 * sequencer; any port found holding it is detached (with VDD off first).
 */
static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	struct intel_encoder *encoder;

	lockdep_assert_held(&dev_priv->pps_mutex);

	for_each_intel_dp(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		/* Stealing from a port actively driving this pipe is a bug. */
		drm_WARN(&dev_priv->drm, intel_dp->active_pipe == pipe,
			 "stealing pipe %c power sequencer from active [ENCODER:%d:%s]\n",
			 pipe_name(pipe), encoder->base.base.id,
			 encoder->base.name);

		if (intel_dp->pps_pipe != pipe)
			continue;

		drm_dbg_kms(&dev_priv->drm,
			    "stealing pipe %c power sequencer from [ENCODER:%d:%s]\n",
			    pipe_name(pipe), encoder->base.base.id,
			    encoder->base.name);

		/* make sure vdd is off before we steal it */
		vlv_detach_power_sequencer(intel_dp);
	}
}
4565
/*
 * Claim this CRTC's pipe as the port's power sequencer (eDP only) and
 * initialize the PPS registers for it, after detaching/stealing the
 * sequencer from any port that may still hold it.
 */
static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
					   const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	lockdep_assert_held(&dev_priv->pps_mutex);

	drm_WARN_ON(&dev_priv->drm, intel_dp->active_pipe != INVALID_PIPE);

	if (intel_dp->pps_pipe != INVALID_PIPE &&
	    intel_dp->pps_pipe != crtc->pipe) {
		/*
		 * If another power sequencer was being used on this
		 * port previously make sure to turn off vdd there while
		 * we still have control of it.
		 */
		vlv_detach_power_sequencer(intel_dp);
	}

	/*
	 * We may be stealing the power
	 * sequencer from another port.
	 */
	vlv_steal_power_sequencer(dev_priv, crtc->pipe);

	intel_dp->active_pipe = crtc->pipe;

	/* Non-eDP ports don't need a power sequencer assignment. */
	if (!intel_dp_is_edp(intel_dp))
		return;

	/* now it's all ours */
	intel_dp->pps_pipe = crtc->pipe;

	drm_dbg_kms(&dev_priv->drm,
		    "initializing pipe %c power sequencer for [ENCODER:%d:%s]\n",
		    pipe_name(intel_dp->pps_pipe), encoder->base.base.id,
		    encoder->base.name);

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(intel_dp, true);
}
4610
/* VLV encoder ->pre_enable hook: program the PHY, then light up the port. */
static void vlv_pre_enable_dp(struct intel_atomic_state *state,
			      struct intel_encoder *encoder,
			      const struct intel_crtc_state *pipe_config,
			      const struct drm_connector_state *conn_state)
{
	vlv_phy_pre_encoder_enable(encoder, pipe_config);

	intel_enable_dp(state, encoder, pipe_config, conn_state);
}
4620
/* VLV ->pre_pll_enable hook: set up the DP port registers before the PLL. */
static void vlv_dp_pre_pll_enable(struct intel_atomic_state *state,
				  struct intel_encoder *encoder,
				  const struct intel_crtc_state *pipe_config,
				  const struct drm_connector_state *conn_state)
{
	intel_dp_prepare(encoder, pipe_config);

	vlv_phy_pre_pll_enable(encoder, pipe_config);
}
4630
/*
 * CHV encoder ->pre_enable hook: program the PHY, enable the port, and
 * release the CL2 common-lane override once the lanes are self-sustaining.
 */
static void chv_pre_enable_dp(struct intel_atomic_state *state,
			      struct intel_encoder *encoder,
			      const struct intel_crtc_state *pipe_config,
			      const struct drm_connector_state *conn_state)
{
	chv_phy_pre_encoder_enable(encoder, pipe_config);

	intel_enable_dp(state, encoder, pipe_config, conn_state);

	/* Second common lane will stay alive on its own now */
	chv_phy_release_cl2_override(encoder);
}
4643
/* CHV ->pre_pll_enable hook: set up the DP port registers before the PLL. */
static void chv_dp_pre_pll_enable(struct intel_atomic_state *state,
				  struct intel_encoder *encoder,
				  const struct intel_crtc_state *pipe_config,
				  const struct drm_connector_state *conn_state)
{
	intel_dp_prepare(encoder, pipe_config);

	chv_phy_pre_pll_enable(encoder, pipe_config);
}
4653
/* CHV ->post_pll_disable hook: power down the PHY after the PLL is off. */
static void chv_dp_post_pll_disable(struct intel_atomic_state *state,
				    struct intel_encoder *encoder,
				    const struct intel_crtc_state *old_crtc_state,
				    const struct drm_connector_state *old_conn_state)
{
	chv_phy_post_pll_disable(encoder, old_crtc_state);
}
4661
/* ->voltage_max vfunc: platform caps out at voltage swing level 2. */
static u8 intel_dp_voltage_max_2(struct intel_dp *intel_dp,
				 const struct intel_crtc_state *crtc_state)
{
	return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
}
4667
/* ->voltage_max vfunc: platform supports the full swing range (level 3). */
static u8 intel_dp_voltage_max_3(struct intel_dp *intel_dp,
				 const struct intel_crtc_state *crtc_state)
{
	return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
}
4673
/* ->preemph_max vfunc: platform caps out at pre-emphasis level 2. */
static u8 intel_dp_preemph_max_2(struct intel_dp *intel_dp)
{
	return DP_TRAIN_PRE_EMPH_LEVEL_2;
}
4678
/* ->preemph_max vfunc: platform supports the full pre-emphasis range (level 3). */
static u8 intel_dp_preemph_max_3(struct intel_dp *intel_dp)
{
	return DP_TRAIN_PRE_EMPH_LEVEL_3;
}
4683
/*
 * VLV ->set_signal_levels vfunc: translate the DPCD-requested vswing /
 * pre-emphasis combination from train_set[0] into the three tuned PHY
 * register values and hand them to the sideband PHY code. The hex
 * constants are hardware-validated tuning values - do not derive them.
 * Combinations outside the DP-spec matrix fall through to the bare
 * "return" and leave the PHY untouched.
 */
static void vlv_set_signal_levels(struct intel_dp *intel_dp,
				  const struct intel_crtc_state *crtc_state)
{
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	unsigned long demph_reg_value, preemph_reg_value,
		uniqtranscale_reg_value;
	u8 train_set = intel_dp->train_set[0];

	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
		preemph_reg_value = 0x0004000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x552AB83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5548B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			demph_reg_value = 0x2B245555;
			uniqtranscale_reg_value = 0x5560B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x5598DA3A;
			break;
		default:
			return;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		preemph_reg_value = 0x0002000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5552B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B404848;
			uniqtranscale_reg_value = 0x5580B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		preemph_reg_value = 0x0000000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B305555;
			uniqtranscale_reg_value = 0x5570B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B2B4040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
		preemph_reg_value = 0x0006000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x1B405555;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return;
		}
		break;
	default:
		return;
	}

	vlv_set_phy_signal_level(encoder, crtc_state,
				 demph_reg_value, preemph_reg_value,
				 uniqtranscale_reg_value, 0);
}
4769
/*
 * CHV ->set_signal_levels vfunc: map the requested vswing/pre-emphasis
 * from train_set[0] to the tuned de-emphasis and margin register values
 * for the CHV PHY. The max-swing/no-preemph entry additionally enables
 * the unique transition scale. Unsupported combinations return without
 * touching the PHY.
 */
static void chv_set_signal_levels(struct intel_dp *intel_dp,
				  const struct intel_crtc_state *crtc_state)
{
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	u32 deemph_reg_value, margin_reg_value;
	bool uniq_trans_scale = false;
	u8 train_set = intel_dp->train_set[0];

	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 128;
			margin_reg_value = 52;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 128;
			margin_reg_value = 77;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			deemph_reg_value = 128;
			margin_reg_value = 102;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			deemph_reg_value = 128;
			margin_reg_value = 154;
			uniq_trans_scale = true;
			break;
		default:
			return;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 85;
			margin_reg_value = 78;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 85;
			margin_reg_value = 116;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			deemph_reg_value = 85;
			margin_reg_value = 154;
			break;
		default:
			return;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 64;
			margin_reg_value = 104;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 64;
			margin_reg_value = 154;
			break;
		default:
			return;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 43;
			margin_reg_value = 154;
			break;
		default:
			return;
		}
		break;
	default:
		return;
	}

	chv_set_phy_signal_level(encoder, crtc_state,
				 deemph_reg_value, margin_reg_value,
				 uniq_trans_scale);
}
4852
4853 static u32 g4x_signal_levels(u8 train_set)
4854 {
4855         u32 signal_levels = 0;
4856
4857         switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
4858         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
4859         default:
4860                 signal_levels |= DP_VOLTAGE_0_4;
4861                 break;
4862         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
4863                 signal_levels |= DP_VOLTAGE_0_6;
4864                 break;
4865         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
4866                 signal_levels |= DP_VOLTAGE_0_8;
4867                 break;
4868         case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
4869                 signal_levels |= DP_VOLTAGE_1_2;
4870                 break;
4871         }
4872         switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
4873         case DP_TRAIN_PRE_EMPH_LEVEL_0:
4874         default:
4875                 signal_levels |= DP_PRE_EMPHASIS_0;
4876                 break;
4877         case DP_TRAIN_PRE_EMPH_LEVEL_1:
4878                 signal_levels |= DP_PRE_EMPHASIS_3_5;
4879                 break;
4880         case DP_TRAIN_PRE_EMPH_LEVEL_2:
4881                 signal_levels |= DP_PRE_EMPHASIS_6;
4882                 break;
4883         case DP_TRAIN_PRE_EMPH_LEVEL_3:
4884                 signal_levels |= DP_PRE_EMPHASIS_9_5;
4885                 break;
4886         }
4887         return signal_levels;
4888 }
4889
4890 static void
4891 g4x_set_signal_levels(struct intel_dp *intel_dp,
4892                       const struct intel_crtc_state *crtc_state)
4893 {
4894         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
4895         u8 train_set = intel_dp->train_set[0];
4896         u32 signal_levels;
4897
4898         signal_levels = g4x_signal_levels(train_set);
4899
4900         drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n",
4901                     signal_levels);
4902
4903         intel_dp->DP &= ~(DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK);
4904         intel_dp->DP |= signal_levels;
4905
4906         intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
4907         intel_de_posting_read(dev_priv, intel_dp->output_reg);
4908 }
4909
4910 /* SNB CPU eDP voltage swing and pre-emphasis control */
4911 static u32 snb_cpu_edp_signal_levels(u8 train_set)
4912 {
4913         u8 signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
4914                                         DP_TRAIN_PRE_EMPHASIS_MASK);
4915
4916         switch (signal_levels) {
4917         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
4918         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
4919                 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
4920         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
4921                 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
4922         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
4923         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
4924                 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
4925         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
4926         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
4927                 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
4928         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
4929         case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
4930                 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
4931         default:
4932                 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
4933                               "0x%x\n", signal_levels);
4934                 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
4935         }
4936 }
4937
4938 static void
4939 snb_cpu_edp_set_signal_levels(struct intel_dp *intel_dp,
4940                               const struct intel_crtc_state *crtc_state)
4941 {
4942         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
4943         u8 train_set = intel_dp->train_set[0];
4944         u32 signal_levels;
4945
4946         signal_levels = snb_cpu_edp_signal_levels(train_set);
4947
4948         drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n",
4949                     signal_levels);
4950
4951         intel_dp->DP &= ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
4952         intel_dp->DP |= signal_levels;
4953
4954         intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
4955         intel_de_posting_read(dev_priv, intel_dp->output_reg);
4956 }
4957
4958 /* IVB CPU eDP voltage swing and pre-emphasis control */
4959 static u32 ivb_cpu_edp_signal_levels(u8 train_set)
4960 {
4961         u8 signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
4962                                         DP_TRAIN_PRE_EMPHASIS_MASK);
4963
4964         switch (signal_levels) {
4965         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
4966                 return EDP_LINK_TRAIN_400MV_0DB_IVB;
4967         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
4968                 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
4969         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
4970         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
4971                 return EDP_LINK_TRAIN_400MV_6DB_IVB;
4972
4973         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
4974                 return EDP_LINK_TRAIN_600MV_0DB_IVB;
4975         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
4976                 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
4977
4978         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
4979                 return EDP_LINK_TRAIN_800MV_0DB_IVB;
4980         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
4981                 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
4982
4983         default:
4984                 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
4985                               "0x%x\n", signal_levels);
4986                 return EDP_LINK_TRAIN_500MV_0DB_IVB;
4987         }
4988 }
4989
4990 static void
4991 ivb_cpu_edp_set_signal_levels(struct intel_dp *intel_dp,
4992                               const struct intel_crtc_state *crtc_state)
4993 {
4994         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
4995         u8 train_set = intel_dp->train_set[0];
4996         u32 signal_levels;
4997
4998         signal_levels = ivb_cpu_edp_signal_levels(train_set);
4999
5000         drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n",
5001                     signal_levels);
5002
5003         intel_dp->DP &= ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
5004         intel_dp->DP |= signal_levels;
5005
5006         intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
5007         intel_de_posting_read(dev_priv, intel_dp->output_reg);
5008 }
5009
/*
 * Apply the vswing/pre-emphasis levels currently in train_set[0] to the
 * hardware via the platform-specific ->set_signal_levels vfunc, logging
 * the requested levels (and whether the sink flagged them as maxed out).
 */
void intel_dp_set_signal_levels(struct intel_dp *intel_dp,
				const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u8 train_set = intel_dp->train_set[0];

	drm_dbg_kms(&dev_priv->drm, "Using vswing level %d%s\n",
		    train_set & DP_TRAIN_VOLTAGE_SWING_MASK,
		    train_set & DP_TRAIN_MAX_SWING_REACHED ? " (max)" : "");
	drm_dbg_kms(&dev_priv->drm, "Using pre-emphasis level %d%s\n",
		    (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
		    DP_TRAIN_PRE_EMPHASIS_SHIFT,
		    train_set & DP_TRAIN_MAX_PRE_EMPHASIS_REACHED ?
		    " (max)" : "");

	intel_dp->set_signal_levels(intel_dp, crtc_state);
}
5027
/*
 * Program @dp_train_pat (DPCD training pattern + scrambling bits) into
 * the source via the platform ->set_link_train vfunc, logging the TPS
 * number unless training is being disabled.
 */
void
intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
				       const struct intel_crtc_state *crtc_state,
				       u8 dp_train_pat)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if ((intel_dp_training_pattern_symbol(dp_train_pat)) !=
	    DP_TRAINING_PATTERN_DISABLE)
		drm_dbg_kms(&dev_priv->drm,
			    "Using DP training pattern TPS%d\n",
			    intel_dp_training_pattern_symbol(dp_train_pat));

	intel_dp->set_link_train(intel_dp, crtc_state, dp_train_pat);
}
5043
/*
 * Take the DP link down: put the port into the idle training pattern,
 * then disable it. Also applies the IBX transcoder-A workaround and,
 * on VLV/CHV, marks the encoder's active pipe invalid. The exact
 * register write/posting-read ordering is part of the hardware contract.
 */
static void
intel_dp_link_down(struct intel_encoder *encoder,
		   const struct intel_crtc_state *old_crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	enum port port = encoder->port;
	u32 DP = intel_dp->DP;

	/* Nothing to do if the port is already off. */
	if (drm_WARN_ON(&dev_priv->drm,
			(intel_de_read(dev_priv, intel_dp->output_reg) &
			 DP_PORT_EN) == 0))
		return;

	drm_dbg_kms(&dev_priv->drm, "\n");

	/* Select the idle pattern; CPT-style PCH ports use different bits. */
	if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
	    (HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
		DP &= ~DP_LINK_TRAIN_MASK_CPT;
		DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
	} else {
		DP &= ~DP_LINK_TRAIN_MASK;
		DP |= DP_LINK_TRAIN_PAT_IDLE;
	}
	intel_de_write(dev_priv, intel_dp->output_reg, DP);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);

	DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
	intel_de_write(dev_priv, intel_dp->output_reg, DP);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);

	/*
	 * HW workaround for IBX, we need to move the port
	 * to transcoder A after disabling it to allow the
	 * matching HDMI port to be enabled on transcoder A.
	 */
	if (HAS_PCH_IBX(dev_priv) && crtc->pipe == PIPE_B && port != PORT_A) {
		/*
		 * We get CPU/PCH FIFO underruns on the other pipe when
		 * doing the workaround. Sweep them under the rug.
		 */
		intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
		intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);

		/* always enable with pattern 1 (as per spec) */
		DP &= ~(DP_PIPE_SEL_MASK | DP_LINK_TRAIN_MASK);
		DP |= DP_PORT_EN | DP_PIPE_SEL(PIPE_A) |
			DP_LINK_TRAIN_PAT_1;
		intel_de_write(dev_priv, intel_dp->output_reg, DP);
		intel_de_posting_read(dev_priv, intel_dp->output_reg);

		DP &= ~DP_PORT_EN;
		intel_de_write(dev_priv, intel_dp->output_reg, DP);
		intel_de_posting_read(dev_priv, intel_dp->output_reg);

		/* Wait out a frame on pipe A before re-enabling underruns. */
		intel_wait_for_vblank_if_active(dev_priv, PIPE_A);
		intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
		intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
	}

	/* Respect the panel's required power-down delay. */
	msleep(intel_dp->panel_power_down_delay);

	intel_dp->DP = DP;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		intel_wakeref_t wakeref;

		with_pps_lock(intel_dp, wakeref)
			intel_dp->active_pipe = INVALID_PIPE;
	}
}
5116
5117 bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
5118 {
5119         u8 dprx = 0;
5120
5121         if (drm_dp_dpcd_readb(&intel_dp->aux, DP_DPRX_FEATURE_ENUMERATION_LIST,
5122                               &dprx) != 1)
5123                 return false;
5124         return dprx & DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED;
5125 }
5126
/*
 * Refresh the cached DSC and FEC sink capabilities from DPCD. Sinks
 * older than DP 1.4 (and non-1.4 eDP) get zeroed caches instead of a
 * read. AUX errors are logged but leave the zeroed defaults in place.
 */
static void intel_dp_get_dsc_sink_cap(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	/*
	 * Clear the cached register set to avoid using stale values
	 * for the sinks that do not support DSC.
	 */
	memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd));

	/* Clear fec_capable to avoid using stale values */
	intel_dp->fec_capable = 0;

	/* Cache the DSC DPCD if eDP or DP rev >= 1.4 */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x14 ||
	    intel_dp->edp_dpcd[0] >= DP_EDP_14) {
		if (drm_dp_dpcd_read(&intel_dp->aux, DP_DSC_SUPPORT,
				     intel_dp->dsc_dpcd,
				     sizeof(intel_dp->dsc_dpcd)) < 0)
			drm_err(&i915->drm,
				"Failed to read DPCD register 0x%x\n",
				DP_DSC_SUPPORT);

		drm_dbg_kms(&i915->drm, "DSC DPCD: %*ph\n",
			    (int)sizeof(intel_dp->dsc_dpcd),
			    intel_dp->dsc_dpcd);

		/* FEC is supported only on DP 1.4 */
		if (!intel_dp_is_edp(intel_dp) &&
		    drm_dp_dpcd_readb(&intel_dp->aux, DP_FEC_CAPABILITY,
				      &intel_dp->fec_capable) < 0)
			drm_err(&i915->drm,
				"Failed to read FEC DPCD register\n");

		drm_dbg_kms(&i915->drm, "FEC CAPABILITY: %x\n",
			    intel_dp->fec_capable);
	}
}
5165
/*
 * One-time DPCD initialization for eDP panels: read the base caps and
 * branch descriptor, the eDP display-control registers, PSR caps, the
 * eDP 1.4+ link-rate table, DSC caps (gen10+/GLK), and program our
 * source OUI. Returns false if the base DPCD read fails.
 */
static bool
intel_edp_init_dpcd(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv =
		to_i915(dp_to_dig_port(intel_dp)->base.base.dev);

	/* this function is meant to be called only once */
	drm_WARN_ON(&dev_priv->drm, intel_dp->dpcd[DP_DPCD_REV] != 0);

	if (drm_dp_read_dpcd_caps(&intel_dp->aux, intel_dp->dpcd) != 0)
		return false;

	drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
			 drm_dp_is_branch(intel_dp->dpcd));

	/*
	 * Read the eDP display control registers.
	 *
	 * Do this independent of DP_DPCD_DISPLAY_CONTROL_CAPABLE bit in
	 * DP_EDP_CONFIGURATION_CAP, because some buggy displays do not have it
	 * set, but require eDP 1.4+ detection (e.g. for supported link rates
	 * method). The display control registers should read zero if they're
	 * not supported anyway.
	 */
	if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
			     intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
			     sizeof(intel_dp->edp_dpcd))
		drm_dbg_kms(&dev_priv->drm, "eDP DPCD: %*ph\n",
			    (int)sizeof(intel_dp->edp_dpcd),
			    intel_dp->edp_dpcd);

	/*
	 * This has to be called after intel_dp->edp_dpcd is filled, PSR checks
	 * for SET_POWER_CAPABLE bit in intel_dp->edp_dpcd[1]
	 */
	intel_psr_init_dpcd(intel_dp);

	/* Read the eDP 1.4+ supported link rates. */
	if (intel_dp->edp_dpcd[0] >= DP_EDP_14) {
		__le16 sink_rates[DP_MAX_SUPPORTED_RATES];
		int i;

		drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES,
				sink_rates, sizeof(sink_rates));

		/* The table is zero-terminated; a zero entry ends it. */
		for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
			int val = le16_to_cpu(sink_rates[i]);

			if (val == 0)
				break;

			/* Value read multiplied by 200kHz gives the per-lane
			 * link rate in kHz. The source rates are, however,
			 * stored in terms of LS_Clk kHz. The full conversion
			 * back to symbols is
			 * (val * 200kHz)*(8/10 ch. encoding)*(1/8 bit to Byte)
			 */
			intel_dp->sink_rates[i] = (val * 200) / 10;
		}
		intel_dp->num_sink_rates = i;
	}

	/*
	 * Use DP_LINK_RATE_SET if DP_SUPPORTED_LINK_RATES are available,
	 * default to DP_MAX_LINK_RATE and DP_LINK_BW_SET otherwise.
	 */
	if (intel_dp->num_sink_rates)
		intel_dp->use_rate_select = true;
	else
		intel_dp_set_sink_rates(intel_dp);

	intel_dp_set_common_rates(intel_dp);

	/* Read the eDP DSC DPCD registers */
	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
		intel_dp_get_dsc_sink_cap(intel_dp);

	/*
	 * If needed, program our source OUI so we can make various Intel-specific AUX services
	 * available (such as HDR backlight controls)
	 */
	intel_edp_init_source_oui(intel_dp, true);

	return true;
}
5251
5252 static bool
5253 intel_dp_has_sink_count(struct intel_dp *intel_dp)
5254 {
5255         if (!intel_dp->attached_connector)
5256                 return false;
5257
5258         return drm_dp_read_sink_count_cap(&intel_dp->attached_connector->base,
5259                                           intel_dp->dpcd,
5260                                           &intel_dp->desc);
5261 }
5262
/*
 * Re-read the sink's DPCD state on (re)detection: LTTPR init, base caps,
 * descriptor/link rates (non-eDP only, to avoid clobbering cached eDP
 * rates), sink count, and downstream port info. Returns false when the
 * sink should be treated as disconnected (AUX failure or SINK_COUNT 0
 * behind a dongle).
 */
static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp)
{
	int ret;

	intel_dp_lttpr_init(intel_dp);

	if (drm_dp_read_dpcd_caps(&intel_dp->aux, intel_dp->dpcd))
		return false;

	/*
	 * Don't clobber cached eDP rates. Also skip re-reading
	 * the OUI/ID since we know it won't change.
	 */
	if (!intel_dp_is_edp(intel_dp)) {
		drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
				 drm_dp_is_branch(intel_dp->dpcd));

		intel_dp_set_sink_rates(intel_dp);
		intel_dp_set_common_rates(intel_dp);
	}

	if (intel_dp_has_sink_count(intel_dp)) {
		ret = drm_dp_read_sink_count(&intel_dp->aux);
		if (ret < 0)
			return false;

		/*
		 * Sink count can change between short pulse hpd hence
		 * a member variable in intel_dp will track any changes
		 * between short pulse interrupts.
		 */
		intel_dp->sink_count = ret;

		/*
		 * SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that
		 * a dongle is present but no display. Unless we require to know
		 * if a dongle is present or not, we don't need to update
		 * downstream port information. So, an early return here saves
		 * time from performing other operations which are not required.
		 */
		if (!intel_dp->sink_count)
			return false;
	}

	return drm_dp_read_downstream_info(&intel_dp->aux, intel_dp->dpcd,
					   intel_dp->downstream_ports) == 0;
}
5311
5312 static bool
5313 intel_dp_can_mst(struct intel_dp *intel_dp)
5314 {
5315         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
5316
5317         return i915->params.enable_dp_mst &&
5318                 intel_dp->can_mst &&
5319                 drm_dp_read_mst_cap(&intel_dp->aux, intel_dp->dpcd);
5320 }
5321
5322 static void
5323 intel_dp_configure_mst(struct intel_dp *intel_dp)
5324 {
5325         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
5326         struct intel_encoder *encoder =
5327                 &dp_to_dig_port(intel_dp)->base;
5328         bool sink_can_mst = drm_dp_read_mst_cap(&intel_dp->aux, intel_dp->dpcd);
5329
5330         drm_dbg_kms(&i915->drm,
5331                     "[ENCODER:%d:%s] MST support: port: %s, sink: %s, modparam: %s\n",
5332                     encoder->base.base.id, encoder->base.name,
5333                     yesno(intel_dp->can_mst), yesno(sink_can_mst),
5334                     yesno(i915->params.enable_dp_mst));
5335
5336         if (!intel_dp->can_mst)
5337                 return;
5338
5339         intel_dp->is_mst = sink_can_mst &&
5340                 i915->params.enable_dp_mst;
5341
5342         drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
5343                                         intel_dp->is_mst);
5344 }
5345
5346 static bool
5347 intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
5348 {
5349         return drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT_ESI,
5350                                 sink_irq_vector, DP_DPRX_ESI_LEN) ==
5351                 DP_DPRX_ESI_LEN;
5352 }
5353
5354 bool
5355 intel_dp_needs_vsc_sdp(const struct intel_crtc_state *crtc_state,
5356                        const struct drm_connector_state *conn_state)
5357 {
5358         /*
5359          * As per DP 1.4a spec section 2.2.4.3 [MSA Field for Indication
5360          * of Color Encoding Format and Content Color Gamut], in order to
5361          * sending YCBCR 420 or HDR BT.2020 signals we should use DP VSC SDP.
5362          */
5363         if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
5364                 return true;
5365
5366         switch (conn_state->colorspace) {
5367         case DRM_MODE_COLORIMETRY_SYCC_601:
5368         case DRM_MODE_COLORIMETRY_OPYCC_601:
5369         case DRM_MODE_COLORIMETRY_BT2020_YCC:
5370         case DRM_MODE_COLORIMETRY_BT2020_RGB:
5371         case DRM_MODE_COLORIMETRY_BT2020_CYCC:
5372                 return true;
5373         default:
5374                 break;
5375         }
5376
5377         return false;
5378 }
5379
/*
 * Pack a drm_dp_vsc_sdp into raw DP SDP wire format in @sdp.
 *
 * @vsc: source VSC SDP description
 * @sdp: destination buffer, at least sizeof(struct dp_sdp) bytes
 * @size: size of the buffer @sdp points to
 *
 * Returns the packed length (sizeof(struct dp_sdp)) on success, or
 * -ENOSPC when @size is too small.
 */
static ssize_t intel_dp_vsc_sdp_pack(const struct drm_dp_vsc_sdp *vsc,
				     struct dp_sdp *sdp, size_t size)
{
	size_t length = sizeof(struct dp_sdp);

	if (size < length)
		return -ENOSPC;

	/* Clear the whole caller-supplied buffer, not just 'length' bytes */
	memset(sdp, 0, size);

	/*
	 * Prepare VSC Header for SU as per DP 1.4a spec, Table 2-119
	 * VSC SDP Header Bytes
	 */
	sdp->sdp_header.HB0 = 0; /* Secondary-Data Packet ID = 0 */
	sdp->sdp_header.HB1 = vsc->sdp_type; /* Secondary-data Packet Type */
	sdp->sdp_header.HB2 = vsc->revision; /* Revision Number */
	sdp->sdp_header.HB3 = vsc->length; /* Number of Valid Data Bytes */

	/*
	 * Only revision 0x5 supports Pixel Encoding/Colorimetry Format as
	 * per DP 1.4a spec.
	 */
	if (vsc->revision != 0x5)
		goto out;

	/* VSC SDP Payload for DB16 through DB18 */
	/* Pixel Encoding and Colorimetry Formats  */
	sdp->db[16] = (vsc->pixelformat & 0xf) << 4; /* DB16[7:4] */
	sdp->db[16] |= vsc->colorimetry & 0xf; /* DB16[3:0] */

	/* Component bit depth encoding for DB17[3:0] */
	switch (vsc->bpc) {
	case 6:
		/* 6bpc: 0x0 */
		break;
	case 8:
		sdp->db[17] = 0x1; /* DB17[3:0] */
		break;
	case 10:
		sdp->db[17] = 0x2;
		break;
	case 12:
		sdp->db[17] = 0x3;
		break;
	case 16:
		sdp->db[17] = 0x4;
		break;
	default:
		/* Unknown bpc: leave DB17[3:0] at 0 and warn */
		MISSING_CASE(vsc->bpc);
		break;
	}
	/* Dynamic Range and Component Bit Depth */
	if (vsc->dynamic_range == DP_DYNAMIC_RANGE_CTA)
		sdp->db[17] |= 0x80;  /* DB17[7] */

	/* Content Type */
	sdp->db[18] = vsc->content_type & 0x7;

out:
	return length;
}
5441
/*
 * Pack an HDMI DRM (HDR static metadata) infoframe into a DP SDP.
 *
 * @drm_infoframe: source Dynamic Range and Mastering infoframe
 * @sdp: destination buffer, at least sizeof(struct dp_sdp) bytes
 * @size: size of the buffer @sdp points to
 *
 * The infoframe is first serialized with hdmi_drm_infoframe_pack_only()
 * and then re-wrapped in a DP SDP header. Returns the number of payload
 * bytes to send, or -ENOSPC on any size mismatch.
 */
static ssize_t
intel_dp_hdr_metadata_infoframe_sdp_pack(const struct hdmi_drm_infoframe *drm_infoframe,
					 struct dp_sdp *sdp,
					 size_t size)
{
	size_t length = sizeof(struct dp_sdp);
	const int infoframe_size = HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE;
	unsigned char buf[HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE];
	ssize_t len;

	if (size < length)
		return -ENOSPC;

	memset(sdp, 0, size);

	/* Serialize the infoframe into a scratch buffer first */
	len = hdmi_drm_infoframe_pack_only(drm_infoframe, buf, sizeof(buf));
	if (len < 0) {
		DRM_DEBUG_KMS("buffer size is smaller than hdr metadata infoframe\n");
		return -ENOSPC;
	}

	if (len != infoframe_size) {
		DRM_DEBUG_KMS("wrong static hdr metadata size\n");
		return -ENOSPC;
	}

	/*
	 * Set up the infoframe sdp packet for HDR static metadata.
	 * Prepare VSC Header for SU as per DP 1.4a spec,
	 * Table 2-100 and Table 2-101
	 */

	/* Secondary-Data Packet ID, 00h for non-Audio INFOFRAME */
	sdp->sdp_header.HB0 = 0;
	/*
	 * Packet Type 80h + Non-audio INFOFRAME Type value
	 * HDMI_INFOFRAME_TYPE_DRM: 0x87
	 * - 80h + Non-audio INFOFRAME Type value
	 * - InfoFrame Type: 0x07
	 *    [CTA-861-G Table-42 Dynamic Range and Mastering InfoFrame]
	 */
	sdp->sdp_header.HB1 = drm_infoframe->type;
	/*
	 * Least Significant Eight Bits of (Data Byte Count – 1)
	 * infoframe_size - 1
	 */
	sdp->sdp_header.HB2 = 0x1D;
	/* INFOFRAME SDP Version Number */
	sdp->sdp_header.HB3 = (0x13 << 2);
	/* CTA Header Byte 2 (INFOFRAME Version Number) */
	sdp->db[0] = drm_infoframe->version;
	/* CTA Header Byte 3 (Length of INFOFRAME): HDMI_DRM_INFOFRAME_SIZE */
	sdp->db[1] = drm_infoframe->length;
	/*
	 * Copy HDMI_DRM_INFOFRAME_SIZE size from a buffer after
	 * HDMI_INFOFRAME_HEADER_SIZE
	 */
	BUILD_BUG_ON(sizeof(sdp->db) < HDMI_DRM_INFOFRAME_SIZE + 2);
	memcpy(&sdp->db[2], &buf[HDMI_INFOFRAME_HEADER_SIZE],
	       HDMI_DRM_INFOFRAME_SIZE);

	/*
	 * Size of DP infoframe sdp packet for HDR static metadata consists of
	 * - DP SDP Header(struct dp_sdp_header): 4 bytes
	 * - Two Data Blocks: 2 bytes
	 *    CTA Header Byte2 (INFOFRAME Version Number)
	 *    CTA Header Byte3 (Length of INFOFRAME)
	 * - HDMI_DRM_INFOFRAME_SIZE: 26 bytes
	 *
	 * Prior to GEN11's GMP register size is identical to DP HDR static metadata
	 * infoframe size. But GEN11+ has larger than that size, write_infoframe
	 * will pad rest of the size.
	 */
	return sizeof(struct dp_sdp_header) + 2 + HDMI_DRM_INFOFRAME_SIZE;
}
5517
5518 static void intel_write_dp_sdp(struct intel_encoder *encoder,
5519                                const struct intel_crtc_state *crtc_state,
5520                                unsigned int type)
5521 {
5522         struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
5523         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5524         struct dp_sdp sdp = {};
5525         ssize_t len;
5526
5527         if ((crtc_state->infoframes.enable &
5528              intel_hdmi_infoframe_enable(type)) == 0)
5529                 return;
5530
5531         switch (type) {
5532         case DP_SDP_VSC:
5533                 len = intel_dp_vsc_sdp_pack(&crtc_state->infoframes.vsc, &sdp,
5534                                             sizeof(sdp));
5535                 break;
5536         case HDMI_PACKET_TYPE_GAMUT_METADATA:
5537                 len = intel_dp_hdr_metadata_infoframe_sdp_pack(&crtc_state->infoframes.drm.drm,
5538                                                                &sdp, sizeof(sdp));
5539                 break;
5540         default:
5541                 MISSING_CASE(type);
5542                 return;
5543         }
5544
5545         if (drm_WARN_ON(&dev_priv->drm, len < 0))
5546                 return;
5547
5548         dig_port->write_infoframe(encoder, crtc_state, type, &sdp, len);
5549 }
5550
5551 void intel_write_dp_vsc_sdp(struct intel_encoder *encoder,
5552                             const struct intel_crtc_state *crtc_state,
5553                             struct drm_dp_vsc_sdp *vsc)
5554 {
5555         struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
5556         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5557         struct dp_sdp sdp = {};
5558         ssize_t len;
5559
5560         len = intel_dp_vsc_sdp_pack(vsc, &sdp, sizeof(sdp));
5561
5562         if (drm_WARN_ON(&dev_priv->drm, len < 0))
5563                 return;
5564
5565         dig_port->write_infoframe(encoder, crtc_state, DP_SDP_VSC,
5566                                         &sdp, len);
5567 }
5568
5569 void intel_dp_set_infoframes(struct intel_encoder *encoder,
5570                              bool enable,
5571                              const struct intel_crtc_state *crtc_state,
5572                              const struct drm_connector_state *conn_state)
5573 {
5574         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5575         struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
5576         i915_reg_t reg = HSW_TVIDEO_DIP_CTL(crtc_state->cpu_transcoder);
5577         u32 dip_enable = VIDEO_DIP_ENABLE_AVI_HSW | VIDEO_DIP_ENABLE_GCP_HSW |
5578                          VIDEO_DIP_ENABLE_VS_HSW | VIDEO_DIP_ENABLE_GMP_HSW |
5579                          VIDEO_DIP_ENABLE_SPD_HSW | VIDEO_DIP_ENABLE_DRM_GLK;
5580         u32 val = intel_de_read(dev_priv, reg);
5581
5582         /* TODO: Add DSC case (DIP_ENABLE_PPS) */
5583         /* When PSR is enabled, this routine doesn't disable VSC DIP */
5584         if (intel_psr_enabled(intel_dp))
5585                 val &= ~dip_enable;
5586         else
5587                 val &= ~(dip_enable | VIDEO_DIP_ENABLE_VSC_HSW);
5588
5589         if (!enable) {
5590                 intel_de_write(dev_priv, reg, val);
5591                 intel_de_posting_read(dev_priv, reg);
5592                 return;
5593         }
5594
5595         intel_de_write(dev_priv, reg, val);
5596         intel_de_posting_read(dev_priv, reg);
5597
5598         /* When PSR is enabled, VSC SDP is handled by PSR routine */
5599         if (!intel_psr_enabled(intel_dp))
5600                 intel_write_dp_sdp(encoder, crtc_state, DP_SDP_VSC);
5601
5602         intel_write_dp_sdp(encoder, crtc_state, HDMI_PACKET_TYPE_GAMUT_METADATA);
5603 }
5604
5605 static int intel_dp_vsc_sdp_unpack(struct drm_dp_vsc_sdp *vsc,
5606                                    const void *buffer, size_t size)
5607 {
5608         const struct dp_sdp *sdp = buffer;
5609
5610         if (size < sizeof(struct dp_sdp))
5611                 return -EINVAL;
5612
5613         memset(vsc, 0, size);
5614
5615         if (sdp->sdp_header.HB0 != 0)
5616                 return -EINVAL;
5617
5618         if (sdp->sdp_header.HB1 != DP_SDP_VSC)
5619                 return -EINVAL;
5620
5621         vsc->sdp_type = sdp->sdp_header.HB1;
5622         vsc->revision = sdp->sdp_header.HB2;
5623         vsc->length = sdp->sdp_header.HB3;
5624
5625         if ((sdp->sdp_header.HB2 == 0x2 && sdp->sdp_header.HB3 == 0x8) ||
5626             (sdp->sdp_header.HB2 == 0x4 && sdp->sdp_header.HB3 == 0xe)) {
5627                 /*
5628                  * - HB2 = 0x2, HB3 = 0x8
5629                  *   VSC SDP supporting 3D stereo + PSR
5630                  * - HB2 = 0x4, HB3 = 0xe
5631                  *   VSC SDP supporting 3D stereo + PSR2 with Y-coordinate of
5632                  *   first scan line of the SU region (applies to eDP v1.4b
5633                  *   and higher).
5634                  */
5635                 return 0;
5636         } else if (sdp->sdp_header.HB2 == 0x5 && sdp->sdp_header.HB3 == 0x13) {
5637                 /*
5638                  * - HB2 = 0x5, HB3 = 0x13
5639                  *   VSC SDP supporting 3D stereo + PSR2 + Pixel Encoding/Colorimetry
5640                  *   Format.
5641                  */
5642                 vsc->pixelformat = (sdp->db[16] >> 4) & 0xf;
5643                 vsc->colorimetry = sdp->db[16] & 0xf;
5644                 vsc->dynamic_range = (sdp->db[17] >> 7) & 0x1;
5645
5646                 switch (sdp->db[17] & 0x7) {
5647                 case 0x0:
5648                         vsc->bpc = 6;
5649                         break;
5650                 case 0x1:
5651                         vsc->bpc = 8;
5652                         break;
5653                 case 0x2:
5654                         vsc->bpc = 10;
5655                         break;
5656                 case 0x3:
5657                         vsc->bpc = 12;
5658                         break;
5659                 case 0x4:
5660                         vsc->bpc = 16;
5661                         break;
5662                 default:
5663                         MISSING_CASE(sdp->db[17] & 0x7);
5664                         return -EINVAL;
5665                 }
5666
5667                 vsc->content_type = sdp->db[18] & 0x7;
5668         } else {
5669                 return -EINVAL;
5670         }
5671
5672         return 0;
5673 }
5674
5675 static int
5676 intel_dp_hdr_metadata_infoframe_sdp_unpack(struct hdmi_drm_infoframe *drm_infoframe,
5677                                            const void *buffer, size_t size)
5678 {
5679         int ret;
5680
5681         const struct dp_sdp *sdp = buffer;
5682
5683         if (size < sizeof(struct dp_sdp))
5684                 return -EINVAL;
5685
5686         if (sdp->sdp_header.HB0 != 0)
5687                 return -EINVAL;
5688
5689         if (sdp->sdp_header.HB1 != HDMI_INFOFRAME_TYPE_DRM)
5690                 return -EINVAL;
5691
5692         /*
5693          * Least Significant Eight Bits of (Data Byte Count – 1)
5694          * 1Dh (i.e., Data Byte Count = 30 bytes).
5695          */
5696         if (sdp->sdp_header.HB2 != 0x1D)
5697                 return -EINVAL;
5698
5699         /* Most Significant Two Bits of (Data Byte Count – 1), Clear to 00b. */
5700         if ((sdp->sdp_header.HB3 & 0x3) != 0)
5701                 return -EINVAL;
5702
5703         /* INFOFRAME SDP Version Number */
5704         if (((sdp->sdp_header.HB3 >> 2) & 0x3f) != 0x13)
5705                 return -EINVAL;
5706
5707         /* CTA Header Byte 2 (INFOFRAME Version Number) */
5708         if (sdp->db[0] != 1)
5709                 return -EINVAL;
5710
5711         /* CTA Header Byte 3 (Length of INFOFRAME): HDMI_DRM_INFOFRAME_SIZE */
5712         if (sdp->db[1] != HDMI_DRM_INFOFRAME_SIZE)
5713                 return -EINVAL;
5714
5715         ret = hdmi_drm_infoframe_unpack_only(drm_infoframe, &sdp->db[2],
5716                                              HDMI_DRM_INFOFRAME_SIZE);
5717
5718         return ret;
5719 }
5720
5721 static void intel_read_dp_vsc_sdp(struct intel_encoder *encoder,
5722                                   struct intel_crtc_state *crtc_state,
5723                                   struct drm_dp_vsc_sdp *vsc)
5724 {
5725         struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
5726         struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
5727         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5728         unsigned int type = DP_SDP_VSC;
5729         struct dp_sdp sdp = {};
5730         int ret;
5731
5732         /* When PSR is enabled, VSC SDP is handled by PSR routine */
5733         if (intel_psr_enabled(intel_dp))
5734                 return;
5735
5736         if ((crtc_state->infoframes.enable &
5737              intel_hdmi_infoframe_enable(type)) == 0)
5738                 return;
5739
5740         dig_port->read_infoframe(encoder, crtc_state, type, &sdp, sizeof(sdp));
5741
5742         ret = intel_dp_vsc_sdp_unpack(vsc, &sdp, sizeof(sdp));
5743
5744         if (ret)
5745                 drm_dbg_kms(&dev_priv->drm, "Failed to unpack DP VSC SDP\n");
5746 }
5747
5748 static void intel_read_dp_hdr_metadata_infoframe_sdp(struct intel_encoder *encoder,
5749                                                      struct intel_crtc_state *crtc_state,
5750                                                      struct hdmi_drm_infoframe *drm_infoframe)
5751 {
5752         struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
5753         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5754         unsigned int type = HDMI_PACKET_TYPE_GAMUT_METADATA;
5755         struct dp_sdp sdp = {};
5756         int ret;
5757
5758         if ((crtc_state->infoframes.enable &
5759             intel_hdmi_infoframe_enable(type)) == 0)
5760                 return;
5761
5762         dig_port->read_infoframe(encoder, crtc_state, type, &sdp,
5763                                  sizeof(sdp));
5764
5765         ret = intel_dp_hdr_metadata_infoframe_sdp_unpack(drm_infoframe, &sdp,
5766                                                          sizeof(sdp));
5767
5768         if (ret)
5769                 drm_dbg_kms(&dev_priv->drm,
5770                             "Failed to unpack DP HDR Metadata Infoframe SDP\n");
5771 }
5772
5773 void intel_read_dp_sdp(struct intel_encoder *encoder,
5774                        struct intel_crtc_state *crtc_state,
5775                        unsigned int type)
5776 {
5777         if (encoder->type != INTEL_OUTPUT_DDI)
5778                 return;
5779
5780         switch (type) {
5781         case DP_SDP_VSC:
5782                 intel_read_dp_vsc_sdp(encoder, crtc_state,
5783                                       &crtc_state->infoframes.vsc);
5784                 break;
5785         case HDMI_PACKET_TYPE_GAMUT_METADATA:
5786                 intel_read_dp_hdr_metadata_infoframe_sdp(encoder, crtc_state,
5787                                                          &crtc_state->infoframes.drm.drm);
5788                 break;
5789         default:
5790                 MISSING_CASE(type);
5791                 break;
5792         }
5793 }
5794
5795 static u8 intel_dp_autotest_link_training(struct intel_dp *intel_dp)
5796 {
5797         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
5798         int status = 0;
5799         int test_link_rate;
5800         u8 test_lane_count, test_link_bw;
5801         /* (DP CTS 1.2)
5802          * 4.3.1.11
5803          */
5804         /* Read the TEST_LANE_COUNT and TEST_LINK_RTAE fields (DP CTS 3.1.4) */
5805         status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LANE_COUNT,
5806                                    &test_lane_count);
5807
5808         if (status <= 0) {
5809                 drm_dbg_kms(&i915->drm, "Lane count read failed\n");
5810                 return DP_TEST_NAK;
5811         }
5812         test_lane_count &= DP_MAX_LANE_COUNT_MASK;
5813
5814         status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LINK_RATE,
5815                                    &test_link_bw);
5816         if (status <= 0) {
5817                 drm_dbg_kms(&i915->drm, "Link Rate read failed\n");
5818                 return DP_TEST_NAK;
5819         }
5820         test_link_rate = drm_dp_bw_code_to_link_rate(test_link_bw);
5821
5822         /* Validate the requested link rate and lane count */
5823         if (!intel_dp_link_params_valid(intel_dp, test_link_rate,
5824                                         test_lane_count))
5825                 return DP_TEST_NAK;
5826
5827         intel_dp->compliance.test_lane_count = test_lane_count;
5828         intel_dp->compliance.test_link_rate = test_link_rate;
5829
5830         return DP_TEST_ACK;
5831 }
5832
/*
 * Handle the DP compliance video-pattern autotest request (DP CTS 3.1.5):
 * read the requested pattern, geometry and color format from the sink,
 * accept only the RGB/VESA-range color-ramp cases this driver supports,
 * and record the parameters in intel_dp->compliance.
 * Returns DP_TEST_ACK or DP_TEST_NAK.
 */
static u8 intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 test_pattern;
	u8 test_misc;
	__be16 h_width, v_height;
	int status = 0;

	/* Read the TEST_PATTERN (DP CTS 3.1.5) */
	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_PATTERN,
				   &test_pattern);
	if (status <= 0) {
		drm_dbg_kms(&i915->drm, "Test pattern read failed\n");
		return DP_TEST_NAK;
	}
	/* Only the color-ramp pattern is supported */
	if (test_pattern != DP_COLOR_RAMP)
		return DP_TEST_NAK;

	/* Two-byte big-endian fields; converted below with be16_to_cpu() */
	status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_H_WIDTH_HI,
				  &h_width, 2);
	if (status <= 0) {
		drm_dbg_kms(&i915->drm, "H Width read failed\n");
		return DP_TEST_NAK;
	}

	status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_V_HEIGHT_HI,
				  &v_height, 2);
	if (status <= 0) {
		drm_dbg_kms(&i915->drm, "V Height read failed\n");
		return DP_TEST_NAK;
	}

	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_MISC0,
				   &test_misc);
	if (status <= 0) {
		drm_dbg_kms(&i915->drm, "TEST MISC read failed\n");
		return DP_TEST_NAK;
	}
	/* Only RGB with VESA (non-CEA) dynamic range is supported */
	if ((test_misc & DP_TEST_COLOR_FORMAT_MASK) != DP_COLOR_FORMAT_RGB)
		return DP_TEST_NAK;
	if (test_misc & DP_TEST_DYNAMIC_RANGE_CEA)
		return DP_TEST_NAK;
	/* Only 6 and 8 bpc are supported for the pattern test */
	switch (test_misc & DP_TEST_BIT_DEPTH_MASK) {
	case DP_TEST_BIT_DEPTH_6:
		intel_dp->compliance.test_data.bpc = 6;
		break;
	case DP_TEST_BIT_DEPTH_8:
		intel_dp->compliance.test_data.bpc = 8;
		break;
	default:
		return DP_TEST_NAK;
	}

	intel_dp->compliance.test_data.video_pattern = test_pattern;
	intel_dp->compliance.test_data.hdisplay = be16_to_cpu(h_width);
	intel_dp->compliance.test_data.vdisplay = be16_to_cpu(v_height);
	/* Set test active flag here so userspace doesn't interrupt things */
	intel_dp->compliance.test_active = true;

	return DP_TEST_ACK;
}
5894
/*
 * Handle the DP compliance EDID-read autotest: if the cached EDID is
 * missing, corrupt, or the read saw too many I2C defers, request the
 * failsafe resolution; otherwise write the checksum of the last EDID
 * block back to the sink and request the preferred resolution.
 * Returns DP_TEST_ACK, optionally OR'ed with DP_TEST_EDID_CHECKSUM_WRITE.
 */
static u8 intel_dp_autotest_edid(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 test_result = DP_TEST_ACK;
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	struct drm_connector *connector = &intel_connector->base;

	if (intel_connector->detect_edid == NULL ||
	    connector->edid_corrupt ||
	    intel_dp->aux.i2c_defer_count > 6) {
		/* Check EDID read for NACKs, DEFERs and corruption
		 * (DP CTS 1.2 Core r1.1)
		 *    4.2.2.4 : Failed EDID read, I2C_NAK
		 *    4.2.2.5 : Failed EDID read, I2C_DEFER
		 *    4.2.2.6 : EDID corruption detected
		 * Use failsafe mode for all cases
		 */
		if (intel_dp->aux.i2c_nack_count > 0 ||
			intel_dp->aux.i2c_defer_count > 0)
			drm_dbg_kms(&i915->drm,
				    "EDID read had %d NACKs, %d DEFERs\n",
				    intel_dp->aux.i2c_nack_count,
				    intel_dp->aux.i2c_defer_count);
		intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_FAILSAFE;
	} else {
		struct edid *block = intel_connector->detect_edid;

		/* We have to write the checksum
		 * of the last block read
		 */
		/* Advance 'extensions' blocks past the base block to the last one */
		block += intel_connector->detect_edid->extensions;

		if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_EDID_CHECKSUM,
				       block->checksum) <= 0)
			drm_dbg_kms(&i915->drm,
				    "Failed to write EDID checksum\n");

		test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
		intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_PREFERRED;
	}

	/* Set test active flag here so userspace doesn't interrupt things */
	intel_dp->compliance.test_active = true;

	return test_result;
}
5941
/*
 * Program the source-side PHY compliance test pattern selected by the
 * sink (intel_dp->compliance.test_data.phytest) into the per-pipe
 * DDI_DP_COMP_CTL / DDI_DP_COMP_PAT registers.
 */
static void intel_dp_phy_pattern_update(struct intel_dp *intel_dp,
					const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv =
			to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
	struct drm_dp_phy_test_params *data =
			&intel_dp->compliance.test_data.phytest;
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	enum pipe pipe = crtc->pipe;
	u32 pattern_val;

	switch (data->phy_pattern) {
	case DP_PHY_TEST_PATTERN_NONE:
		DRM_DEBUG_KMS("Disable Phy Test Pattern\n");
		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 0x0);
		break;
	case DP_PHY_TEST_PATTERN_D10_2:
		DRM_DEBUG_KMS("Set D10.2 Phy Test Pattern\n");
		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
			       DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_D10_2);
		break;
	case DP_PHY_TEST_PATTERN_ERROR_COUNT:
		DRM_DEBUG_KMS("Set Error Count Phy Test Pattern\n");
		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
			       DDI_DP_COMP_CTL_ENABLE |
			       DDI_DP_COMP_CTL_SCRAMBLED_0);
		break;
	case DP_PHY_TEST_PATTERN_PRBS7:
		DRM_DEBUG_KMS("Set PRBS7 Phy Test Pattern\n");
		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
			       DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_PRBS7);
		break;
	case DP_PHY_TEST_PATTERN_80BIT_CUSTOM:
		/*
		 * FIXME: Ideally pattern should come from DPCD 0x250. As
		 * current firmware of DPR-100 could not set it, so hardcoding
		 * now for compliance test.
		 */
		DRM_DEBUG_KMS("Set 80Bit Custom Phy Test Pattern 0x3e0f83e0 0x0f83e0f8 0x0000f83e\n");
		/* 80-bit pattern split over three 32-bit pattern registers */
		pattern_val = 0x3e0f83e0;
		intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 0), pattern_val);
		pattern_val = 0x0f83e0f8;
		intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 1), pattern_val);
		pattern_val = 0x0000f83e;
		intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 2), pattern_val);
		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
			       DDI_DP_COMP_CTL_ENABLE |
			       DDI_DP_COMP_CTL_CUSTOM80);
		break;
	case DP_PHY_TEST_PATTERN_CP2520:
		/*
		 * FIXME: Ideally pattern should come from DPCD 0x24A. As
		 * current firmware of DPR-100 could not set it, so hardcoding
		 * now for compliance test.
		 */
		DRM_DEBUG_KMS("Set HBR2 compliance Phy Test Pattern\n");
		pattern_val = 0xFB;
		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
			       DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_HBR2 |
			       pattern_val);
		break;
	default:
		WARN(1, "Invalid Phy Test Pattern\n");
	}
}
6007
/*
 * Temporarily disable the pipe/transcoder/DP transport for a PHY
 * compliance test: clear the enable bits in TRANS_DDI_FUNC_CTL,
 * PIPECONF and TGL_DP_TP_CTL via read-modify-write.
 * Counterpart of intel_dp_autotest_phy_ddi_enable().
 */
static void
intel_dp_autotest_phy_ddi_disable(struct intel_dp *intel_dp,
				  const struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
	enum pipe pipe = crtc->pipe;
	u32 trans_ddi_func_ctl_value, trans_conf_value, dp_tp_ctl_value;

	/* Read current register values so only the enable bits change */
	trans_ddi_func_ctl_value = intel_de_read(dev_priv,
						 TRANS_DDI_FUNC_CTL(pipe));
	trans_conf_value = intel_de_read(dev_priv, PIPECONF(pipe));
	dp_tp_ctl_value = intel_de_read(dev_priv, TGL_DP_TP_CTL(pipe));

	/* Also clear the port selection when disabling the transcoder */
	trans_ddi_func_ctl_value &= ~(TRANS_DDI_FUNC_ENABLE |
				      TGL_TRANS_DDI_PORT_MASK);
	trans_conf_value &= ~PIPECONF_ENABLE;
	dp_tp_ctl_value &= ~DP_TP_CTL_ENABLE;

	/* NOTE(review): write order (pipe, DDI, DP_TP) presumably matters
	 * for hardware sequencing — keep as-is */
	intel_de_write(dev_priv, PIPECONF(pipe), trans_conf_value);
	intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(pipe),
		       trans_ddi_func_ctl_value);
	intel_de_write(dev_priv, TGL_DP_TP_CTL(pipe), dp_tp_ctl_value);
}
6034
/*
 * Re-enable the pipe/transcoder/DP transport after a PHY compliance
 * test pattern has been programmed: set the enable bits (and re-select
 * the port) via read-modify-write.
 * Counterpart of intel_dp_autotest_phy_ddi_disable().
 */
static void
intel_dp_autotest_phy_ddi_enable(struct intel_dp *intel_dp,
				 const struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum port port = dig_port->base.port;
	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
	enum pipe pipe = crtc->pipe;
	u32 trans_ddi_func_ctl_value, trans_conf_value, dp_tp_ctl_value;

	/* Read current register values so only the enable bits change */
	trans_ddi_func_ctl_value = intel_de_read(dev_priv,
						 TRANS_DDI_FUNC_CTL(pipe));
	trans_conf_value = intel_de_read(dev_priv, PIPECONF(pipe));
	dp_tp_ctl_value = intel_de_read(dev_priv, TGL_DP_TP_CTL(pipe));

	trans_ddi_func_ctl_value |= TRANS_DDI_FUNC_ENABLE |
				    TGL_TRANS_DDI_SELECT_PORT(port);
	trans_conf_value |= PIPECONF_ENABLE;
	dp_tp_ctl_value |= DP_TP_CTL_ENABLE;

	/* NOTE(review): enable order (DP_TP before DDI) differs from the
	 * disable path — presumably intentional sequencing; confirm before
	 * reordering */
	intel_de_write(dev_priv, PIPECONF(pipe), trans_conf_value);
	intel_de_write(dev_priv, TGL_DP_TP_CTL(pipe), dp_tp_ctl_value);
	intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(pipe),
		       trans_ddi_func_ctl_value);
}
6062
6063 static void intel_dp_process_phy_request(struct intel_dp *intel_dp,
6064                                          const struct intel_crtc_state *crtc_state)
6065 {
6066         struct drm_dp_phy_test_params *data =
6067                 &intel_dp->compliance.test_data.phytest;
6068         u8 link_status[DP_LINK_STATUS_SIZE];
6069
6070         if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, DP_PHY_DPRX,
6071                                              link_status) < 0) {
6072                 DRM_DEBUG_KMS("failed to get link status\n");
6073                 return;
6074         }
6075
6076         /* retrieve vswing & pre-emphasis setting */
6077         intel_dp_get_adjust_train(intel_dp, crtc_state, DP_PHY_DPRX,
6078                                   link_status);
6079
6080         intel_dp_autotest_phy_ddi_disable(intel_dp, crtc_state);
6081
6082         intel_dp_set_signal_levels(intel_dp, crtc_state);
6083
6084         intel_dp_phy_pattern_update(intel_dp, crtc_state);
6085
6086         intel_dp_autotest_phy_ddi_enable(intel_dp, crtc_state);
6087
6088         drm_dp_set_phy_test_pattern(&intel_dp->aux, data,
6089                                     link_status[DP_DPCD_REV]);
6090 }
6091
6092 static u8 intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
6093 {
6094         struct drm_dp_phy_test_params *data =
6095                 &intel_dp->compliance.test_data.phytest;
6096
6097         if (drm_dp_get_phy_test_pattern(&intel_dp->aux, data)) {
6098                 DRM_DEBUG_KMS("DP Phy Test pattern AUX read failure\n");
6099                 return DP_TEST_NAK;
6100         }
6101
6102         /* Set test active flag here so userspace doesn't interrupt things */
6103         intel_dp->compliance.test_active = true;
6104
6105         return DP_TEST_ACK;
6106 }
6107
6108 static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
6109 {
6110         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
6111         u8 response = DP_TEST_NAK;
6112         u8 request = 0;
6113         int status;
6114
6115         status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_REQUEST, &request);
6116         if (status <= 0) {
6117                 drm_dbg_kms(&i915->drm,
6118                             "Could not read test request from sink\n");
6119                 goto update_status;
6120         }
6121
6122         switch (request) {
6123         case DP_TEST_LINK_TRAINING:
6124                 drm_dbg_kms(&i915->drm, "LINK_TRAINING test requested\n");
6125                 response = intel_dp_autotest_link_training(intel_dp);
6126                 break;
6127         case DP_TEST_LINK_VIDEO_PATTERN:
6128                 drm_dbg_kms(&i915->drm, "TEST_PATTERN test requested\n");
6129                 response = intel_dp_autotest_video_pattern(intel_dp);
6130                 break;
6131         case DP_TEST_LINK_EDID_READ:
6132                 drm_dbg_kms(&i915->drm, "EDID test requested\n");
6133                 response = intel_dp_autotest_edid(intel_dp);
6134                 break;
6135         case DP_TEST_LINK_PHY_TEST_PATTERN:
6136                 drm_dbg_kms(&i915->drm, "PHY_PATTERN test requested\n");
6137                 response = intel_dp_autotest_phy_pattern(intel_dp);
6138                 break;
6139         default:
6140                 drm_dbg_kms(&i915->drm, "Invalid test request '%02x'\n",
6141                             request);
6142                 break;
6143         }
6144
6145         if (response & DP_TEST_ACK)
6146                 intel_dp->compliance.test_type = request;
6147
6148 update_status:
6149         status = drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, response);
6150         if (status <= 0)
6151                 drm_dbg_kms(&i915->drm,
6152                             "Could not write test response to sink\n");
6153 }
6154
/**
 * intel_dp_check_mst_status - service any pending MST interrupts, check link status
 * @intel_dp: Intel DP struct
 *
 * Read any pending MST interrupts, call MST core to handle these and ack the
 * interrupts. Check if the main and AUX link state is ok.
 *
 * Returns:
 * - %true if pending interrupts were serviced (or no interrupts were
 *   pending) w/o detecting an error condition.
 * - %false if an error condition - like AUX failure or a loss of link - is
 *   detected, which needs servicing from the hotplug work.
 */
static bool
intel_dp_check_mst_status(struct intel_dp *intel_dp)
{
        struct drm_i915_private *i915 = dp_to_i915(intel_dp);
        bool link_ok = true;

        drm_WARN_ON_ONCE(&i915->drm, intel_dp->active_mst_links < 0);

        /*
         * Loop until the MST manager reports no more events to handle or
         * reading the ESI block fails.
         */
        for (;;) {
                u8 esi[DP_DPRX_ESI_LEN] = {};
                bool handled;
                int retry;

                if (!intel_dp_get_sink_irq_esi(intel_dp, esi)) {
                        drm_dbg_kms(&i915->drm,
                                    "failed to get ESI - device may have failed\n");
                        link_ok = false;

                        break;
                }

                /* check link status - esi[10] = 0x200c */
                if (intel_dp->active_mst_links > 0 && link_ok &&
                    !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
                        drm_dbg_kms(&i915->drm,
                                    "channel EQ not ok, retraining\n");
                        link_ok = false;
                }

                drm_dbg_kms(&i915->drm, "got esi %3ph\n", esi);

                /* Hand the events to the MST core; it tells us if it took any. */
                drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
                if (!handled)
                        break;

                /*
                 * Ack the serviced events by writing esi[1..3] back to the
                 * sink; retry a few times since AUX writes can be flaky.
                 * NOTE(review): a final failure after 3 tries is silently
                 * ignored here — confirm that is intentional.
                 */
                for (retry = 0; retry < 3; retry++) {
                        int wret;

                        wret = drm_dp_dpcd_write(&intel_dp->aux,
                                                 DP_SINK_COUNT_ESI+1,
                                                 &esi[1], 3);
                        if (wret == 3)
                                break;
                }
        }

        return link_ok;
}
6216
6217 static void
6218 intel_dp_handle_hdmi_link_status_change(struct intel_dp *intel_dp)
6219 {
6220         bool is_active;
6221         u8 buf = 0;
6222
6223         is_active = drm_dp_pcon_hdmi_link_active(&intel_dp->aux);
6224         if (intel_dp->frl.is_trained && !is_active) {
6225                 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PCON_HDMI_LINK_CONFIG_1, &buf) < 0)
6226                         return;
6227
6228                 buf &=  ~DP_PCON_ENABLE_HDMI_LINK;
6229                 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_PCON_HDMI_LINK_CONFIG_1, buf) < 0)
6230                         return;
6231
6232                 drm_dp_pcon_hdmi_frl_link_error_count(&intel_dp->aux, &intel_dp->attached_connector->base);
6233
6234                 /* Restart FRL training or fall back to TMDS mode */
6235                 intel_dp_check_frl_training(intel_dp);
6236         }
6237 }
6238
/*
 * Decide whether the link needs retraining: only if we trained it before,
 * PSR is not in control of the main link, the cached link parameters are
 * still valid, and the DPRX-reported CR/EQ status is bad.
 */
static bool
intel_dp_needs_link_retrain(struct intel_dp *intel_dp)
{
        u8 link_status[DP_LINK_STATUS_SIZE];

        /* Never trained: nothing to re-establish. */
        if (!intel_dp->link_trained)
                return false;

        /*
         * While PSR source HW is enabled, it will control main-link sending
         * frames, enabling and disabling it so trying to do a retrain will fail
         * as the link would or not be on or it could mix training patterns
         * and frame data at the same time causing retrain to fail.
         * Also when exiting PSR, HW will retrain the link anyways fixing
         * any link status error.
         */
        if (intel_psr_enabled(intel_dp))
                return false;

        /* AUX failure: defer to full detection rather than retrain blindly. */
        if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, DP_PHY_DPRX,
                                             link_status) < 0)
                return false;

        /*
         * Validate the cached values of intel_dp->link_rate and
         * intel_dp->lane_count before attempting to retrain.
         *
         * FIXME would be nice to user the crtc state here, but since
         * we need to call this from the short HPD handler that seems
         * a bit hard.
         */
        if (!intel_dp_link_params_valid(intel_dp, intel_dp->link_rate,
                                        intel_dp->lane_count))
                return false;

        /* Retrain if Channel EQ or CR not ok */
        return !drm_dp_channel_eq_ok(link_status, intel_dp->lane_count);
}
6277
6278 static bool intel_dp_has_connector(struct intel_dp *intel_dp,
6279                                    const struct drm_connector_state *conn_state)
6280 {
6281         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
6282         struct intel_encoder *encoder;
6283         enum pipe pipe;
6284
6285         if (!conn_state->best_encoder)
6286                 return false;
6287
6288         /* SST */
6289         encoder = &dp_to_dig_port(intel_dp)->base;
6290         if (conn_state->best_encoder == &encoder->base)
6291                 return true;
6292
6293         /* MST */
6294         for_each_pipe(i915, pipe) {
6295                 encoder = &intel_dp->mst_encoders[pipe]->base;
6296                 if (conn_state->best_encoder == &encoder->base)
6297                         return true;
6298         }
6299
6300         return false;
6301 }
6302
/*
 * Collect the mask of active CRTCs currently driven by this DP port that
 * need link retraining, taking each CRTC's modeset lock via @ctx along the
 * way. Returns 0 on success (with *crtc_mask possibly 0 if nothing needs
 * retraining) or a negative error, notably -EDEADLK for ctx backoff.
 */
static int intel_dp_prep_link_retrain(struct intel_dp *intel_dp,
                                      struct drm_modeset_acquire_ctx *ctx,
                                      u32 *crtc_mask)
{
        struct drm_i915_private *i915 = dp_to_i915(intel_dp);
        struct drm_connector_list_iter conn_iter;
        struct intel_connector *connector;
        int ret = 0;

        *crtc_mask = 0;

        /* Cheap early-out before walking the connector list. */
        if (!intel_dp_needs_link_retrain(intel_dp))
                return 0;

        drm_connector_list_iter_begin(&i915->drm, &conn_iter);
        for_each_intel_connector_iter(connector, &conn_iter) {
                struct drm_connector_state *conn_state =
                        connector->base.state;
                struct intel_crtc_state *crtc_state;
                struct intel_crtc *crtc;

                if (!intel_dp_has_connector(intel_dp, conn_state))
                        continue;

                crtc = to_intel_crtc(conn_state->crtc);
                if (!crtc)
                        continue;

                /* Lock stays held by @ctx even when we continue below. */
                ret = drm_modeset_lock(&crtc->base.mutex, ctx);
                if (ret)
                        break;

                crtc_state = to_intel_crtc_state(crtc->base.state);

                drm_WARN_ON(&i915->drm, !intel_crtc_has_dp_encoder(crtc_state));

                if (!crtc_state->hw.active)
                        continue;

                /* Skip CRTCs with a commit still in flight on the HW. */
                if (conn_state->commit &&
                    !try_wait_for_completion(&conn_state->commit->hw_done))
                        continue;

                *crtc_mask |= drm_crtc_mask(&crtc->base);
        }
        drm_connector_list_iter_end(&conn_iter);

        /* Re-check: the link state may have changed while we took locks. */
        if (!intel_dp_needs_link_retrain(intel_dp))
                *crtc_mask = 0;

        return ret;
}
6355
6356 static bool intel_dp_is_connected(struct intel_dp *intel_dp)
6357 {
6358         struct intel_connector *connector = intel_dp->attached_connector;
6359
6360         return connector->base.status == connector_status_connected ||
6361                 intel_dp->is_mst;
6362 }
6363
/**
 * intel_dp_retrain_link - retrain the DP link on the CRTCs driven by @encoder
 * @encoder: DP encoder to retrain
 * @ctx: modeset acquire context (for -EDEADLK backoff by the caller)
 *
 * Three phases over the affected CRTC mask: suppress FIFO underrun
 * reporting, retrain the link once (on the MST master transcoder on
 * gen12+), then wait a vblank and re-enable underrun reporting.
 *
 * Returns 0 on success or a negative error (notably -EDEADLK).
 */
int intel_dp_retrain_link(struct intel_encoder *encoder,
                          struct drm_modeset_acquire_ctx *ctx)
{
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
        struct intel_crtc *crtc;
        u32 crtc_mask;
        int ret;

        if (!intel_dp_is_connected(intel_dp))
                return 0;

        ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
                               ctx);
        if (ret)
                return ret;

        ret = intel_dp_prep_link_retrain(intel_dp, ctx, &crtc_mask);
        if (ret)
                return ret;

        if (crtc_mask == 0)
                return 0;

        drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] retraining link\n",
                    encoder->base.base.id, encoder->base.name);

        for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) {
                const struct intel_crtc_state *crtc_state =
                        to_intel_crtc_state(crtc->base.state);

                /* Suppress underruns caused by re-training */
                intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
                if (crtc_state->has_pch_encoder)
                        intel_set_pch_fifo_underrun_reporting(dev_priv,
                                                              intel_crtc_pch_transcoder(crtc), false);
        }

        for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) {
                const struct intel_crtc_state *crtc_state =
                        to_intel_crtc_state(crtc->base.state);

                /* retrain on the MST master transcoder */
                if (INTEL_GEN(dev_priv) >= 12 &&
                    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST) &&
                    !intel_dp_mst_is_master_trans(crtc_state))
                        continue;

                intel_dp_check_frl_training(intel_dp);
                intel_dp_pcon_dsc_configure(intel_dp, crtc_state);
                intel_dp_start_link_train(intel_dp, crtc_state);
                intel_dp_stop_link_train(intel_dp, crtc_state);
                break;
        }

        for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) {
                const struct intel_crtc_state *crtc_state =
                        to_intel_crtc_state(crtc->base.state);

                /* Keep underrun reporting disabled until things are stable */
                intel_wait_for_vblank(dev_priv, crtc->pipe);

                intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
                if (crtc_state->has_pch_encoder)
                        intel_set_pch_fifo_underrun_reporting(dev_priv,
                                                              intel_crtc_pch_transcoder(crtc), true);
        }

        return 0;
}
6434
/*
 * Collect the mask of active CRTCs driven by this DP port for a PHY
 * compliance test, taking each CRTC's modeset lock via @ctx. Mirrors
 * intel_dp_prep_link_retrain() minus the retrain-needed checks.
 * Returns 0 or a negative error (notably -EDEADLK for ctx backoff).
 */
static int intel_dp_prep_phy_test(struct intel_dp *intel_dp,
                                  struct drm_modeset_acquire_ctx *ctx,
                                  u32 *crtc_mask)
{
        struct drm_i915_private *i915 = dp_to_i915(intel_dp);
        struct drm_connector_list_iter conn_iter;
        struct intel_connector *connector;
        int ret = 0;

        *crtc_mask = 0;

        drm_connector_list_iter_begin(&i915->drm, &conn_iter);
        for_each_intel_connector_iter(connector, &conn_iter) {
                struct drm_connector_state *conn_state =
                        connector->base.state;
                struct intel_crtc_state *crtc_state;
                struct intel_crtc *crtc;

                if (!intel_dp_has_connector(intel_dp, conn_state))
                        continue;

                crtc = to_intel_crtc(conn_state->crtc);
                if (!crtc)
                        continue;

                /* Lock stays held by @ctx even when we continue below. */
                ret = drm_modeset_lock(&crtc->base.mutex, ctx);
                if (ret)
                        break;

                crtc_state = to_intel_crtc_state(crtc->base.state);

                drm_WARN_ON(&i915->drm, !intel_crtc_has_dp_encoder(crtc_state));

                if (!crtc_state->hw.active)
                        continue;

                /* Skip CRTCs with a commit still in flight on the HW. */
                if (conn_state->commit &&
                    !try_wait_for_completion(&conn_state->commit->hw_done))
                        continue;

                *crtc_mask |= drm_crtc_mask(&crtc->base);
        }
        drm_connector_list_iter_end(&conn_iter);

        return ret;
}
6481
/*
 * Run the pending PHY compliance test on the (single) CRTC driving this
 * encoder, under the modeset locks taken through @ctx. On gen12+ MST the
 * test runs on the master transcoder only. Returns 0 or a negative error
 * (notably -EDEADLK for ctx backoff).
 */
static int intel_dp_do_phy_test(struct intel_encoder *encoder,
                                struct drm_modeset_acquire_ctx *ctx)
{
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
        struct intel_crtc *crtc;
        u32 crtc_mask;
        int ret;

        ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
                               ctx);
        if (ret)
                return ret;

        ret = intel_dp_prep_phy_test(intel_dp, ctx, &crtc_mask);
        if (ret)
                return ret;

        /* No active CRTC on this port: nothing to test. */
        if (crtc_mask == 0)
                return 0;

        drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] PHY test\n",
                    encoder->base.base.id, encoder->base.name);

        for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) {
                const struct intel_crtc_state *crtc_state =
                        to_intel_crtc_state(crtc->base.state);

                /* test on the MST master transcoder */
                if (INTEL_GEN(dev_priv) >= 12 &&
                    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST) &&
                    !intel_dp_mst_is_master_trans(crtc_state))
                        continue;

                intel_dp_process_phy_request(intel_dp, crtc_state);
                break;
        }

        return 0;
}
6522
6523 static void intel_dp_phy_test(struct intel_encoder *encoder)
6524 {
6525         struct drm_modeset_acquire_ctx ctx;
6526         int ret;
6527
6528         drm_modeset_acquire_init(&ctx, 0);
6529
6530         for (;;) {
6531                 ret = intel_dp_do_phy_test(encoder, &ctx);
6532
6533                 if (ret == -EDEADLK) {
6534                         drm_modeset_backoff(&ctx);
6535                         continue;
6536                 }
6537
6538                 break;
6539         }
6540
6541         drm_modeset_drop_locks(&ctx);
6542         drm_modeset_acquire_fini(&ctx);
6543         drm_WARN(encoder->base.dev, ret,
6544                  "Acquiring modeset locks failed with %i\n", ret);
6545 }
6546
/*
 * If display is now connected check links status,
 * there has been known issues of link loss triggering
 * long pulse.
 *
 * Some sinks (eg. ASUS PB287Q) seem to perform some
 * weird HPD ping pong during modesets. So we can apparently
 * end up with HPD going low during a modeset, and then
 * going back up soon after. And once that happens we must
 * retrain the link to get a picture. That's in case no
 * userspace component reacted to intermittent HPD dip.
 */
static enum intel_hotplug_state
intel_dp_hotplug(struct intel_encoder *encoder,
                 struct intel_connector *connector)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
        struct drm_modeset_acquire_ctx ctx;
        enum intel_hotplug_state state;
        int ret;

        /* An ACKed PHY compliance test takes over the whole hotplug. */
        if (intel_dp->compliance.test_active &&
            intel_dp->compliance.test_type == DP_TEST_LINK_PHY_TEST_PATTERN) {
                intel_dp_phy_test(encoder);
                /* just do the PHY test and nothing else */
                return INTEL_HOTPLUG_UNCHANGED;
        }

        state = intel_encoder_hotplug(encoder, connector);

        /* Retrain the link if needed, backing off on lock contention. */
        drm_modeset_acquire_init(&ctx, 0);

        for (;;) {
                ret = intel_dp_retrain_link(encoder, &ctx);

                if (ret == -EDEADLK) {
                        drm_modeset_backoff(&ctx);
                        continue;
                }

                break;
        }

        drm_modeset_drop_locks(&ctx);
        drm_modeset_acquire_fini(&ctx);
        drm_WARN(encoder->base.dev, ret,
                 "Acquiring modeset locks failed with %i\n", ret);

        /*
         * Keeping it consistent with intel_ddi_hotplug() and
         * intel_hdmi_hotplug().
         */
        if (state == INTEL_HOTPLUG_UNCHANGED && !connector->hotplug_retries)
                state = INTEL_HOTPLUG_RETRY;

        return state;
}
6604
/*
 * Read the DPCD device service IRQ vector (DP 1.1+), ack it by writing the
 * value back, and dispatch automated-test / CP / sink-specific IRQs.
 */
static void intel_dp_check_device_service_irq(struct intel_dp *intel_dp)
{
        struct drm_i915_private *i915 = dp_to_i915(intel_dp);
        u8 val;

        /* DEVICE_SERVICE_IRQ_VECTOR only exists from DPCD rev 1.1 on. */
        if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
                return;

        if (drm_dp_dpcd_readb(&intel_dp->aux,
                              DP_DEVICE_SERVICE_IRQ_VECTOR, &val) != 1 || !val)
                return;

        /* Ack the IRQs by writing the same bits back. */
        drm_dp_dpcd_writeb(&intel_dp->aux, DP_DEVICE_SERVICE_IRQ_VECTOR, val);

        if (val & DP_AUTOMATED_TEST_REQUEST)
                intel_dp_handle_test_request(intel_dp);

        if (val & DP_CP_IRQ)
                intel_hdcp_handle_cp_irq(intel_dp->attached_connector);

        if (val & DP_SINK_SPECIFIC_IRQ)
                drm_dbg_kms(&i915->drm, "Sink specific irq unhandled\n");
}
6628
/*
 * Read the link service IRQ vector (ESI0), ack it by writing it back, and
 * dispatch the HDMI (PCON FRL) link status change handler if flagged.
 */
static void intel_dp_check_link_service_irq(struct intel_dp *intel_dp)
{
        struct drm_i915_private *i915 = dp_to_i915(intel_dp);
        u8 val;

        /* The ESI vectors only exist from DPCD rev 1.1 on. */
        if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
                return;

        if (drm_dp_dpcd_readb(&intel_dp->aux,
                              DP_LINK_SERVICE_IRQ_VECTOR_ESI0, &val) != 1 || !val) {
                drm_dbg_kms(&i915->drm, "Error in reading link service irq vector\n");
                return;
        }

        /* Ack the IRQs; bail if the ack itself fails. */
        if (drm_dp_dpcd_writeb(&intel_dp->aux,
                               DP_LINK_SERVICE_IRQ_VECTOR_ESI0, val) != 1) {
                drm_dbg_kms(&i915->drm, "Error in writing link service irq vector\n");
                return;
        }

        if (val & HDMI_LINK_STATUS_CHANGED)
                intel_dp_handle_hdmi_link_status_change(intel_dp);
}
6652
/*
 * According to DP spec
 * 5.1.2:
 *  1. Read DPCD
 *  2. Configure link according to Receiver Capabilities
 *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
 *  4. Check link status on receipt of hot-plug interrupt
 *
 * intel_dp_short_pulse -  handles short pulse interrupts
 * when full detection is not required.
 * Returns %true if short pulse is handled and full detection
 * is NOT required and %false otherwise.
 */
static bool
intel_dp_short_pulse(struct intel_dp *intel_dp)
{
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        u8 old_sink_count = intel_dp->sink_count;
        bool ret;

        /*
         * Clearing compliance test variables to allow capturing
         * of values for next automated test request.
         */
        memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));

        /*
         * Now read the DPCD to see if it's actually running
         * If the current value of sink count doesn't match with
         * the value that was stored earlier or dpcd read failed
         * we need to do full detection
         */
        ret = intel_dp_get_dpcd(intel_dp);

        if ((old_sink_count != intel_dp->sink_count) || !ret) {
                /* No need to proceed if we are going to do full detect */
                return false;
        }

        /* Service device and link IRQs (test requests, CP, FRL, ...). */
        intel_dp_check_device_service_irq(intel_dp);
        intel_dp_check_link_service_irq(intel_dp);

        /* Handle CEC interrupts, if any */
        drm_dp_cec_irq(&intel_dp->aux);

        /* defer to the hotplug work for link retraining if needed */
        if (intel_dp_needs_link_retrain(intel_dp))
                return false;

        intel_psr_short_pulse(intel_dp);

        /* compliance.test_type was just (re)filled by the IRQ handling above. */
        switch (intel_dp->compliance.test_type) {
        case DP_TEST_LINK_TRAINING:
                drm_dbg_kms(&dev_priv->drm,
                            "Link Training Compliance Test requested\n");
                /* Send a Hotplug Uevent to userspace to start modeset */
                drm_kms_helper_hotplug_event(&dev_priv->drm);
                break;
        case DP_TEST_LINK_PHY_TEST_PATTERN:
                drm_dbg_kms(&dev_priv->drm,
                            "PHY test pattern Compliance Test requested\n");
                /*
                 * Schedule long hpd to do the test
                 *
                 * FIXME get rid of the ad-hoc phy test modeset code
                 * and properly incorporate it into the normal modeset.
                 */
                return false;
        }

        return true;
}
6725
/* XXX this is probably wrong for multiple downstream ports */
static enum drm_connector_status
intel_dp_detect_dpcd(struct intel_dp *intel_dp)
{
        struct drm_i915_private *i915 = dp_to_i915(intel_dp);
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        u8 *dpcd = intel_dp->dpcd;
        u8 type;

        /* eDP never takes this path; edp_detect() handles it. */
        if (drm_WARN_ON(&i915->drm, intel_dp_is_edp(intel_dp)))
                return connector_status_connected;

        lspcon_resume(dig_port);

        if (!intel_dp_get_dpcd(intel_dp))
                return connector_status_disconnected;

        /* if there's no downstream port, we're done */
        if (!drm_dp_is_branch(dpcd))
                return connector_status_connected;

        /* If we're HPD-aware, SINK_COUNT changes dynamically */
        if (intel_dp_has_sink_count(intel_dp) &&
            intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
                return intel_dp->sink_count ?
                connector_status_connected : connector_status_disconnected;
        }

        /* MST-capable branch devices count as connected. */
        if (intel_dp_can_mst(intel_dp))
                return connector_status_connected;

        /* If no HPD, poke DDC gently */
        if (drm_probe_ddc(&intel_dp->aux.ddc))
                return connector_status_connected;

        /* Well we tried, say unknown for unreliable port types */
        if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
                type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
                if (type == DP_DS_PORT_TYPE_VGA ||
                    type == DP_DS_PORT_TYPE_NON_EDID)
                        return connector_status_unknown;
        } else {
                /* DPCD 1.0: only the coarse downstream port type is available. */
                type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
                        DP_DWN_STRM_PORT_TYPE_MASK;
                if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
                    type == DP_DWN_STRM_PORT_TYPE_OTHER)
                        return connector_status_unknown;
        }

        /* Anything else is out of spec, warn and ignore */
        drm_dbg_kms(&i915->drm, "Broken DP branch device, ignoring\n");
        return connector_status_disconnected;
}
6779
/* eDP panels are always connected, so detection is a fixed answer. */
static enum drm_connector_status
edp_detect(struct intel_dp *intel_dp)
{
        return connector_status_connected;
}
6785
6786 static bool ibx_digital_port_connected(struct intel_encoder *encoder)
6787 {
6788         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
6789         u32 bit = dev_priv->hotplug.pch_hpd[encoder->hpd_pin];
6790
6791         return intel_de_read(dev_priv, SDEISR) & bit;
6792 }
6793
6794 static bool g4x_digital_port_connected(struct intel_encoder *encoder)
6795 {
6796         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
6797         u32 bit;
6798
6799         switch (encoder->hpd_pin) {
6800         case HPD_PORT_B:
6801                 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
6802                 break;
6803         case HPD_PORT_C:
6804                 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
6805                 break;
6806         case HPD_PORT_D:
6807                 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
6808                 break;
6809         default:
6810                 MISSING_CASE(encoder->hpd_pin);
6811                 return false;
6812         }
6813
6814         return intel_de_read(dev_priv, PORT_HOTPLUG_STAT) & bit;
6815 }
6816
6817 static bool gm45_digital_port_connected(struct intel_encoder *encoder)
6818 {
6819         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
6820         u32 bit;
6821
6822         switch (encoder->hpd_pin) {
6823         case HPD_PORT_B:
6824                 bit = PORTB_HOTPLUG_LIVE_STATUS_GM45;
6825                 break;
6826         case HPD_PORT_C:
6827                 bit = PORTC_HOTPLUG_LIVE_STATUS_GM45;
6828                 break;
6829         case HPD_PORT_D:
6830                 bit = PORTD_HOTPLUG_LIVE_STATUS_GM45;
6831                 break;
6832         default:
6833                 MISSING_CASE(encoder->hpd_pin);
6834                 return false;
6835         }
6836
6837         return intel_de_read(dev_priv, PORT_HOTPLUG_STAT) & bit;
6838 }
6839
6840 static bool ilk_digital_port_connected(struct intel_encoder *encoder)
6841 {
6842         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
6843         u32 bit = dev_priv->hotplug.hpd[encoder->hpd_pin];
6844
6845         return intel_de_read(dev_priv, DEISR) & bit;
6846 }
6847
/**
 * intel_digital_port_connected - is the specified port connected?
 * @encoder: intel_encoder
 *
 * In cases where there's a connector physically connected but it can't be used
 * by our hardware we also return false, since the rest of the driver should
 * pretty much treat the port as disconnected. This is relevant for type-C
 * (starting on ICL) where there's ownership involved.
 *
 * Return %true if port is connected, %false otherwise.
 */
bool intel_digital_port_connected(struct intel_encoder *encoder)
{
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
        bool is_connected = false;
        intel_wakeref_t wakeref;

        /* Hold a display power wakeref around the platform hook. */
        with_intel_display_power(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref)
                is_connected = dig_port->connected(encoder);

        return is_connected;
}
6871
6872 static struct edid *
6873 intel_dp_get_edid(struct intel_dp *intel_dp)
6874 {
6875         struct intel_connector *intel_connector = intel_dp->attached_connector;
6876
6877         /* use cached edid if we have one */
6878         if (intel_connector->edid) {
6879                 /* invalid edid */
6880                 if (IS_ERR(intel_connector->edid))
6881                         return NULL;
6882
6883                 return drm_edid_duplicate(intel_connector->edid);
6884         } else
6885                 return drm_get_edid(&intel_connector->base,
6886                                     &intel_dp->aux.ddc);
6887 }
6888
/*
 * Refresh the cached downstream-facing-port (DFP) limits from the branch
 * device's DPCD and the just-read EDID. These limits (max bpc, dotclock,
 * TMDS clock range, PCON FRL bandwidth) constrain later mode validation
 * and link configuration. @edid may be NULL when no EDID was read.
 */
static void
intel_dp_update_dfp(struct intel_dp *intel_dp,
                    const struct edid *edid)
{
        struct drm_i915_private *i915 = dp_to_i915(intel_dp);
        struct intel_connector *connector = intel_dp->attached_connector;

        intel_dp->dfp.max_bpc =
                drm_dp_downstream_max_bpc(intel_dp->dpcd,
                                          intel_dp->downstream_ports, edid);

        intel_dp->dfp.max_dotclock =
                drm_dp_downstream_max_dotclock(intel_dp->dpcd,
                                               intel_dp->downstream_ports);

        intel_dp->dfp.min_tmds_clock =
                drm_dp_downstream_min_tmds_clock(intel_dp->dpcd,
                                                 intel_dp->downstream_ports,
                                                 edid);
        intel_dp->dfp.max_tmds_clock =
                drm_dp_downstream_max_tmds_clock(intel_dp->dpcd,
                                                 intel_dp->downstream_ports,
                                                 edid);

        intel_dp->dfp.pcon_max_frl_bw =
                drm_dp_get_pcon_max_frl_bw(intel_dp->dpcd,
                                           intel_dp->downstream_ports);

        drm_dbg_kms(&i915->drm,
                    "[CONNECTOR:%d:%s] DFP max bpc %d, max dotclock %d, TMDS clock %d-%d, PCON Max FRL BW %dGbps\n",
                    connector->base.base.id, connector->base.name,
                    intel_dp->dfp.max_bpc,
                    intel_dp->dfp.max_dotclock,
                    intel_dp->dfp.min_tmds_clock,
                    intel_dp->dfp.max_tmds_clock,
                    intel_dp->dfp.pcon_max_frl_bw);

        /* Also pick up the PCON's DSC decoding capability, if any. */
        intel_dp_get_pcon_dsc_cap(intel_dp);
}
6928
6929 static void
6930 intel_dp_update_420(struct intel_dp *intel_dp)
6931 {
6932         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
6933         struct intel_connector *connector = intel_dp->attached_connector;
6934         bool is_branch, ycbcr_420_passthrough, ycbcr_444_to_420, rgb_to_ycbcr;
6935
6936         /* No YCbCr output support on gmch platforms */
6937         if (HAS_GMCH(i915))
6938                 return;
6939
6940         /*
6941          * ILK doesn't seem capable of DP YCbCr output. The
6942          * displayed image is severly corrupted. SNB+ is fine.
6943          */
6944         if (IS_GEN(i915, 5))
6945                 return;
6946
6947         is_branch = drm_dp_is_branch(intel_dp->dpcd);
6948         ycbcr_420_passthrough =
6949                 drm_dp_downstream_420_passthrough(intel_dp->dpcd,
6950                                                   intel_dp->downstream_ports);
6951         /* on-board LSPCON always assumed to support 4:4:4->4:2:0 conversion */
6952         ycbcr_444_to_420 =
6953                 dp_to_dig_port(intel_dp)->lspcon.active ||
6954                 drm_dp_downstream_444_to_420_conversion(intel_dp->dpcd,
6955                                                         intel_dp->downstream_ports);
6956         rgb_to_ycbcr = drm_dp_downstream_rgb_to_ycbcr_conversion(intel_dp->dpcd,
6957                                                                  intel_dp->downstream_ports,
6958                                                                  DP_DS_HDMI_BT601_RGB_YCBCR_CONV ||
6959                                                                  DP_DS_HDMI_BT709_RGB_YCBCR_CONV ||
6960                                                                  DP_DS_HDMI_BT2020_RGB_YCBCR_CONV);
6961
6962         if (INTEL_GEN(i915) >= 11) {
6963                 /* Let PCON convert from RGB->YCbCr if possible */
6964                 if (is_branch && rgb_to_ycbcr && ycbcr_444_to_420) {
6965                         intel_dp->dfp.rgb_to_ycbcr = true;
6966                         intel_dp->dfp.ycbcr_444_to_420 = true;
6967                         connector->base.ycbcr_420_allowed = true;
6968                 } else {
6969                 /* Prefer 4:2:0 passthrough over 4:4:4->4:2:0 conversion */
6970                         intel_dp->dfp.ycbcr_444_to_420 =
6971                                 ycbcr_444_to_420 && !ycbcr_420_passthrough;
6972
6973                         connector->base.ycbcr_420_allowed =
6974                                 !is_branch || ycbcr_444_to_420 || ycbcr_420_passthrough;
6975                 }
6976         } else {
6977                 /* 4:4:4->4:2:0 conversion is the only way */
6978                 intel_dp->dfp.ycbcr_444_to_420 = ycbcr_444_to_420;
6979
6980                 connector->base.ycbcr_420_allowed = ycbcr_444_to_420;
6981         }
6982
6983         drm_dbg_kms(&i915->drm,
6984                     "[CONNECTOR:%d:%s] RGB->YcbCr conversion? %s, YCbCr 4:2:0 allowed? %s, YCbCr 4:4:4->4:2:0 conversion? %s\n",
6985                     connector->base.base.id, connector->base.name,
6986                     yesno(intel_dp->dfp.rgb_to_ycbcr),
6987                     yesno(connector->base.ycbcr_420_allowed),
6988                     yesno(intel_dp->dfp.ycbcr_444_to_420));
6989 }
6990
/*
 * Read (or duplicate) the sink's EDID, cache it on the connector, and
 * refresh everything derived from it: DFP limits, YCbCr 4:2:0 policy,
 * HDMI-sink/audio flags, CEC addressing and EDID quirks.
 */
static void
intel_dp_set_edid(struct intel_dp *intel_dp)
{
        struct intel_connector *connector = intel_dp->attached_connector;
        struct edid *edid;

        /* Drop any previous EDID and derived state before re-reading. */
        intel_dp_unset_edid(intel_dp);
        edid = intel_dp_get_edid(intel_dp);
        connector->detect_edid = edid;

        /* edid may be NULL here; both helpers handle that. */
        intel_dp_update_dfp(intel_dp, edid);
        intel_dp_update_420(intel_dp);

        if (edid && edid->input & DRM_EDID_INPUT_DIGITAL) {
                intel_dp->has_hdmi_sink = drm_detect_hdmi_monitor(edid);
                intel_dp->has_audio = drm_detect_monitor_audio(edid);
        }

        drm_dp_cec_set_edid(&intel_dp->aux, edid);
        intel_dp->edid_quirks = drm_dp_get_edid_quirks(edid);
}
7012
7013 static void
7014 intel_dp_unset_edid(struct intel_dp *intel_dp)
7015 {
7016         struct intel_connector *connector = intel_dp->attached_connector;
7017
7018         drm_dp_cec_unset_edid(&intel_dp->aux);
7019         kfree(connector->detect_edid);
7020         connector->detect_edid = NULL;
7021
7022         intel_dp->has_hdmi_sink = false;
7023         intel_dp->has_audio = false;
7024         intel_dp->edid_quirks = 0;
7025
7026         intel_dp->dfp.max_bpc = 0;
7027         intel_dp->dfp.max_dotclock = 0;
7028         intel_dp->dfp.min_tmds_clock = 0;
7029         intel_dp->dfp.max_tmds_clock = 0;
7030
7031         intel_dp->dfp.pcon_max_frl_bw = 0;
7032
7033         intel_dp->dfp.ycbcr_444_to_420 = false;
7034         connector->base.ycbcr_420_allowed = false;
7035 }
7036
/*
 * .detect_ctx hook: determine whether a sink is present and fully refresh
 * the connector state (DPCD, DSC caps, MST topology, link params, EDID).
 * Must be called with connection_mutex held; returns a connector_status
 * value, or a negative error from link retraining. @force is unused here.
 */
static int
intel_dp_detect(struct drm_connector *connector,
                struct drm_modeset_acquire_ctx *ctx,
                bool force)
{
        struct drm_i915_private *dev_priv = to_i915(connector->dev);
        struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct intel_encoder *encoder = &dig_port->base;
        enum drm_connector_status status;

        drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n",
                    connector->base.id, connector->name);
        drm_WARN_ON(&dev_priv->drm,
                    !drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));

        if (!INTEL_DISPLAY_ENABLED(dev_priv))
                return connector_status_disconnected;

        /* Can't disconnect eDP */
        if (intel_dp_is_edp(intel_dp))
                status = edp_detect(intel_dp);
        else if (intel_digital_port_connected(encoder))
                status = intel_dp_detect_dpcd(intel_dp);
        else
                status = connector_status_disconnected;

        if (status == connector_status_disconnected) {
                /* Sink gone: wipe compliance and DSC state from the old one. */
                memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));
                memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd));

                if (intel_dp->is_mst) {
                        drm_dbg_kms(&dev_priv->drm,
                                    "MST device may have disappeared %d vs %d\n",
                                    intel_dp->is_mst,
                                    intel_dp->mst_mgr.mst_state);
                        /* Tear down the MST topology manager state. */
                        intel_dp->is_mst = false;
                        drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
                                                        intel_dp->is_mst);
                }

                goto out;
        }

        /* Read DP Sink DSC Cap DPCD regs for DP v1.4 */
        if (INTEL_GEN(dev_priv) >= 11)
                intel_dp_get_dsc_sink_cap(intel_dp);

        intel_dp_configure_mst(intel_dp);

        /*
         * TODO: Reset link params when switching to MST mode, until MST
         * supports link training fallback params.
         */
        if (intel_dp->reset_link_params || intel_dp->is_mst) {
                /* Initial max link lane count */
                intel_dp->max_link_lane_count = intel_dp_max_common_lane_count(intel_dp);

                /* Initial max link rate */
                intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);

                intel_dp->reset_link_params = false;
        }

        intel_dp_print_rates(intel_dp);

        if (intel_dp->is_mst) {
                /*
                 * If we are in MST mode then this connector
                 * won't appear connected or have anything
                 * with EDID on it
                 */
                status = connector_status_disconnected;
                goto out;
        }

        /*
         * Some external monitors do not signal loss of link synchronization
         * with an IRQ_HPD, so force a link status check.
         */
        if (!intel_dp_is_edp(intel_dp)) {
                int ret;

                /* May return -EDEADLK for the atomic backoff dance. */
                ret = intel_dp_retrain_link(encoder, ctx);
                if (ret)
                        return ret;
        }

        /*
         * Clearing NACK and defer counts to get their exact values
         * while reading EDID which are required by Compliance tests
         * 4.2.2.4 and 4.2.2.5
         */
        intel_dp->aux.i2c_nack_count = 0;
        intel_dp->aux.i2c_defer_count = 0;

        intel_dp_set_edid(intel_dp);
        /* eDP counts as connected even without an EDID. */
        if (intel_dp_is_edp(intel_dp) ||
            to_intel_connector(connector)->detect_edid)
                status = connector_status_connected;

        intel_dp_check_device_service_irq(intel_dp);

out:
        if (status != connector_status_connected && !intel_dp->is_mst)
                intel_dp_unset_edid(intel_dp);

        /*
         * Make sure the refs for power wells enabled during detect are
         * dropped to avoid a new detect cycle triggered by HPD polling.
         */
        intel_display_power_flush_work(dev_priv);

        if (!intel_dp_is_edp(intel_dp))
                drm_dp_set_subconnector_property(connector,
                                                 status,
                                                 intel_dp->dpcd,
                                                 intel_dp->downstream_ports);
        return status;
}
7157
7158 static void
7159 intel_dp_force(struct drm_connector *connector)
7160 {
7161         struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
7162         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
7163         struct intel_encoder *intel_encoder = &dig_port->base;
7164         struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
7165         enum intel_display_power_domain aux_domain =
7166                 intel_aux_power_domain(dig_port);
7167         intel_wakeref_t wakeref;
7168
7169         drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n",
7170                     connector->base.id, connector->name);
7171         intel_dp_unset_edid(intel_dp);
7172
7173         if (connector->status != connector_status_connected)
7174                 return;
7175
7176         wakeref = intel_display_power_get(dev_priv, aux_domain);
7177
7178         intel_dp_set_edid(intel_dp);
7179
7180         intel_display_power_put(dev_priv, aux_domain, wakeref);
7181 }
7182
/*
 * .get_modes hook: populate the connector's probed mode list.
 * Preference order: modes from the cached EDID, then the eDP fixed panel
 * mode, then (EDID-less DFP sinks) a mode derived from the downstream
 * port's DPCD. Returns the number of modes added.
 */
static int intel_dp_get_modes(struct drm_connector *connector)
{
        struct intel_connector *intel_connector = to_intel_connector(connector);
        struct edid *edid;

        edid = intel_connector->detect_edid;
        if (edid) {
                int ret = intel_connector_update_modes(connector, edid);
                if (ret)
                        return ret;
        }

        /* if eDP has no EDID, fall back to fixed mode */
        if (intel_dp_is_edp(intel_attached_dp(intel_connector)) &&
            intel_connector->panel.fixed_mode) {
                struct drm_display_mode *mode;

                mode = drm_mode_duplicate(connector->dev,
                                          intel_connector->panel.fixed_mode);
                if (mode) {
                        drm_mode_probed_add(connector, mode);
                        return 1;
                }
        }

        /* Last resort: synthesize a mode from the DFP's DPCD limits. */
        if (!edid) {
                struct intel_dp *intel_dp = intel_attached_dp(intel_connector);
                struct drm_display_mode *mode;

                mode = drm_dp_downstream_mode(connector->dev,
                                              intel_dp->dpcd,
                                              intel_dp->downstream_ports);
                if (mode) {
                        drm_mode_probed_add(connector, mode);
                        return 1;
                }
        }

        return 0;
}
7223
/*
 * .late_register hook: register the connector, its AUX channel and CEC
 * adapter with userspace-visible infrastructure, and (if the VBT says an
 * LSPCON is present) initialize it and expose HDR metadata support.
 */
static int
intel_dp_connector_register(struct drm_connector *connector)
{
        struct drm_i915_private *i915 = to_i915(connector->dev);
        struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct intel_lspcon *lspcon = &dig_port->lspcon;
        int ret;

        ret = intel_connector_register(connector);
        if (ret)
                return ret;

        drm_dbg_kms(&i915->drm, "registering %s bus for %s\n",
                    intel_dp->aux.name, connector->kdev->kobj.name);

        /* Parent the AUX device under the connector's kernel device. */
        intel_dp->aux.dev = connector->kdev;
        ret = drm_dp_aux_register(&intel_dp->aux);
        if (!ret)
                drm_dp_cec_register_connector(&intel_dp->aux, connector);

        if (!intel_bios_is_lspcon_present(i915, dig_port->base.port))
                return ret;

        /*
         * ToDo: Clean this up to handle lspcon init and resume more
         * efficiently and streamlined.
         */
        if (lspcon_init(dig_port)) {
                lspcon_detect_hdr_capability(lspcon);
                if (lspcon->hdr_supported)
                        drm_object_attach_property(&connector->base,
                                                   connector->dev->mode_config.hdr_output_metadata_property,
                                                   0);
        }

        /* ret still holds the drm_dp_aux_register() result. */
        return ret;
}
7262
7263 static void
7264 intel_dp_connector_unregister(struct drm_connector *connector)
7265 {
7266         struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
7267
7268         drm_dp_cec_unregister_connector(&intel_dp->aux);
7269         drm_dp_aux_unregister(&intel_dp->aux);
7270         intel_connector_unregister(connector);
7271 }
7272
/*
 * Flush all pending work for this encoder before teardown: MST encoder
 * cleanup, any outstanding eDP vdd-off work, and the AUX channel.
 */
void intel_dp_encoder_flush_work(struct drm_encoder *encoder)
{
        struct intel_digital_port *dig_port = enc_to_dig_port(to_intel_encoder(encoder));
        struct intel_dp *intel_dp = &dig_port->dp;

        intel_dp_mst_encoder_cleanup(dig_port);
        if (intel_dp_is_edp(intel_dp)) {
                intel_wakeref_t wakeref;

                cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
                /*
                 * vdd might still be enabled due to the delayed vdd off.
                 * Make sure vdd is actually turned off here.
                 */
                with_pps_lock(intel_dp, wakeref)
                        edp_panel_vdd_off_sync(intel_dp);
        }

        intel_dp_aux_fini(intel_dp);
}
7293
/* .destroy hook: flush outstanding work, then free the digital port. */
static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
{
        struct intel_digital_port *dig_port =
                enc_to_dig_port(to_intel_encoder(encoder));

        intel_dp_encoder_flush_work(encoder);

        drm_encoder_cleanup(encoder);
        kfree(dig_port);
}
7301
/*
 * System-suspend hook for DP encoders: only eDP needs work, namely making
 * sure the panel vdd is really off (not merely scheduled to turn off).
 */
void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);
        intel_wakeref_t wakeref;

        if (!intel_dp_is_edp(intel_dp))
                return;

        /*
         * vdd might still be enabled due to the delayed vdd off.
         * Make sure vdd is actually turned off here.
         */
        cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
        with_pps_lock(intel_dp, wakeref)
                edp_panel_vdd_off_sync(intel_dp);
}
7318
/*
 * Shutdown hook for DP encoders: for eDP, wait out the panel power cycle
 * delay under the pps lock so a reboot/kexec doesn't violate the panel's
 * power sequencing requirements.
 */
void intel_dp_encoder_shutdown(struct intel_encoder *intel_encoder)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);
        intel_wakeref_t wakeref;

        if (!intel_dp_is_edp(intel_dp))
                return;

        with_pps_lock(intel_dp, wakeref)
                wait_panel_power_cycle(intel_dp);
}
7330
/*
 * Align the driver's vdd tracking with hardware state at boot/resume.
 * If the BIOS left panel vdd enabled, take the matching power-domain
 * reference and schedule a vdd off so the reference isn't held forever.
 * Caller must hold pps_mutex.
 */
static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
{
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);

        lockdep_assert_held(&dev_priv->pps_mutex);

        if (!edp_have_panel_vdd(intel_dp))
                return;

        /*
         * The VDD bit needs a power domain reference, so if the bit is
         * already enabled when we boot or resume, grab this reference and
         * schedule a vdd off, so we don't hold on to the reference
         * indefinitely.
         */
        drm_dbg_kms(&dev_priv->drm,
                    "VDD left on by BIOS, adjusting state tracking\n");
        /* There must not already be a tracked wakeref for vdd. */
        drm_WARN_ON(&dev_priv->drm, intel_dp->vdd_wakeref);
        intel_dp->vdd_wakeref = intel_display_power_get(dev_priv,
                                                        intel_aux_power_domain(dig_port));

        edp_panel_vdd_schedule_off(intel_dp);
}
7355
7356 static enum pipe vlv_active_pipe(struct intel_dp *intel_dp)
7357 {
7358         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
7359         struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
7360         enum pipe pipe;
7361
7362         if (intel_dp_port_enabled(dev_priv, intel_dp->output_reg,
7363                                   encoder->port, &pipe))
7364                 return pipe;
7365
7366         return INVALID_PIPE;
7367 }
7368
/*
 * .reset hook: resync driver state with hardware after a GPU reset or
 * resume. Re-reads the port register (non-DDI), forces a link-parameter
 * reset, and on VLV/CHV/eDP reinitializes the panel power sequencer and
 * vdd tracking under the pps lock.
 */
void intel_dp_encoder_reset(struct drm_encoder *encoder)
{
        struct drm_i915_private *dev_priv = to_i915(encoder->dev);
        struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(encoder));
        intel_wakeref_t wakeref;

        /* DDI platforms don't use the cached DP register value. */
        if (!HAS_DDI(dev_priv))
                intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg);

        intel_dp->reset_link_params = true;

        if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
            !intel_dp_is_edp(intel_dp))
                return;

        with_pps_lock(intel_dp, wakeref) {
                if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                        intel_dp->active_pipe = vlv_active_pipe(intel_dp);

                if (intel_dp_is_edp(intel_dp)) {
                        /*
                         * Reinit the power sequencer, in case BIOS did
                         * something nasty with it.
                         */
                        intel_dp_pps_init(intel_dp);
                        intel_edp_panel_vdd_sanitize(intel_dp);
                }
        }
}
7398
/*
 * Pull every connector belonging to @tile_group_id into @state and mark
 * the CRTCs driving them for a full modeset, so all tiles of a tiled
 * display are reprogrammed together. Returns 0 or a negative error
 * (including -EDEADLK from atomic state acquisition).
 */
static int intel_modeset_tile_group(struct intel_atomic_state *state,
                                    int tile_group_id)
{
        struct drm_i915_private *dev_priv = to_i915(state->base.dev);
        struct drm_connector_list_iter conn_iter;
        struct drm_connector *connector;
        int ret = 0;

        drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
        drm_for_each_connector_iter(connector, &conn_iter) {
                struct drm_connector_state *conn_state;
                struct intel_crtc_state *crtc_state;
                struct intel_crtc *crtc;

                if (!connector->has_tile ||
                    connector->tile_group->id != tile_group_id)
                        continue;

                /* Pulling the connector state also pulls its CRTC's state. */
                conn_state = drm_atomic_get_connector_state(&state->base,
                                                            connector);
                if (IS_ERR(conn_state)) {
                        ret = PTR_ERR(conn_state);
                        break;
                }

                crtc = to_intel_crtc(conn_state->crtc);

                if (!crtc)
                        continue;

                /*
                 * NOTE(review): relies on drm_atomic_get_connector_state()
                 * above having already added the CRTC state, since
                 * intel_atomic_get_new_crtc_state() does not acquire it —
                 * confirm this invariant holds for all paths.
                 */
                crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
                crtc_state->uapi.mode_changed = true;

                ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
                if (ret)
                        break;
        }
        drm_connector_list_iter_end(&conn_iter);

        return ret;
}
7440
/*
 * For every enabled CRTC whose transcoder is in the @transcoders bitmask,
 * pull its state into @state, flag it for a modeset and add its affected
 * connectors and planes. Bits are cleared as they are matched; all bits
 * are expected to be consumed by the end. Returns 0 or a negative error.
 */
static int intel_modeset_affected_transcoders(struct intel_atomic_state *state, u8 transcoders)
{
        struct drm_i915_private *dev_priv = to_i915(state->base.dev);
        struct intel_crtc *crtc;

        if (transcoders == 0)
                return 0;

        for_each_intel_crtc(&dev_priv->drm, crtc) {
                struct intel_crtc_state *crtc_state;
                int ret;

                crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
                if (IS_ERR(crtc_state))
                        return PTR_ERR(crtc_state);

                if (!crtc_state->hw.enable)
                        continue;

                if (!(transcoders & BIT(crtc_state->cpu_transcoder)))
                        continue;

                crtc_state->uapi.mode_changed = true;

                ret = drm_atomic_add_affected_connectors(&state->base, &crtc->base);
                if (ret)
                        return ret;

                ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
                if (ret)
                        return ret;

                /* Each transcoder should be claimed by exactly one CRTC. */
                transcoders &= ~BIT(crtc_state->cpu_transcoder);
        }

        /* Every requested transcoder must have been found above. */
        drm_WARN_ON(&dev_priv->drm, transcoders != 0);

        return 0;
}
7480
7481 static int intel_modeset_synced_crtcs(struct intel_atomic_state *state,
7482                                       struct drm_connector *connector)
7483 {
7484         const struct drm_connector_state *old_conn_state =
7485                 drm_atomic_get_old_connector_state(&state->base, connector);
7486         const struct intel_crtc_state *old_crtc_state;
7487         struct intel_crtc *crtc;
7488         u8 transcoders;
7489
7490         crtc = to_intel_crtc(old_conn_state->crtc);
7491         if (!crtc)
7492                 return 0;
7493
7494         old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc);
7495
7496         if (!old_crtc_state->hw.active)
7497                 return 0;
7498
7499         transcoders = old_crtc_state->sync_mode_slaves_mask;
7500         if (old_crtc_state->master_transcoder != INVALID_TRANSCODER)
7501                 transcoders |= BIT(old_crtc_state->master_transcoder);
7502
7503         return intel_modeset_affected_transcoders(state,
7504                                                   transcoders);
7505 }
7506
/*
 * .atomic_check hook for DP connectors: on top of the generic digital
 * connector check, make sure tiled displays and port-synced CRTC groups
 * are modeset as a unit on gen9+.
 */
static int intel_dp_connector_atomic_check(struct drm_connector *conn,
                                           struct drm_atomic_state *_state)
{
        struct drm_i915_private *dev_priv = to_i915(conn->dev);
        struct intel_atomic_state *state = to_intel_atomic_state(_state);
        int ret;

        ret = intel_digital_connector_atomic_check(conn, &state->base);
        if (ret)
                return ret;

        /*
         * We don't enable port sync on BDW due to missing w/as and
         * due to not having adjusted the modeset sequence appropriately.
         */
        if (INTEL_GEN(dev_priv) < 9)
                return 0;

        /* Nothing to group if this connector isn't changing. */
        if (!intel_connector_needs_modeset(state, conn))
                return 0;

        if (conn->has_tile) {
                ret = intel_modeset_tile_group(state, conn->tile_group->id);
                if (ret)
                        return ret;
        }

        return intel_modeset_synced_crtcs(state, conn);
}
7536
/* Connector vfuncs: detection is done via .detect_ctx in the helper funcs. */
static const struct drm_connector_funcs intel_dp_connector_funcs = {
        .force = intel_dp_force,
        .fill_modes = drm_helper_probe_single_connector_modes,
        .atomic_get_property = intel_digital_connector_atomic_get_property,
        .atomic_set_property = intel_digital_connector_atomic_set_property,
        .late_register = intel_dp_connector_register,
        .early_unregister = intel_dp_connector_unregister,
        .destroy = intel_connector_destroy,
        .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
        .atomic_duplicate_state = intel_digital_connector_duplicate_state,
};

/* Probe-helper vfuncs for DP connectors. */
static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
        .detect_ctx = intel_dp_detect,
        .get_modes = intel_dp_get_modes,
        .mode_valid = intel_dp_mode_valid,
        .atomic_check = intel_dp_connector_atomic_check,
};

/* Encoder vfuncs for DP encoders. */
static const struct drm_encoder_funcs intel_dp_enc_funcs = {
        .reset = intel_dp_encoder_reset,
        .destroy = intel_dp_encoder_destroy,
};
7560
7561 static bool intel_edp_have_power(struct intel_dp *intel_dp)
7562 {
7563         intel_wakeref_t wakeref;
7564         bool have_power = false;
7565
7566         with_pps_lock(intel_dp, wakeref) {
7567                 have_power = edp_have_panel_power(intel_dp) &&
7568                                                   edp_have_panel_vdd(intel_dp);
7569         }
7570
7571         return have_power;
7572 }
7573
/*
 * HPD pulse handler for DP ports. Short pulses are serviced inline
 * (MST sideband or SST short-pulse handling); a long pulse flags the link
 * parameters for reset and returns IRQ_NONE so the caller schedules a
 * full detect. eDP pulses arriving with panel power down are swallowed
 * to avoid a vdd on/off feedback loop.
 */
enum irqreturn
intel_dp_hpd_pulse(struct intel_digital_port *dig_port, bool long_hpd)
{
        struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
        struct intel_dp *intel_dp = &dig_port->dp;

        if (dig_port->base.type == INTEL_OUTPUT_EDP &&
            (long_hpd || !intel_edp_have_power(intel_dp))) {
                /*
                 * vdd off can generate a long/short pulse on eDP which
                 * would require vdd on to handle it, and thus we
                 * would end up in an endless cycle of
                 * "vdd off -> long/short hpd -> vdd on -> detect -> vdd off -> ..."
                 */
                drm_dbg_kms(&i915->drm,
                            "ignoring %s hpd on eDP [ENCODER:%d:%s]\n",
                            long_hpd ? "long" : "short",
                            dig_port->base.base.base.id,
                            dig_port->base.base.name);
                return IRQ_HANDLED;
        }

        drm_dbg_kms(&i915->drm, "got hpd irq on [ENCODER:%d:%s] - %s\n",
                    dig_port->base.base.base.id,
                    dig_port->base.base.name,
                    long_hpd ? "long" : "short");

        if (long_hpd) {
                /* IRQ_NONE tells the caller to run a full detect cycle. */
                intel_dp->reset_link_params = true;
                return IRQ_NONE;
        }

        if (intel_dp->is_mst) {
                if (!intel_dp_check_mst_status(intel_dp))
                        return IRQ_NONE;
        } else if (!intel_dp_short_pulse(intel_dp)) {
                return IRQ_NONE;
        }

        return IRQ_HANDLED;
}
7615
/* check the VBT to see whether the eDP is on another port */
bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port)
{
        /*
         * eDP not supported on g4x. so bail out early just
         * for a bit extra safety in case the VBT is bonkers.
         */
        if (INTEL_GEN(dev_priv) < 5)
                return false;

        /* Before gen9, port A is treated as eDP unconditionally. */
        if (INTEL_GEN(dev_priv) < 9 && port == PORT_A)
                return true;

        /* Otherwise defer to the VBT's per-port eDP flag. */
        return intel_bios_is_port_edp(dev_priv, port);
}
7631
7632 static void
7633 intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
7634 {
7635         struct drm_i915_private *dev_priv = to_i915(connector->dev);
7636         enum port port = dp_to_dig_port(intel_dp)->base.port;
7637
7638         if (!intel_dp_is_edp(intel_dp))
7639                 drm_connector_attach_dp_subconnector_property(connector);
7640
7641         if (!IS_G4X(dev_priv) && port != PORT_A)
7642                 intel_attach_force_audio_property(connector);
7643
7644         intel_attach_broadcast_rgb_property(connector);
7645         if (HAS_GMCH(dev_priv))
7646                 drm_connector_attach_max_bpc_property(connector, 6, 10);
7647         else if (INTEL_GEN(dev_priv) >= 5)
7648                 drm_connector_attach_max_bpc_property(connector, 6, 12);
7649
7650         /* Register HDMI colorspace for case of lspcon */
7651         if (intel_bios_is_lspcon_present(dev_priv, port)) {
7652                 drm_connector_attach_content_type_property(connector);
7653                 intel_attach_hdmi_colorspace_property(connector);
7654         } else {
7655                 intel_attach_dp_colorspace_property(connector);
7656         }
7657
7658         if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 11)
7659                 drm_object_attach_property(&connector->base,
7660                                            connector->dev->mode_config.hdr_output_metadata_property,
7661                                            0);
7662
7663         if (intel_dp_is_edp(intel_dp)) {
7664                 u32 allowed_scalers;
7665
7666                 allowed_scalers = BIT(DRM_MODE_SCALE_ASPECT) | BIT(DRM_MODE_SCALE_FULLSCREEN);
7667                 if (!HAS_GMCH(dev_priv))
7668                         allowed_scalers |= BIT(DRM_MODE_SCALE_CENTER);
7669
7670                 drm_connector_attach_scaling_mode_property(connector, allowed_scalers);
7671
7672                 connector->state->scaling_mode = DRM_MODE_SCALE_ASPECT;
7673
7674         }
7675 }
7676
7677 static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
7678 {
7679         intel_dp->panel_power_off_time = ktime_get_boottime();
7680         intel_dp->last_power_on = jiffies;
7681         intel_dp->last_backlight_off = jiffies;
7682 }
7683
/*
 * Read the panel power sequencer delays currently programmed in the PPS
 * registers into @seq.  The per-field values are whatever the hardware
 * register fields hold; t11_t12 is scaled up by 1000 so it matches the
 * units used by the sw/VBT delay tables (see
 * intel_dp_init_panel_power_sequencer()).
 */
static void
intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 pp_on, pp_off, pp_ctl;
	struct pps_registers regs;

	intel_pps_get_registers(intel_dp, &regs);

	pp_ctl = ilk_get_pp_control(intel_dp);

	/* Ensure PPS is unlocked by writing back the control value */
	if (!HAS_DDI(dev_priv))
		intel_de_write(dev_priv, regs.pp_ctrl, pp_ctl);

	pp_on = intel_de_read(dev_priv, regs.pp_on);
	pp_off = intel_de_read(dev_priv, regs.pp_off);

	/* Pull timing values out of registers */
	seq->t1_t3 = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, pp_on);
	seq->t8 = REG_FIELD_GET(PANEL_LIGHT_ON_DELAY_MASK, pp_on);
	seq->t9 = REG_FIELD_GET(PANEL_LIGHT_OFF_DELAY_MASK, pp_off);
	seq->t10 = REG_FIELD_GET(PANEL_POWER_DOWN_DELAY_MASK, pp_off);

	/*
	 * The power cycle delay lives in a dedicated PP_DIV register where
	 * that register exists; otherwise it is packed into PP_CONTROL
	 * (BXT_POWER_CYCLE_DELAY_MASK).
	 */
	if (i915_mmio_reg_valid(regs.pp_div)) {
		u32 pp_div;

		pp_div = intel_de_read(dev_priv, regs.pp_div);

		seq->t11_t12 = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK, pp_div) * 1000;
	} else {
		seq->t11_t12 = REG_FIELD_GET(BXT_POWER_CYCLE_DELAY_MASK, pp_ctl) * 1000;
	}
}
7718
/*
 * Log one set of panel power sequencer delays, tagged with @state_name
 * (callers use "cur", "vbt", "sw", "hw").
 */
static void
intel_pps_dump_state(const char *state_name, const struct edp_power_seq *seq)
{
	DRM_DEBUG_KMS("%s t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      state_name,
		      seq->t1_t3, seq->t8, seq->t9, seq->t10, seq->t11_t12);
}
7726
7727 static void
7728 intel_pps_verify_state(struct intel_dp *intel_dp)
7729 {
7730         struct edp_power_seq hw;
7731         struct edp_power_seq *sw = &intel_dp->pps_delays;
7732
7733         intel_pps_readout_hw_state(intel_dp, &hw);
7734
7735         if (hw.t1_t3 != sw->t1_t3 || hw.t8 != sw->t8 || hw.t9 != sw->t9 ||
7736             hw.t10 != sw->t10 || hw.t11_t12 != sw->t11_t12) {
7737                 DRM_ERROR("PPS state mismatch\n");
7738                 intel_pps_dump_state("sw", sw);
7739                 intel_pps_dump_state("hw", &hw);
7740         }
7741 }
7742
/*
 * Compute the final panel power sequencer delays by merging the current
 * hardware state, the VBT values and the eDP spec limits, then derive the
 * millisecond delay values used by the software waits.  Runs only once
 * per intel_dp (guarded by the t11_t12 != 0 check below); must be called
 * with pps_mutex held.
 */
static void
intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct edp_power_seq cur, vbt, spec,
		*final = &intel_dp->pps_delays;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* already initialized? */
	if (final->t11_t12 != 0)
		return;

	intel_pps_readout_hw_state(intel_dp, &cur);

	intel_pps_dump_state("cur", &cur);

	vbt = dev_priv->vbt.edp.pps;
	/* On Toshiba Satellite P50-C-18C system the VBT T12 delay
	 * of 500ms appears to be too short. Occasionally the panel
	 * just fails to power back on. Increasing the delay to 800ms
	 * seems sufficient to avoid this problem.
	 */
	if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) {
		vbt.t11_t12 = max_t(u16, vbt.t11_t12, 1300 * 10);
		drm_dbg_kms(&dev_priv->drm,
			    "Increasing T12 panel delay as per the quirk to %d\n",
			    vbt.t11_t12);
	}
	/* T11_T12 delay is special and actually in units of 100ms, but zero
	 * based in the hw (so we need to add 100 ms). But the sw vbt
	 * table multiplies it with 1000 to make it in units of 100usec,
	 * too. */
	vbt.t11_t12 += 100 * 10;

	/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
	 * our hw here, which are all in 100usec. */
	spec.t1_t3 = 210 * 10;
	spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
	spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
	spec.t10 = 500 * 10;
	/* This one is special and actually in units of 100ms, but zero
	 * based in the hw (so we need to add 100 ms). But the sw vbt
	 * table multiplies it with 1000 to make it in units of 100usec,
	 * too. */
	spec.t11_t12 = (510 + 100) * 10;

	intel_pps_dump_state("vbt", &vbt);

	/* Use the max of the register settings and vbt. If both are
	 * unset, fall back to the spec limits. */
#define assign_final(field)	final->field = (max(cur.field, vbt.field) == 0 ? \
				       spec.field : \
				       max(cur.field, vbt.field))
	assign_final(t1_t3);
	assign_final(t8);
	assign_final(t9);
	assign_final(t10);
	assign_final(t11_t12);
#undef assign_final

	/* Convert from 100usec hardware units to milliseconds, rounding up. */
#define get_delay(field)	(DIV_ROUND_UP(final->field, 10))
	intel_dp->panel_power_up_delay = get_delay(t1_t3);
	intel_dp->backlight_on_delay = get_delay(t8);
	intel_dp->backlight_off_delay = get_delay(t9);
	intel_dp->panel_power_down_delay = get_delay(t10);
	intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
#undef get_delay

	drm_dbg_kms(&dev_priv->drm,
		    "panel power up delay %d, power down delay %d, power cycle delay %d\n",
		    intel_dp->panel_power_up_delay,
		    intel_dp->panel_power_down_delay,
		    intel_dp->panel_power_cycle_delay);

	drm_dbg_kms(&dev_priv->drm, "backlight on delay %d, off delay %d\n",
		    intel_dp->backlight_on_delay,
		    intel_dp->backlight_off_delay);

	/*
	 * We override the HW backlight delays to 1 because we do manual waits
	 * on them. For T8, even BSpec recommends doing it. For T9, if we
	 * don't do this, we'll end up waiting for the backlight off delay
	 * twice: once when we do the manual sleep, and once when we disable
	 * the panel and wait for the PP_STATUS bit to become zero.
	 */
	final->t8 = 1;
	final->t9 = 1;

	/*
	 * HW has only a 100msec granularity for t11_t12 so round it up
	 * accordingly.
	 */
	final->t11_t12 = roundup(final->t11_t12, 100 * 10);
}
7838
/*
 * Program the computed pps_delays into the panel power sequencer
 * registers (PP_ON/PP_OFF/PP_DIV or PP_CONTROL), including the port
 * select bits where the platform has them.  Must be called with
 * pps_mutex held.  @force_disable_vdd clears a BIOS-left VDD force
 * before programming (see the comment below).
 */
static void
intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
					      bool force_disable_vdd)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 pp_on, pp_off, port_sel = 0;
	int div = RUNTIME_INFO(dev_priv)->rawclk_freq / 1000;
	struct pps_registers regs;
	enum port port = dp_to_dig_port(intel_dp)->base.port;
	const struct edp_power_seq *seq = &intel_dp->pps_delays;

	lockdep_assert_held(&dev_priv->pps_mutex);

	intel_pps_get_registers(intel_dp, &regs);

	/*
	 * On some VLV machines the BIOS can leave the VDD
	 * enabled even on power sequencers which aren't
	 * hooked up to any port. This would mess up the
	 * power domain tracking the first time we pick
	 * one of these power sequencers for use since
	 * edp_panel_vdd_on() would notice that the VDD was
	 * already on and therefore wouldn't grab the power
	 * domain reference. Disable VDD first to avoid this.
	 * This also avoids spuriously turning the VDD on as
	 * soon as the new power sequencer gets initialized.
	 */
	if (force_disable_vdd) {
		u32 pp = ilk_get_pp_control(intel_dp);

		drm_WARN(&dev_priv->drm, pp & PANEL_POWER_ON,
			 "Panel power already on\n");

		if (pp & EDP_FORCE_VDD)
			drm_dbg_kms(&dev_priv->drm,
				    "VDD already on, disabling first\n");

		pp &= ~EDP_FORCE_VDD;

		intel_de_write(dev_priv, regs.pp_ctrl, pp);
	}

	pp_on = REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, seq->t1_t3) |
		REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, seq->t8);
	pp_off = REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, seq->t9) |
		REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, seq->t10);

	/* Haswell doesn't have any port selection bits for the panel
	 * power sequencer any more. */
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		port_sel = PANEL_PORT_SELECT_VLV(port);
	} else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
		switch (port) {
		case PORT_A:
			port_sel = PANEL_PORT_SELECT_DPA;
			break;
		case PORT_C:
			port_sel = PANEL_PORT_SELECT_DPC;
			break;
		case PORT_D:
			port_sel = PANEL_PORT_SELECT_DPD;
			break;
		default:
			MISSING_CASE(port);
			break;
		}
	}

	pp_on |= port_sel;

	intel_de_write(dev_priv, regs.pp_on, pp_on);
	intel_de_write(dev_priv, regs.pp_off, pp_off);

	/*
	 * Compute the divisor for the pp clock, simply match the Bspec formula.
	 * Where PP_DIV doesn't exist, the power cycle delay is packed into
	 * PP_CONTROL instead.
	 */
	if (i915_mmio_reg_valid(regs.pp_div)) {
		intel_de_write(dev_priv, regs.pp_div,
			       REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, (100 * div) / 2 - 1) | REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000)));
	} else {
		u32 pp_ctl;

		pp_ctl = intel_de_read(dev_priv, regs.pp_ctrl);
		pp_ctl &= ~BXT_POWER_CYCLE_DELAY_MASK;
		pp_ctl |= REG_FIELD_PREP(BXT_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000));
		intel_de_write(dev_priv, regs.pp_ctrl, pp_ctl);
	}

	drm_dbg_kms(&dev_priv->drm,
		    "panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
		    intel_de_read(dev_priv, regs.pp_on),
		    intel_de_read(dev_priv, regs.pp_off),
		    i915_mmio_reg_valid(regs.pp_div) ?
		    intel_de_read(dev_priv, regs.pp_div) :
		    (intel_de_read(dev_priv, regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK));
}
7935
7936 static void intel_dp_pps_init(struct intel_dp *intel_dp)
7937 {
7938         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
7939
7940         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
7941                 vlv_initial_power_sequencer_setup(intel_dp);
7942         } else {
7943                 intel_dp_init_panel_power_sequencer(intel_dp);
7944                 intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
7945         }
7946 }
7947
7948 /**
7949  * intel_dp_set_drrs_state - program registers for RR switch to take effect
7950  * @dev_priv: i915 device
7951  * @crtc_state: a pointer to the active intel_crtc_state
7952  * @refresh_rate: RR to be programmed
7953  *
7954  * This function gets called when refresh rate (RR) has to be changed from
7955  * one frequency to another. Switches can be between high and low RR
7956  * supported by the panel or to any other RR based on media playback (in
7957  * this case, RR value needs to be passed from user space).
7958  *
7959  * The caller of this function needs to take a lock on dev_priv->drrs.
7960  */
7961 static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
7962                                     const struct intel_crtc_state *crtc_state,
7963                                     int refresh_rate)
7964 {
7965         struct intel_dp *intel_dp = dev_priv->drrs.dp;
7966         struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
7967         enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
7968
7969         if (refresh_rate <= 0) {
7970                 drm_dbg_kms(&dev_priv->drm,
7971                             "Refresh rate should be positive non-zero.\n");
7972                 return;
7973         }
7974
7975         if (intel_dp == NULL) {
7976                 drm_dbg_kms(&dev_priv->drm, "DRRS not supported.\n");
7977                 return;
7978         }
7979
7980         if (!intel_crtc) {
7981                 drm_dbg_kms(&dev_priv->drm,
7982                             "DRRS: intel_crtc not initialized\n");
7983                 return;
7984         }
7985
7986         if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
7987                 drm_dbg_kms(&dev_priv->drm, "Only Seamless DRRS supported.\n");
7988                 return;
7989         }
7990
7991         if (drm_mode_vrefresh(intel_dp->attached_connector->panel.downclock_mode) ==
7992                         refresh_rate)
7993                 index = DRRS_LOW_RR;
7994
7995         if (index == dev_priv->drrs.refresh_rate_type) {
7996                 drm_dbg_kms(&dev_priv->drm,
7997                             "DRRS requested for previously set RR...ignoring\n");
7998                 return;
7999         }
8000
8001         if (!crtc_state->hw.active) {
8002                 drm_dbg_kms(&dev_priv->drm,
8003                             "eDP encoder disabled. CRTC not Active\n");
8004                 return;
8005         }
8006
8007         if (INTEL_GEN(dev_priv) >= 8 && !IS_CHERRYVIEW(dev_priv)) {
8008                 switch (index) {
8009                 case DRRS_HIGH_RR:
8010                         intel_dp_set_m_n(crtc_state, M1_N1);
8011                         break;
8012                 case DRRS_LOW_RR:
8013                         intel_dp_set_m_n(crtc_state, M2_N2);
8014                         break;
8015                 case DRRS_MAX_RR:
8016                 default:
8017                         drm_err(&dev_priv->drm,
8018                                 "Unsupported refreshrate type\n");
8019                 }
8020         } else if (INTEL_GEN(dev_priv) > 6) {
8021                 i915_reg_t reg = PIPECONF(crtc_state->cpu_transcoder);
8022                 u32 val;
8023
8024                 val = intel_de_read(dev_priv, reg);
8025                 if (index > DRRS_HIGH_RR) {
8026                         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
8027                                 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
8028                         else
8029                                 val |= PIPECONF_EDP_RR_MODE_SWITCH;
8030                 } else {
8031                         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
8032                                 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
8033                         else
8034                                 val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
8035                 }
8036                 intel_de_write(dev_priv, reg, val);
8037         }
8038
8039         dev_priv->drrs.refresh_rate_type = index;
8040
8041         drm_dbg_kms(&dev_priv->drm, "eDP Refresh Rate set to : %dHz\n",
8042                     refresh_rate);
8043 }
8044
/*
 * Mark DRRS as enabled for @intel_dp: reset the busy frontbuffer tracking
 * and record this DP as the DRRS source.  Called with drrs.mutex held
 * (see callers).
 */
static void
intel_edp_drrs_enable_locked(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	dev_priv->drrs.busy_frontbuffer_bits = 0;
	dev_priv->drrs.dp = intel_dp;
}
8053
8054 /**
8055  * intel_edp_drrs_enable - init drrs struct if supported
8056  * @intel_dp: DP struct
8057  * @crtc_state: A pointer to the active crtc state.
8058  *
8059  * Initializes frontbuffer_bits and drrs.dp
8060  */
8061 void intel_edp_drrs_enable(struct intel_dp *intel_dp,
8062                            const struct intel_crtc_state *crtc_state)
8063 {
8064         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
8065
8066         if (!crtc_state->has_drrs)
8067                 return;
8068
8069         drm_dbg_kms(&dev_priv->drm, "Enabling DRRS\n");
8070
8071         mutex_lock(&dev_priv->drrs.mutex);
8072
8073         if (dev_priv->drrs.dp) {
8074                 drm_warn(&dev_priv->drm, "DRRS already enabled\n");
8075                 goto unlock;
8076         }
8077
8078         intel_edp_drrs_enable_locked(intel_dp);
8079
8080 unlock:
8081         mutex_unlock(&dev_priv->drrs.mutex);
8082 }
8083
8084 static void
8085 intel_edp_drrs_disable_locked(struct intel_dp *intel_dp,
8086                               const struct intel_crtc_state *crtc_state)
8087 {
8088         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
8089
8090         if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) {
8091                 int refresh;
8092
8093                 refresh = drm_mode_vrefresh(intel_dp->attached_connector->panel.fixed_mode);
8094                 intel_dp_set_drrs_state(dev_priv, crtc_state, refresh);
8095         }
8096
8097         dev_priv->drrs.dp = NULL;
8098 }
8099
8100 /**
8101  * intel_edp_drrs_disable - Disable DRRS
8102  * @intel_dp: DP struct
8103  * @old_crtc_state: Pointer to old crtc_state.
8104  *
8105  */
8106 void intel_edp_drrs_disable(struct intel_dp *intel_dp,
8107                             const struct intel_crtc_state *old_crtc_state)
8108 {
8109         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
8110
8111         if (!old_crtc_state->has_drrs)
8112                 return;
8113
8114         mutex_lock(&dev_priv->drrs.mutex);
8115         if (!dev_priv->drrs.dp) {
8116                 mutex_unlock(&dev_priv->drrs.mutex);
8117                 return;
8118         }
8119
8120         intel_edp_drrs_disable_locked(intel_dp, old_crtc_state);
8121         mutex_unlock(&dev_priv->drrs.mutex);
8122
8123         cancel_delayed_work_sync(&dev_priv->drrs.work);
8124 }
8125
8126 /**
8127  * intel_edp_drrs_update - Update DRRS state
8128  * @intel_dp: Intel DP
8129  * @crtc_state: new CRTC state
8130  *
8131  * This function will update DRRS states, disabling or enabling DRRS when
8132  * executing fastsets. For full modeset, intel_edp_drrs_disable() and
8133  * intel_edp_drrs_enable() should be called instead.
8134  */
8135 void
8136 intel_edp_drrs_update(struct intel_dp *intel_dp,
8137                       const struct intel_crtc_state *crtc_state)
8138 {
8139         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
8140
8141         if (dev_priv->drrs.type != SEAMLESS_DRRS_SUPPORT)
8142                 return;
8143
8144         mutex_lock(&dev_priv->drrs.mutex);
8145
8146         /* New state matches current one? */
8147         if (crtc_state->has_drrs == !!dev_priv->drrs.dp)
8148                 goto unlock;
8149
8150         if (crtc_state->has_drrs)
8151                 intel_edp_drrs_enable_locked(intel_dp);
8152         else
8153                 intel_edp_drrs_disable_locked(intel_dp, crtc_state);
8154
8155 unlock:
8156         mutex_unlock(&dev_priv->drrs.mutex);
8157 }
8158
8159 static void intel_edp_drrs_downclock_work(struct work_struct *work)
8160 {
8161         struct drm_i915_private *dev_priv =
8162                 container_of(work, typeof(*dev_priv), drrs.work.work);
8163         struct intel_dp *intel_dp;
8164
8165         mutex_lock(&dev_priv->drrs.mutex);
8166
8167         intel_dp = dev_priv->drrs.dp;
8168
8169         if (!intel_dp)
8170                 goto unlock;
8171
8172         /*
8173          * The delayed work can race with an invalidate hence we need to
8174          * recheck.
8175          */
8176
8177         if (dev_priv->drrs.busy_frontbuffer_bits)
8178                 goto unlock;
8179
8180         if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR) {
8181                 struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
8182
8183                 intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
8184                         drm_mode_vrefresh(intel_dp->attached_connector->panel.downclock_mode));
8185         }
8186
8187 unlock:
8188         mutex_unlock(&dev_priv->drrs.mutex);
8189 }
8190
8191 /**
8192  * intel_edp_drrs_invalidate - Disable Idleness DRRS
8193  * @dev_priv: i915 device
8194  * @frontbuffer_bits: frontbuffer plane tracking bits
8195  *
8196  * This function gets called everytime rendering on the given planes start.
8197  * Hence DRRS needs to be Upclocked, i.e. (LOW_RR -> HIGH_RR).
8198  *
8199  * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
8200  */
8201 void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv,
8202                                unsigned int frontbuffer_bits)
8203 {
8204         struct intel_dp *intel_dp;
8205         struct drm_crtc *crtc;
8206         enum pipe pipe;
8207
8208         if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
8209                 return;
8210
8211         cancel_delayed_work(&dev_priv->drrs.work);
8212
8213         mutex_lock(&dev_priv->drrs.mutex);
8214
8215         intel_dp = dev_priv->drrs.dp;
8216         if (!intel_dp) {
8217                 mutex_unlock(&dev_priv->drrs.mutex);
8218                 return;
8219         }
8220
8221         crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
8222         pipe = to_intel_crtc(crtc)->pipe;
8223
8224         frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
8225         dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
8226
8227         /* invalidate means busy screen hence upclock */
8228         if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
8229                 intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
8230                                         drm_mode_vrefresh(intel_dp->attached_connector->panel.fixed_mode));
8231
8232         mutex_unlock(&dev_priv->drrs.mutex);
8233 }
8234
8235 /**
8236  * intel_edp_drrs_flush - Restart Idleness DRRS
8237  * @dev_priv: i915 device
8238  * @frontbuffer_bits: frontbuffer plane tracking bits
8239  *
8240  * This function gets called every time rendering on the given planes has
8241  * completed or flip on a crtc is completed. So DRRS should be upclocked
8242  * (LOW_RR -> HIGH_RR). And also Idleness detection should be started again,
8243  * if no other planes are dirty.
8244  *
8245  * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
8246  */
8247 void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
8248                           unsigned int frontbuffer_bits)
8249 {
8250         struct intel_dp *intel_dp;
8251         struct drm_crtc *crtc;
8252         enum pipe pipe;
8253
8254         if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
8255                 return;
8256
8257         cancel_delayed_work(&dev_priv->drrs.work);
8258
8259         mutex_lock(&dev_priv->drrs.mutex);
8260
8261         intel_dp = dev_priv->drrs.dp;
8262         if (!intel_dp) {
8263                 mutex_unlock(&dev_priv->drrs.mutex);
8264                 return;
8265         }
8266
8267         crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
8268         pipe = to_intel_crtc(crtc)->pipe;
8269
8270         frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
8271         dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
8272
8273         /* flush means busy screen hence upclock */
8274         if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
8275                 intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
8276                                         drm_mode_vrefresh(intel_dp->attached_connector->panel.fixed_mode));
8277
8278         /*
8279          * flush also means no more activity hence schedule downclock, if all
8280          * other fbs are quiescent too
8281          */
8282         if (!dev_priv->drrs.busy_frontbuffer_bits)
8283                 schedule_delayed_work(&dev_priv->drrs.work,
8284                                 msecs_to_jiffies(1000));
8285         mutex_unlock(&dev_priv->drrs.mutex);
8286 }
8287
8288 /**
8289  * DOC: Display Refresh Rate Switching (DRRS)
8290  *
8291  * Display Refresh Rate Switching (DRRS) is a power conservation feature
 * which enables switching between low and high refresh rates,
8293  * dynamically, based on the usage scenario. This feature is applicable
8294  * for internal panels.
8295  *
8296  * Indication that the panel supports DRRS is given by the panel EDID, which
8297  * would list multiple refresh rates for one resolution.
8298  *
8299  * DRRS is of 2 types - static and seamless.
8300  * Static DRRS involves changing refresh rate (RR) by doing a full modeset
8301  * (may appear as a blink on screen) and is used in dock-undock scenario.
8302  * Seamless DRRS involves changing RR without any visual effect to the user
8303  * and can be used during normal system usage. This is done by programming
8304  * certain registers.
8305  *
8306  * Support for static/seamless DRRS may be indicated in the VBT based on
8307  * inputs from the panel spec.
8308  *
8309  * DRRS saves power by switching to low RR based on usage scenarios.
8310  *
8311  * The implementation is based on frontbuffer tracking implementation.  When
8312  * there is a disturbance on the screen triggered by user activity or a periodic
8313  * system activity, DRRS is disabled (RR is changed to high RR).  When there is
8314  * no movement on screen, after a timeout of 1 second, a switch to low RR is
8315  * made.
8316  *
8317  * For integration with frontbuffer tracking code, intel_edp_drrs_invalidate()
8318  * and intel_edp_drrs_flush() are called.
8319  *
8320  * DRRS can be further extended to support other internal panels and also
8321  * the scenario of video playback wherein RR is set based on the rate
8322  * requested by userspace.
8323  */
8324
8325 /**
8326  * intel_dp_drrs_init - Init basic DRRS work and mutex.
8327  * @connector: eDP connector
8328  * @fixed_mode: preferred mode of panel
8329  *
8330  * This function is  called only once at driver load to initialize basic
8331  * DRRS stuff.
8332  *
8333  * Returns:
8334  * Downclock mode if panel supports it, else return NULL.
8335  * DRRS support is determined by the presence of downclock mode (apart
8336  * from VBT setting).
8337  */
8338 static struct drm_display_mode *
8339 intel_dp_drrs_init(struct intel_connector *connector,
8340                    struct drm_display_mode *fixed_mode)
8341 {
8342         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
8343         struct drm_display_mode *downclock_mode = NULL;
8344
8345         INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
8346         mutex_init(&dev_priv->drrs.mutex);
8347
8348         if (INTEL_GEN(dev_priv) <= 6) {
8349                 drm_dbg_kms(&dev_priv->drm,
8350                             "DRRS supported for Gen7 and above\n");
8351                 return NULL;
8352         }
8353
8354         if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
8355                 drm_dbg_kms(&dev_priv->drm, "VBT doesn't support DRRS\n");
8356                 return NULL;
8357         }
8358
8359         downclock_mode = intel_panel_edid_downclock_mode(connector, fixed_mode);
8360         if (!downclock_mode) {
8361                 drm_dbg_kms(&dev_priv->drm,
8362                             "Downclock mode is not found. DRRS not supported\n");
8363                 return NULL;
8364         }
8365
8366         dev_priv->drrs.type = dev_priv->vbt.drrs_type;
8367
8368         dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
8369         drm_dbg_kms(&dev_priv->drm,
8370                     "seamless DRRS supported for eDP panel.\n");
8371         return downclock_mode;
8372 }
8373
/*
 * Probe and set up the eDP panel behind this DP port: panel power sequencer,
 * DPCD, cached EDID, fixed/downclock modes and backlight.
 *
 * Returns true when the connector is usable (trivially so for non-eDP ports,
 * which need no panel setup), false when the eDP panel could not be probed
 * and the connector must not be registered.
 */
static bool intel_edp_init_connector(struct intel_dp *intel_dp,
				     struct intel_connector *intel_connector)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_connector *connector = &intel_connector->base;
	struct drm_display_mode *fixed_mode = NULL;
	struct drm_display_mode *downclock_mode = NULL;
	bool has_dpcd;
	enum pipe pipe = INVALID_PIPE;
	intel_wakeref_t wakeref;
	struct edid *edid;

	/* Nothing to do for external (non-eDP) DP connectors. */
	if (!intel_dp_is_edp(intel_dp))
		return true;

	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work, edp_panel_vdd_work);

	/*
	 * On IBX/CPT we may get here with LVDS already registered. Since the
	 * driver uses the only internal power sequencer available for both
	 * eDP and LVDS bail out early in this case to prevent interfering
	 * with an already powered-on LVDS power sequencer.
	 */
	if (intel_get_lvds_encoder(dev_priv)) {
		drm_WARN_ON(dev,
			    !(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
		drm_info(&dev_priv->drm,
			 "LVDS was detected, not registering eDP\n");

		return false;
	}

	/* Bring up the panel power sequencer state under the PPS lock. */
	with_pps_lock(intel_dp, wakeref) {
		intel_dp_init_panel_power_timestamps(intel_dp);
		intel_dp_pps_init(intel_dp);
		intel_edp_panel_vdd_sanitize(intel_dp);
	}

	/* Cache DPCD and EDID for edp. */
	has_dpcd = intel_edp_init_dpcd(intel_dp);

	if (!has_dpcd) {
		/* if this fails, presume the device is a ghost */
		drm_info(&dev_priv->drm,
			 "failed to retrieve link info, disabling eDP\n");
		goto out_vdd_off;
	}

	mutex_lock(&dev->mode_config.mutex);
	edid = drm_get_edid(connector, &intel_dp->aux.ddc);
	if (edid) {
		if (drm_add_edid_modes(connector, edid)) {
			drm_connector_update_edid_property(connector, edid);
			intel_dp->edid_quirks = drm_dp_get_edid_quirks(edid);
		} else {
			/* EDID was read but contained no usable modes. */
			kfree(edid);
			edid = ERR_PTR(-EINVAL);
		}
	} else {
		edid = ERR_PTR(-ENOENT);
	}
	/* Cache the EDID (or an ERR_PTR sentinel) for later detect cycles. */
	intel_connector->edid = edid;

	fixed_mode = intel_panel_edid_fixed_mode(intel_connector);
	if (fixed_mode)
		downclock_mode = intel_dp_drrs_init(intel_connector, fixed_mode);

	/* fallback to VBT if available for eDP */
	if (!fixed_mode)
		fixed_mode = intel_panel_vbt_fixed_mode(intel_connector);
	mutex_unlock(&dev->mode_config.mutex);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		/*
		 * Figure out the current pipe for the initial backlight setup.
		 * If the current pipe isn't valid, try the PPS pipe, and if that
		 * fails just assume pipe A.
		 */
		pipe = vlv_active_pipe(intel_dp);

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = intel_dp->pps_pipe;

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = PIPE_A;

		drm_dbg_kms(&dev_priv->drm,
			    "using pipe %c for initial backlight setup\n",
			    pipe_name(pipe));
	}

	intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
	intel_connector->panel.backlight.power = intel_edp_backlight_power;
	intel_panel_setup_backlight(connector, pipe);

	if (fixed_mode) {
		/* Apply any VBT-specified panel orientation quirk. */
		drm_connector_set_panel_orientation_with_quirk(connector,
				dev_priv->vbt.orientation,
				fixed_mode->hdisplay, fixed_mode->vdisplay);
	}

	return true;

out_vdd_off:
	cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
	/*
	 * vdd might still be enabled due to the delayed vdd off.
	 * Make sure vdd is actually turned off here.
	 */
	with_pps_lock(intel_dp, wakeref)
		edp_panel_vdd_off_sync(intel_dp);

	return false;
}
8489
8490 static void intel_dp_modeset_retry_work_fn(struct work_struct *work)
8491 {
8492         struct intel_connector *intel_connector;
8493         struct drm_connector *connector;
8494
8495         intel_connector = container_of(work, typeof(*intel_connector),
8496                                        modeset_retry_work);
8497         connector = &intel_connector->base;
8498         DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id,
8499                       connector->name);
8500
8501         /* Grab the locks before changing connector property*/
8502         mutex_lock(&connector->dev->mode_config.mutex);
8503         /* Set connector link status to BAD and send a Uevent to notify
8504          * userspace to do a modeset.
8505          */
8506         drm_connector_set_link_status_property(connector,
8507                                                DRM_MODE_LINK_STATUS_BAD);
8508         mutex_unlock(&connector->dev->mode_config.mutex);
8509         /* Send Hotplug uevent so userspace can reprobe */
8510         drm_kms_helper_hotplug_event(connector->dev);
8511 }
8512
/*
 * Initialize the DRM connector for a DP/eDP digital port: connector type
 * selection, AUX channel, hw-state hooks, MST and (for eDP) the panel.
 *
 * Returns true on success; false if the port configuration is invalid or
 * eDP panel probing failed, in which case the connector is cleaned up.
 */
bool
intel_dp_init_connector(struct intel_digital_port *dig_port,
			struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_dp *intel_dp = &dig_port->dp;
	struct intel_encoder *intel_encoder = &dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum port port = intel_encoder->port;
	enum phy phy = intel_port_to_phy(dev_priv, port);
	int type;

	/* Initialize the work for modeset in case of link train failure */
	INIT_WORK(&intel_connector->modeset_retry_work,
		  intel_dp_modeset_retry_work_fn);

	if (drm_WARN(dev, dig_port->max_lanes < 1,
		     "Not enough lanes (%d) for DP on [ENCODER:%d:%s]\n",
		     dig_port->max_lanes, intel_encoder->base.base.id,
		     intel_encoder->base.name))
		return false;

	intel_dp_set_source_rates(intel_dp);

	intel_dp->reset_link_params = true;
	intel_dp->pps_pipe = INVALID_PIPE;
	intel_dp->active_pipe = INVALID_PIPE;

	/* Preserve the current hw state. */
	intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg);
	intel_dp->attached_connector = intel_connector;

	if (intel_dp_is_port_edp(dev_priv, port)) {
		/*
		 * Currently we don't support eDP on TypeC ports, although in
		 * theory it could work on TypeC legacy ports.
		 */
		drm_WARN_ON(dev, intel_phy_is_tc(dev_priv, phy));
		type = DRM_MODE_CONNECTOR_eDP;
	} else {
		type = DRM_MODE_CONNECTOR_DisplayPort;
	}

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		intel_dp->active_pipe = vlv_active_pipe(intel_dp);

	/*
	 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
	 * for DP the encoder type can be set by the caller to
	 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
	 */
	if (type == DRM_MODE_CONNECTOR_eDP)
		intel_encoder->type = INTEL_OUTPUT_EDP;

	/* eDP only on port B and/or C on vlv/chv */
	if (drm_WARN_ON(dev, (IS_VALLEYVIEW(dev_priv) ||
			      IS_CHERRYVIEW(dev_priv)) &&
			intel_dp_is_edp(intel_dp) &&
			port != PORT_B && port != PORT_C))
		return false;

	drm_dbg_kms(&dev_priv->drm,
		    "Adding %s connector on [ENCODER:%d:%s]\n",
		    type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
		    intel_encoder->base.base.id, intel_encoder->base.name);

	drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);

	/* Interlaced scanout is only supported on non-GMCH platforms. */
	if (!HAS_GMCH(dev_priv))
		connector->interlace_allowed = true;
	connector->doublescan_allowed = 0;

	intel_connector->polled = DRM_CONNECTOR_POLL_HPD;

	intel_dp_aux_init(intel_dp);

	intel_connector_attach_encoder(intel_connector, intel_encoder);

	if (HAS_DDI(dev_priv))
		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
	else
		intel_connector->get_hw_state = intel_connector_get_hw_state;

	/* init MST on ports that can support it */
	intel_dp_mst_encoder_init(dig_port,
				  intel_connector->base.base.id);

	/* eDP panel setup; tear down AUX and MST again on failure. */
	if (!intel_edp_init_connector(intel_dp, intel_connector)) {
		intel_dp_aux_fini(intel_dp);
		intel_dp_mst_encoder_cleanup(dig_port);
		goto fail;
	}

	intel_dp_add_properties(intel_dp, connector);

	/* HDCP is supported for external DP only (not eDP). */
	if (is_hdcp_supported(dev_priv, port) && !intel_dp_is_edp(intel_dp)) {
		int ret = intel_dp_init_hdcp(dig_port, intel_connector);
		if (ret)
			drm_dbg_kms(&dev_priv->drm,
				    "HDCP init failed, skipping.\n");
	}

	/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
	 * 0xd.  Failure to do so will result in spurious interrupts being
	 * generated on the port when a cable is not attached.
	 */
	if (IS_G45(dev_priv)) {
		u32 temp = intel_de_read(dev_priv, PEG_BAND_GAP_DATA);
		intel_de_write(dev_priv, PEG_BAND_GAP_DATA,
			       (temp & ~0xf) | 0xd);
	}

	/* Start out with no HDMI FRL training state. */
	intel_dp->frl.is_trained = false;
	intel_dp->frl.trained_rate_gbps = 0;

	return true;

fail:
	drm_connector_cleanup(connector);

	return false;
}
8637
/*
 * Allocate and register a DP digital port on @port, wiring up the
 * platform-specific encoder hooks (enable/disable, link training, signal
 * levels) before handing off to intel_dp_init_connector().
 *
 * Returns true on success; false on allocation or init failure, with all
 * partially-initialized state freed via the error ladder at the end.
 */
bool intel_dp_init(struct drm_i915_private *dev_priv,
		   i915_reg_t output_reg,
		   enum port port)
{
	struct intel_digital_port *dig_port;
	struct intel_encoder *intel_encoder;
	struct drm_encoder *encoder;
	struct intel_connector *intel_connector;

	dig_port = kzalloc(sizeof(*dig_port), GFP_KERNEL);
	if (!dig_port)
		return false;

	intel_connector = intel_connector_alloc();
	if (!intel_connector)
		goto err_connector_alloc;

	intel_encoder = &dig_port->base;
	encoder = &intel_encoder->base;

	mutex_init(&dig_port->hdcp_mutex);

	if (drm_encoder_init(&dev_priv->drm, &intel_encoder->base,
			     &intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS,
			     "DP %c", port_name(port)))
		goto err_encoder_init;

	intel_encoder->hotplug = intel_dp_hotplug;
	intel_encoder->compute_config = intel_dp_compute_config;
	intel_encoder->get_hw_state = intel_dp_get_hw_state;
	intel_encoder->get_config = intel_dp_get_config;
	intel_encoder->sync_state = intel_dp_sync_state;
	intel_encoder->initial_fastset_check = intel_dp_initial_fastset_check;
	intel_encoder->update_pipe = intel_panel_update_backlight;
	intel_encoder->suspend = intel_dp_encoder_suspend;
	intel_encoder->shutdown = intel_dp_encoder_shutdown;
	/* Modeset sequence hooks differ per platform generation. */
	if (IS_CHERRYVIEW(dev_priv)) {
		intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
		intel_encoder->pre_enable = chv_pre_enable_dp;
		intel_encoder->enable = vlv_enable_dp;
		intel_encoder->disable = vlv_disable_dp;
		intel_encoder->post_disable = chv_post_disable_dp;
		intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
		intel_encoder->pre_enable = vlv_pre_enable_dp;
		intel_encoder->enable = vlv_enable_dp;
		intel_encoder->disable = vlv_disable_dp;
		intel_encoder->post_disable = vlv_post_disable_dp;
	} else {
		intel_encoder->pre_enable = g4x_pre_enable_dp;
		intel_encoder->enable = g4x_enable_dp;
		intel_encoder->disable = g4x_disable_dp;
		intel_encoder->post_disable = g4x_post_disable_dp;
	}

	/* CPT-style link training register layout vs. the g4x one. */
	if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
	    (HAS_PCH_CPT(dev_priv) && port != PORT_A))
		dig_port->dp.set_link_train = cpt_set_link_train;
	else
		dig_port->dp.set_link_train = g4x_set_link_train;

	/* Voltage-swing/pre-emphasis programming, per platform/port. */
	if (IS_CHERRYVIEW(dev_priv))
		dig_port->dp.set_signal_levels = chv_set_signal_levels;
	else if (IS_VALLEYVIEW(dev_priv))
		dig_port->dp.set_signal_levels = vlv_set_signal_levels;
	else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A)
		dig_port->dp.set_signal_levels = ivb_cpu_edp_set_signal_levels;
	else if (IS_GEN(dev_priv, 6) && port == PORT_A)
		dig_port->dp.set_signal_levels = snb_cpu_edp_set_signal_levels;
	else
		dig_port->dp.set_signal_levels = g4x_set_signal_levels;

	/* Maximum supported pre-emphasis / voltage-swing levels. */
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv) ||
	    (HAS_PCH_SPLIT(dev_priv) && port != PORT_A)) {
		dig_port->dp.preemph_max = intel_dp_preemph_max_3;
		dig_port->dp.voltage_max = intel_dp_voltage_max_3;
	} else {
		dig_port->dp.preemph_max = intel_dp_preemph_max_2;
		dig_port->dp.voltage_max = intel_dp_voltage_max_2;
	}

	dig_port->dp.output_reg = output_reg;
	dig_port->max_lanes = 4;

	intel_encoder->type = INTEL_OUTPUT_DP;
	intel_encoder->power_domain = intel_port_to_power_domain(port);
	/* On CHV, port D can only drive pipe C; B/C drive pipes A/B. */
	if (IS_CHERRYVIEW(dev_priv)) {
		if (port == PORT_D)
			intel_encoder->pipe_mask = BIT(PIPE_C);
		else
			intel_encoder->pipe_mask = BIT(PIPE_A) | BIT(PIPE_B);
	} else {
		intel_encoder->pipe_mask = ~0;
	}
	intel_encoder->cloneable = 0;
	intel_encoder->port = port;
	intel_encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port);

	dig_port->hpd_pulse = intel_dp_hpd_pulse;

	/* Live-status detection helper, per platform/port. */
	if (HAS_GMCH(dev_priv)) {
		if (IS_GM45(dev_priv))
			dig_port->connected = gm45_digital_port_connected;
		else
			dig_port->connected = g4x_digital_port_connected;
	} else {
		if (port == PORT_A)
			dig_port->connected = ilk_digital_port_connected;
		else
			dig_port->connected = ibx_digital_port_connected;
	}

	/* Port A (eDP) has no infoframe support. */
	if (port != PORT_A)
		intel_infoframe_init(dig_port);

	dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
	if (!intel_dp_init_connector(dig_port, intel_connector))
		goto err_init_connector;

	return true;

err_init_connector:
	drm_encoder_cleanup(encoder);
err_encoder_init:
	kfree(intel_connector);
err_connector_alloc:
	kfree(dig_port);
	return false;
}
8768
8769 void intel_dp_mst_suspend(struct drm_i915_private *dev_priv)
8770 {
8771         struct intel_encoder *encoder;
8772
8773         for_each_intel_encoder(&dev_priv->drm, encoder) {
8774                 struct intel_dp *intel_dp;
8775
8776                 if (encoder->type != INTEL_OUTPUT_DDI)
8777                         continue;
8778
8779                 intel_dp = enc_to_intel_dp(encoder);
8780
8781                 if (!intel_dp->can_mst)
8782                         continue;
8783
8784                 if (intel_dp->is_mst)
8785                         drm_dp_mst_topology_mgr_suspend(&intel_dp->mst_mgr);
8786         }
8787 }
8788
8789 void intel_dp_mst_resume(struct drm_i915_private *dev_priv)
8790 {
8791         struct intel_encoder *encoder;
8792
8793         for_each_intel_encoder(&dev_priv->drm, encoder) {
8794                 struct intel_dp *intel_dp;
8795                 int ret;
8796
8797                 if (encoder->type != INTEL_OUTPUT_DDI)
8798                         continue;
8799
8800                 intel_dp = enc_to_intel_dp(encoder);
8801
8802                 if (!intel_dp->can_mst)
8803                         continue;
8804
8805                 ret = drm_dp_mst_topology_mgr_resume(&intel_dp->mst_mgr,
8806                                                      true);
8807                 if (ret) {
8808                         intel_dp->is_mst = false;
8809                         drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
8810                                                         false);
8811                 }
8812         }
8813 }