// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include <drm/drm_debugfs.h>
#include <drm/drm_fourcc.h>

#include "i915_debugfs.h"
#include "intel_de.h"
#include "intel_display_debugfs.h"
#include "intel_display_power.h"
#include "intel_display_types.h"
#include "intel_dmc.h"
#include "intel_drrs.h"
#include "intel_fbc.h"
#include "intel_hdcp.h"
#include "intel_hdmi.h"
#include "intel_psr.h"
#include "intel_sideband.h"
#include "intel_sprite.h"
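
/*
 * Display-specific debugfs support. The read-only info nodes and the
 * writable control files defined in this file are registered by
 * intel_display_debugfs_register() under the DRM minor's debugfs
 * directory, typically /sys/kernel/debug/dri/<minor>/ when debugfs is
 * mounted at its default location.
 */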
static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
{
	return to_i915(node->minor->dev);
}

static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);

	seq_printf(m, "FB tracking busy bits: 0x%08x\n",
		   dev_priv->fb_tracking.busy_bits);

	seq_printf(m, "FB tracking flip bits: 0x%08x\n",
		   dev_priv->fb_tracking.flip_bits);

	return 0;
}
/* Report whether FBC is enabled and, if active, whether it is compressing. */
static int i915_fbc_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_fbc *fbc = &dev_priv->fbc;
	intel_wakeref_t wakeref;

	if (!HAS_FBC(dev_priv))
		return -ENODEV;

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
	mutex_lock(&fbc->lock);

	if (intel_fbc_is_active(dev_priv))
		seq_puts(m, "FBC enabled\n");
	else
		seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);

	if (intel_fbc_is_active(dev_priv)) {
		u32 mask;

		if (DISPLAY_VER(dev_priv) >= 8)
			mask = intel_de_read(dev_priv, IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK;
		else if (DISPLAY_VER(dev_priv) >= 7)
			mask = intel_de_read(dev_priv, IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK;
		else if (DISPLAY_VER(dev_priv) >= 5)
			mask = intel_de_read(dev_priv, ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK;
		else if (IS_G4X(dev_priv))
			mask = intel_de_read(dev_priv, DPFC_STATUS) & DPFC_COMP_SEG_MASK;
		else
			mask = intel_de_read(dev_priv, FBC_STATUS) &
				(FBC_STAT_COMPRESSING | FBC_STAT_COMPRESSED);

		seq_printf(m, "Compressing: %s\n", yesno(mask));
	}

	mutex_unlock(&fbc->lock);
	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

	return 0;
}
static int i915_fbc_false_color_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;

	if (DISPLAY_VER(dev_priv) < 7 || !HAS_FBC(dev_priv))
		return -ENODEV;

	*val = dev_priv->fbc.false_color;

	return 0;
}

static int i915_fbc_false_color_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	u32 reg;

	if (DISPLAY_VER(dev_priv) < 7 || !HAS_FBC(dev_priv))
		return -ENODEV;

	mutex_lock(&dev_priv->fbc.lock);

	reg = intel_de_read(dev_priv, ILK_DPFC_CONTROL);
	dev_priv->fbc.false_color = val;

	intel_de_write(dev_priv, ILK_DPFC_CONTROL,
		       val ? (reg | FBC_CTL_FALSE_COLOR) : (reg & ~FBC_CTL_FALSE_COLOR));

	mutex_unlock(&dev_priv->fbc.lock);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
			i915_fbc_false_color_get, i915_fbc_false_color_set,
			"%llu\n");
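
/*
 * Example usage, assuming the default debugfs mount point and DRM minor 0:
 *
 *   echo 1 > /sys/kernel/debug/dri/0/i915_fbc_false_color
 *   cat /sys/kernel/debug/dri/0/i915_fbc_false_color
 *
 * The value is parsed and printed with the "%llu\n" format passed to
 * DEFINE_SIMPLE_ATTRIBUTE() above; any non-zero write enables false color.
 */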
static int i915_ips_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;

	if (!HAS_IPS(dev_priv))
		return -ENODEV;

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	seq_printf(m, "Enabled by kernel parameter: %s\n",
		   yesno(dev_priv->params.enable_ips));

	if (DISPLAY_VER(dev_priv) >= 8) {
		seq_puts(m, "Currently: unknown\n");
	} else {
		if (intel_de_read(dev_priv, IPS_CTL) & IPS_ENABLE)
			seq_puts(m, "Currently: enabled\n");
		else
			seq_puts(m, "Currently: disabled\n");
	}

	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

	return 0;
}
static int i915_sr_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;
	bool sr_enabled = false;

	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	if (DISPLAY_VER(dev_priv) >= 9)
		/* no global SR status; inspect per-plane WM */;
	else if (HAS_PCH_SPLIT(dev_priv))
		sr_enabled = intel_de_read(dev_priv, WM1_LP_ILK) & WM1_LP_SR_EN;
	else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
		 IS_I945G(dev_priv) || IS_I945GM(dev_priv))
		sr_enabled = intel_de_read(dev_priv, FW_BLC_SELF) & FW_BLC_SELF_EN;
	else if (IS_I915GM(dev_priv))
		sr_enabled = intel_de_read(dev_priv, INSTPM) & INSTPM_SELF_EN;
	else if (IS_PINEVIEW(dev_priv))
		sr_enabled = intel_de_read(dev_priv, DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		sr_enabled = intel_de_read(dev_priv, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);

	seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));

	return 0;
}
static int i915_opregion(struct seq_file *m, void *unused)
{
	struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;

	if (opregion->header)
		seq_write(m, opregion->header, OPREGION_SIZE);

	return 0;
}

static int i915_vbt(struct seq_file *m, void *unused)
{
	struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;

	if (opregion->vbt)
		seq_write(m, opregion->vbt, opregion->vbt_size);

	return 0;
}
196 static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
198 struct drm_i915_private *dev_priv = node_to_i915(m->private);
199 struct drm_device *dev = &dev_priv->drm;
200 struct intel_framebuffer *fbdev_fb = NULL;
201 struct drm_framebuffer *drm_fb;
203 #ifdef CONFIG_DRM_FBDEV_EMULATION
204 if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) {
205 fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);
207 seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
208 fbdev_fb->base.width,
209 fbdev_fb->base.height,
210 fbdev_fb->base.format->depth,
211 fbdev_fb->base.format->cpp[0] * 8,
212 fbdev_fb->base.modifier,
213 drm_framebuffer_read_refcount(&fbdev_fb->base));
214 i915_debugfs_describe_obj(m, intel_fb_obj(&fbdev_fb->base));
219 mutex_lock(&dev->mode_config.fb_lock);
220 drm_for_each_fb(drm_fb, dev) {
221 struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
225 seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
228 fb->base.format->depth,
229 fb->base.format->cpp[0] * 8,
231 drm_framebuffer_read_refcount(&fb->base));
232 i915_debugfs_describe_obj(m, intel_fb_obj(&fb->base));
235 mutex_unlock(&dev->mode_config.fb_lock);
static int i915_psr_sink_status_show(struct seq_file *m, void *data)
{
	u8 val;
	static const char * const sink_status[] = {
		"inactive",
		"transition to active, capture and display",
		"active, display from RFB",
		"active, capture and display on sink device timings",
		"transition to inactive, capture and display, timing re-sync",
		"reserved",
		"reserved",
		"sink internal error",
	};
	struct drm_connector *connector = m->private;
	struct intel_dp *intel_dp =
		intel_attached_dp(to_intel_connector(connector));
	int ret;

	if (!CAN_PSR(intel_dp)) {
		seq_puts(m, "PSR Unsupported\n");
		return -ENODEV;
	}

	if (connector->status != connector_status_connected)
		return -ENODEV;

	ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);

	if (ret == 1) {
		const char *str = "unknown";

		val &= DP_PSR_SINK_STATE_MASK;
		if (val < ARRAY_SIZE(sink_status))
			str = sink_status[val];
		seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
	} else {
		return ret;
	}

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
static void
psr_source_status(struct intel_dp *intel_dp, struct seq_file *m)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	const char *status = "unknown";
	u32 val, status_val;

	if (intel_dp->psr.psr2_enabled) {
		static const char * const live_status[] = {
			"IDLE",
			"CAPTURE",
			"CAPTURE_FS",
			"SLEEP",
			"BUFON_FW",
			"ML_UP",
			"SU_STANDBY",
			"FAST_SLEEP",
			"DEEP_SLEEP",
			"BUF_ON",
			"TG_ON"
		};

		val = intel_de_read(dev_priv,
				    EDP_PSR2_STATUS(intel_dp->psr.transcoder));
		status_val = (val & EDP_PSR2_STATUS_STATE_MASK) >>
			      EDP_PSR2_STATUS_STATE_SHIFT;
		if (status_val < ARRAY_SIZE(live_status))
			status = live_status[status_val];
	} else {
		static const char * const live_status[] = {
			"IDLE",
			"SRDONACK",
			"SRDENT",
			"BUFOFF",
			"BUFON",
			"AUXACK",
			"SRDOFFACK"
		};

		val = intel_de_read(dev_priv,
				    EDP_PSR_STATUS(intel_dp->psr.transcoder));
		status_val = (val & EDP_PSR_STATUS_STATE_MASK) >>
			      EDP_PSR_STATUS_STATE_SHIFT;
		if (status_val < ARRAY_SIZE(live_status))
			status = live_status[status_val];
	}

	seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val);
}
static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_psr *psr = &intel_dp->psr;
	intel_wakeref_t wakeref;
	const char *status;
	bool enabled;
	u32 val;

	seq_printf(m, "Sink support: %s", yesno(psr->sink_support));
	if (psr->sink_support)
		seq_printf(m, " [0x%02x]", intel_dp->psr_dpcd[0]);
	seq_puts(m, "\n");

	if (!psr->sink_support)
		return 0;

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
	mutex_lock(&psr->lock);

	if (psr->enabled)
		status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled";
	else
		status = "disabled";
	seq_printf(m, "PSR mode: %s\n", status);

	if (!psr->enabled) {
		seq_printf(m, "PSR sink not reliable: %s\n",
			   yesno(psr->sink_not_reliable));

		goto unlock;
	}

	if (psr->psr2_enabled) {
		val = intel_de_read(dev_priv,
				    EDP_PSR2_CTL(intel_dp->psr.transcoder));
		enabled = val & EDP_PSR2_ENABLE;
	} else {
		val = intel_de_read(dev_priv,
				    EDP_PSR_CTL(intel_dp->psr.transcoder));
		enabled = val & EDP_PSR_ENABLE;
	}
	seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
		   enableddisabled(enabled), val);
	psr_source_status(intel_dp, m);
	seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
		   psr->busy_frontbuffer_bits);

	/*
	 * On SKL+ the performance counter is reset to 0 every time a DC
	 * state is entered, so only report it on HSW/BDW.
	 */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		val = intel_de_read(dev_priv,
				    EDP_PSR_PERF_CNT(intel_dp->psr.transcoder));
		val &= EDP_PSR_PERF_CNT_MASK;
		seq_printf(m, "Performance counter: %u\n", val);
	}

	if (psr->debug & I915_PSR_DEBUG_IRQ) {
		seq_printf(m, "Last attempted entry at: %lld\n",
			   psr->last_entry_attempt);
		seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
	}

	if (psr->psr2_enabled) {
		u32 su_frames_val[3];
		int frame;

		/*
		 * Read all 3 registers beforehand to minimize the chance of
		 * crossing a frame boundary between register reads.
		 */
		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
			val = intel_de_read(dev_priv,
					    PSR2_SU_STATUS(intel_dp->psr.transcoder, frame));
			su_frames_val[frame / 3] = val;
		}

		seq_puts(m, "Frame:\tPSR2 SU blocks:\n");

		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
			u32 su_blocks;

			su_blocks = su_frames_val[frame / 3] &
				    PSR2_SU_STATUS_MASK(frame);
			su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
			seq_printf(m, "%d\t%d\n", frame, su_blocks);
		}

		seq_printf(m, "PSR2 selective fetch: %s\n",
			   enableddisabled(psr->psr2_sel_fetch_enabled));
	}

unlock:
	mutex_unlock(&psr->lock);
	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

	return 0;
}
static int i915_edp_psr_status(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_dp *intel_dp = NULL;
	struct intel_encoder *encoder;

	if (!HAS_PSR(dev_priv))
		return -ENODEV;

	/* Find the first eDP output which supports PSR */
	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
		intel_dp = enc_to_intel_dp(encoder);
		break;
	}

	if (!intel_dp)
		return -ENODEV;

	return intel_psr_status(m, intel_dp);
}
static int
i915_edp_psr_debug_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	struct intel_encoder *encoder;
	intel_wakeref_t wakeref;
	int ret = -ENODEV;

	if (!HAS_PSR(dev_priv))
		return ret;

	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		drm_dbg_kms(&dev_priv->drm, "Setting PSR debug to %llx\n", val);

		wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

		// TODO: split to each transcoder's PSR debug state
		ret = intel_psr_debug_set(intel_dp, val);

		intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
	}

	return ret;
}

static int
i915_edp_psr_debug_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;
	struct intel_encoder *encoder;

	if (!HAS_PSR(dev_priv))
		return -ENODEV;

	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		// TODO: split to each transcoder's PSR debug state
		*val = READ_ONCE(intel_dp->psr.debug);
		return 0;
	}

	return -ENODEV;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
			i915_edp_psr_debug_get, i915_edp_psr_debug_set,
			"%llu\n");
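
/*
 * Example usage, assuming the default debugfs mount point and DRM minor 0:
 *
 *   echo <mask> > /sys/kernel/debug/dri/0/i915_edp_psr_debug
 *   cat /sys/kernel/debug/dri/0/i915_edp_psr_debug
 *
 * The written value is passed straight to intel_psr_debug_set(); the
 * I915_PSR_DEBUG_* flags (e.g. I915_PSR_DEBUG_IRQ, checked in
 * intel_psr_status() above) describe the accepted bits.
 */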
static int i915_power_domain_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	int i;

	mutex_lock(&power_domains->lock);

	seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
	for (i = 0; i < power_domains->power_well_count; i++) {
		struct i915_power_well *power_well;
		enum intel_display_power_domain power_domain;

		power_well = &power_domains->power_wells[i];
		seq_printf(m, "%-25s %d\n", power_well->desc->name,
			   power_well->count);

		for_each_power_domain(power_domain, power_well->desc->domains)
			seq_printf(m, "  %-23s %d\n",
				   intel_display_power_domain_str(power_domain),
				   power_domains->domain_use_count[power_domain]);
	}

	mutex_unlock(&power_domains->lock);

	return 0;
}
static int i915_dmc_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;
	struct intel_dmc *dmc;
	i915_reg_t dc5_reg, dc6_reg = {};

	if (!HAS_DMC(dev_priv))
		return -ENODEV;

	dmc = &dev_priv->dmc;

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	seq_printf(m, "fw loaded: %s\n", yesno(intel_dmc_has_payload(dev_priv)));
	seq_printf(m, "path: %s\n", dmc->fw_path);
	seq_printf(m, "Pipe A fw support: %s\n",
		   yesno(GRAPHICS_VER(dev_priv) >= 12));
	seq_printf(m, "Pipe A fw loaded: %s\n", yesno(dmc->dmc_info[DMC_FW_PIPEA].payload));
	seq_printf(m, "Pipe B fw support: %s\n", yesno(IS_ALDERLAKE_P(dev_priv)));
	seq_printf(m, "Pipe B fw loaded: %s\n", yesno(dmc->dmc_info[DMC_FW_PIPEB].payload));

	if (!intel_dmc_has_payload(dev_priv))
		goto out;

	seq_printf(m, "version: %d.%d\n", DMC_VERSION_MAJOR(dmc->version),
		   DMC_VERSION_MINOR(dmc->version));

	if (DISPLAY_VER(dev_priv) >= 12) {
		if (IS_DGFX(dev_priv)) {
			dc5_reg = DG1_DMC_DEBUG_DC5_COUNT;
		} else {
			dc5_reg = TGL_DMC_DEBUG_DC5_COUNT;
			dc6_reg = TGL_DMC_DEBUG_DC6_COUNT;
		}

		/*
		 * NOTE: DMC_DEBUG3 is a general purpose reg.
		 * According to B.Specs:49196 DMC f/w reuses DC5/6 counter
		 * reg for DC3CO debugging and validation,
		 * but TGL DMC f/w is using DMC_DEBUG3 reg for DC3CO counter.
		 */
		seq_printf(m, "DC3CO count: %d\n",
			   intel_de_read(dev_priv, DMC_DEBUG3));
	} else {
		dc5_reg = IS_BROXTON(dev_priv) ? BXT_DMC_DC3_DC5_COUNT :
						 SKL_DMC_DC3_DC5_COUNT;
		if (!IS_GEMINILAKE(dev_priv) && !IS_BROXTON(dev_priv))
			dc6_reg = SKL_DMC_DC5_DC6_COUNT;
	}

	seq_printf(m, "DC3 -> DC5 count: %d\n",
		   intel_de_read(dev_priv, dc5_reg));
	if (dc6_reg.reg)
		seq_printf(m, "DC5 -> DC6 count: %d\n",
			   intel_de_read(dev_priv, dc6_reg));

out:
	seq_printf(m, "program base: 0x%08x\n",
		   intel_de_read(dev_priv, DMC_PROGRAM(dmc->dmc_info[DMC_FW_MAIN].start_mmioaddr, 0)));
	seq_printf(m, "ssp base: 0x%08x\n",
		   intel_de_read(dev_priv, DMC_SSP_BASE));
	seq_printf(m, "htp: 0x%08x\n", intel_de_read(dev_priv, DMC_HTP_SKL));

	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

	return 0;
}
601 static void intel_seq_print_mode(struct seq_file *m, int tabs,
602 const struct drm_display_mode *mode)
606 for (i = 0; i < tabs; i++)
609 seq_printf(m, DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
612 static void intel_encoder_info(struct seq_file *m,
613 struct intel_crtc *crtc,
614 struct intel_encoder *encoder)
616 struct drm_i915_private *dev_priv = node_to_i915(m->private);
617 struct drm_connector_list_iter conn_iter;
618 struct drm_connector *connector;
620 seq_printf(m, "\t[ENCODER:%d:%s]: connectors:\n",
621 encoder->base.base.id, encoder->base.name);
623 drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
624 drm_for_each_connector_iter(connector, &conn_iter) {
625 const struct drm_connector_state *conn_state =
628 if (conn_state->best_encoder != &encoder->base)
631 seq_printf(m, "\t\t[CONNECTOR:%d:%s]\n",
632 connector->base.id, connector->name);
634 drm_connector_list_iter_end(&conn_iter);
637 static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
639 const struct drm_display_mode *mode = panel->fixed_mode;
641 seq_printf(m, "\tfixed mode: " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
644 static void intel_hdcp_info(struct seq_file *m,
645 struct intel_connector *intel_connector)
647 bool hdcp_cap, hdcp2_cap;
649 if (!intel_connector->hdcp.shim) {
650 seq_puts(m, "No Connector Support");
654 hdcp_cap = intel_hdcp_capable(intel_connector);
655 hdcp2_cap = intel_hdcp2_capable(intel_connector);
658 seq_puts(m, "HDCP1.4 ");
660 seq_puts(m, "HDCP2.2 ");
662 if (!hdcp_cap && !hdcp2_cap)
669 static void intel_dp_info(struct seq_file *m,
670 struct intel_connector *intel_connector)
672 struct intel_encoder *intel_encoder = intel_attached_encoder(intel_connector);
673 struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);
674 const struct drm_property_blob *edid = intel_connector->base.edid_blob_ptr;
676 seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
677 seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
678 if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
679 intel_panel_info(m, &intel_connector->panel);
681 drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
682 edid ? edid->data : NULL, &intel_dp->aux);
685 static void intel_dp_mst_info(struct seq_file *m,
686 struct intel_connector *intel_connector)
688 bool has_audio = intel_connector->port->has_audio;
690 seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
693 static void intel_hdmi_info(struct seq_file *m,
694 struct intel_connector *intel_connector)
696 struct intel_encoder *intel_encoder = intel_attached_encoder(intel_connector);
697 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(intel_encoder);
699 seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
702 static void intel_lvds_info(struct seq_file *m,
703 struct intel_connector *intel_connector)
705 intel_panel_info(m, &intel_connector->panel);
708 static void intel_connector_info(struct seq_file *m,
709 struct drm_connector *connector)
711 struct intel_connector *intel_connector = to_intel_connector(connector);
712 const struct drm_connector_state *conn_state = connector->state;
713 struct intel_encoder *encoder =
714 to_intel_encoder(conn_state->best_encoder);
715 const struct drm_display_mode *mode;
717 seq_printf(m, "[CONNECTOR:%d:%s]: status: %s\n",
718 connector->base.id, connector->name,
719 drm_get_connector_status_name(connector->status));
721 if (connector->status == connector_status_disconnected)
724 seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
725 connector->display_info.width_mm,
726 connector->display_info.height_mm);
727 seq_printf(m, "\tsubpixel order: %s\n",
728 drm_get_subpixel_order_name(connector->display_info.subpixel_order));
729 seq_printf(m, "\tCEA rev: %d\n", connector->display_info.cea_rev);
734 switch (connector->connector_type) {
735 case DRM_MODE_CONNECTOR_DisplayPort:
736 case DRM_MODE_CONNECTOR_eDP:
737 if (encoder->type == INTEL_OUTPUT_DP_MST)
738 intel_dp_mst_info(m, intel_connector);
740 intel_dp_info(m, intel_connector);
742 case DRM_MODE_CONNECTOR_LVDS:
743 if (encoder->type == INTEL_OUTPUT_LVDS)
744 intel_lvds_info(m, intel_connector);
746 case DRM_MODE_CONNECTOR_HDMIA:
747 if (encoder->type == INTEL_OUTPUT_HDMI ||
748 encoder->type == INTEL_OUTPUT_DDI)
749 intel_hdmi_info(m, intel_connector);
755 seq_puts(m, "\tHDCP version: ");
756 intel_hdcp_info(m, intel_connector);
758 seq_printf(m, "\tmodes:\n");
759 list_for_each_entry(mode, &connector->modes, head)
760 intel_seq_print_mode(m, 2, mode);
763 static const char *plane_type(enum drm_plane_type type)
766 case DRM_PLANE_TYPE_OVERLAY:
768 case DRM_PLANE_TYPE_PRIMARY:
770 case DRM_PLANE_TYPE_CURSOR:
773 * Deliberately omitting default: to generate compiler warnings
774 * when a new drm_plane_type gets added.
static void plane_rotation(char *buf, size_t bufsize, unsigned int rotation)
{
	/*
	 * According to the documentation only one DRM_MODE_ROTATE_ value is
	 * allowed at a time, but print them all so that any misuse of the
	 * rotation bits is visible.
	 */
	snprintf(buf, bufsize,
		 "%s%s%s%s%s%s(0x%08x)",
		 (rotation & DRM_MODE_ROTATE_0) ? "0 " : "",
		 (rotation & DRM_MODE_ROTATE_90) ? "90 " : "",
		 (rotation & DRM_MODE_ROTATE_180) ? "180 " : "",
		 (rotation & DRM_MODE_ROTATE_270) ? "270 " : "",
		 (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "",
		 (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "",
		 rotation);
}
798 static const char *plane_visibility(const struct intel_plane_state *plane_state)
800 if (plane_state->uapi.visible)
803 if (plane_state->planar_slave)
804 return "planar-slave";
809 static void intel_plane_uapi_info(struct seq_file *m, struct intel_plane *plane)
811 const struct intel_plane_state *plane_state =
812 to_intel_plane_state(plane->base.state);
813 const struct drm_framebuffer *fb = plane_state->uapi.fb;
814 struct drm_rect src, dst;
817 src = drm_plane_state_src(&plane_state->uapi);
818 dst = drm_plane_state_dest(&plane_state->uapi);
820 plane_rotation(rot_str, sizeof(rot_str),
821 plane_state->uapi.rotation);
823 seq_puts(m, "\t\tuapi: [FB:");
825 seq_printf(m, "%d] %p4cc,0x%llx,%dx%d", fb->base.id,
826 &fb->format->format, fb->modifier, fb->width,
829 seq_puts(m, "0] n/a,0x0,0x0,");
830 seq_printf(m, ", visible=%s, src=" DRM_RECT_FP_FMT ", dst=" DRM_RECT_FMT
831 ", rotation=%s\n", plane_visibility(plane_state),
832 DRM_RECT_FP_ARG(&src), DRM_RECT_ARG(&dst), rot_str);
834 if (plane_state->planar_linked_plane)
835 seq_printf(m, "\t\tplanar: Linked to [PLANE:%d:%s] as a %s\n",
836 plane_state->planar_linked_plane->base.base.id, plane_state->planar_linked_plane->base.name,
837 plane_state->planar_slave ? "slave" : "master");
840 static void intel_plane_hw_info(struct seq_file *m, struct intel_plane *plane)
842 const struct intel_plane_state *plane_state =
843 to_intel_plane_state(plane->base.state);
844 const struct drm_framebuffer *fb = plane_state->hw.fb;
850 plane_rotation(rot_str, sizeof(rot_str),
851 plane_state->hw.rotation);
853 seq_printf(m, "\t\thw: [FB:%d] %p4cc,0x%llx,%dx%d, visible=%s, src="
854 DRM_RECT_FP_FMT ", dst=" DRM_RECT_FMT ", rotation=%s\n",
855 fb->base.id, &fb->format->format,
856 fb->modifier, fb->width, fb->height,
857 yesno(plane_state->uapi.visible),
858 DRM_RECT_FP_ARG(&plane_state->uapi.src),
859 DRM_RECT_ARG(&plane_state->uapi.dst),
863 static void intel_plane_info(struct seq_file *m, struct intel_crtc *crtc)
865 struct drm_i915_private *dev_priv = node_to_i915(m->private);
866 struct intel_plane *plane;
868 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
869 seq_printf(m, "\t[PLANE:%d:%s]: type=%s\n",
870 plane->base.base.id, plane->base.name,
871 plane_type(plane->base.type));
872 intel_plane_uapi_info(m, plane);
873 intel_plane_hw_info(m, plane);
static void intel_scaler_info(struct seq_file *m, struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);
	int num_scalers = crtc->num_scalers;
	int i;

	/* Not all platforms have a scaler */
	if (num_scalers) {
		seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
			   num_scalers,
			   crtc_state->scaler_state.scaler_users,
			   crtc_state->scaler_state.scaler_id);

		for (i = 0; i < num_scalers; i++) {
			const struct intel_scaler *sc =
				&crtc_state->scaler_state.scalers[i];

			seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
				   i, yesno(sc->in_use), sc->mode);
		}
		seq_puts(m, "\n");
	} else {
		seq_puts(m, "\tNo scalers available on this platform\n");
	}
}
904 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_VBLANK_EVADE)
905 static void crtc_updates_info(struct seq_file *m,
906 struct intel_crtc *crtc,
913 for (row = 0; row < ARRAY_SIZE(crtc->debug.vbl.times); row++)
914 count += crtc->debug.vbl.times[row];
915 seq_printf(m, "%sUpdates: %llu\n", hdr, count);
919 for (row = 0; row < ARRAY_SIZE(crtc->debug.vbl.times); row++) {
920 char columns[80] = " |";
934 snprintf(columns, sizeof(columns), "%4ld%s |",
935 DIV_ROUND_CLOSEST(BIT(row + 9), x), units);
938 if (crtc->debug.vbl.times[row]) {
939 x = ilog2(crtc->debug.vbl.times[row]);
940 memset(columns + 8, '*', x);
941 columns[8 + x] = '\0';
944 seq_printf(m, "%s%s\n", hdr, columns);
947 seq_printf(m, "%sMin update: %lluns\n",
948 hdr, crtc->debug.vbl.min);
949 seq_printf(m, "%sMax update: %lluns\n",
950 hdr, crtc->debug.vbl.max);
951 seq_printf(m, "%sAverage update: %lluns\n",
952 hdr, div64_u64(crtc->debug.vbl.sum, count));
953 seq_printf(m, "%sOverruns > %uus: %u\n",
954 hdr, VBLANK_EVASION_TIME_US, crtc->debug.vbl.over);
957 static int crtc_updates_show(struct seq_file *m, void *data)
959 crtc_updates_info(m, m->private, "");
963 static int crtc_updates_open(struct inode *inode, struct file *file)
965 return single_open(file, crtc_updates_show, inode->i_private);
968 static ssize_t crtc_updates_write(struct file *file,
969 const char __user *ubuf,
970 size_t len, loff_t *offp)
972 struct seq_file *m = file->private_data;
973 struct intel_crtc *crtc = m->private;
975 /* May race with an update. Meh. */
976 memset(&crtc->debug.vbl, 0, sizeof(crtc->debug.vbl));
981 static const struct file_operations crtc_updates_fops = {
982 .owner = THIS_MODULE,
983 .open = crtc_updates_open,
986 .release = single_release,
987 .write = crtc_updates_write
990 static void crtc_updates_add(struct drm_crtc *crtc)
992 debugfs_create_file("i915_update_info", 0644, crtc->debugfs_entry,
993 to_intel_crtc(crtc), &crtc_updates_fops);
997 static void crtc_updates_info(struct seq_file *m,
998 struct intel_crtc *crtc,
1003 static void crtc_updates_add(struct drm_crtc *crtc)
1008 static void intel_crtc_info(struct seq_file *m, struct intel_crtc *crtc)
1010 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1011 const struct intel_crtc_state *crtc_state =
1012 to_intel_crtc_state(crtc->base.state);
1013 struct intel_encoder *encoder;
1015 seq_printf(m, "[CRTC:%d:%s]:\n",
1016 crtc->base.base.id, crtc->base.name);
1018 seq_printf(m, "\tuapi: enable=%s, active=%s, mode=" DRM_MODE_FMT "\n",
1019 yesno(crtc_state->uapi.enable),
1020 yesno(crtc_state->uapi.active),
1021 DRM_MODE_ARG(&crtc_state->uapi.mode));
1023 if (crtc_state->hw.enable) {
1024 seq_printf(m, "\thw: active=%s, adjusted_mode=" DRM_MODE_FMT "\n",
1025 yesno(crtc_state->hw.active),
1026 DRM_MODE_ARG(&crtc_state->hw.adjusted_mode));
1028 seq_printf(m, "\tpipe src size=%dx%d, dither=%s, bpp=%d\n",
1029 crtc_state->pipe_src_w, crtc_state->pipe_src_h,
1030 yesno(crtc_state->dither), crtc_state->pipe_bpp);
1032 intel_scaler_info(m, crtc);
1035 if (crtc_state->bigjoiner)
1036 seq_printf(m, "\tLinked to [CRTC:%d:%s] as a %s\n",
1037 crtc_state->bigjoiner_linked_crtc->base.base.id,
1038 crtc_state->bigjoiner_linked_crtc->base.name,
1039 crtc_state->bigjoiner_slave ? "slave" : "master");
1041 for_each_intel_encoder_mask(&dev_priv->drm, encoder,
1042 crtc_state->uapi.encoder_mask)
1043 intel_encoder_info(m, crtc, encoder);
1045 intel_plane_info(m, crtc);
1047 seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s\n",
1048 yesno(!crtc->cpu_fifo_underrun_disabled),
1049 yesno(!crtc->pch_fifo_underrun_disabled));
1051 crtc_updates_info(m, crtc, "\t");
1054 static int i915_display_info(struct seq_file *m, void *unused)
1056 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1057 struct drm_device *dev = &dev_priv->drm;
1058 struct intel_crtc *crtc;
1059 struct drm_connector *connector;
1060 struct drm_connector_list_iter conn_iter;
1061 intel_wakeref_t wakeref;
1063 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
1065 drm_modeset_lock_all(dev);
1067 seq_printf(m, "CRTC info\n");
1068 seq_printf(m, "---------\n");
1069 for_each_intel_crtc(dev, crtc)
1070 intel_crtc_info(m, crtc);
1072 seq_printf(m, "\n");
1073 seq_printf(m, "Connector info\n");
1074 seq_printf(m, "--------------\n");
1075 drm_connector_list_iter_begin(dev, &conn_iter);
1076 drm_for_each_connector_iter(connector, &conn_iter)
1077 intel_connector_info(m, connector);
1078 drm_connector_list_iter_end(&conn_iter);
1080 drm_modeset_unlock_all(dev);
1082 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
1087 static int i915_shared_dplls_info(struct seq_file *m, void *unused)
1089 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1090 struct drm_device *dev = &dev_priv->drm;
1093 drm_modeset_lock_all(dev);
1095 seq_printf(m, "PLL refclks: non-SSC: %d kHz, SSC: %d kHz\n",
1096 dev_priv->dpll.ref_clks.nssc,
1097 dev_priv->dpll.ref_clks.ssc);
1099 for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
1100 struct intel_shared_dpll *pll = &dev_priv->dpll.shared_dplls[i];
1102 seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name,
1104 seq_printf(m, " pipe_mask: 0x%x, active: 0x%x, on: %s\n",
1105 pll->state.pipe_mask, pll->active_mask, yesno(pll->on));
1106 seq_printf(m, " tracked hardware state:\n");
1107 seq_printf(m, " dpll: 0x%08x\n", pll->state.hw_state.dpll);
1108 seq_printf(m, " dpll_md: 0x%08x\n",
1109 pll->state.hw_state.dpll_md);
1110 seq_printf(m, " fp0: 0x%08x\n", pll->state.hw_state.fp0);
1111 seq_printf(m, " fp1: 0x%08x\n", pll->state.hw_state.fp1);
1112 seq_printf(m, " wrpll: 0x%08x\n", pll->state.hw_state.wrpll);
1113 seq_printf(m, " cfgcr0: 0x%08x\n", pll->state.hw_state.cfgcr0);
1114 seq_printf(m, " cfgcr1: 0x%08x\n", pll->state.hw_state.cfgcr1);
1115 seq_printf(m, " mg_refclkin_ctl: 0x%08x\n",
1116 pll->state.hw_state.mg_refclkin_ctl);
1117 seq_printf(m, " mg_clktop2_coreclkctl1: 0x%08x\n",
1118 pll->state.hw_state.mg_clktop2_coreclkctl1);
1119 seq_printf(m, " mg_clktop2_hsclkctl: 0x%08x\n",
1120 pll->state.hw_state.mg_clktop2_hsclkctl);
1121 seq_printf(m, " mg_pll_div0: 0x%08x\n",
1122 pll->state.hw_state.mg_pll_div0);
1123 seq_printf(m, " mg_pll_div1: 0x%08x\n",
1124 pll->state.hw_state.mg_pll_div1);
1125 seq_printf(m, " mg_pll_lf: 0x%08x\n",
1126 pll->state.hw_state.mg_pll_lf);
1127 seq_printf(m, " mg_pll_frac_lock: 0x%08x\n",
1128 pll->state.hw_state.mg_pll_frac_lock);
1129 seq_printf(m, " mg_pll_ssc: 0x%08x\n",
1130 pll->state.hw_state.mg_pll_ssc);
1131 seq_printf(m, " mg_pll_bias: 0x%08x\n",
1132 pll->state.hw_state.mg_pll_bias);
1133 seq_printf(m, " mg_pll_tdc_coldst_bias: 0x%08x\n",
1134 pll->state.hw_state.mg_pll_tdc_coldst_bias);
1136 drm_modeset_unlock_all(dev);
static int i915_ipc_status_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;

	seq_printf(m, "Isochronous Priority Control: %s\n",
		   yesno(dev_priv->ipc_enabled));

	return 0;
}

static int i915_ipc_status_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	if (!HAS_IPC(dev_priv))
		return -ENODEV;

	return single_open(file, i915_ipc_status_show, dev_priv);
}

static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
				     size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	intel_wakeref_t wakeref;
	bool enable;
	int ret;

	ret = kstrtobool_from_user(ubuf, len, &enable);
	if (ret < 0)
		return ret;

	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
		if (!dev_priv->ipc_enabled && enable)
			drm_info(&dev_priv->drm,
				 "Enabling IPC: WM will be proper only after next commit\n");
		dev_priv->ipc_enabled = enable;
		intel_enable_ipc(dev_priv);
	}

	return len;
}

static const struct file_operations i915_ipc_status_fops = {
	.owner = THIS_MODULE,
	.open = i915_ipc_status_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_ipc_status_write
};
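
/*
 * i915_ipc_status is a read/write node: reading reports whether Isochronous
 * Priority Control is currently enabled, and writing any value accepted by
 * kstrtobool_from_user() ("0"/"1", "y"/"n", ...) toggles it, e.g. (assuming
 * DRM minor 0):
 *
 *   echo 0 > /sys/kernel/debug/dri/0/i915_ipc_status
 */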
1193 static int i915_ddb_info(struct seq_file *m, void *unused)
1195 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1196 struct drm_device *dev = &dev_priv->drm;
1197 struct skl_ddb_entry *entry;
1198 struct intel_crtc *crtc;
1200 if (DISPLAY_VER(dev_priv) < 9)
1203 drm_modeset_lock_all(dev);
1205 seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");
1207 for_each_intel_crtc(&dev_priv->drm, crtc) {
1208 struct intel_crtc_state *crtc_state =
1209 to_intel_crtc_state(crtc->base.state);
1210 enum pipe pipe = crtc->pipe;
1211 enum plane_id plane_id;
1213 seq_printf(m, "Pipe %c\n", pipe_name(pipe));
1215 for_each_plane_id_on_crtc(crtc, plane_id) {
1216 entry = &crtc_state->wm.skl.plane_ddb_y[plane_id];
1217 seq_printf(m, " Plane%-8d%8u%8u%8u\n", plane_id + 1,
1218 entry->start, entry->end,
1219 skl_ddb_entry_size(entry));
1222 entry = &crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];
1223 seq_printf(m, " %-13s%8u%8u%8u\n", "Cursor", entry->start,
1224 entry->end, skl_ddb_entry_size(entry));
1227 drm_modeset_unlock_all(dev);
1232 static void drrs_status_per_crtc(struct seq_file *m,
1233 struct drm_device *dev,
1234 struct intel_crtc *crtc)
1236 struct drm_i915_private *dev_priv = to_i915(dev);
1237 struct i915_drrs *drrs = &dev_priv->drrs;
1239 struct drm_connector *connector;
1240 struct drm_connector_list_iter conn_iter;
1242 drm_connector_list_iter_begin(dev, &conn_iter);
1243 drm_for_each_connector_iter(connector, &conn_iter) {
1244 bool supported = false;
1246 if (connector->state->crtc != &crtc->base)
1249 seq_printf(m, "%s:\n", connector->name);
1251 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP &&
1252 drrs->type == SEAMLESS_DRRS_SUPPORT)
1255 seq_printf(m, "\tDRRS Supported: %s\n", yesno(supported));
1257 drm_connector_list_iter_end(&conn_iter);
1261 if (to_intel_crtc_state(crtc->base.state)->has_drrs) {
1262 struct intel_panel *panel;
1264 mutex_lock(&drrs->mutex);
1265 /* DRRS Supported */
1266 seq_puts(m, "\tDRRS Enabled: Yes\n");
1268 /* disable_drrs() will make drrs->dp NULL */
1270 seq_puts(m, "Idleness DRRS: Disabled\n");
1271 mutex_unlock(&drrs->mutex);
1275 panel = &drrs->dp->attached_connector->panel;
1276 seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
1277 drrs->busy_frontbuffer_bits);
1279 seq_puts(m, "\n\t\t");
1280 if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
1281 seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
1282 vrefresh = drm_mode_vrefresh(panel->fixed_mode);
1283 } else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
1284 seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
1285 vrefresh = drm_mode_vrefresh(panel->downclock_mode);
1287 seq_printf(m, "DRRS_State: Unknown(%d)\n",
1288 drrs->refresh_rate_type);
1289 mutex_unlock(&drrs->mutex);
1292 seq_printf(m, "\t\tVrefresh: %d", vrefresh);
1294 seq_puts(m, "\n\t\t");
1295 mutex_unlock(&drrs->mutex);
1297 /* DRRS not supported. Print the VBT parameter*/
1298 seq_puts(m, "\tDRRS Enabled : No");
1303 static int i915_drrs_status(struct seq_file *m, void *unused)
1305 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1306 struct drm_device *dev = &dev_priv->drm;
1307 struct intel_crtc *crtc;
1308 int active_crtc_cnt = 0;
1310 drm_modeset_lock_all(dev);
1311 for_each_intel_crtc(dev, crtc) {
1312 if (crtc->base.state->active) {
1314 seq_printf(m, "\nCRTC %d: ", active_crtc_cnt);
1316 drrs_status_per_crtc(m, dev, crtc);
1319 drm_modeset_unlock_all(dev);
1321 if (!active_crtc_cnt)
1322 seq_puts(m, "No active crtc found\n");
1328 intel_lpsp_power_well_enabled(struct drm_i915_private *i915,
1329 enum i915_power_well_id power_well_id)
1331 intel_wakeref_t wakeref;
1334 wakeref = intel_runtime_pm_get(&i915->runtime_pm);
1335 is_enabled = intel_display_power_well_is_enabled(i915,
1337 intel_runtime_pm_put(&i915->runtime_pm, wakeref);
1342 static int i915_lpsp_status(struct seq_file *m, void *unused)
1344 struct drm_i915_private *i915 = node_to_i915(m->private);
1345 bool lpsp_enabled = false;
1347 if (DISPLAY_VER(i915) >= 13 || IS_DISPLAY_VER(i915, 9, 10)) {
1348 lpsp_enabled = !intel_lpsp_power_well_enabled(i915, SKL_DISP_PW_2);
1349 } else if (IS_DISPLAY_VER(i915, 11, 12)) {
1350 lpsp_enabled = !intel_lpsp_power_well_enabled(i915, ICL_DISP_PW_3);
1351 } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
1352 lpsp_enabled = !intel_lpsp_power_well_enabled(i915, HSW_DISP_PW_GLOBAL);
1354 seq_puts(m, "LPSP: not supported\n");
1358 seq_printf(m, "LPSP: %s\n", enableddisabled(lpsp_enabled));
1363 static int i915_dp_mst_info(struct seq_file *m, void *unused)
1365 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1366 struct drm_device *dev = &dev_priv->drm;
1367 struct intel_encoder *intel_encoder;
1368 struct intel_digital_port *dig_port;
1369 struct drm_connector *connector;
1370 struct drm_connector_list_iter conn_iter;
1372 drm_connector_list_iter_begin(dev, &conn_iter);
1373 drm_for_each_connector_iter(connector, &conn_iter) {
1374 if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
1377 intel_encoder = intel_attached_encoder(to_intel_connector(connector));
1378 if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
1381 dig_port = enc_to_dig_port(intel_encoder);
1382 if (!dig_port->dp.can_mst)
1385 seq_printf(m, "MST Source Port [ENCODER:%d:%s]\n",
1386 dig_port->base.base.base.id,
1387 dig_port->base.base.name);
1388 drm_dp_mst_dump_topology(m, &dig_port->dp.mst_mgr);
1390 drm_connector_list_iter_end(&conn_iter);
1395 static ssize_t i915_displayport_test_active_write(struct file *file,
1396 const char __user *ubuf,
1397 size_t len, loff_t *offp)
1401 struct drm_device *dev;
1402 struct drm_connector *connector;
1403 struct drm_connector_list_iter conn_iter;
1404 struct intel_dp *intel_dp;
1407 dev = ((struct seq_file *)file->private_data)->private;
1412 input_buffer = memdup_user_nul(ubuf, len);
1413 if (IS_ERR(input_buffer))
1414 return PTR_ERR(input_buffer);
1416 drm_dbg(&to_i915(dev)->drm,
1417 "Copied %d bytes from user\n", (unsigned int)len);
1419 drm_connector_list_iter_begin(dev, &conn_iter);
1420 drm_for_each_connector_iter(connector, &conn_iter) {
1421 struct intel_encoder *encoder;
1423 if (connector->connector_type !=
1424 DRM_MODE_CONNECTOR_DisplayPort)
1427 encoder = to_intel_encoder(connector->encoder);
1428 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
1431 if (encoder && connector->status == connector_status_connected) {
1432 intel_dp = enc_to_intel_dp(encoder);
1433 status = kstrtoint(input_buffer, 10, &val);
1436 drm_dbg(&to_i915(dev)->drm,
1437 "Got %d for test active\n", val);
1438 /* To prevent erroneous activation of the compliance
1439 * testing code, only accept an actual value of 1 here
1442 intel_dp->compliance.test_active = true;
1444 intel_dp->compliance.test_active = false;
1447 drm_connector_list_iter_end(&conn_iter);
1448 kfree(input_buffer);
1456 static int i915_displayport_test_active_show(struct seq_file *m, void *data)
1458 struct drm_i915_private *dev_priv = m->private;
1459 struct drm_device *dev = &dev_priv->drm;
1460 struct drm_connector *connector;
1461 struct drm_connector_list_iter conn_iter;
1462 struct intel_dp *intel_dp;
1464 drm_connector_list_iter_begin(dev, &conn_iter);
1465 drm_for_each_connector_iter(connector, &conn_iter) {
1466 struct intel_encoder *encoder;
1468 if (connector->connector_type !=
1469 DRM_MODE_CONNECTOR_DisplayPort)
1472 encoder = to_intel_encoder(connector->encoder);
1473 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
1476 if (encoder && connector->status == connector_status_connected) {
1477 intel_dp = enc_to_intel_dp(encoder);
1478 if (intel_dp->compliance.test_active)
1485 drm_connector_list_iter_end(&conn_iter);
1490 static int i915_displayport_test_active_open(struct inode *inode,
1493 return single_open(file, i915_displayport_test_active_show,
1497 static const struct file_operations i915_displayport_test_active_fops = {
1498 .owner = THIS_MODULE,
1499 .open = i915_displayport_test_active_open,
1501 .llseek = seq_lseek,
1502 .release = single_release,
1503 .write = i915_displayport_test_active_write
1506 static int i915_displayport_test_data_show(struct seq_file *m, void *data)
1508 struct drm_i915_private *dev_priv = m->private;
1509 struct drm_device *dev = &dev_priv->drm;
1510 struct drm_connector *connector;
1511 struct drm_connector_list_iter conn_iter;
1512 struct intel_dp *intel_dp;
1514 drm_connector_list_iter_begin(dev, &conn_iter);
1515 drm_for_each_connector_iter(connector, &conn_iter) {
1516 struct intel_encoder *encoder;
1518 if (connector->connector_type !=
1519 DRM_MODE_CONNECTOR_DisplayPort)
1522 encoder = to_intel_encoder(connector->encoder);
1523 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
1526 if (encoder && connector->status == connector_status_connected) {
1527 intel_dp = enc_to_intel_dp(encoder);
1528 if (intel_dp->compliance.test_type ==
1529 DP_TEST_LINK_EDID_READ)
1530 seq_printf(m, "%lx",
1531 intel_dp->compliance.test_data.edid);
1532 else if (intel_dp->compliance.test_type ==
1533 DP_TEST_LINK_VIDEO_PATTERN) {
1534 seq_printf(m, "hdisplay: %d\n",
1535 intel_dp->compliance.test_data.hdisplay);
1536 seq_printf(m, "vdisplay: %d\n",
1537 intel_dp->compliance.test_data.vdisplay);
1538 seq_printf(m, "bpc: %u\n",
1539 intel_dp->compliance.test_data.bpc);
1540 } else if (intel_dp->compliance.test_type ==
1541 DP_TEST_LINK_PHY_TEST_PATTERN) {
1542 seq_printf(m, "pattern: %d\n",
1543 intel_dp->compliance.test_data.phytest.phy_pattern);
1544 seq_printf(m, "Number of lanes: %d\n",
1545 intel_dp->compliance.test_data.phytest.num_lanes);
1546 seq_printf(m, "Link Rate: %d\n",
1547 intel_dp->compliance.test_data.phytest.link_rate);
1548 seq_printf(m, "level: %02x\n",
1549 intel_dp->train_set[0]);
1554 drm_connector_list_iter_end(&conn_iter);
1558 DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_data);
1560 static int i915_displayport_test_type_show(struct seq_file *m, void *data)
1562 struct drm_i915_private *dev_priv = m->private;
1563 struct drm_device *dev = &dev_priv->drm;
1564 struct drm_connector *connector;
1565 struct drm_connector_list_iter conn_iter;
1566 struct intel_dp *intel_dp;
1568 drm_connector_list_iter_begin(dev, &conn_iter);
1569 drm_for_each_connector_iter(connector, &conn_iter) {
1570 struct intel_encoder *encoder;
1572 if (connector->connector_type !=
1573 DRM_MODE_CONNECTOR_DisplayPort)
1576 encoder = to_intel_encoder(connector->encoder);
1577 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
1580 if (encoder && connector->status == connector_status_connected) {
1581 intel_dp = enc_to_intel_dp(encoder);
1582 seq_printf(m, "%02lx\n", intel_dp->compliance.test_type);
1586 drm_connector_list_iter_end(&conn_iter);
1590 DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type);
static void wm_latency_show(struct seq_file *m, const u16 wm[8])
{
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	int level;
	int num_levels;

	if (IS_CHERRYVIEW(dev_priv))
		num_levels = 3;
	else if (IS_VALLEYVIEW(dev_priv))
		num_levels = 1;
	else if (IS_G4X(dev_priv))
		num_levels = 3;
	else
		num_levels = ilk_wm_max_level(dev_priv) + 1;

	drm_modeset_lock_all(dev);

	for (level = 0; level < num_levels; level++) {
		unsigned int latency = wm[level];

		/*
		 * - WM1+ latency values in 0.5us units
		 * - latencies are in us on gen9/vlv/chv
		 */
		if (DISPLAY_VER(dev_priv) >= 9 ||
		    IS_VALLEYVIEW(dev_priv) ||
		    IS_CHERRYVIEW(dev_priv) ||
		    IS_G4X(dev_priv))
			latency *= 10;
		else if (level > 0)
			latency *= 5;

		seq_printf(m, "WM%d %u (%u.%u usec)\n",
			   level, wm[level], latency / 10, latency % 10);
	}

	drm_modeset_unlock_all(dev);
}
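
/*
 * The i915_{pri,spr,cur}_wm_latency files built on wm_latency_show() print
 * one "WM<level> <raw> (<usec>)" line per watermark level. Writes are parsed
 * by wm_latency_write() below as space-separated unsigned values, one per
 * watermark level, e.g. on a platform with eight levels (assuming DRM
 * minor 0):
 *
 *   echo "2 4 10 20 30 40 50 60" > /sys/kernel/debug/dri/0/i915_pri_wm_latency
 */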
1632 static int pri_wm_latency_show(struct seq_file *m, void *data)
1634 struct drm_i915_private *dev_priv = m->private;
1635 const u16 *latencies;
1637 if (DISPLAY_VER(dev_priv) >= 9)
1638 latencies = dev_priv->wm.skl_latency;
1640 latencies = dev_priv->wm.pri_latency;
1642 wm_latency_show(m, latencies);
1647 static int spr_wm_latency_show(struct seq_file *m, void *data)
1649 struct drm_i915_private *dev_priv = m->private;
1650 const u16 *latencies;
1652 if (DISPLAY_VER(dev_priv) >= 9)
1653 latencies = dev_priv->wm.skl_latency;
1655 latencies = dev_priv->wm.spr_latency;
1657 wm_latency_show(m, latencies);
1662 static int cur_wm_latency_show(struct seq_file *m, void *data)
1664 struct drm_i915_private *dev_priv = m->private;
1665 const u16 *latencies;
1667 if (DISPLAY_VER(dev_priv) >= 9)
1668 latencies = dev_priv->wm.skl_latency;
1670 latencies = dev_priv->wm.cur_latency;
1672 wm_latency_show(m, latencies);
1677 static int pri_wm_latency_open(struct inode *inode, struct file *file)
1679 struct drm_i915_private *dev_priv = inode->i_private;
1681 if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv))
1684 return single_open(file, pri_wm_latency_show, dev_priv);
1687 static int spr_wm_latency_open(struct inode *inode, struct file *file)
1689 struct drm_i915_private *dev_priv = inode->i_private;
1691 if (HAS_GMCH(dev_priv))
1694 return single_open(file, spr_wm_latency_show, dev_priv);
1697 static int cur_wm_latency_open(struct inode *inode, struct file *file)
1699 struct drm_i915_private *dev_priv = inode->i_private;
1701 if (HAS_GMCH(dev_priv))
1704 return single_open(file, cur_wm_latency_show, dev_priv);
1707 static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
1708 size_t len, loff_t *offp, u16 wm[8])
1710 struct seq_file *m = file->private_data;
1711 struct drm_i915_private *dev_priv = m->private;
1712 struct drm_device *dev = &dev_priv->drm;
1719 if (IS_CHERRYVIEW(dev_priv))
1721 else if (IS_VALLEYVIEW(dev_priv))
1723 else if (IS_G4X(dev_priv))
1726 num_levels = ilk_wm_max_level(dev_priv) + 1;
1728 if (len >= sizeof(tmp))
1731 if (copy_from_user(tmp, ubuf, len))
1736 ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
1737 &new[0], &new[1], &new[2], &new[3],
1738 &new[4], &new[5], &new[6], &new[7]);
1739 if (ret != num_levels)
1742 drm_modeset_lock_all(dev);
1744 for (level = 0; level < num_levels; level++)
1745 wm[level] = new[level];
1747 drm_modeset_unlock_all(dev);
1753 static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
1754 size_t len, loff_t *offp)
1756 struct seq_file *m = file->private_data;
1757 struct drm_i915_private *dev_priv = m->private;
1760 if (DISPLAY_VER(dev_priv) >= 9)
1761 latencies = dev_priv->wm.skl_latency;
1763 latencies = dev_priv->wm.pri_latency;
1765 return wm_latency_write(file, ubuf, len, offp, latencies);
1768 static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
1769 size_t len, loff_t *offp)
1771 struct seq_file *m = file->private_data;
1772 struct drm_i915_private *dev_priv = m->private;
1775 if (DISPLAY_VER(dev_priv) >= 9)
1776 latencies = dev_priv->wm.skl_latency;
1778 latencies = dev_priv->wm.spr_latency;
1780 return wm_latency_write(file, ubuf, len, offp, latencies);
1783 static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
1784 size_t len, loff_t *offp)
1786 struct seq_file *m = file->private_data;
1787 struct drm_i915_private *dev_priv = m->private;
1790 if (DISPLAY_VER(dev_priv) >= 9)
1791 latencies = dev_priv->wm.skl_latency;
1793 latencies = dev_priv->wm.cur_latency;
1795 return wm_latency_write(file, ubuf, len, offp, latencies);
1798 static const struct file_operations i915_pri_wm_latency_fops = {
1799 .owner = THIS_MODULE,
1800 .open = pri_wm_latency_open,
1802 .llseek = seq_lseek,
1803 .release = single_release,
1804 .write = pri_wm_latency_write
1807 static const struct file_operations i915_spr_wm_latency_fops = {
1808 .owner = THIS_MODULE,
1809 .open = spr_wm_latency_open,
1811 .llseek = seq_lseek,
1812 .release = single_release,
1813 .write = spr_wm_latency_write
1816 static const struct file_operations i915_cur_wm_latency_fops = {
1817 .owner = THIS_MODULE,
1818 .open = cur_wm_latency_open,
1820 .llseek = seq_lseek,
1821 .release = single_release,
1822 .write = cur_wm_latency_write
1825 static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
1827 struct drm_i915_private *dev_priv = m->private;
1828 struct i915_hotplug *hotplug = &dev_priv->hotplug;
1830 /* Synchronize with everything first in case there's been an HPD
1831 * storm, but we haven't finished handling it in the kernel yet
1833 intel_synchronize_irq(dev_priv);
1834 flush_work(&dev_priv->hotplug.dig_port_work);
1835 flush_delayed_work(&dev_priv->hotplug.hotplug_work);
1837 seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
1838 seq_printf(m, "Detected: %s\n",
1839 yesno(delayed_work_pending(&hotplug->reenable_work)));
1844 static ssize_t i915_hpd_storm_ctl_write(struct file *file,
1845 const char __user *ubuf, size_t len,
1848 struct seq_file *m = file->private_data;
1849 struct drm_i915_private *dev_priv = m->private;
1850 struct i915_hotplug *hotplug = &dev_priv->hotplug;
1851 unsigned int new_threshold;
1856 if (len >= sizeof(tmp))
1859 if (copy_from_user(tmp, ubuf, len))
1864 /* Strip newline, if any */
1865 newline = strchr(tmp, '\n');
1869 if (strcmp(tmp, "reset") == 0)
1870 new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
1871 else if (kstrtouint(tmp, 10, &new_threshold) != 0)
1874 if (new_threshold > 0)
1875 drm_dbg_kms(&dev_priv->drm,
1876 "Setting HPD storm detection threshold to %d\n",
1879 drm_dbg_kms(&dev_priv->drm, "Disabling HPD storm detection\n");
1881 spin_lock_irq(&dev_priv->irq_lock);
1882 hotplug->hpd_storm_threshold = new_threshold;
1883 /* Reset the HPD storm stats so we don't accidentally trigger a storm */
1885 hotplug->stats[i].count = 0;
1886 spin_unlock_irq(&dev_priv->irq_lock);
1888 /* Re-enable hpd immediately if we were in an irq storm */
1889 flush_delayed_work(&dev_priv->hotplug.reenable_work);
1894 static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
1896 return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
1899 static const struct file_operations i915_hpd_storm_ctl_fops = {
1900 .owner = THIS_MODULE,
1901 .open = i915_hpd_storm_ctl_open,
1903 .llseek = seq_lseek,
1904 .release = single_release,
1905 .write = i915_hpd_storm_ctl_write
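
/*
 * i915_hpd_storm_ctl accepts either a decimal HPD storm detection threshold
 * or the literal string "reset" to restore HPD_STORM_DEFAULT_THRESHOLD,
 * e.g. (assuming DRM minor 0):
 *
 *   echo 5 > /sys/kernel/debug/dri/0/i915_hpd_storm_ctl
 *   echo reset > /sys/kernel/debug/dri/0/i915_hpd_storm_ctl
 */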
1908 static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data)
1910 struct drm_i915_private *dev_priv = m->private;
1912 seq_printf(m, "Enabled: %s\n",
1913 yesno(dev_priv->hotplug.hpd_short_storm_enabled));
1919 i915_hpd_short_storm_ctl_open(struct inode *inode, struct file *file)
1921 return single_open(file, i915_hpd_short_storm_ctl_show,
1925 static ssize_t i915_hpd_short_storm_ctl_write(struct file *file,
1926 const char __user *ubuf,
1927 size_t len, loff_t *offp)
1929 struct seq_file *m = file->private_data;
1930 struct drm_i915_private *dev_priv = m->private;
1931 struct i915_hotplug *hotplug = &dev_priv->hotplug;
1937 if (len >= sizeof(tmp))
1940 if (copy_from_user(tmp, ubuf, len))
1945 /* Strip newline, if any */
1946 newline = strchr(tmp, '\n');
1950 /* Reset to the "default" state for this system */
1951 if (strcmp(tmp, "reset") == 0)
1952 new_state = !HAS_DP_MST(dev_priv);
1953 else if (kstrtobool(tmp, &new_state) != 0)
1956 drm_dbg_kms(&dev_priv->drm, "%sabling HPD short storm detection\n",
1957 new_state ? "En" : "Dis");
1959 spin_lock_irq(&dev_priv->irq_lock);
1960 hotplug->hpd_short_storm_enabled = new_state;
1961 /* Reset the HPD storm stats so we don't accidentally trigger a storm */
1963 hotplug->stats[i].count = 0;
1964 spin_unlock_irq(&dev_priv->irq_lock);
1966 /* Re-enable hpd immediately if we were in an irq storm */
1967 flush_delayed_work(&dev_priv->hotplug.reenable_work);
1972 static const struct file_operations i915_hpd_short_storm_ctl_fops = {
1973 .owner = THIS_MODULE,
1974 .open = i915_hpd_short_storm_ctl_open,
1976 .llseek = seq_lseek,
1977 .release = single_release,
1978 .write = i915_hpd_short_storm_ctl_write,
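
/*
 * i915_hpd_short_storm_ctl takes a boolean ("0"/"1", "y"/"n", ...) to force
 * short-pulse HPD storm detection on or off, or "reset" to return to the
 * platform default (enabled only when the device has no DP MST support).
 */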
1981 static int i915_drrs_ctl_set(void *data, u64 val)
1983 struct drm_i915_private *dev_priv = data;
1984 struct drm_device *dev = &dev_priv->drm;
1985 struct intel_crtc *crtc;
1987 if (DISPLAY_VER(dev_priv) < 7)
1990 for_each_intel_crtc(dev, crtc) {
1991 struct drm_connector_list_iter conn_iter;
1992 struct intel_crtc_state *crtc_state;
1993 struct drm_connector *connector;
1994 struct drm_crtc_commit *commit;
1997 ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex);
2001 crtc_state = to_intel_crtc_state(crtc->base.state);
2003 if (!crtc_state->hw.active ||
2004 !crtc_state->has_drrs)
2007 commit = crtc_state->uapi.commit;
2009 ret = wait_for_completion_interruptible(&commit->hw_done);
2014 drm_connector_list_iter_begin(dev, &conn_iter);
2015 drm_for_each_connector_iter(connector, &conn_iter) {
2016 struct intel_encoder *encoder;
2017 struct intel_dp *intel_dp;
2019 if (!(crtc_state->uapi.connector_mask &
2020 drm_connector_mask(connector)))
2023 encoder = intel_attached_encoder(to_intel_connector(connector));
2024 if (encoder->type != INTEL_OUTPUT_EDP)
2027 drm_dbg(&dev_priv->drm,
2028 "Manually %sabling DRRS. %llu\n",
2029 val ? "en" : "dis", val);
2031 intel_dp = enc_to_intel_dp(encoder);
2033 intel_drrs_enable(intel_dp, crtc_state);
2035 intel_drrs_disable(intel_dp, crtc_state);
2037 drm_connector_list_iter_end(&conn_iter);
2040 drm_modeset_unlock(&crtc->base.mutex);
2048 DEFINE_SIMPLE_ATTRIBUTE(i915_drrs_ctl_fops, NULL, i915_drrs_ctl_set, "%llu\n");
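
/*
 * Writing a non-zero value to i915_drrs_ctl manually enables DRRS on every
 * active eDP output that supports it, and writing zero disables it again,
 * e.g. (assuming DRM minor 0):
 *
 *   echo 0 > /sys/kernel/debug/dri/0/i915_drrs_ctl
 */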
2051 i915_fifo_underrun_reset_write(struct file *filp,
2052 const char __user *ubuf,
2053 size_t cnt, loff_t *ppos)
2055 struct drm_i915_private *dev_priv = filp->private_data;
2056 struct intel_crtc *crtc;
2057 struct drm_device *dev = &dev_priv->drm;
2061 ret = kstrtobool_from_user(ubuf, cnt, &reset);
2068 for_each_intel_crtc(dev, crtc) {
2069 struct drm_crtc_commit *commit;
2070 struct intel_crtc_state *crtc_state;
2072 ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex);
2076 crtc_state = to_intel_crtc_state(crtc->base.state);
2077 commit = crtc_state->uapi.commit;
2079 ret = wait_for_completion_interruptible(&commit->hw_done);
2081 ret = wait_for_completion_interruptible(&commit->flip_done);
2084 if (!ret && crtc_state->hw.active) {
2085 drm_dbg_kms(&dev_priv->drm,
2086 "Re-arming FIFO underruns on pipe %c\n",
2087 pipe_name(crtc->pipe));
2089 intel_crtc_arm_fifo_underrun(crtc, crtc_state);
2092 drm_modeset_unlock(&crtc->base.mutex);
2098 ret = intel_fbc_reset_underrun(dev_priv);
2105 static const struct file_operations i915_fifo_underrun_reset_ops = {
2106 .owner = THIS_MODULE,
2107 .open = simple_open,
2108 .write = i915_fifo_underrun_reset_write,
2109 .llseek = default_llseek,
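
/*
 * Writing a truthy value to i915_fifo_underrun_reset re-arms FIFO underrun
 * reporting (and the FBC underrun handling) after it was disabled by a
 * previous underrun, e.g. (assuming DRM minor 0):
 *
 *   echo 1 > /sys/kernel/debug/dri/0/i915_fifo_underrun_reset
 */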
static const struct drm_info_list intel_display_debugfs_list[] = {
	{"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
	{"i915_fbc_status", i915_fbc_status, 0},
	{"i915_ips_status", i915_ips_status, 0},
	{"i915_sr_status", i915_sr_status, 0},
	{"i915_opregion", i915_opregion, 0},
	{"i915_vbt", i915_vbt, 0},
	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
	{"i915_edp_psr_status", i915_edp_psr_status, 0},
	{"i915_power_domain_info", i915_power_domain_info, 0},
	{"i915_dmc_info", i915_dmc_info, 0},
	{"i915_display_info", i915_display_info, 0},
	{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
	{"i915_dp_mst_info", i915_dp_mst_info, 0},
	{"i915_ddb_info", i915_ddb_info, 0},
	{"i915_drrs_status", i915_drrs_status, 0},
	{"i915_lpsp_status", i915_lpsp_status, 0},
};

static const struct {
	const char *name;
	const struct file_operations *fops;
} intel_display_debugfs_files[] = {
	{"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops},
	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
	{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
	{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
	{"i915_fbc_false_color", &i915_fbc_false_color_fops},
	{"i915_dp_test_data", &i915_displayport_test_data_fops},
	{"i915_dp_test_type", &i915_displayport_test_type_fops},
	{"i915_dp_test_active", &i915_displayport_test_active_fops},
	{"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
	{"i915_hpd_short_storm_ctl", &i915_hpd_short_storm_ctl_fops},
	{"i915_ipc_status", &i915_ipc_status_fops},
	{"i915_drrs_ctl", &i915_drrs_ctl_fops},
	{"i915_edp_psr_debug", &i915_edp_psr_debug_fops},
};

void intel_display_debugfs_register(struct drm_i915_private *i915)
{
	struct drm_minor *minor = i915->drm.primary;
	int i;

	for (i = 0; i < ARRAY_SIZE(intel_display_debugfs_files); i++) {
		debugfs_create_file(intel_display_debugfs_files[i].name,
				    0644, minor->debugfs_root,
				    to_i915(minor->dev),
				    intel_display_debugfs_files[i].fops);
	}

	drm_debugfs_create_files(intel_display_debugfs_list,
				 ARRAY_SIZE(intel_display_debugfs_list),
				 minor->debugfs_root, minor);
}
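
/*
 * The show() callbacks below (i915_panel, i915_hdcp_sink_capability,
 * i915_psr_status, i915_lpsp_capability, i915_dsc_fec_support) back
 * per-connector debugfs nodes; they receive the drm_connector via
 * m->private rather than the device-wide drm_info_node used above.
 */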
static int i915_panel_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct intel_dp *intel_dp =
		intel_attached_dp(to_intel_connector(connector));

	if (connector->status != connector_status_connected)
		return -ENODEV;

	seq_printf(m, "Panel power up delay: %d\n",
		   intel_dp->pps.panel_power_up_delay);
	seq_printf(m, "Panel power down delay: %d\n",
		   intel_dp->pps.panel_power_down_delay);
	seq_printf(m, "Backlight on delay: %d\n",
		   intel_dp->pps.backlight_on_delay);
	seq_printf(m, "Backlight off delay: %d\n",
		   intel_dp->pps.backlight_off_delay);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_panel);
static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct drm_i915_private *i915 = to_i915(connector->dev);
	struct intel_connector *intel_connector = to_intel_connector(connector);
	int ret;

	ret = drm_modeset_lock_single_interruptible(&i915->drm.mode_config.connection_mutex);
	if (ret)
		return ret;

	if (!connector->encoder || connector->status != connector_status_connected) {
		ret = -ENODEV;
		goto out;
	}

	seq_printf(m, "%s:%d HDCP version: ", connector->name,
		   connector->base.id);
	intel_hdcp_info(m, intel_connector);

out:
	drm_modeset_unlock(&i915->drm.mode_config.connection_mutex);

	return ret;
}
DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);
static int i915_psr_status_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct intel_dp *intel_dp =
		intel_attached_dp(to_intel_connector(connector));

	return intel_psr_status(m, intel_dp);
}
DEFINE_SHOW_ATTRIBUTE(i915_psr_status);
static int i915_lpsp_capability_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct drm_i915_private *i915 = to_i915(connector->dev);
	struct intel_encoder *encoder;
	bool lpsp_capable = false;

	encoder = intel_attached_encoder(to_intel_connector(connector));
	if (!encoder)
		return -ENODEV;

	if (connector->status != connector_status_connected)
		return -ENODEV;

	if (DISPLAY_VER(i915) >= 13)
		lpsp_capable = encoder->port <= PORT_B;
	else if (DISPLAY_VER(i915) >= 12)
		/*
		 * TGL can actually drive LPSP on ports up to DDI_C, but no
		 * TGL SKU has DDI_C physically connected, and the driver
		 * does not initialize DDI_C for gen12.
		 */
		lpsp_capable = encoder->port <= PORT_B;
	else if (DISPLAY_VER(i915) == 11)
		lpsp_capable = (connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
				connector->connector_type == DRM_MODE_CONNECTOR_eDP);
	else if (IS_DISPLAY_VER(i915, 9, 10))
		lpsp_capable = (encoder->port == PORT_A &&
				(connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
				 connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
				 connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort));
	else if (IS_HASWELL(i915) || IS_BROADWELL(i915))
		lpsp_capable = connector->connector_type == DRM_MODE_CONNECTOR_eDP;

	seq_printf(m, "LPSP: %s\n", lpsp_capable ? "capable" : "incapable");

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_lpsp_capability);
static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct drm_device *dev = connector->dev;
	struct drm_crtc *crtc;
	struct intel_dp *intel_dp;
	struct drm_modeset_acquire_ctx ctx;
	struct intel_crtc_state *crtc_state = NULL;
	int ret = 0;
	bool try_again = false;

	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);

	do {
		try_again = false;
		ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
				       &ctx);
		if (ret) {
			if (ret == -EDEADLK && !drm_modeset_backoff(&ctx)) {
				try_again = true;
				continue;
			}
			break;
		}
		crtc = connector->state->crtc;
		if (connector->status != connector_status_connected || !crtc) {
			ret = -ENODEV;
			break;
		}
		ret = drm_modeset_lock(&crtc->mutex, &ctx);
		if (ret == -EDEADLK) {
			ret = drm_modeset_backoff(&ctx);
			if (!ret) {
				try_again = true;
				continue;
			}
			break;
		} else if (ret) {
			break;
		}
		intel_dp = intel_attached_dp(to_intel_connector(connector));
		crtc_state = to_intel_crtc_state(crtc->state);
		seq_printf(m, "DSC_Enabled: %s\n",
			   yesno(crtc_state->dsc.compression_enable));
		seq_printf(m, "DSC_Sink_Support: %s\n",
			   yesno(drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)));
		seq_printf(m, "Force_DSC_Enable: %s\n",
			   yesno(intel_dp->force_dsc_en));
		if (!intel_dp_is_edp(intel_dp))
			seq_printf(m, "FEC_Sink_Support: %s\n",
				   yesno(drm_dp_sink_supports_fec(intel_dp->fec_capable)));
	} while (try_again);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}
static ssize_t i915_dsc_fec_support_write(struct file *file,
					  const char __user *ubuf,
					  size_t len, loff_t *offp)
{
	bool dsc_enable = false;
	int ret;
	struct drm_connector *connector =
		((struct seq_file *)file->private_data)->private;
	struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector));
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

	if (len == 0)
		return 0;

	drm_dbg(&i915->drm,
		"Copied %zu bytes from user to force DSC\n", len);

	ret = kstrtobool_from_user(ubuf, len, &dsc_enable);
	if (ret < 0)
		return ret;

	drm_dbg(&i915->drm, "Got %s for DSC Enable\n",
		(dsc_enable) ? "true" : "false");
	intel_dp->force_dsc_en = dsc_enable;

	*offp += len;
	return len;
}

static int i915_dsc_fec_support_open(struct inode *inode,
				     struct file *file)
{
	return single_open(file, i915_dsc_fec_support_show,
			   inode->i_private);
}

static const struct file_operations i915_dsc_fec_support_fops = {
	.owner = THIS_MODULE,
	.open = i915_dsc_fec_support_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_dsc_fec_support_write
};
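
/*
 * Usage note (illustrative sketch, not part of the driver): this node is
 * created per connector (see intel_connector_debugfs_add() below), so with
 * debugfs mounted at the default location and a DP connector named DP-1
 * (example name), DSC can be forced on for subsequent modesets with:
 *
 *   echo 1 > /sys/kernel/debug/dri/0/DP-1/i915_dsc_fec_support
 *   cat /sys/kernel/debug/dri/0/DP-1/i915_dsc_fec_support
 *
 * The write handler above only parses a bool and latches force_dsc_en; it
 * does not itself trigger a modeset.
 */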
static int i915_dsc_bpp_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct drm_device *dev = connector->dev;
	struct drm_crtc *crtc;
	struct intel_crtc_state *crtc_state;
	struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector));
	int ret;

	if (!encoder)
		return -ENODEV;

	ret = drm_modeset_lock_single_interruptible(&dev->mode_config.connection_mutex);
	if (ret)
		return ret;

	crtc = connector->state->crtc;
	if (connector->status != connector_status_connected || !crtc) {
		ret = -ENODEV;
		goto out;
	}

	crtc_state = to_intel_crtc_state(crtc->state);
	seq_printf(m, "Compressed_BPP: %d\n", crtc_state->dsc.compressed_bpp);

out:	drm_modeset_unlock(&dev->mode_config.connection_mutex);

	return ret;
}
static ssize_t i915_dsc_bpp_write(struct file *file,
				  const char __user *ubuf,
				  size_t len, loff_t *offp)
{
	struct drm_connector *connector =
		((struct seq_file *)file->private_data)->private;
	struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector));
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	int dsc_bpp = 0;
	int ret;

	ret = kstrtoint_from_user(ubuf, len, 0, &dsc_bpp);
	if (ret < 0)
		return ret;

	intel_dp->force_dsc_bpp = dsc_bpp;
	*offp += len;

	return len;
}

static int i915_dsc_bpp_open(struct inode *inode,
			     struct file *file)
{
	return single_open(file, i915_dsc_bpp_show,
			   inode->i_private);
}

static const struct file_operations i915_dsc_bpp_fops = {
	.owner = THIS_MODULE,
	.open = i915_dsc_bpp_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_dsc_bpp_write
};
/**
 * intel_connector_debugfs_add - add i915 specific connector debugfs files
 * @intel_connector: pointer to a registered intel_connector
 *
 * Cleanup will be done by drm_connector_unregister() through a call to
 * drm_debugfs_connector_remove().
 */
void intel_connector_debugfs_add(struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct dentry *root = connector->debugfs_entry;
	struct drm_i915_private *dev_priv = to_i915(connector->dev);

	/* The connector must have been registered beforehand. */
	if (!root)
		return;

	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
		debugfs_create_file("i915_panel_timings", S_IRUGO, root,
				    connector, &i915_panel_fops);
		debugfs_create_file("i915_psr_sink_status", S_IRUGO, root,
				    connector, &i915_psr_sink_status_fops);
	}

	if (HAS_PSR(dev_priv) &&
	    connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
		debugfs_create_file("i915_psr_status", 0444, root,
				    connector, &i915_psr_status_fops);
	}

	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) {
		debugfs_create_file("i915_hdcp_sink_capability", S_IRUGO, root,
				    connector, &i915_hdcp_sink_capability_fops);
	}

	if (DISPLAY_VER(dev_priv) >= 11 &&
	    ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort &&
	      !to_intel_connector(connector)->mst_port) ||
	     connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
		debugfs_create_file("i915_dsc_fec_support", 0644, root,
				    connector, &i915_dsc_fec_support_fops);

		debugfs_create_file("i915_dsc_bpp", 0644, root,
				    connector, &i915_dsc_bpp_fops);
	}

	if (connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
	    connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
	    connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIB)
		debugfs_create_file("i915_lpsp_capability", 0444, root,
				    connector, &i915_lpsp_capability_fops);
}
/**
 * intel_crtc_debugfs_add - add i915 specific crtc debugfs files
 * @crtc: pointer to a drm_crtc
 *
 * Failure to add debugfs entries should generally be ignored.
 */
void intel_crtc_debugfs_add(struct drm_crtc *crtc)
{
	if (crtc->debugfs_entry)
		crtc_updates_add(crtc);
}