1 /* SPDX-License-Identifier: MIT */
3 * Copyright (C) 2017 Google, Inc.
6 * Sean Paul <seanpaul@chromium.org>
10 #include <drm/drm_hdcp.h>
11 #include <linux/i2c.h>
12 #include <linux/random.h>
14 #include "intel_drv.h"
17 #define KEY_LOAD_TRIES 5
/*
 * Wait for the repeater's KSV FIFO to become ready, using the
 * transport-specific shim callback. The HDCP spec allows the repeater
 * up to 5 seconds to assemble the KSV list, hence the 5s timeout below.
 * NOTE(review): interior lines (locals, return path) are missing from
 * this chunk; code left byte-identical.
 */
19 static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *intel_dig_port,
20 const struct intel_hdcp_shim *shim)
25 /* Poll for ksv list ready (spec says max time allowed is 5s) */
26 ret = __wait_for(read_ret = shim->read_ksv_ready(intel_dig_port,
28 read_ret || ksv_ready, 5 * 1000 * 1000, 1000,
/*
 * Check whether the HDCP key can currently be loaded by software.
 * On HSW/BDW the key is loaded by HW on display resume, so loadability
 * is tied to the global display power well; on BXT+ it is tied to PW#1.
 * Walks the power-well list under power_domains->lock to read the
 * enabled state of the relevant well.
 */
40 static bool hdcp_key_loadable(struct drm_i915_private *dev_priv)
42 struct i915_power_domains *power_domains = &dev_priv->power_domains;
43 struct i915_power_well *power_well;
44 enum i915_power_well_id id;
48 * On HSW and BDW, Display HW loads the Key as soon as Display resumes.
49 * On all BXT+, SW can load the keys only when the PW#1 is turned on.
51 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
52 id = HSW_DISP_PW_GLOBAL;
/* NOTE(review): the else branch selecting the BXT+ power-well id is not
 * visible in this chunk. */
56 mutex_lock(&power_domains->lock);
58 /* PG1 (power well #1) needs to be enabled */
59 for_each_power_well(dev_priv, power_well) {
60 if (power_well->id == id) {
61 enabled = power_well->ops->is_enabled(dev_priv,
66 mutex_unlock(&power_domains->lock);
69 * Another req for hdcp key loadability is enabled state of pll for
70 * cdclk. Without active crtc we wont land here. So we are assuming that
71 * cdclk is already on.
/*
 * Clear the loaded HDCP keys and reset all key-status bits (write-1-to-
 * clear), so a fresh key load can be retried from a clean state.
 */
77 static void intel_hdcp_clear_keys(struct drm_i915_private *dev_priv)
79 I915_WRITE(HDCP_KEY_CONF, HDCP_CLEAR_KEYS_TRIGGER);
80 I915_WRITE(HDCP_KEY_STATUS, HDCP_KEY_LOAD_DONE | HDCP_KEY_LOAD_STATUS |
81 HDCP_FUSE_IN_PROGRESS | HDCP_FUSE_ERROR | HDCP_FUSE_DONE);
/*
 * Load the HDCP 1.4 keys into the display engine.
 * - Fast path: keys already loaded successfully -> nothing to do.
 * - HSW/BDW: HW loads keys on display reset; SW cannot, so a missing
 *   key is an error.
 * - SKL/KBL: key load is triggered through a pcode (PCU) mailbox write.
 * - Other BXT+ platforms: key load is triggered via HDCP_KEY_CONF.
 * After triggering, waits for HDCP_KEY_LOAD_DONE and then hands the
 * Aksv to the PCH display for use in authentication.
 */
84 static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv)
89 val = I915_READ(HDCP_KEY_STATUS);
90 if ((val & HDCP_KEY_LOAD_DONE) && (val & HDCP_KEY_LOAD_STATUS))
94 * On HSW and BDW HW loads the HDCP1.4 Key when Display comes
95 * out of reset. So if Key is not already loaded, its an error state.
97 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
98 if (!(I915_READ(HDCP_KEY_STATUS) & HDCP_KEY_LOAD_DONE))
102 * Initiate loading the HDCP key from fuses.
104 * BXT+ platforms, HDCP key needs to be loaded by SW. Only SKL and KBL
105 * differ in the key load trigger process from other platforms.
107 if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
108 mutex_lock(&dev_priv->pcu_lock);
109 ret = sandybridge_pcode_write(dev_priv,
110 SKL_PCODE_LOAD_HDCP_KEYS, 1);
111 mutex_unlock(&dev_priv->pcu_lock);
113 DRM_ERROR("Failed to initiate HDCP key load (%d)\n",
118 I915_WRITE(HDCP_KEY_CONF, HDCP_KEY_LOAD_TRIGGER);
121 /* Wait for the keys to load (500us) */
122 ret = __intel_wait_for_register(dev_priv, HDCP_KEY_STATUS,
123 HDCP_KEY_LOAD_DONE, HDCP_KEY_LOAD_DONE,
/* Load completed but HW reports the keys as bad -> fail. */
127 else if (!(val & HDCP_KEY_LOAD_STATUS))
130 /* Send Aksv over to PCH display for use in authentication */
131 I915_WRITE(HDCP_KEY_CONF, HDCP_AKSV_SEND_TRIGGER);
136 /* Returns updated SHA-1 index */
/*
 * Push one 32-bit word of the SHA-1 input stream into the HW hash unit
 * and wait (1 ms max) for it to be consumed (HDCP_SHA1_READY).
 */
137 static int intel_write_sha_text(struct drm_i915_private *dev_priv, u32 sha_text)
139 I915_WRITE(HDCP_SHA_TEXT, sha_text);
140 if (intel_wait_for_register(dev_priv, HDCP_REP_CTL,
141 HDCP_SHA1_READY, HDCP_SHA1_READY, 1)) {
142 DRM_ERROR("Timed out waiting for SHA1 ready\n");
/*
 * Return the HDCP_REP_CTL bits (repeater-present + SHA1 M0 select) for
 * the DDI driving this digital port. Unknown ports are logged as an
 * error.
 * NOTE(review): the switch/case labels mapping port -> DDI are missing
 * from this chunk; code left byte-identical.
 */
149 u32 intel_hdcp_get_repeater_ctl(struct intel_digital_port *intel_dig_port)
151 enum port port = intel_dig_port->base.port;
154 return HDCP_DDIA_REP_PRESENT | HDCP_DDIA_SHA1_M0;
156 return HDCP_DDIB_REP_PRESENT | HDCP_DDIB_SHA1_M0;
158 return HDCP_DDIC_REP_PRESENT | HDCP_DDIC_SHA1_M0;
160 return HDCP_DDID_REP_PRESENT | HDCP_DDID_SHA1_M0;
162 return HDCP_DDIE_REP_PRESENT | HDCP_DDIE_SHA1_M0;
166 DRM_ERROR("Unknown port %d\n", port);
/*
 * Validate a 40-bit KSV: per the HDCP spec a legal KSV contains exactly
 * twenty 1 bits. Sums the population count of each byte.
 */
171 bool intel_hdcp_is_ksv_valid(u8 *ksv)
174 /* KSV has 20 1's and 20 0's */
175 for (i = 0; i < DRM_HDCP_KSV_LEN; i++)
176 ones += hweight8(ksv[i]);
/*
 * Validate the repeater's V' against a HW-computed SHA-1 over the
 * downstream KSV list || BINFO/BSTATUS || M0.
 *
 * The five 32-bit V' parts read from the sink are loaded into
 * HDCP_SHA_V_PRIME registers; the byte stream is then fed to the HW
 * hash unit 32 bits at a time via intel_write_sha_text(). Because KSVs
 * are 40 bits (5 bytes), each iteration leaves 1-4 leftover bytes
 * (sha_leftovers) that are carried into the next word; the
 * HDCP_SHA1_TEXT_* size field in HDCP_REP_CTL tells HW how many of the
 * written bytes are text vs. M0 padding. Finally the stream is padded
 * to a 64-byte boundary and terminated with the bit-length word, and
 * the HW match bit is checked.
 *
 * NOTE(review): many interior lines (error checks after each write,
 * sha_idx wrap handling, return statements) are not visible in this
 * chunk; code left byte-identical.
 */
183 int intel_hdcp_validate_v_prime(struct intel_digital_port *intel_dig_port,
184 const struct intel_hdcp_shim *shim,
185 u8 *ksv_fifo, u8 num_downstream, u8 *bstatus)
187 struct drm_i915_private *dev_priv;
188 u32 vprime, sha_text, sha_leftovers, rep_ctl;
189 int ret, i, j, sha_idx;
191 dev_priv = intel_dig_port->base.base.dev->dev_private;
193 /* Process V' values from the receiver */
194 for (i = 0; i < DRM_HDCP_V_PRIME_NUM_PARTS; i++) {
195 ret = shim->read_v_prime_part(intel_dig_port, i, &vprime);
198 I915_WRITE(HDCP_SHA_V_PRIME(i), vprime);
202 * We need to write the concatenation of all device KSVs, BINFO (DP) ||
203 * BSTATUS (HDMI), and M0 (which is added via HDCP_REP_CTL). This byte
204 * stream is written via the HDCP_SHA_TEXT register in 32-bit
205 * increments. Every 64 bytes, we need to write HDCP_REP_CTL again. This
206 * index will keep track of our progress through the 64 bytes as well as
207 * helping us work the 40-bit KSVs through our 32-bit register.
209 * NOTE: data passed via HDCP_SHA_TEXT should be big-endian
214 rep_ctl = intel_hdcp_get_repeater_ctl(intel_dig_port);
215 I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
216 for (i = 0; i < num_downstream; i++) {
217 unsigned int sha_empty;
218 u8 *ksv = &ksv_fifo[i * DRM_HDCP_KSV_LEN];
220 /* Fill up the empty slots in sha_text and write it out */
221 sha_empty = sizeof(sha_text) - sha_leftovers;
222 for (j = 0; j < sha_empty; j++)
223 sha_text |= ksv[j] << ((sizeof(sha_text) - j - 1) * 8);
225 ret = intel_write_sha_text(dev_priv, sha_text);
229 /* Programming guide writes this every 64 bytes */
230 sha_idx += sizeof(sha_text);
232 I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
234 /* Store the leftover bytes from the ksv in sha_text */
235 sha_leftovers = DRM_HDCP_KSV_LEN - sha_empty;
237 for (j = 0; j < sha_leftovers; j++)
238 sha_text |= ksv[sha_empty + j] <<
239 ((sizeof(sha_text) - j - 1) * 8);
242 * If we still have room in sha_text for more data, continue.
243 * Otherwise, write it out immediately.
245 if (sizeof(sha_text) > sha_leftovers)
248 ret = intel_write_sha_text(dev_priv, sha_text);
253 sha_idx += sizeof(sha_text);
257 * We need to write BINFO/BSTATUS, and M0 now. Depending on how many
258 * bytes are leftover from the last ksv, we might be able to fit them
259 * all in sha_text (first 2 cases), or we might need to split them up
260 * into 2 writes (last 2 cases).
262 if (sha_leftovers == 0) {
263 /* Write 16 bits of text, 16 bits of M0 */
264 I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_16);
265 ret = intel_write_sha_text(dev_priv,
266 bstatus[0] << 8 | bstatus[1]);
269 sha_idx += sizeof(sha_text);
271 /* Write 32 bits of M0 */
272 I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0);
273 ret = intel_write_sha_text(dev_priv, 0);
276 sha_idx += sizeof(sha_text);
278 /* Write 16 bits of M0 */
279 I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_16);
280 ret = intel_write_sha_text(dev_priv, 0);
283 sha_idx += sizeof(sha_text);
285 } else if (sha_leftovers == 1) {
286 /* Write 24 bits of text, 8 bits of M0 */
287 I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_24);
288 sha_text |= bstatus[0] << 16 | bstatus[1] << 8;
289 /* Only 24-bits of data, must be in the LSB */
290 sha_text = (sha_text & 0xffffff00) >> 8;
291 ret = intel_write_sha_text(dev_priv, sha_text);
294 sha_idx += sizeof(sha_text);
296 /* Write 32 bits of M0 */
297 I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0);
298 ret = intel_write_sha_text(dev_priv, 0);
301 sha_idx += sizeof(sha_text);
303 /* Write 24 bits of M0 */
304 I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_8);
305 ret = intel_write_sha_text(dev_priv, 0);
308 sha_idx += sizeof(sha_text);
310 } else if (sha_leftovers == 2) {
311 /* Write 32 bits of text */
312 I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
313 sha_text |= bstatus[0] << 24 | bstatus[1] << 16;
314 ret = intel_write_sha_text(dev_priv, sha_text);
317 sha_idx += sizeof(sha_text);
319 /* Write 64 bits of M0 */
320 I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0);
321 for (i = 0; i < 2; i++) {
322 ret = intel_write_sha_text(dev_priv, 0);
325 sha_idx += sizeof(sha_text);
327 } else if (sha_leftovers == 3) {
328 /* Write 32 bits of text */
329 I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
330 sha_text |= bstatus[0] << 24;
331 ret = intel_write_sha_text(dev_priv, sha_text);
334 sha_idx += sizeof(sha_text);
336 /* Write 8 bits of text, 24 bits of M0 */
337 I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_8);
338 ret = intel_write_sha_text(dev_priv, bstatus[1]);
341 sha_idx += sizeof(sha_text);
343 /* Write 32 bits of M0 */
344 I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0);
345 ret = intel_write_sha_text(dev_priv, 0);
348 sha_idx += sizeof(sha_text);
350 /* Write 8 bits of M0 */
351 I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_24);
352 ret = intel_write_sha_text(dev_priv, 0);
355 sha_idx += sizeof(sha_text);
/* sha_leftovers can only be 0-3 for 5-byte KSVs; anything else means a
 * bookkeeping bug above. */
357 DRM_DEBUG_KMS("Invalid number of leftovers %d\n",
362 I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
363 /* Fill up to 64-4 bytes with zeros (leave the last write for length) */
364 while ((sha_idx % 64) < (64 - sizeof(sha_text))) {
365 ret = intel_write_sha_text(dev_priv, 0);
368 sha_idx += sizeof(sha_text);
372 * Last write gets the length of the concatenation in bits. That is:
373 * - 5 bytes per device
374 * - 10 bytes for BINFO/BSTATUS(2), M0(8)
376 sha_text = (num_downstream * 5 + 10) * 8;
377 ret = intel_write_sha_text(dev_priv, sha_text);
381 /* Tell the HW we're done with the hash and wait for it to ACK */
382 I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_COMPLETE_HASH);
383 if (intel_wait_for_register(dev_priv, HDCP_REP_CTL,
385 HDCP_SHA1_COMPLETE, 1)) {
386 DRM_DEBUG_KMS("Timed out waiting for SHA1 complete\n");
389 if (!(I915_READ(HDCP_REP_CTL) & HDCP_SHA1_V_MATCH)) {
390 DRM_DEBUG_KMS("SHA-1 mismatch, HDCP failed\n");
397 /* Implements Part 2 of the HDCP authorization procedure */
/*
 * Repeater (downstream) authentication: wait for the KSV FIFO, sanity
 * check BSTATUS topology limits, reject 0-device repeaters, read the
 * KSV list into a kcalloc'd buffer, and validate V' (retried up to 3
 * times per the DP spec).
 * NOTE(review): the kfree() of ksv_fifo and the error/return paths are
 * not visible in this chunk; code left byte-identical.
 */
399 int intel_hdcp_auth_downstream(struct intel_digital_port *intel_dig_port,
400 const struct intel_hdcp_shim *shim)
402 u8 bstatus[2], num_downstream, *ksv_fifo;
403 int ret, i, tries = 3;
405 ret = intel_hdcp_poll_ksv_fifo(intel_dig_port, shim);
407 DRM_ERROR("KSV list failed to become ready (%d)\n", ret);
411 ret = shim->read_bstatus(intel_dig_port, bstatus);
415 if (DRM_HDCP_MAX_DEVICE_EXCEEDED(bstatus[0]) ||
416 DRM_HDCP_MAX_CASCADE_EXCEEDED(bstatus[1])) {
417 DRM_ERROR("Max Topology Limit Exceeded\n");
422 * When repeater reports 0 device count, HDCP1.4 spec allows disabling
423 * the HDCP encryption. That implies that repeater can't have its own
424 * display. As there is no consumption of encrypted content in the
425 * repeater with 0 downstream devices, we are failing the
428 num_downstream = DRM_HDCP_NUM_DOWNSTREAM(bstatus[0]);
429 if (num_downstream == 0)
432 ksv_fifo = kcalloc(DRM_HDCP_KSV_LEN, num_downstream, GFP_KERNEL);
436 ret = shim->read_ksv_fifo(intel_dig_port, num_downstream, ksv_fifo);
441 * When V prime mismatches, DP Spec mandates re-read of
442 * V prime atleast twice.
444 for (i = 0; i < tries; i++) {
445 ret = intel_hdcp_validate_v_prime(intel_dig_port, shim,
446 ksv_fifo, num_downstream,
453 DRM_ERROR("V Prime validation failed.(%d)\n", ret);
457 DRM_DEBUG_KMS("HDCP is enabled (%d downstream devices)\n",
465 /* Implements Part 1 of the HDCP authorization procedure */
/*
 * First-part authentication: optional HDCP-capability probe (DP only),
 * An generation and exchange, Bksv read/validate (retried), repeater
 * detection, enabling signalling + encryption, and the R0/R0' match.
 * Ends by chaining to intel_hdcp_auth_downstream() when a repeater is
 * present. The anonymous unions below alias the 64-bit An / 40-bit
 * Bksv / Ri values between u32 register halves ("reg") and the byte
 * view the shim callbacks consume ("shim").
 * NOTE(review): union declarations and several error/return paths are
 * only partially visible in this chunk; code left byte-identical.
 */
466 static int intel_hdcp_auth(struct intel_digital_port *intel_dig_port,
467 const struct intel_hdcp_shim *shim)
469 struct drm_i915_private *dev_priv;
471 unsigned long r0_prime_gen_start;
472 int ret, i, tries = 2;
475 u8 shim[DRM_HDCP_AN_LEN]
479 u8 shim[DRM_HDCP_KSV_LEN]
483 u8 shim[DRM_HDCP_RI_LEN]
485 bool repeater_present, hdcp_capable;
487 dev_priv = intel_dig_port->base.base.dev->dev_private;
489 port = intel_dig_port->base.port;
492 * Detects whether the display is HDCP capable. Although we check for
493 * valid Bksv below, the HDCP over DP spec requires that we check
494 * whether the display supports HDCP before we write An. For HDMI
495 * displays, this is not necessary.
497 if (shim->hdcp_capable) {
498 ret = shim->hdcp_capable(intel_dig_port, &hdcp_capable);
502 DRM_ERROR("Panel is not HDCP capable\n");
507 /* Initialize An with 2 random values and acquire it */
508 for (i = 0; i < 2; i++)
509 I915_WRITE(PORT_HDCP_ANINIT(port), get_random_u32());
510 I915_WRITE(PORT_HDCP_CONF(port), HDCP_CONF_CAPTURE_AN);
512 /* Wait for An to be acquired */
513 if (intel_wait_for_register(dev_priv, PORT_HDCP_STATUS(port),
514 HDCP_STATUS_AN_READY,
515 HDCP_STATUS_AN_READY, 1)) {
516 DRM_ERROR("Timed out waiting for An\n");
520 an.reg[0] = I915_READ(PORT_HDCP_ANLO(port));
521 an.reg[1] = I915_READ(PORT_HDCP_ANHI(port));
522 ret = shim->write_an_aksv(intel_dig_port, an.shim);
/* Aksv write starts the sink computing R0'; remember when, so we can
 * honor the spec's wait before reading it back. */
526 r0_prime_gen_start = jiffies;
528 memset(&bksv, 0, sizeof(bksv));
530 /* HDCP spec states that we must retry the bksv if it is invalid */
531 for (i = 0; i < tries; i++) {
532 ret = shim->read_bksv(intel_dig_port, bksv.shim);
535 if (intel_hdcp_is_ksv_valid(bksv.shim))
539 DRM_ERROR("HDCP failed, Bksv is invalid\n");
543 I915_WRITE(PORT_HDCP_BKSVLO(port), bksv.reg[0]);
544 I915_WRITE(PORT_HDCP_BKSVHI(port), bksv.reg[1]);
546 ret = shim->repeater_present(intel_dig_port, &repeater_present);
549 if (repeater_present)
550 I915_WRITE(HDCP_REP_CTL,
551 intel_hdcp_get_repeater_ctl(intel_dig_port));
553 ret = shim->toggle_signalling(intel_dig_port, true);
557 I915_WRITE(PORT_HDCP_CONF(port), HDCP_CONF_AUTH_AND_ENC);
559 /* Wait for R0 ready */
560 if (wait_for(I915_READ(PORT_HDCP_STATUS(port)) &
561 (HDCP_STATUS_R0_READY | HDCP_STATUS_ENC), 1)) {
562 DRM_ERROR("Timed out waiting for R0 ready\n");
567 * Wait for R0' to become available. The spec says 100ms from Aksv, but
568 * some monitors can take longer than this. We'll set the timeout at
569 * 300ms just to be sure.
571 * On DP, there's an R0_READY bit available but no such bit
572 * exists on HDMI. Since the upper-bound is the same, we'll just do
573 * the stupid thing instead of polling on one and not the other.
575 wait_remaining_ms_from_jiffies(r0_prime_gen_start, 300);
580 * DP HDCP Spec mandates the two more reattempt to read R0, incase
583 for (i = 0; i < tries; i++) {
585 ret = shim->read_ri_prime(intel_dig_port, ri.shim);
588 I915_WRITE(PORT_HDCP_RPRIME(port), ri.reg);
590 /* Wait for Ri prime match */
591 if (!wait_for(I915_READ(PORT_HDCP_STATUS(port)) &
592 (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1))
597 DRM_ERROR("Timed out waiting for Ri prime match (%x)\n",
598 I915_READ(PORT_HDCP_STATUS(port)));
602 /* Wait for encryption confirmation */
603 if (intel_wait_for_register(dev_priv, PORT_HDCP_STATUS(port),
604 HDCP_STATUS_ENC, HDCP_STATUS_ENC, 20)) {
605 DRM_ERROR("Timed out waiting for encryption\n");
610 * XXX: If we have MST-connected devices, we need to enable encryption
614 if (repeater_present)
615 return intel_hdcp_auth_downstream(intel_dig_port, shim);
617 DRM_DEBUG_KMS("HDCP is enabled (no repeater present)\n");
/* Map a connector to the digital port driving it, via its attached
 * encoder. */
622 struct intel_digital_port *conn_to_dig_port(struct intel_connector *connector)
624 return enc_to_dig_port(&intel_attached_encoder(&connector->base)->base);
/*
 * Tear down HDCP on a connector: clear PORT_HDCP_CONF, wait for the
 * status register to fully clear (all bits -> 0), then ask the shim to
 * stop HDCP signalling. Caller must hold the relevant HDCP state lock
 * (see callers in this file).
 */
627 static int _intel_hdcp_disable(struct intel_connector *connector)
629 struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
630 struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
631 enum port port = intel_dig_port->base.port;
634 DRM_DEBUG_KMS("[%s:%d] HDCP is being disabled...\n",
635 connector->base.name, connector->base.base.id);
637 I915_WRITE(PORT_HDCP_CONF(port), 0);
638 if (intel_wait_for_register(dev_priv, PORT_HDCP_STATUS(port), ~0, 0,
640 DRM_ERROR("Failed to disable HDCP, timeout clearing status\n");
644 ret = connector->hdcp_shim->toggle_signalling(intel_dig_port, false);
646 DRM_ERROR("Failed to disable HDCP signalling\n");
650 DRM_DEBUG_KMS("HDCP is disabled\n");
/*
 * Bring up HDCP on a connector: verify the key is loadable, load the
 * keys (up to KEY_LOAD_TRIES attempts, clearing between tries), then
 * run the authentication procedure (up to 3 attempts, disabling fully
 * between failed attempts as the spec expects for re-auth).
 */
654 static int _intel_hdcp_enable(struct intel_connector *connector)
656 struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
657 int i, ret, tries = 3;
659 DRM_DEBUG_KMS("[%s:%d] HDCP is being enabled...\n",
660 connector->base.name, connector->base.base.id);
662 if (!hdcp_key_loadable(dev_priv)) {
663 DRM_ERROR("HDCP key Load is not possible\n");
667 for (i = 0; i < KEY_LOAD_TRIES; i++) {
668 ret = intel_hdcp_load_keys(dev_priv);
/* Clear whatever partially loaded before retrying. */
671 intel_hdcp_clear_keys(dev_priv);
674 DRM_ERROR("Could not load HDCP keys, (%d)\n", ret);
678 /* Incase of authentication failures, HDCP spec expects reauth. */
679 for (i = 0; i < tries; i++) {
680 ret = intel_hdcp_auth(conn_to_dig_port(connector),
681 connector->hdcp_shim);
685 DRM_DEBUG_KMS("HDCP Auth failure (%d)\n", ret);
687 /* Ensuring HDCP encryption and signalling are stopped. */
688 _intel_hdcp_disable(connector);
691 DRM_ERROR("HDCP authentication failed (%d tries/%d)\n", tries, ret);
/*
 * Periodic link-integrity worker: re-check the HDCP link and, while it
 * stays healthy (check returns 0), re-arm itself for the next period.
 */
695 static void intel_hdcp_check_work(struct work_struct *work)
697 struct intel_connector *connector = container_of(to_delayed_work(work),
698 struct intel_connector,
700 if (!intel_hdcp_check_link(connector))
701 schedule_delayed_work(&connector->hdcp_check_work,
702 DRM_HDCP_CHECK_PERIOD_MS)
/*
 * Property-update worker: mirror the internal hdcp_value into the
 * connector state's content_protection property, under both the
 * connection_mutex (property write) and hdcp_mutex (value read).
 * Transitions to UNDESIRED are handled by core, not here.
 */
705 static void intel_hdcp_prop_work(struct work_struct *work)
707 struct intel_connector *connector = container_of(work,
708 struct intel_connector,
710 struct drm_device *dev = connector->base.dev;
711 struct drm_connector_state *state;
713 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
714 mutex_lock(&connector->hdcp_mutex);
717 * This worker is only used to flip between ENABLED/DESIRED. Either of
718 * those to UNDESIRED is handled by core. If hdcp_value == UNDESIRED,
719 * we're running just after hdcp has been disabled, so just exit
721 if (connector->hdcp_value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
722 state = connector->base.state;
723 state->content_protection = connector->hdcp_value;
726 mutex_unlock(&connector->hdcp_mutex);
727 drm_modeset_unlock(&dev->mode_config.connection_mutex);
/*
 * Platform/port gate for HDCP: gen8+ or HSW, excluding CHV, and only
 * ports A-D (PORT E has no HDCP; PORT F is disabled).
 */
730 bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port)
732 /* PORT E doesn't have HDCP, and PORT F is disabled */
733 return ((INTEL_GEN(dev_priv) >= 8 || IS_HASWELL(dev_priv)) &&
734 !IS_CHERRYVIEW(dev_priv) && port < PORT_E);
/*
 * One-time per-connector HDCP setup: attach the content-protection
 * property, stash the transport shim, and initialize the mutex and the
 * check/property workers.
 */
737 int intel_hdcp_init(struct intel_connector *connector,
738 const struct intel_hdcp_shim *hdcp_shim)
742 ret = drm_connector_attach_content_protection_property(
747 connector->hdcp_shim = hdcp_shim;
748 mutex_init(&connector->hdcp_mutex);
749 INIT_DELAYED_WORK(&connector->hdcp_check_work, intel_hdcp_check_work);
750 INIT_WORK(&connector->hdcp_prop_work, intel_hdcp_prop_work);
/*
 * Public enable entry point: no-op for connectors without a shim;
 * otherwise enable HDCP under hdcp_mutex and, on success, publish the
 * ENABLED property state and start the periodic link check.
 */
754 int intel_hdcp_enable(struct intel_connector *connector)
758 if (!connector->hdcp_shim)
761 mutex_lock(&connector->hdcp_mutex);
763 ret = _intel_hdcp_enable(connector);
767 connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
768 schedule_work(&connector->hdcp_prop_work);
769 schedule_delayed_work(&connector->hdcp_check_work,
770 DRM_HDCP_CHECK_PERIOD_MS)
772 mutex_unlock(&connector->hdcp_mutex);
/*
 * Public disable entry point: mark the connector UNDESIRED and tear
 * down HDCP under hdcp_mutex, then cancel the periodic link-check
 * worker (outside the lock, since the worker takes hdcp_mutex itself).
 */
776 int intel_hdcp_disable(struct intel_connector *connector)
780 if (!connector->hdcp_shim)
783 mutex_lock(&connector->hdcp_mutex);
785 if (connector->hdcp_value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
786 connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_UNDESIRED;
787 ret = _intel_hdcp_disable(connector);
790 mutex_unlock(&connector->hdcp_mutex);
791 cancel_delayed_work_sync(&connector->hdcp_check_work);
/*
 * Atomic-check hook for the content-protection property: downgrade
 * ENABLED to DESIRED when the connector loses its crtc (so HDCP is
 * re-enabled on the next modeset), and force a full modeset
 * (mode_changed) for any user-visible protection change other than the
 * driver's own DESIRED -> ENABLED transition.
 */
795 void intel_hdcp_atomic_check(struct drm_connector *connector,
796 struct drm_connector_state *old_state,
797 struct drm_connector_state *new_state)
799 uint64_t old_cp = old_state->content_protection;
800 uint64_t new_cp = new_state->content_protection;
801 struct drm_crtc_state *crtc_state;
803 if (!new_state->crtc) {
805 * If the connector is being disabled with CP enabled, mark it
806 * desired so it's re-enabled when the connector is brought back
808 if (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)
809 new_state->content_protection =
810 DRM_MODE_CONTENT_PROTECTION_DESIRED;
815 * Nothing to do if the state didn't change, or HDCP was activated since
818 if (old_cp == new_cp ||
819 (old_cp == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
820 new_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED))
823 crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
825 crtc_state->mode_changed = true;
828 /* Implements Part 3 of the HDCP authorization procedure */
/*
 * Link-integrity check, run periodically from intel_hdcp_check_work():
 * - bail out if HDCP is UNDESIRED;
 * - if HW reports encryption dropped, demote to DESIRED (hard failure,
 *   no retry here);
 * - otherwise ask the shim's check_link(); if the link is fine, make
 *   sure the property reflects ENABLED;
 * - on a transport-level Ri mismatch, retry full disable + enable,
 *   demoting to DESIRED if either step fails.
 * All state transitions are published via hdcp_prop_work.
 */
829 int intel_hdcp_check_link(struct intel_connector *connector)
831 struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
832 struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
833 enum port port = intel_dig_port->base.port;
836 if (!connector->hdcp_shim)
839 mutex_lock(&connector->hdcp_mutex);
841 if (connector->hdcp_value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
844 if (!(I915_READ(PORT_HDCP_STATUS(port)) & HDCP_STATUS_ENC)) {
845 DRM_ERROR("%s:%d HDCP check failed: link is not encrypted,%x\n",
846 connector->base.name, connector->base.base.id,
847 I915_READ(PORT_HDCP_STATUS(port)));
849 connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
850 schedule_work(&connector->hdcp_prop_work);
854 if (connector->hdcp_shim->check_link(intel_dig_port)) {
855 if (connector->hdcp_value !=
856 DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
857 connector->hdcp_value =
858 DRM_MODE_CONTENT_PROTECTION_ENABLED;
859 schedule_work(&connector->hdcp_prop_work);
864 DRM_DEBUG_KMS("[%s:%d] HDCP link failed, retrying authentication\n",
865 connector->base.name, connector->base.base.id);
867 ret = _intel_hdcp_disable(connector);
869 DRM_ERROR("Failed to disable hdcp (%d)\n", ret);
870 connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
871 schedule_work(&connector->hdcp_prop_work);
875 ret = _intel_hdcp_enable(connector);
877 DRM_ERROR("Failed to enable hdcp (%d)\n", ret);
878 connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
879 schedule_work(&connector->hdcp_prop_work);
884 mutex_unlock(&connector->hdcp_mutex);