1 // SPDX-License-Identifier: GPL-2.0
3 * Cadence MHDP8546 DP bridge driver.
5 * Copyright (C) 2020 Cadence Design Systems, Inc.
7 * Authors: Quentin Schulz <quentin.schulz@free-electrons.com>
8 * Swapnil Jakhade <sjakhade@cadence.com>
9 * Yuti Amonkar <yamonkar@cadence.com>
10 * Tomi Valkeinen <tomi.valkeinen@ti.com>
11 * Jyri Sarha <jsarha@ti.com>
14 * - Implement optimized mailbox communication using mailbox interrupts
15 * - Add support for power management
16 * - Add support for features like audio, MST and fast link training
17 * - Implement request_fw_cancel to handle HW_STATE
18 * - Fix asynchronous loading of firmware implementation
19 * - Add DRM helper function for cdns_mhdp_lower_link_rate
22 #include <linux/clk.h>
23 #include <linux/delay.h>
24 #include <linux/err.h>
25 #include <linux/firmware.h>
27 #include <linux/iopoll.h>
28 #include <linux/irq.h>
29 #include <linux/module.h>
31 #include <linux/of_device.h>
32 #include <linux/phy/phy.h>
33 #include <linux/phy/phy-dp.h>
34 #include <linux/platform_device.h>
35 #include <linux/slab.h>
36 #include <linux/wait.h>
38 #include <drm/drm_atomic.h>
39 #include <drm/drm_atomic_helper.h>
40 #include <drm/drm_atomic_state_helper.h>
41 #include <drm/drm_bridge.h>
42 #include <drm/drm_connector.h>
43 #include <drm/drm_crtc_helper.h>
44 #include <drm/drm_dp_helper.h>
45 #include <drm/drm_hdcp.h>
46 #include <drm/drm_modeset_helper_vtables.h>
47 #include <drm/drm_print.h>
48 #include <drm/drm_probe_helper.h>
50 #include <asm/unaligned.h>
52 #include "cdns-mhdp8546-core.h"
53 #include "cdns-mhdp8546-hdcp.h"
54 #include "cdns-mhdp8546-j721e.h"
/* Read one byte from the mailbox RX FIFO: poll CDNS_MAILBOX_EMPTY until it
 * clears, then return the low byte of RX_DATA (0-255), or a negative errno
 * on poll timeout. Caller must hold mbox_mutex.
 * NOTE(review): extraction gaps — local declarations and the timeout-error
 * return are missing between the visible lines.
 */
56 static int cdns_mhdp_mailbox_read(struct cdns_mhdp_device *mhdp)
60 WARN_ON(!mutex_is_locked(&mhdp->mbox_mutex));
62 ret = readx_poll_timeout(readl, mhdp->regs + CDNS_MAILBOX_EMPTY,
63 empty, !empty, MAILBOX_RETRY_US,
68 return readl(mhdp->regs + CDNS_MAILBOX_RX_DATA) & 0xff;
/* Write one byte to the mailbox TX FIFO: poll CDNS_MAILBOX_FULL until it
 * clears, then write @val to TX_DATA. Caller must hold mbox_mutex.
 * NOTE(review): extraction gaps — declarations, timeout handling and the
 * success return are missing between the visible lines.
 */
71 static int cdns_mhdp_mailbox_write(struct cdns_mhdp_device *mhdp, u8 val)
75 WARN_ON(!mutex_is_locked(&mhdp->mbox_mutex));
77 ret = readx_poll_timeout(readl, mhdp->regs + CDNS_MAILBOX_FULL,
78 full, !full, MAILBOX_RETRY_US,
83 writel(val, mhdp->regs + CDNS_MAILBOX_TX_DATA);
/* Read a 4-byte response header from the mailbox and validate it against the
 * expected @opcode, @module_id and request size (BE16 at header+2). On any
 * mismatch the stale message payload is drained byte-by-byte so the mailbox
 * is left clean for the next exchange.
 * NOTE(review): extraction gaps — declarations, per-byte error checks, the
 * mismatch error return and closing braces are missing between visible lines.
 */
88 static int cdns_mhdp_mailbox_recv_header(struct cdns_mhdp_device *mhdp,
89 u8 module_id, u8 opcode,
96 /* read the header of the message */
97 for (i = 0; i < sizeof(header); i++) {
98 ret = cdns_mhdp_mailbox_read(mhdp);
105 mbox_size = get_unaligned_be16(header + 2);
107 if (opcode != header[0] || module_id != header[1] ||
108 req_size != mbox_size) {
110 * If the message in mailbox is not what we want, we need to
111 * clear the mailbox by reading its contents.
113 for (i = 0; i < mbox_size; i++)
114 if (cdns_mhdp_mailbox_read(mhdp) < 0)
/* Read @buff_size payload bytes from the mailbox into @buff, one byte per
 * cdns_mhdp_mailbox_read() call.
 * NOTE(review): extraction gaps — the per-byte error check, the store into
 * buff[i] and the return are missing between the visible lines.
 */
123 static int cdns_mhdp_mailbox_recv_data(struct cdns_mhdp_device *mhdp,
124 u8 *buff, u16 buff_size)
129 for (i = 0; i < buff_size; i++) {
130 ret = cdns_mhdp_mailbox_read(mhdp);
/* Send a mailbox request: a 4-byte header (opcode, module id, BE16 payload
 * size) followed by @size bytes of @message.
 * NOTE(review): extraction gaps — header[0] assignment, per-byte error
 * checks and the return are missing between the visible lines.
 */
140 static int cdns_mhdp_mailbox_send(struct cdns_mhdp_device *mhdp, u8 module_id,
141 u8 opcode, u16 size, u8 *message)
147 header[1] = module_id;
148 put_unaligned_be16(size, header + 2);
150 for (i = 0; i < sizeof(header); i++) {
151 ret = cdns_mhdp_mailbox_write(mhdp, header[i]);
156 for (i = 0; i < size; i++) {
157 ret = cdns_mhdp_mailbox_write(mhdp, message[i]);
/* Read a 32-bit controller register via the firmware mailbox
 * (GENERAL_REGISTER_READ). The response echoes the requested address; it is
 * compared against the request before the BE32 value at resp+4 is returned
 * through @value. Serialized by mbox_mutex.
 * NOTE(review): extraction gaps — declarations, goto-error checks after each
 * mailbox call and the final return are missing between the visible lines.
 */
166 int cdns_mhdp_reg_read(struct cdns_mhdp_device *mhdp, u32 addr, u32 *value)
171 put_unaligned_be32(addr, msg);
173 mutex_lock(&mhdp->mbox_mutex);
175 ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_GENERAL,
176 GENERAL_REGISTER_READ,
181 ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_GENERAL,
182 GENERAL_REGISTER_READ,
187 ret = cdns_mhdp_mailbox_recv_data(mhdp, resp, sizeof(resp));
191 /* Returned address value should be the same as requested */
192 if (memcmp(msg, resp, sizeof(msg))) {
197 *value = get_unaligned_be32(resp + 4);
200 mutex_unlock(&mhdp->mbox_mutex);
202 dev_err(mhdp->dev, "Failed to read register\n");
/* Write a 32-bit value to a controller register via the mailbox
 * (DPTX_WRITE_REGISTER): BE16 address followed by BE32 value, under
 * mbox_mutex. No response payload is read for this opcode.
 * NOTE(review): extraction gaps — declarations and return are missing.
 */
210 int cdns_mhdp_reg_write(struct cdns_mhdp_device *mhdp, u16 addr, u32 val)
215 put_unaligned_be16(addr, msg);
216 put_unaligned_be32(val, msg + 2);
218 mutex_lock(&mhdp->mbox_mutex);
220 ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
221 DPTX_WRITE_REGISTER, sizeof(msg), msg);
223 mutex_unlock(&mhdp->mbox_mutex);
/* Read-modify-write a bit field of a controller register via the mailbox
 * (DPTX_WRITE_FIELD): BE16 address, start bit, field width, then BE32 value.
 * NOTE(review): extraction gaps — the field[3] (bits_no) assignment,
 * declarations and return are missing between the visible lines.
 */
229 int cdns_mhdp_reg_write_bit(struct cdns_mhdp_device *mhdp, u16 addr,
230 u8 start_bit, u8 bits_no, u32 val)
235 put_unaligned_be16(addr, field);
236 field[2] = start_bit;
238 put_unaligned_be32(val, field + 4);
240 mutex_lock(&mhdp->mbox_mutex);
242 ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
243 DPTX_WRITE_FIELD, sizeof(field), field);
245 mutex_unlock(&mhdp->mbox_mutex);
/* Read @len bytes of sink DPCD starting at @addr through the firmware
 * (DPTX_READ_DPCD). Request carries BE16 length + BE24 address; the reply
 * header is validated, a small echo register block is consumed, then the
 * DPCD payload is copied into @data. Serialized by mbox_mutex.
 * NOTE(review): extraction gaps — declarations, goto-error checks and the
 * return are missing between the visible lines.
 */
251 int cdns_mhdp_dpcd_read(struct cdns_mhdp_device *mhdp,
252 u32 addr, u8 *data, u16 len)
257 put_unaligned_be16(len, msg);
258 put_unaligned_be24(addr, msg + 2);
260 mutex_lock(&mhdp->mbox_mutex);
262 ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
263 DPTX_READ_DPCD, sizeof(msg), msg);
267 ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_DP_TX,
273 ret = cdns_mhdp_mailbox_recv_data(mhdp, reg, sizeof(reg));
277 ret = cdns_mhdp_mailbox_recv_data(mhdp, data, len);
280 mutex_unlock(&mhdp->mbox_mutex);
/* Write a single DPCD byte at @addr via the firmware (DPTX_WRITE_DPCD):
 * BE16 length of 1, BE24 address, then the value. The response echoes the
 * address (BE24 at reg+2), which is checked before success is reported.
 * NOTE(review): extraction gaps — the value byte in msg, declarations,
 * goto-error checks and the return are missing between the visible lines.
 */
286 int cdns_mhdp_dpcd_write(struct cdns_mhdp_device *mhdp, u32 addr, u8 value)
291 put_unaligned_be16(1, msg);
292 put_unaligned_be24(addr, msg + 2);
295 mutex_lock(&mhdp->mbox_mutex);
297 ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
298 DPTX_WRITE_DPCD, sizeof(msg), msg);
302 ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_DP_TX,
303 DPTX_WRITE_DPCD, sizeof(reg));
307 ret = cdns_mhdp_mailbox_recv_data(mhdp, reg, sizeof(reg));
311 if (addr != get_unaligned_be24(reg + 2))
315 mutex_unlock(&mhdp->mbox_mutex);
318 dev_err(mhdp->dev, "dpcd write failed: %d\n", ret);
/* Switch the firmware between FW_ACTIVE and FW_STANDBY via the
 * GENERAL_MAIN_CONTROL message. The request is written byte-by-byte (not
 * via cdns_mhdp_mailbox_send()) and the firmware state is read back into
 * the same buffer for verification.
 * NOTE(review): extraction gaps — msg[2]/msg[3] (size bytes), declarations,
 * the response check and the return are missing between the visible lines.
 */
323 int cdns_mhdp_set_firmware_active(struct cdns_mhdp_device *mhdp, bool enable)
328 msg[0] = GENERAL_MAIN_CONTROL;
329 msg[1] = MB_MODULE_ID_GENERAL;
332 msg[4] = enable ? FW_ACTIVE : FW_STANDBY;
334 mutex_lock(&mhdp->mbox_mutex);
336 for (i = 0; i < sizeof(msg); i++) {
337 ret = cdns_mhdp_mailbox_write(mhdp, msg[i]);
342 /* read the firmware state */
343 ret = cdns_mhdp_mailbox_recv_data(mhdp, msg, sizeof(msg));
350 mutex_unlock(&mhdp->mbox_mutex);
353 dev_err(mhdp->dev, "set firmware active failed\n");
/* Query the HPD (hot-plug detect) line state from the firmware
 * (DPTX_HPD_STATE, empty request). Presumably returns the status byte on
 * success and a negative errno via the second unlock path on failure —
 * TODO confirm against the unabridged source.
 * NOTE(review): extraction gaps — declarations, goto-error checks and both
 * return statements are missing between the visible lines.
 */
358 int cdns_mhdp_get_hpd_status(struct cdns_mhdp_device *mhdp)
363 mutex_lock(&mhdp->mbox_mutex);
365 ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
366 DPTX_HPD_STATE, 0, NULL);
370 ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_DP_TX,
376 ret = cdns_mhdp_mailbox_recv_data(mhdp, &status, sizeof(status));
380 mutex_unlock(&mhdp->mbox_mutex);
382 dev_dbg(mhdp->dev, "%s: HPD %splugged\n", __func__,
388 mutex_unlock(&mhdp->mbox_mutex);
/* drm_do_get_edid() callback: fetch one EDID block through the firmware
 * (DPTX_GET_EDID), retrying up to 4 times. The reply carries a 2-byte echo
 * (length, block/2) which must match the request before the loop exits.
 * NOTE(review): extraction gaps — the msg[] setup inside the retry loop,
 * goto-error checks, loop break and the return are missing between the
 * visible lines.
 */
394 int cdns_mhdp_get_edid_block(void *data, u8 *edid,
395 unsigned int block, size_t length)
397 struct cdns_mhdp_device *mhdp = data;
398 u8 msg[2], reg[2], i;
401 mutex_lock(&mhdp->mbox_mutex);
403 for (i = 0; i < 4; i++) {
407 ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
408 DPTX_GET_EDID, sizeof(msg), msg);
412 ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_DP_TX,
414 sizeof(reg) + length);
418 ret = cdns_mhdp_mailbox_recv_data(mhdp, reg, sizeof(reg));
422 ret = cdns_mhdp_mailbox_recv_data(mhdp, edid, length);
426 if (reg[0] == length && reg[1] == block / 2)
430 mutex_unlock(&mhdp->mbox_mutex);
433 dev_err(mhdp->dev, "get block[%d] edid failed: %d\n",
/* Read the pending HPD event flags from the firmware (DPTX_READ_EVENT) and
 * log which of TO_HIGH / TO_LOW / PULSE / HPD_STATE bits are set. Presumably
 * returns the event byte on success — TODO confirm against the full source.
 * NOTE(review): extraction gaps — declarations, goto-error checks and the
 * return are missing between the visible lines.
 */
440 int cdns_mhdp_read_hpd_event(struct cdns_mhdp_device *mhdp)
445 mutex_lock(&mhdp->mbox_mutex);
447 ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
448 DPTX_READ_EVENT, 0, NULL);
452 ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_DP_TX,
453 DPTX_READ_EVENT, sizeof(event));
457 ret = cdns_mhdp_mailbox_recv_data(mhdp, &event, sizeof(event));
459 mutex_unlock(&mhdp->mbox_mutex);
464 dev_dbg(mhdp->dev, "%s: %s%s%s%s\n", __func__,
465 (event & DPTX_READ_EVENT_HPD_TO_HIGH) ? "TO_HIGH " : "",
466 (event & DPTX_READ_EVENT_HPD_TO_LOW) ? "TO_LOW " : "",
467 (event & DPTX_READ_EVENT_HPD_PULSE) ? "PULSE " : "",
468 (event & DPTX_READ_EVENT_HPD_STATE) ? "HPD_STATE " : "");
/* Link-training helper: push per-lane drive settings plus a delay to the
 * firmware, which applies them, waits, and performs a DPCD read of the link
 * status. The DPCD-read response header is validated (address must be
 * DP_LANE0_1_STATUS) before DP_LINK_STATUS_SIZE bytes land in @link_status.
 * @nlanes must be 1, 2 or 4.
 * NOTE(review): extraction gaps — payload[0] (lane count), the mailbox
 * opcode argument, goto-error checks and the return are missing between
 * the visible lines.
 */
474 int cdns_mhdp_adjust_lt(struct cdns_mhdp_device *mhdp, unsigned int nlanes,
475 unsigned int udelay, const u8 *lanes_data,
476 u8 link_status[DP_LINK_STATUS_SIZE])
479 u8 hdr[5]; /* For DPCD read response header */
483 if (nlanes != 4 && nlanes != 2 && nlanes != 1) {
484 dev_err(mhdp->dev, "invalid number of lanes: %u\n", nlanes);
490 put_unaligned_be16(udelay, payload + 1);
491 memcpy(payload + 3, lanes_data, nlanes);
493 mutex_lock(&mhdp->mbox_mutex);
495 ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
497 sizeof(payload), payload);
501 /* Yes, read the DPCD read command response */
502 ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_DP_TX,
504 sizeof(hdr) + DP_LINK_STATUS_SIZE);
508 ret = cdns_mhdp_mailbox_recv_data(mhdp, hdr, sizeof(hdr));
512 addr = get_unaligned_be24(hdr + 2);
513 if (addr != DP_LANE0_1_STATUS)
516 ret = cdns_mhdp_mailbox_recv_data(mhdp, link_status,
517 DP_LINK_STATUS_SIZE);
520 mutex_unlock(&mhdp->mbox_mutex);
523 dev_err(mhdp->dev, "Failed to adjust Link Training.\n");
529 * cdns_mhdp_link_power_up() - power up a DisplayPort link
530 * @aux: DisplayPort AUX channel
531 * @link: pointer to a structure containing the link configuration
533 * Returns 0 on success or a negative error code on failure.
/* NOTE(review): extraction gaps — the kerneldoc opener, local declarations,
 * early-return on pre-1.1 DPCD, error returns after each AUX access and the
 * final return are missing between the visible lines.
 */
536 int cdns_mhdp_link_power_up(struct drm_dp_aux *aux, struct cdns_mhdp_link *link)
541 /* DP_SET_POWER register is only available on DPCD v1.1 and later */
542 if (link->revision < 0x11)
545 err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
549 value &= ~DP_SET_POWER_MASK;
550 value |= DP_SET_POWER_D0;
552 err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
557 * According to the DP 1.1 specification, a "Sink Device must exit the
558 * power saving state within 1 ms" (Section 2.5.3.1, Table 5-52, "Sink
559 * Control Field" (register 0x600).
561 usleep_range(1000, 2000);
567 * cdns_mhdp_link_power_down() - power down a DisplayPort link
568 * @aux: DisplayPort AUX channel
569 * @link: pointer to a structure containing the link configuration
571 * Returns 0 on success or a negative error code on failure.
/* Mirror of cdns_mhdp_link_power_up(): sets DP_SET_POWER to D3.
 * NOTE(review): extraction gaps — kerneldoc opener, declarations, early
 * return for pre-1.1 DPCD, error checks and final return are missing.
 */
574 int cdns_mhdp_link_power_down(struct drm_dp_aux *aux,
575 struct cdns_mhdp_link *link)
580 /* DP_SET_POWER register is only available on DPCD v1.1 and later */
581 if (link->revision < 0x11)
584 err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
588 value &= ~DP_SET_POWER_MASK;
589 value |= DP_SET_POWER_D3;
591 err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
599 * cdns_mhdp_link_configure() - configure a DisplayPort link
600 * @aux: DisplayPort AUX channel
601 * @link: pointer to a structure containing the link configuration
603 * Returns 0 on success or a negative error code on failure.
/* Writes DP_LINK_BW_SET / DP_LANE_COUNT_SET in one AUX burst, with the
 * enhanced-framing bit ORed into the lane count when both sides support it.
 * NOTE(review): extraction gaps — kerneldoc opener, declarations, the
 * error check on the write and the final return are missing.
 */
606 int cdns_mhdp_link_configure(struct drm_dp_aux *aux,
607 struct cdns_mhdp_link *link)
612 values[0] = drm_dp_link_rate_to_bw_code(link->rate);
613 values[1] = link->num_lanes;
615 if (link->capabilities & DP_LINK_CAP_ENHANCED_FRAMING)
616 values[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
618 err = drm_dp_dpcd_write(aux, DP_LINK_BW_SET, values, sizeof(values));
/* Highest link rate supported by both source (host) and sink. */
625 static unsigned int cdns_mhdp_max_link_rate(struct cdns_mhdp_device *mhdp)
627 return min(mhdp->host.link_rate, mhdp->sink.link_rate);
/* Largest lane count supported by both sink and host. */
630 static u8 cdns_mhdp_max_num_lanes(struct cdns_mhdp_device *mhdp)
632 return min(mhdp->sink.lanes_cnt, mhdp->host.lanes_cnt);
/* Highest training pattern (TPS) supported by both sides: fls() of the
 * intersection of the host and sink TPS bitmasks. */
635 static u8 cdns_mhdp_eq_training_pattern_supported(struct cdns_mhdp_device *mhdp)
637 return fls(mhdp->host.pattern_supp & mhdp->sink.pattern_supp);
/* True when spread-spectrum clocking is supported by both host and sink. */
640 static bool cdns_mhdp_get_ssc_supported(struct cdns_mhdp_device *mhdp)
642 /* Check if SSC is supported by both sides */
643 return mhdp->host.ssc && mhdp->sink.ssc;
/* Report connector status from the cached mhdp->plugged flag.
 * NOTE(review): extraction gap — the `if (mhdp->plugged)` guard between the
 * two returns is missing from this view.
 */
646 static enum drm_connector_status cdns_mhdp_detect(struct cdns_mhdp_device *mhdp)
648 dev_dbg(mhdp->dev, "%s: %d\n", __func__, mhdp->plugged);
651 return connector_status_connected;
653 return connector_status_disconnected;
/* Read and sanity-check the firmware version registers. FW and library
 * versions are each assembled from a high/low register pair. For old
 * major-1 firmware (identified by lib_ver < 33984) only specific repository
 * revisions are accepted; otherwise the numeric version is decoded as
 * major.minor.revision from fw_ver.
 * NOTE(review): extraction gaps — declarations, the accepted-version
 * assignments, the -ENODEV-style error return and the success return are
 * missing between the visible lines.
 */
656 static int cdns_mhdp_check_fw_version(struct cdns_mhdp_device *mhdp)
658 u32 major_num, minor_num, revision;
661 fw_ver = (readl(mhdp->regs + CDNS_VER_H) << 8)
662 | readl(mhdp->regs + CDNS_VER_L);
664 lib_ver = (readl(mhdp->regs + CDNS_LIB_H_ADDR) << 8)
665 | readl(mhdp->regs + CDNS_LIB_L_ADDR);
667 if (lib_ver < 33984) {
669 * Older FW versions with major number 1, used to store FW
670 * version information by storing repository revision number
671 * in registers. This is for identifying these FW versions.
675 if (fw_ver == 26098) {
677 } else if (lib_ver == 0 && fw_ver == 0) {
680 dev_err(mhdp->dev, "Unsupported FW version: fw_ver = %u, lib_ver = %u\n",
685 /* To identify newer FW versions with major number 2 onwards. */
686 major_num = fw_ver / 10000;
687 minor_num = (fw_ver / 100) % 100;
688 revision = (fw_ver % 10000) % 100;
691 dev_dbg(mhdp->dev, "FW version: v%u.%u.%u\n", major_num, minor_num,
/* Boot the MHDP firmware: stall the uCPU, copy the firmware image into
 * IMEM, release the stall, wait for the KEEP_ALIVE heartbeat, verify the
 * version, clear latched SW events, activate the firmware, and — while
 * holding start_lock — mark the HW ready and unmask the SW-event interrupt
 * if a bridge is already attached. Waiters on fw_load_wq are woken last.
 * NOTE(review): extraction gaps — declarations, error returns after the
 * poll/version/activate steps and the final return are missing between the
 * visible lines.
 */
696 static int cdns_mhdp_fw_activate(const struct firmware *fw,
697 struct cdns_mhdp_device *mhdp)
702 /* Release uCPU reset and stall it. */
703 writel(CDNS_CPU_STALL, mhdp->regs + CDNS_APB_CTRL);
705 memcpy_toio(mhdp->regs + CDNS_MHDP_IMEM, fw->data, fw->size);
707 /* Leave debug mode, release stall */
708 writel(0, mhdp->regs + CDNS_APB_CTRL);
711 * Wait for the KEEP_ALIVE "message" on the first 8 bits.
712 * Updated each sched "tick" (~2ms)
714 ret = readl_poll_timeout(mhdp->regs + CDNS_KEEP_ALIVE, reg,
715 reg & CDNS_KEEP_ALIVE_MASK, 500,
716 CDNS_KEEP_ALIVE_TIMEOUT);
719 "device didn't give any life sign: reg %d\n", reg);
723 ret = cdns_mhdp_check_fw_version(mhdp);
727 /* Init events to 0 as it's not cleared by FW at boot but on read */
728 readl(mhdp->regs + CDNS_SW_EVENT0);
729 readl(mhdp->regs + CDNS_SW_EVENT1);
730 readl(mhdp->regs + CDNS_SW_EVENT2);
731 readl(mhdp->regs + CDNS_SW_EVENT3);
734 ret = cdns_mhdp_set_firmware_active(mhdp, true);
738 spin_lock(&mhdp->start_lock);
740 mhdp->hw_state = MHDP_HW_READY;
743 * Here we must keep the lock while enabling the interrupts
744 * since it would otherwise be possible that interrupt enable
745 * code is executed after the bridge is detached. The similar
746 * situation is not possible in attach()/detach() callbacks
747 * since the hw_state changes from MHDP_HW_READY to
748 * MHDP_HW_STOPPED happens only due to driver removal when
749 * bridge should already be detached.
751 if (mhdp->bridge_attached)
752 writel(~(u32)CDNS_APB_INT_MASK_SW_EVENT_INT,
753 mhdp->regs + CDNS_APB_INT_MASK);
755 spin_unlock(&mhdp->start_lock);
757 wake_up(&mhdp->fw_load_wq);
758 dev_dbg(mhdp->dev, "DP FW activated\n");
/* request_firmware_nowait() completion callback: activate the firmware,
 * release it, and — if the bridge is attached (sampled under start_lock,
 * dropped before calling into DRM to avoid a deadlock) — deliver a hotplug
 * event, either through the driver-created connector or via
 * drm_bridge_hpd_notify().
 * NOTE(review): extraction gaps — declarations, the early return when no
 * firmware arrived, the error check on activation and closing braces are
 * missing between the visible lines.
 */
763 static void cdns_mhdp_fw_cb(const struct firmware *fw, void *context)
765 struct cdns_mhdp_device *mhdp = context;
766 bool bridge_attached;
769 dev_dbg(mhdp->dev, "firmware callback\n");
771 if (!fw || !fw->data) {
772 dev_err(mhdp->dev, "%s: No firmware.\n", __func__);
776 ret = cdns_mhdp_fw_activate(fw, mhdp);
778 release_firmware(fw);
784 * XXX how to make sure the bridge is still attached when
785 * calling drm_kms_helper_hotplug_event() after releasing
786 * the lock? We should not hold the spin lock when
787 * calling drm_kms_helper_hotplug_event() since it may
788 * cause a dead lock. FB-dev console calls detect from the
789 * same thread just down the call stack started here.
791 spin_lock(&mhdp->start_lock);
792 bridge_attached = mhdp->bridge_attached;
793 spin_unlock(&mhdp->start_lock);
794 if (bridge_attached) {
795 if (mhdp->connector.dev)
796 drm_kms_helper_hotplug_event(mhdp->bridge.dev);
798 drm_bridge_hpd_notify(&mhdp->bridge, cdns_mhdp_detect(mhdp));
/* Kick off asynchronous firmware loading; cdns_mhdp_fw_cb() completes the
 * bring-up when the image arrives.
 * NOTE(review): extraction gaps — declarations and returns are missing.
 */
802 static int cdns_mhdp_load_firmware(struct cdns_mhdp_device *mhdp)
806 ret = request_firmware_nowait(THIS_MODULE, true, FW_NAME, mhdp->dev,
807 GFP_KERNEL, mhdp, cdns_mhdp_fw_cb);
809 dev_err(mhdp->dev, "failed to load firmware (%s), ret: %d\n",
/* drm_dp_aux transfer hook. Only native AUX reads/writes are handled (I2C-
 * over-AUX presumably rejected by the guard at the top — the rejection
 * return value is not visible here; TODO confirm). Writes are forwarded one
 * byte at a time through cdns_mhdp_dpcd_write(); reads go through
 * cdns_mhdp_dpcd_read() in one shot.
 * NOTE(review): extraction gaps — declarations, error-path returns and the
 * msg->reply / size bookkeeping are missing between the visible lines.
 */
817 static ssize_t cdns_mhdp_transfer(struct drm_dp_aux *aux,
818 struct drm_dp_aux_msg *msg)
820 struct cdns_mhdp_device *mhdp = dev_get_drvdata(aux->dev);
823 if (msg->request != DP_AUX_NATIVE_WRITE &&
824 msg->request != DP_AUX_NATIVE_READ)
827 if (msg->request == DP_AUX_NATIVE_WRITE) {
828 const u8 *buf = msg->buffer;
831 for (i = 0; i < msg->size; ++i) {
832 ret = cdns_mhdp_dpcd_write(mhdp,
833 msg->address + i, buf[i]);
838 "Failed to write DPCD addr %u\n",
844 ret = cdns_mhdp_dpcd_read(mhdp, msg->address,
845 msg->buffer, msg->size);
848 "Failed to read DPCD addr %u\n",
/* Prepare sink and PHY for link training: disable any active training
 * pattern in the sink, reset the PHY training config (bypassing the
 * scrambler if the host lacks one), program enhanced framing and lane
 * enables, write the link rate/lane count to the sink, configure the PHY
 * (rate in Mb/s = link.rate / 100, zeroed swing/pre-emphasis), then start
 * TPS1 in both PHY and sink with scrambling disabled.
 * NOTE(review): extraction gaps — declarations, the error return after
 * phy_configure() and the final return are missing between visible lines.
 */
858 static int cdns_mhdp_link_training_init(struct cdns_mhdp_device *mhdp)
860 union phy_configure_opts phy_cfg;
864 drm_dp_dpcd_writeb(&mhdp->aux, DP_TRAINING_PATTERN_SET,
865 DP_TRAINING_PATTERN_DISABLE);
867 /* Reset PHY configuration */
868 reg32 = CDNS_PHY_COMMON_CONFIG | CDNS_PHY_TRAINING_TYPE(1);
869 if (!mhdp->host.scrambler)
870 reg32 |= CDNS_PHY_SCRAMBLER_BYPASS;
872 cdns_mhdp_reg_write(mhdp, CDNS_DPTX_PHY_CONFIG, reg32);
874 cdns_mhdp_reg_write(mhdp, CDNS_DP_ENHNCD,
875 mhdp->sink.enhanced & mhdp->host.enhanced);
877 cdns_mhdp_reg_write(mhdp, CDNS_DP_LANE_EN,
878 CDNS_DP_LANE_EN_LANES(mhdp->link.num_lanes));
880 cdns_mhdp_link_configure(&mhdp->aux, &mhdp->link);
881 phy_cfg.dp.link_rate = mhdp->link.rate / 100;
882 phy_cfg.dp.lanes = mhdp->link.num_lanes;
884 memset(phy_cfg.dp.voltage, 0, sizeof(phy_cfg.dp.voltage));
885 memset(phy_cfg.dp.pre, 0, sizeof(phy_cfg.dp.pre));
887 phy_cfg.dp.ssc = cdns_mhdp_get_ssc_supported(mhdp);
888 phy_cfg.dp.set_lanes = true;
889 phy_cfg.dp.set_rate = true;
890 phy_cfg.dp.set_voltages = true;
891 ret = phy_configure(mhdp->phy, &phy_cfg);
893 dev_err(mhdp->dev, "%s: phy_configure() failed: %d\n",
898 cdns_mhdp_reg_write(mhdp, CDNS_DPTX_PHY_CONFIG,
899 CDNS_PHY_COMMON_CONFIG |
900 CDNS_PHY_TRAINING_EN |
901 CDNS_PHY_TRAINING_TYPE(1) |
902 CDNS_PHY_SCRAMBLER_BYPASS);
904 drm_dp_dpcd_writeb(&mhdp->aux, DP_TRAINING_PATTERN_SET,
905 DP_TRAINING_PATTERN_1 | DP_LINK_SCRAMBLING_DISABLE);
/* Translate the sink's per-lane voltage-swing / pre-emphasis adjust requests
 * (from @link_status) into DPCD lane-set bytes (@lanes_data) and PHY config
 * (@phy_cfg), clamped to the host maxima. The DP rule that swing + pre-
 * emphasis levels must not exceed 3 combined is enforced by reducing swing.
 * MAX_SWING/MAX_PRE_EMPHASIS "reached" flags are set when a clamp hit.
 * NOTE(review): extraction gaps — the loop-index declaration and closing
 * braces are missing between the visible lines.
 */
910 static void cdns_mhdp_get_adjust_train(struct cdns_mhdp_device *mhdp,
911 u8 link_status[DP_LINK_STATUS_SIZE],
912 u8 lanes_data[CDNS_DP_MAX_NUM_LANES],
913 union phy_configure_opts *phy_cfg)
915 u8 adjust, max_pre_emph, max_volt_swing;
916 u8 set_volt, set_pre;
919 max_pre_emph = CDNS_PRE_EMPHASIS(mhdp->host.pre_emphasis)
920 << DP_TRAIN_PRE_EMPHASIS_SHIFT;
921 max_volt_swing = CDNS_VOLT_SWING(mhdp->host.volt_swing);
923 for (i = 0; i < mhdp->link.num_lanes; i++) {
924 /* Check if Voltage swing and pre-emphasis are within limits */
925 adjust = drm_dp_get_adjust_request_voltage(link_status, i);
926 set_volt = min(adjust, max_volt_swing);
928 adjust = drm_dp_get_adjust_request_pre_emphasis(link_status, i);
929 set_pre = min(adjust, max_pre_emph)
930 >> DP_TRAIN_PRE_EMPHASIS_SHIFT;
933 * Voltage swing level and pre-emphasis level combination is
934 * not allowed: leaving pre-emphasis as-is, and adjusting
937 if (set_volt + set_pre > 3)
938 set_volt = 3 - set_pre;
940 phy_cfg->dp.voltage[i] = set_volt;
941 lanes_data[i] = set_volt;
943 if (set_volt == max_volt_swing)
944 lanes_data[i] |= DP_TRAIN_MAX_SWING_REACHED;
946 phy_cfg->dp.pre[i] = set_pre;
947 lanes_data[i] |= (set_pre << DP_TRAIN_PRE_EMPHASIS_SHIFT);
949 if (set_pre == (max_pre_emph >> DP_TRAIN_PRE_EMPHASIS_SHIFT))
950 lanes_data[i] |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
/* Overwrite the cached voltage-swing adjust-request nibble for @lane inside
 * a link-status buffer that starts at DP_LANE0_1_STATUS. Odd lanes use the
 * LANE1 shift; the byte index is derived from lane/2.
 */
955 void cdns_mhdp_set_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE],
956 unsigned int lane, u8 volt)
958 unsigned int s = ((lane & 1) ?
959 DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT :
960 DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT);
961 unsigned int idx = DP_ADJUST_REQUEST_LANE0_1 - DP_LANE0_1_STATUS + (lane >> 1);
963 link_status[idx] &= ~(DP_ADJUST_VOLTAGE_SWING_LANE0_MASK << s);
964 link_status[idx] |= volt << s;
/* Overwrite the cached pre-emphasis adjust-request nibble for @lane in a
 * link-status buffer; companion to cdns_mhdp_set_adjust_request_voltage().
 */
968 void cdns_mhdp_set_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE],
969 unsigned int lane, u8 pre_emphasis)
971 unsigned int s = ((lane & 1) ?
972 DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT :
973 DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT);
974 unsigned int idx = DP_ADJUST_REQUEST_LANE0_1 - DP_LANE0_1_STATUS + (lane >> 1);
976 link_status[idx] &= ~(DP_ADJUST_PRE_EMPHASIS_LANE0_MASK << s);
977 link_status[idx] |= pre_emphasis << s;
/* Rewrite the adjust-request fields in @link_status for the EQ phase:
 * requested values are clamped to the host maxima, and forced to max when
 * the host caps carry the CDNS_FORCE_* flags.
 * NOTE(review): extraction gaps — declarations of volt/pre/i, the clamped
 * value expressions in the setter calls and closing braces are missing
 * between the visible lines.
 */
980 static void cdns_mhdp_adjust_requested_eq(struct cdns_mhdp_device *mhdp,
981 u8 link_status[DP_LINK_STATUS_SIZE])
983 u8 max_pre = CDNS_PRE_EMPHASIS(mhdp->host.pre_emphasis);
984 u8 max_volt = CDNS_VOLT_SWING(mhdp->host.volt_swing);
988 for (i = 0; i < mhdp->link.num_lanes; i++) {
989 volt = drm_dp_get_adjust_request_voltage(link_status, i);
990 pre = drm_dp_get_adjust_request_pre_emphasis(link_status, i);
992 cdns_mhdp_set_adjust_request_voltage(link_status, i,
994 if (mhdp->host.volt_swing & CDNS_FORCE_VOLT_SWING)
995 cdns_mhdp_set_adjust_request_voltage(link_status, i,
997 if (mhdp->host.pre_emphasis & CDNS_FORCE_PRE_EMPHASIS)
998 cdns_mhdp_set_adjust_request_pre_emphasis(link_status,
/* Debug helper: log link-training status as "<prefix>, N lanes, M Mbps,
 * vs a/b/c/d, pe a/b/c/d", where the slash-separated strings are built in
 * place by overwriting the '0' placeholders and truncating after the last
 * active lane.
 */
1003 static void cdns_mhdp_print_lt_status(const char *prefix,
1004 struct cdns_mhdp_device *mhdp,
1005 union phy_configure_opts *phy_cfg)
1007 char vs[8] = "0/0/0/0";
1008 char pe[8] = "0/0/0/0";
1011 for (i = 0; i < mhdp->link.num_lanes; i++) {
1012 vs[i * 2] = '0' + phy_cfg->dp.voltage[i];
1013 pe[i * 2] = '0' + phy_cfg->dp.pre[i];
1016 vs[i * 2 - 1] = '\0';
1017 pe[i * 2 - 1] = '\0';
1019 dev_dbg(mhdp->dev, "%s, %u lanes, %u Mbps, vs %s, pe %s\n",
1021 mhdp->link.num_lanes, mhdp->link.rate / 100,
/* Channel-equalization phase of link training. Enables TPS[eq_tps] in the
 * PHY (scrambler bypassed for TPS < 4) and the sink, then loops: apply the
 * requested drive settings via phy_configure() + cdns_mhdp_adjust_lt(),
 * bail out if clock recovery is lost, succeed when drm_dp_channel_eq_ok().
 * Gives up after 5 failed iterations.
 * NOTE(review): extraction gaps — the do { opener, success/failure return
 * values and several error-path lines are missing between visible lines.
 */
1025 static bool cdns_mhdp_link_training_channel_eq(struct cdns_mhdp_device *mhdp,
1027 unsigned int training_interval)
1029 u8 lanes_data[CDNS_DP_MAX_NUM_LANES], fail_counter_short = 0;
1030 u8 link_status[DP_LINK_STATUS_SIZE];
1031 union phy_configure_opts phy_cfg;
1036 dev_dbg(mhdp->dev, "Starting EQ phase\n");
1038 /* Enable link training TPS[eq_tps] in PHY */
1039 reg32 = CDNS_PHY_COMMON_CONFIG | CDNS_PHY_TRAINING_EN |
1040 CDNS_PHY_TRAINING_TYPE(eq_tps);
1042 reg32 |= CDNS_PHY_SCRAMBLER_BYPASS;
1043 cdns_mhdp_reg_write(mhdp, CDNS_DPTX_PHY_CONFIG, reg32);
1045 drm_dp_dpcd_writeb(&mhdp->aux, DP_TRAINING_PATTERN_SET,
1046 (eq_tps != 4) ? eq_tps | DP_LINK_SCRAMBLING_DISABLE :
1047 CDNS_DP_TRAINING_PATTERN_4);
1049 drm_dp_dpcd_read_link_status(&mhdp->aux, link_status);
1052 cdns_mhdp_get_adjust_train(mhdp, link_status, lanes_data,
1054 phy_cfg.dp.lanes = mhdp->link.num_lanes;
1055 phy_cfg.dp.ssc = cdns_mhdp_get_ssc_supported(mhdp);
1056 phy_cfg.dp.set_lanes = false;
1057 phy_cfg.dp.set_rate = false;
1058 phy_cfg.dp.set_voltages = true;
1059 ret = phy_configure(mhdp->phy, &phy_cfg);
1061 dev_err(mhdp->dev, "%s: phy_configure() failed: %d\n",
1066 cdns_mhdp_adjust_lt(mhdp, mhdp->link.num_lanes,
1067 training_interval, lanes_data, link_status);
1069 r = drm_dp_clock_recovery_ok(link_status, mhdp->link.num_lanes);
1073 if (drm_dp_channel_eq_ok(link_status, mhdp->link.num_lanes)) {
1074 cdns_mhdp_print_lt_status("EQ phase ok", mhdp,
1079 fail_counter_short++;
1081 cdns_mhdp_adjust_requested_eq(mhdp, link_status);
1082 } while (fail_counter_short < 5);
1085 cdns_mhdp_print_lt_status("EQ phase failed", mhdp, &phy_cfg);
/* Rewrite the adjust-request fields in @link_status for the CR phase: use
 * the previously clamped per-lane requests (@req_volt/@req_pre), overridden
 * to the host maximum when CDNS_FORCE_* flags are set.
 * NOTE(review): extraction gaps — declarations of val/i and closing braces
 * are missing between the visible lines.
 */
1090 static void cdns_mhdp_adjust_requested_cr(struct cdns_mhdp_device *mhdp,
1091 u8 link_status[DP_LINK_STATUS_SIZE],
1092 u8 *req_volt, u8 *req_pre)
1094 const u8 max_volt = CDNS_VOLT_SWING(mhdp->host.volt_swing);
1095 const u8 max_pre = CDNS_PRE_EMPHASIS(mhdp->host.pre_emphasis);
1098 for (i = 0; i < mhdp->link.num_lanes; i++) {
1101 val = mhdp->host.volt_swing & CDNS_FORCE_VOLT_SWING ?
1102 max_volt : req_volt[i];
1103 cdns_mhdp_set_adjust_request_voltage(link_status, i, val);
1105 val = mhdp->host.pre_emphasis & CDNS_FORCE_PRE_EMPHASIS ?
1106 max_pre : req_pre[i];
1107 cdns_mhdp_set_adjust_request_pre_emphasis(link_status, i, val);
/* Evaluate one clock-recovery iteration. Outputs: *cr_done (all lanes
 * locked), *same_before_adjust (some lane's requested swing/pre-emphasis
 * equals what was already driven — i.e. no progress), *max_swing_reached
 * (per DP 1.4 §3.1.5.2 Table 3-1, combined level >= 3 without CR lock).
 * Also fills @req_volt/@req_pre with the sink's requests clamped to host
 * maxima.
 * NOTE(review): extraction gaps — declarations, the same_volt comparison
 * operand, the early return after max_swing and closing braces are missing
 * between the visible lines.
 */
1112 void cdns_mhdp_validate_cr(struct cdns_mhdp_device *mhdp, bool *cr_done,
1113 bool *same_before_adjust, bool *max_swing_reached,
1114 u8 before_cr[CDNS_DP_MAX_NUM_LANES],
1115 u8 after_cr[DP_LINK_STATUS_SIZE], u8 *req_volt,
1118 const u8 max_volt = CDNS_VOLT_SWING(mhdp->host.volt_swing);
1119 const u8 max_pre = CDNS_PRE_EMPHASIS(mhdp->host.pre_emphasis);
1120 bool same_pre, same_volt;
1124 *same_before_adjust = false;
1125 *max_swing_reached = false;
1126 *cr_done = drm_dp_clock_recovery_ok(after_cr, mhdp->link.num_lanes);
1128 for (i = 0; i < mhdp->link.num_lanes; i++) {
1129 adjust = drm_dp_get_adjust_request_voltage(after_cr, i);
1130 req_volt[i] = min(adjust, max_volt);
1132 adjust = drm_dp_get_adjust_request_pre_emphasis(after_cr, i) >>
1133 DP_TRAIN_PRE_EMPHASIS_SHIFT;
1134 req_pre[i] = min(adjust, max_pre);
1136 same_pre = (before_cr[i] & DP_TRAIN_PRE_EMPHASIS_MASK) ==
1137 req_pre[i] << DP_TRAIN_PRE_EMPHASIS_SHIFT;
1138 same_volt = (before_cr[i] & DP_TRAIN_VOLTAGE_SWING_MASK) ==
1140 if (same_pre && same_volt)
1141 *same_before_adjust = true;
1143 /* 3.1.5.2 in DP Standard v1.4. Table 3-1 */
1144 if (!*cr_done && req_volt[i] + req_pre[i] >= 3) {
1145 *max_swing_reached = true;
/* Clock-recovery phase of link training. After cdns_mhdp_link_training_init()
 * it loops: compute drive settings from the sink's requests, push them via
 * phy_configure() + cdns_mhdp_adjust_lt() (fixed 100 us wait), and evaluate
 * with cdns_mhdp_validate_cr(). Aborts on max-swing; tolerates up to 5
 * consecutive no-progress iterations and 10 total failed iterations (per
 * the DP spec's CR retry limits).
 * NOTE(review): extraction gaps — the do { opener, the cr_done declaration,
 * success/failure return values and closing braces are missing between the
 * visible lines.
 */
1151 static bool cdns_mhdp_link_training_cr(struct cdns_mhdp_device *mhdp)
1153 u8 lanes_data[CDNS_DP_MAX_NUM_LANES],
1154 fail_counter_short = 0, fail_counter_cr_long = 0;
1155 u8 link_status[DP_LINK_STATUS_SIZE];
1157 union phy_configure_opts phy_cfg;
1160 dev_dbg(mhdp->dev, "Starting CR phase\n");
1162 ret = cdns_mhdp_link_training_init(mhdp);
1166 drm_dp_dpcd_read_link_status(&mhdp->aux, link_status);
1169 u8 requested_adjust_volt_swing[CDNS_DP_MAX_NUM_LANES] = {};
1170 u8 requested_adjust_pre_emphasis[CDNS_DP_MAX_NUM_LANES] = {};
1171 bool same_before_adjust, max_swing_reached;
1173 cdns_mhdp_get_adjust_train(mhdp, link_status, lanes_data,
1175 phy_cfg.dp.lanes = mhdp->link.num_lanes;
1176 phy_cfg.dp.ssc = cdns_mhdp_get_ssc_supported(mhdp);
1177 phy_cfg.dp.set_lanes = false;
1178 phy_cfg.dp.set_rate = false;
1179 phy_cfg.dp.set_voltages = true;
1180 ret = phy_configure(mhdp->phy, &phy_cfg);
1182 dev_err(mhdp->dev, "%s: phy_configure() failed: %d\n",
1187 cdns_mhdp_adjust_lt(mhdp, mhdp->link.num_lanes, 100,
1188 lanes_data, link_status);
1190 cdns_mhdp_validate_cr(mhdp, &cr_done, &same_before_adjust,
1191 &max_swing_reached, lanes_data,
1193 requested_adjust_volt_swing,
1194 requested_adjust_pre_emphasis);
1196 if (max_swing_reached) {
1197 dev_err(mhdp->dev, "CR: max swing reached\n");
1202 cdns_mhdp_print_lt_status("CR phase ok", mhdp,
1207 /* Not all CR_DONE bits set */
1208 fail_counter_cr_long++;
1210 if (same_before_adjust) {
1211 fail_counter_short++;
1215 fail_counter_short = 0;
1217 * Voltage swing/pre-emphasis adjust requested
1220 cdns_mhdp_adjust_requested_cr(mhdp, link_status,
1221 requested_adjust_volt_swing,
1222 requested_adjust_pre_emphasis);
1223 } while (fail_counter_short < 5 && fail_counter_cr_long < 10);
1226 cdns_mhdp_print_lt_status("CR phase failed", mhdp, &phy_cfg);
/* Step the link rate down one notch: 8.1 -> 5.4 -> 2.7 -> 1.62 Gbps.
 * NOTE(review): extraction gaps — the opening brace, per-case break
 * statements and the closing braces are missing between the visible lines.
 */
1231 static void cdns_mhdp_lower_link_rate(struct cdns_mhdp_link *link)
1233 switch (drm_dp_link_rate_to_bw_code(link->rate)) {
1234 case DP_LINK_BW_2_7:
1235 link->rate = drm_dp_bw_code_to_link_rate(DP_LINK_BW_1_62);
1237 case DP_LINK_BW_5_4:
1238 link->rate = drm_dp_bw_code_to_link_rate(DP_LINK_BW_2_7);
1240 case DP_LINK_BW_8_1:
1241 link->rate = drm_dp_bw_code_to_link_rate(DP_LINK_BW_5_4);
/* Full link training with fallback. On CR failure: first lower the link
 * rate (resetting lanes — the reset line is not visible here), then halve
 * the lane count; on EQ failure: first halve the lane count, then lower the
 * rate and restore max lanes. On success, re-enable scrambling in the sink,
 * program the framer (lane count, failing-edge vsync, enable) and reset the
 * PHY training config. The trailing lines are the error path: same PHY
 * reset plus DP_TRAINING_PATTERN_DISABLE in the sink.
 * NOTE(review): extraction gaps — the retraining loop construct, goto
 * labels, several branch lines and returns are missing between visible
 * lines, so control flow here is partly inferred — confirm against the
 * unabridged source.
 */
1246 static int cdns_mhdp_link_training(struct cdns_mhdp_device *mhdp,
1247 unsigned int training_interval)
1250 const u8 eq_tps = cdns_mhdp_eq_training_pattern_supported(mhdp);
1254 if (!cdns_mhdp_link_training_cr(mhdp)) {
1255 if (drm_dp_link_rate_to_bw_code(mhdp->link.rate) !=
1258 "Reducing link rate during CR phase\n");
1259 cdns_mhdp_lower_link_rate(&mhdp->link);
1262 } else if (mhdp->link.num_lanes > 1) {
1264 "Reducing lanes number during CR phase\n");
1265 mhdp->link.num_lanes >>= 1;
1266 mhdp->link.rate = cdns_mhdp_max_link_rate(mhdp);
1272 "Link training failed during CR phase\n");
1276 if (cdns_mhdp_link_training_channel_eq(mhdp, eq_tps,
1280 if (mhdp->link.num_lanes > 1) {
1282 "Reducing lanes number during EQ phase\n");
1283 mhdp->link.num_lanes >>= 1;
1286 } else if (drm_dp_link_rate_to_bw_code(mhdp->link.rate) !=
1289 "Reducing link rate during EQ phase\n");
1290 cdns_mhdp_lower_link_rate(&mhdp->link);
1291 mhdp->link.num_lanes = cdns_mhdp_max_num_lanes(mhdp);
1296 dev_err(mhdp->dev, "Link training failed during EQ phase\n");
1300 dev_dbg(mhdp->dev, "Link training ok. Lanes: %u, Rate %u Mbps\n",
1301 mhdp->link.num_lanes, mhdp->link.rate / 100);
1303 drm_dp_dpcd_writeb(&mhdp->aux, DP_TRAINING_PATTERN_SET,
1304 mhdp->host.scrambler ? 0 :
1305 DP_LINK_SCRAMBLING_DISABLE);
1307 ret = cdns_mhdp_reg_read(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, &reg32);
1310 "Failed to read CDNS_DP_FRAMER_GLOBAL_CONFIG %d\n",
1314 reg32 &= ~GENMASK(1, 0);
1315 reg32 |= CDNS_DP_NUM_LANES(mhdp->link.num_lanes);
1316 reg32 |= CDNS_DP_WR_FAILING_EDGE_VSYNC;
1317 reg32 |= CDNS_DP_FRAMER_EN;
1318 cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, reg32);
1320 /* Reset PHY config */
1321 reg32 = CDNS_PHY_COMMON_CONFIG | CDNS_PHY_TRAINING_TYPE(1);
1322 if (!mhdp->host.scrambler)
1323 reg32 |= CDNS_PHY_SCRAMBLER_BYPASS;
1324 cdns_mhdp_reg_write(mhdp, CDNS_DPTX_PHY_CONFIG, reg32);
1328 /* Reset PHY config */
1329 reg32 = CDNS_PHY_COMMON_CONFIG | CDNS_PHY_TRAINING_TYPE(1);
1330 if (!mhdp->host.scrambler)
1331 reg32 |= CDNS_PHY_SCRAMBLER_BYPASS;
1332 cdns_mhdp_reg_write(mhdp, CDNS_DPTX_PHY_CONFIG, reg32);
1334 drm_dp_dpcd_writeb(&mhdp->aux, DP_TRAINING_PATTERN_SET,
1335 DP_TRAINING_PATTERN_DISABLE);
/* Convert the DPCD TRAINING_AUX_RD_INTERVAL code into microseconds:
 * 4000 << (interval - 1). Out-of-range codes are reported with dev_err.
 * NOTE(review): extraction gaps — the guard around the shifted return, the
 * dev_err call opener and the fallback return are missing.
 */
1340 static u32 cdns_mhdp_get_training_interval_us(struct cdns_mhdp_device *mhdp,
1346 return 4000 << (interval - 1);
1348 "wrong training interval returned by DPCD: %d\n", interval);
/* Populate mhdp->host capabilities from PHY attributes, with defaults of 4
 * lanes and HBR3 (8.1 Gbps) when the PHY reports none. The PHY reports rate
 * in Mb/s while DRM uses tens of kb/s — the conversion multiply is in an
 * extraction gap here. Fixed host policy: max swing/pre-emphasis of 3, all
 * four TPS supported, normal lane mapping, no fast link, enhanced framing
 * and scrambling on, SSC off.
 */
1352 static void cdns_mhdp_fill_host_caps(struct cdns_mhdp_device *mhdp)
1354 unsigned int link_rate;
1356 /* Get source capabilities based on PHY attributes */
1358 mhdp->host.lanes_cnt = mhdp->phy->attrs.bus_width;
1359 if (!mhdp->host.lanes_cnt)
1360 mhdp->host.lanes_cnt = 4;
1362 link_rate = mhdp->phy->attrs.max_link_rate;
1364 link_rate = drm_dp_bw_code_to_link_rate(DP_LINK_BW_8_1);
1366 /* PHY uses Mb/s, DRM uses tens of kb/s. */
1369 mhdp->host.link_rate = link_rate;
1370 mhdp->host.volt_swing = CDNS_VOLT_SWING(3);
1371 mhdp->host.pre_emphasis = CDNS_PRE_EMPHASIS(3);
1372 mhdp->host.pattern_supp = CDNS_SUPPORT_TPS(1) |
1373 CDNS_SUPPORT_TPS(2) | CDNS_SUPPORT_TPS(3) |
1374 CDNS_SUPPORT_TPS(4);
1375 mhdp->host.lane_mapping = CDNS_LANE_MAPPING_NORMAL;
1376 mhdp->host.fast_link = false;
1377 mhdp->host.enhanced = true;
1378 mhdp->host.scrambler = true;
1379 mhdp->host.ssc = false;
/* Populate mhdp->sink capabilities from the parsed link fields and the raw
 * DPCD receiver-cap block: rate/lanes/enhanced framing from mhdp->link,
 * SSC and fast-link (no-AUX-handshake) bits from DP_MAX_DOWNSPREAD, and
 * TPS3/TPS4 support from the DRM helpers.
 */
1382 static void cdns_mhdp_fill_sink_caps(struct cdns_mhdp_device *mhdp,
1383 u8 dpcd[DP_RECEIVER_CAP_SIZE])
1385 mhdp->sink.link_rate = mhdp->link.rate;
1386 mhdp->sink.lanes_cnt = mhdp->link.num_lanes;
1387 mhdp->sink.enhanced = !!(mhdp->link.capabilities &
1388 DP_LINK_CAP_ENHANCED_FRAMING);
1390 /* Set SSC support */
1391 mhdp->sink.ssc = !!(dpcd[DP_MAX_DOWNSPREAD] &
1392 DP_MAX_DOWNSPREAD_0_5);
1394 /* Set TPS support */
1395 mhdp->sink.pattern_supp = CDNS_SUPPORT_TPS(1) | CDNS_SUPPORT_TPS(2);
1396 if (drm_dp_tps3_supported(dpcd))
1397 mhdp->sink.pattern_supp |= CDNS_SUPPORT_TPS(3);
1398 if (drm_dp_tps4_supported(dpcd))
1399 mhdp->sink.pattern_supp |= CDNS_SUPPORT_TPS(4);
1401 /* Set fast link support */
1402 mhdp->sink.fast_link = !!(dpcd[DP_MAX_DOWNSPREAD] &
1403 DP_NO_AUX_HANDSHAKE_LINK_TRAINING);
/* Bring the DP link up (link_mutex held by caller): read the receiver caps
 * (from DP_DP13_DPCD_REV when the extended-cap bit is set, else the base
 * address), power the sink to D0, intersect host/sink caps, disable the
 * framer for training, program downspread + 8b/10b coding, reject
 * fast-link (unsupported), then run full link training with the interval
 * advertised in DPCD. Sets mhdp->link_up on success.
 * NOTE(review): the `host.fast_link & sink.fast_link` test uses bitwise &
 * on two bool fields — equivalent to && for bools, but flagged for style.
 * Extraction gaps — declarations, several error returns and the final
 * return are missing between the visible lines.
 */
1406 static int cdns_mhdp_link_up(struct cdns_mhdp_device *mhdp)
1408 u8 dpcd[DP_RECEIVER_CAP_SIZE], amp[2];
1409 u32 resp, interval, interval_us;
1414 WARN_ON(!mutex_is_locked(&mhdp->link_mutex));
1416 drm_dp_dpcd_readb(&mhdp->aux, DP_TRAINING_AUX_RD_INTERVAL,
1419 if (ext_cap_chk & DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT)
1420 addr = DP_DP13_DPCD_REV;
1424 err = drm_dp_dpcd_read(&mhdp->aux, addr, dpcd, DP_RECEIVER_CAP_SIZE);
1426 dev_err(mhdp->dev, "Failed to read receiver capabilities\n");
1430 mhdp->link.revision = dpcd[0];
1431 mhdp->link.rate = drm_dp_bw_code_to_link_rate(dpcd[1]);
1432 mhdp->link.num_lanes = dpcd[2] & DP_MAX_LANE_COUNT_MASK;
1434 if (dpcd[2] & DP_ENHANCED_FRAME_CAP)
1435 mhdp->link.capabilities |= DP_LINK_CAP_ENHANCED_FRAMING;
1437 dev_dbg(mhdp->dev, "Set sink device power state via DPCD\n");
1438 cdns_mhdp_link_power_up(&mhdp->aux, &mhdp->link);
1440 cdns_mhdp_fill_sink_caps(mhdp, dpcd);
1442 mhdp->link.rate = cdns_mhdp_max_link_rate(mhdp);
1443 mhdp->link.num_lanes = cdns_mhdp_max_num_lanes(mhdp);
1445 /* Disable framer for link training */
1446 err = cdns_mhdp_reg_read(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, &resp);
1449 "Failed to read CDNS_DP_FRAMER_GLOBAL_CONFIG %d\n",
1454 resp &= ~CDNS_DP_FRAMER_EN;
1455 cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, resp);
1457 /* Spread AMP if required, enable 8b/10b coding */
1458 amp[0] = cdns_mhdp_get_ssc_supported(mhdp) ? DP_SPREAD_AMP_0_5 : 0;
1459 amp[1] = DP_SET_ANSI_8B10B;
1460 drm_dp_dpcd_write(&mhdp->aux, DP_DOWNSPREAD_CTRL, amp, 2);
1462 if (mhdp->host.fast_link & mhdp->sink.fast_link) {
1463 dev_err(mhdp->dev, "fastlink not supported\n");
1467 interval = dpcd[DP_TRAINING_AUX_RD_INTERVAL] & DP_TRAINING_AUX_RD_MASK;
1468 interval_us = cdns_mhdp_get_training_interval_us(mhdp, interval);
1470 cdns_mhdp_link_training(mhdp, interval_us)) {
1471 dev_err(mhdp->dev, "Link training failed. Exiting.\n");
1475 mhdp->link_up = true;
/*
 * cdns_mhdp_link_down() - power down the sink and mark the link as down.
 * Must be called with link_mutex held.
 */
1480 static void cdns_mhdp_link_down(struct cdns_mhdp_device *mhdp)
1482 WARN_ON(!mutex_is_locked(&mhdp->link_mutex));
1485 cdns_mhdp_link_power_down(&mhdp->aux, &mhdp->link);
1487 mhdp->link_up = false;
/*
 * cdns_mhdp_get_edid() - read the sink EDID block-by-block via the
 * firmware mailbox helper (cdns_mhdp_get_edid_block).
 * Returns a kmalloc'ed EDID (caller frees with kfree) or NULL.
 */
1490 static struct edid *cdns_mhdp_get_edid(struct cdns_mhdp_device *mhdp,
1491 struct drm_connector *connector)
1496 return drm_do_get_edid(connector, cdns_mhdp_get_edid_block, mhdp);
/*
 * cdns_mhdp_get_modes() - connector .get_modes: read EDID, populate the
 * connector mode list and warn about display formats the driver does
 * not yet handle. Returns the number of modes added.
 */
1499 static int cdns_mhdp_get_modes(struct drm_connector *connector)
1501 struct cdns_mhdp_device *mhdp = connector_to_mhdp(connector);
1508 edid = cdns_mhdp_get_edid(mhdp, connector);
1510 dev_err(mhdp->dev, "Failed to read EDID\n");
1514 drm_connector_update_edid_property(connector, edid);
1515 num_modes = drm_add_edid_modes(connector, edid);
1519 * HACK: Warn about unsupported display formats until we deal
1520 * with them correctly.
/* Warn if the panel advertises no color format we can output. */
1522 if (connector->display_info.color_formats &&
1523 !(connector->display_info.color_formats &
1524 mhdp->display_fmt.color_format))
1526 "%s: No supported color_format found (0x%08x)\n",
1527 __func__, connector->display_info.color_formats);
/* Warn if the panel's bit depth is below our configured bpc. */
1529 if (connector->display_info.bpc &&
1530 connector->display_info.bpc < mhdp->display_fmt.bpc)
1531 dev_warn(mhdp->dev, "%s: Display bpc only %d < %d\n",
1532 __func__, connector->display_info.bpc,
1533 mhdp->display_fmt.bpc);
/* Connector .detect_ctx hook: delegate hot-plug detection to the core. */
1538 static int cdns_mhdp_connector_detect(struct drm_connector *conn,
1539 struct drm_modeset_acquire_ctx *ctx,
1542 struct cdns_mhdp_device *mhdp = connector_to_mhdp(conn);
1544 return cdns_mhdp_detect(mhdp);
/*
 * cdns_mhdp_get_bpp() - bits per pixel for the configured display format.
 * RGB/YCbCr444: 3 components per pixel; YCbCr422: 2; YCbCr420: 1.5.
 * (Default/Y-only case lies in lines elided from this view.)
 */
1547 static u32 cdns_mhdp_get_bpp(struct cdns_mhdp_display_fmt *fmt)
1554 switch (fmt->color_format) {
1555 case DRM_COLOR_FORMAT_RGB444:
1556 case DRM_COLOR_FORMAT_YCRCB444:
1559 case DRM_COLOR_FORMAT_YCRCB422:
1562 case DRM_COLOR_FORMAT_YCRCB420:
1563 bpp = fmt->bpc * 3 / 2;
/*
 * cdns_mhdp_bandwidth_ok() - check that @mode fits in the bandwidth of
 * @lanes lanes at link @rate (rate in 10kb/s units, see comment below).
 * Returns false (with a debug message) if the mode needs more bandwidth.
 */
1573 bool cdns_mhdp_bandwidth_ok(struct cdns_mhdp_device *mhdp,
1574 const struct drm_display_mode *mode,
1575 unsigned int lanes, unsigned int rate)
1577 u32 max_bw, req_bw, bpp;
1580 * mode->clock is expressed in kHz. Multiplying by bpp and dividing by 8
1581 * we get the number of kB/s. DisplayPort applies a 8b-10b encoding, the
1582 * value thus equals the bandwidth in 10kb/s units, which matches the
1583 * units of the rate parameter.
1586 bpp = cdns_mhdp_get_bpp(&mhdp->display_fmt);
1587 req_bw = mode->clock * bpp / 8;
1588 max_bw = lanes * rate;
1589 if (req_bw > max_bw) {
1591 "Unsupported Mode: %s, Req BW: %u, Available Max BW:%u\n",
1592 mode->name, req_bw, max_bw);
/*
 * cdns_mhdp_mode_valid() - connector .mode_valid: reject modes whose
 * bandwidth exceeds the current link. link_mutex guards the link
 * parameters read by the bandwidth check.
 */
1601 enum drm_mode_status cdns_mhdp_mode_valid(struct drm_connector *conn,
1602 struct drm_display_mode *mode)
1604 struct cdns_mhdp_device *mhdp = connector_to_mhdp(conn);
1606 mutex_lock(&mhdp->link_mutex);
1608 if (!cdns_mhdp_bandwidth_ok(mhdp, mode, mhdp->link.num_lanes,
1610 mutex_unlock(&mhdp->link_mutex);
1611 return MODE_CLOCK_HIGH;
1614 mutex_unlock(&mhdp->link_mutex);
/*
 * cdns_mhdp_connector_atomic_check() - connector .atomic_check for HDCP
 * content-protection transitions. Forces a modeset (mode_changed) when
 * the desired protection state actually changes.
 */
1618 static int cdns_mhdp_connector_atomic_check(struct drm_connector *conn,
1619 struct drm_atomic_state *state)
1621 struct cdns_mhdp_device *mhdp = connector_to_mhdp(conn);
1622 struct drm_connector_state *old_state, *new_state;
1623 struct drm_crtc_state *crtc_state;
/* Nothing to do if the hardware has no HDCP support. */
1626 if (!mhdp->hdcp_supported)
1629 old_state = drm_atomic_get_old_connector_state(state, conn);
1630 new_state = drm_atomic_get_new_connector_state(state, conn);
1631 old_cp = old_state->content_protection;
1632 new_cp = new_state->content_protection;
/* Content-type change requires re-enabling protection: drop back
 * to DESIRED so the enable path runs again. */
1634 if (old_state->hdcp_content_type != new_state->hdcp_content_type &&
1635 new_cp != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
1636 new_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
/* Connector being disabled: ENABLED cannot persist, downgrade. */
1640 if (!new_state->crtc) {
1641 if (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)
1642 new_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
/* No effective change (incl. DESIRED->ENABLED done by worker). */
1646 if (old_cp == new_cp ||
1647 (old_cp == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
1648 new_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED))
1652 crtc_state = drm_atomic_get_new_crtc_state(state, new_state->crtc);
1653 crtc_state->mode_changed = true;
/* Connector helper vtable used when the driver creates its own connector. */
1658 static const struct drm_connector_helper_funcs cdns_mhdp_conn_helper_funcs = {
1659 .detect_ctx = cdns_mhdp_connector_detect,
1660 .get_modes = cdns_mhdp_get_modes,
1661 .mode_valid = cdns_mhdp_mode_valid,
1662 .atomic_check = cdns_mhdp_connector_atomic_check,
/* Connector funcs: stock atomic helpers, no driver-specific state. */
1665 static const struct drm_connector_funcs cdns_mhdp_conn_funcs = {
1666 .fill_modes = drm_helper_probe_single_connector_modes,
1667 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
1668 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
1669 .reset = drm_atomic_helper_connector_reset,
1670 .destroy = drm_connector_cleanup,
/*
 * cdns_mhdp_connector_init() - create and register the DP connector,
 * attach it to the parent encoder and (optionally) the HDCP content
 * protection property. Returns 0 or a negative errno.
 */
1673 static int cdns_mhdp_connector_init(struct cdns_mhdp_device *mhdp)
1675 u32 bus_format = MEDIA_BUS_FMT_RGB121212_1X36;
1676 struct drm_connector *conn = &mhdp->connector;
1677 struct drm_bridge *bridge = &mhdp->bridge;
/* The bridge must already be attached to an encoder. */
1680 if (!bridge->encoder) {
1681 dev_err(mhdp->dev, "Parent encoder object not found");
/* HPD interrupt driven; no polling needed. */
1685 conn->polled = DRM_CONNECTOR_POLL_HPD;
1687 ret = drm_connector_init(bridge->dev, conn, &cdns_mhdp_conn_funcs,
1688 DRM_MODE_CONNECTOR_DisplayPort);
1690 dev_err(mhdp->dev, "Failed to initialize connector with drm\n");
1694 drm_connector_helper_add(conn, &cdns_mhdp_conn_helper_funcs);
1696 ret = drm_display_info_set_bus_formats(&conn->display_info,
1701 ret = drm_connector_attach_encoder(conn, bridge->encoder);
1703 dev_err(mhdp->dev, "Failed to attach connector to encoder\n");
1707 if (mhdp->hdcp_supported)
1708 ret = drm_connector_attach_content_protection_property(conn, true);
/*
 * cdns_mhdp_attach() - drm_bridge .attach: register the AUX channel,
 * optionally create the connector, mark the bridge attached under
 * start_lock and unmask the SW event (HPD) interrupt.
 */
1713 static int cdns_mhdp_attach(struct drm_bridge *bridge,
1714 enum drm_bridge_attach_flags flags)
1716 struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
1720 dev_dbg(mhdp->dev, "%s\n", __func__);
1722 mhdp->aux.drm_dev = bridge->dev;
1723 ret = drm_dp_aux_register(&mhdp->aux);
/* Create our own connector unless the display controller does. */
1727 if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) {
1728 ret = cdns_mhdp_connector_init(mhdp);
1730 goto aux_unregister;
/* start_lock guards bridge_attached/hw_state against the IRQ path. */
1733 spin_lock(&mhdp->start_lock);
1735 mhdp->bridge_attached = true;
1736 hw_ready = mhdp->hw_state == MHDP_HW_READY;
1738 spin_unlock(&mhdp->start_lock);
1740 /* Enable SW event interrupts */
1742 writel(~(u32)CDNS_APB_INT_MASK_SW_EVENT_INT,
1743 mhdp->regs + CDNS_APB_INT_MASK);
/* Error unwind: drop the AUX registration. */
1747 drm_dp_aux_unregister(&mhdp->aux);
/*
 * cdns_mhdp_configure_video() - program the framer and MSA (Main Stream
 * Attribute) registers for @mode on the current stream, then enable the
 * framer. misc0/misc1 follow the DP MSA MISC encoding (color format,
 * bit depth, interlace, VSC SDP).
 */
1751 static void cdns_mhdp_configure_video(struct cdns_mhdp_device *mhdp,
1752 const struct drm_display_mode *mode)
1754 unsigned int dp_framer_sp = 0, msa_horizontal_1,
1755 msa_vertical_1, bnd_hsync2vsync, hsync2vsync_pol_ctrl,
1756 misc0 = 0, misc1 = 0, pxl_repr,
1757 front_porch, back_porch, msa_h0, msa_v0, hsync, vsync,
1759 u8 stream_id = mhdp->stream_id;
1760 u32 bpp, bpc, pxlfmt, framer;
1763 pxlfmt = mhdp->display_fmt.color_format;
1764 bpc = mhdp->display_fmt.bpc;
1767 * If YCBCR supported and stream not SD, use ITU709
1768 * Need to handle ITU version with YCBCR420 when supported
1770 if ((pxlfmt == DRM_COLOR_FORMAT_YCRCB444 ||
1771 pxlfmt == DRM_COLOR_FORMAT_YCRCB422) && mode->crtc_vdisplay >= 720)
1772 misc0 = DP_YCBCR_COEFFICIENTS_ITU709;
1774 bpp = cdns_mhdp_get_bpp(&mhdp->display_fmt);
/* Map the color format to the framer pixel representation and the
 * MSA MISC0 color-format bits. */
1777 case DRM_COLOR_FORMAT_RGB444:
1778 pxl_repr = CDNS_DP_FRAMER_RGB << CDNS_DP_FRAMER_PXL_FORMAT;
1779 misc0 |= DP_COLOR_FORMAT_RGB;
1781 case DRM_COLOR_FORMAT_YCRCB444:
1782 pxl_repr = CDNS_DP_FRAMER_YCBCR444 << CDNS_DP_FRAMER_PXL_FORMAT;
1783 misc0 |= DP_COLOR_FORMAT_YCbCr444 | DP_TEST_DYNAMIC_RANGE_CEA;
1785 case DRM_COLOR_FORMAT_YCRCB422:
1786 pxl_repr = CDNS_DP_FRAMER_YCBCR422 << CDNS_DP_FRAMER_PXL_FORMAT;
1787 misc0 |= DP_COLOR_FORMAT_YCbCr422 | DP_TEST_DYNAMIC_RANGE_CEA;
1789 case DRM_COLOR_FORMAT_YCRCB420:
1790 pxl_repr = CDNS_DP_FRAMER_YCBCR420 << CDNS_DP_FRAMER_PXL_FORMAT;
1793 pxl_repr = CDNS_DP_FRAMER_Y_ONLY << CDNS_DP_FRAMER_PXL_FORMAT;
/* Map bits-per-component to the MSA bit-depth code and framer bpc. */
1798 misc0 |= DP_TEST_BIT_DEPTH_6;
1799 pxl_repr |= CDNS_DP_FRAMER_6_BPC;
1802 misc0 |= DP_TEST_BIT_DEPTH_8;
1803 pxl_repr |= CDNS_DP_FRAMER_8_BPC;
1806 misc0 |= DP_TEST_BIT_DEPTH_10;
1807 pxl_repr |= CDNS_DP_FRAMER_10_BPC;
1810 misc0 |= DP_TEST_BIT_DEPTH_12;
1811 pxl_repr |= CDNS_DP_FRAMER_12_BPC;
1814 misc0 |= DP_TEST_BIT_DEPTH_16;
1815 pxl_repr |= CDNS_DP_FRAMER_16_BPC;
/* Bypass the video interface conversion; flag interlaced input. */
1819 bnd_hsync2vsync = CDNS_IP_BYPASS_V_INTERFACE;
1820 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1821 bnd_hsync2vsync |= CDNS_IP_DET_INTERLACE_FORMAT;
1823 cdns_mhdp_reg_write(mhdp, CDNS_BND_HSYNC2VSYNC(stream_id),
/* Program sync polarities for the hsync2vsync block. */
1826 hsync2vsync_pol_ctrl = 0;
1827 if (mode->flags & DRM_MODE_FLAG_NHSYNC)
1828 hsync2vsync_pol_ctrl |= CDNS_H2V_HSYNC_POL_ACTIVE_LOW;
1829 if (mode->flags & DRM_MODE_FLAG_NVSYNC)
1830 hsync2vsync_pol_ctrl |= CDNS_H2V_VSYNC_POL_ACTIVE_LOW;
1831 cdns_mhdp_reg_write(mhdp, CDNS_HSYNC2VSYNC_POL_CTRL(stream_id),
1832 hsync2vsync_pol_ctrl);
1834 cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_PXL_REPR(stream_id), pxl_repr);
/* Framer stream parameters: interlace and sync polarities. */
1836 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1837 dp_framer_sp |= CDNS_DP_FRAMER_INTERLACE;
1838 if (mode->flags & DRM_MODE_FLAG_NHSYNC)
1839 dp_framer_sp |= CDNS_DP_FRAMER_HSYNC_POL_LOW;
1840 if (mode->flags & DRM_MODE_FLAG_NVSYNC)
1841 dp_framer_sp |= CDNS_DP_FRAMER_VSYNC_POL_LOW;
1842 cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_SP(stream_id), dp_framer_sp);
/* Horizontal timing: porches, active-line byte count, MSA H fields. */
1844 front_porch = mode->crtc_hsync_start - mode->crtc_hdisplay;
1845 back_porch = mode->crtc_htotal - mode->crtc_hsync_end;
1846 cdns_mhdp_reg_write(mhdp, CDNS_DP_FRONT_BACK_PORCH(stream_id),
1847 CDNS_DP_FRONT_PORCH(front_porch) |
1848 CDNS_DP_BACK_PORCH(back_porch));
1850 cdns_mhdp_reg_write(mhdp, CDNS_DP_BYTE_COUNT(stream_id),
1851 mode->crtc_hdisplay * bpp / 8);
1853 msa_h0 = mode->crtc_htotal - mode->crtc_hsync_start;
1854 cdns_mhdp_reg_write(mhdp, CDNS_DP_MSA_HORIZONTAL_0(stream_id),
1855 CDNS_DP_MSAH0_H_TOTAL(mode->crtc_htotal) |
1856 CDNS_DP_MSAH0_HSYNC_START(msa_h0));
1858 hsync = mode->crtc_hsync_end - mode->crtc_hsync_start;
1859 msa_horizontal_1 = CDNS_DP_MSAH1_HSYNC_WIDTH(hsync) |
1860 CDNS_DP_MSAH1_HDISP_WIDTH(mode->crtc_hdisplay);
1861 if (mode->flags & DRM_MODE_FLAG_NHSYNC)
1862 msa_horizontal_1 |= CDNS_DP_MSAH1_HSYNC_POL_LOW;
1863 cdns_mhdp_reg_write(mhdp, CDNS_DP_MSA_HORIZONTAL_1(stream_id),
/* Vertical timing: MSA V fields. */
1866 msa_v0 = mode->crtc_vtotal - mode->crtc_vsync_start;
1867 cdns_mhdp_reg_write(mhdp, CDNS_DP_MSA_VERTICAL_0(stream_id),
1868 CDNS_DP_MSAV0_V_TOTAL(mode->crtc_vtotal) |
1869 CDNS_DP_MSAV0_VSYNC_START(msa_v0));
1871 vsync = mode->crtc_vsync_end - mode->crtc_vsync_start;
1872 msa_vertical_1 = CDNS_DP_MSAV1_VSYNC_WIDTH(vsync) |
1873 CDNS_DP_MSAV1_VDISP_WIDTH(mode->crtc_vdisplay);
1874 if (mode->flags & DRM_MODE_FLAG_NVSYNC)
1875 msa_vertical_1 |= CDNS_DP_MSAV1_VSYNC_POL_LOW;
1876 cdns_mhdp_reg_write(mhdp, CDNS_DP_MSA_VERTICAL_1(stream_id),
/* MISC1: interlace with even vtotal, raw Y-only, VSC SDP for Y420.
 * NOTE(review): the Y420 branch uses '=' and so discards any earlier
 * misc1 bits (e.g. DP_TEST_INTERLACED) — confirm against the MSA spec. */
1879 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) &&
1880 mode->crtc_vtotal % 2 == 0)
1881 misc1 = DP_TEST_INTERLACED;
1882 if (mhdp->display_fmt.y_only)
1883 misc1 |= CDNS_DP_TEST_COLOR_FORMAT_RAW_Y_ONLY;
1884 /* Use VSC SDP for Y420 */
1885 if (pxlfmt == DRM_COLOR_FORMAT_YCRCB420)
1886 misc1 = CDNS_DP_TEST_VSC_SDP;
1888 cdns_mhdp_reg_write(mhdp, CDNS_DP_MSA_MISC(stream_id),
1889 misc0 | (misc1 << 8));
1891 cdns_mhdp_reg_write(mhdp, CDNS_DP_HORIZONTAL(stream_id),
1892 CDNS_DP_H_HSYNC_WIDTH(hsync) |
1893 CDNS_DP_H_H_TOTAL(mode->crtc_hdisplay));
1895 cdns_mhdp_reg_write(mhdp, CDNS_DP_VERTICAL_0(stream_id),
1896 CDNS_DP_V0_VHEIGHT(mode->crtc_vdisplay) |
1897 CDNS_DP_V0_VSTART(msa_v0));
1899 dp_vertical_1 = CDNS_DP_V1_VTOTAL(mode->crtc_vtotal);
1900 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) &&
1901 mode->crtc_vtotal % 2 == 0)
1902 dp_vertical_1 |= CDNS_DP_V1_VTOTAL_EVEN;
1904 cdns_mhdp_reg_write(mhdp, CDNS_DP_VERTICAL_1(stream_id), dp_vertical_1);
/* Set the interlaced bit in the VB-ID field. */
1906 cdns_mhdp_reg_write_bit(mhdp, CDNS_DP_VB_ID(stream_id), 2, 1,
1907 (mode->flags & DRM_MODE_FLAG_INTERLACE) ?
1908 CDNS_DP_VB_ID_INTERLACED : 0);
/* Finally re-enable the framer and leave "no video" mode. */
1910 ret = cdns_mhdp_reg_read(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, &framer);
1913 "Failed to read CDNS_DP_FRAMER_GLOBAL_CONFIG %d\n",
1917 framer |= CDNS_DP_FRAMER_EN;
1918 framer &= ~CDNS_DP_NO_VIDEO_MODE;
1919 cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, framer);
/*
 * cdns_mhdp_sst_enable() - configure single-stream transport for @mode:
 * compute the transfer-unit (TU) valid-symbols value and line
 * thresholds from pixel clock vs. link bandwidth, program the framer TU
 * registers and hand off to cdns_mhdp_configure_video().
 */
1922 static void cdns_mhdp_sst_enable(struct cdns_mhdp_device *mhdp,
1923 const struct drm_display_mode *mode)
1925 u32 rate, vs, required_bandwidth, available_bandwidth;
1926 s32 line_thresh1, line_thresh2, line_thresh = 0;
1927 int pxlclock = mode->crtc_clock;
1931 /* Get rate in MSymbols per second per lane */
1932 rate = mhdp->link.rate / 1000;
1934 bpp = cdns_mhdp_get_bpp(&mhdp->display_fmt);
/* Bandwidths in comparable units (pxlclock is kHz). */
1936 required_bandwidth = pxlclock * bpp / 8;
1937 available_bandwidth = mhdp->link.num_lanes * rate;
/* Valid symbols per TU = TU size scaled by bandwidth ratio. */
1939 vs = tu_size * required_bandwidth / available_bandwidth;
/* Line threshold heuristic (fixed-point, <<5 = x32). */
1945 line_thresh1 = ((vs + 1) << 5) * 8 / bpp;
1946 line_thresh2 = (pxlclock << 5) / 1000 / rate * (vs + 1) - (1 << 5);
1947 line_thresh = line_thresh1 - line_thresh2 / (s32)mhdp->link.num_lanes;
1948 line_thresh = (line_thresh >> 5) + 2;
/* SST: only stream 0 exists. */
1950 mhdp->stream_id = 0;
1952 cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_TU,
1953 CDNS_DP_FRAMER_TU_VS(vs) |
1954 CDNS_DP_FRAMER_TU_SIZE(tu_size) |
1955 CDNS_DP_FRAMER_TU_CNT_RST_EN);
1957 cdns_mhdp_reg_write(mhdp, CDNS_DP_LINE_THRESH(0),
1958 line_thresh & GENMASK(5, 0));
1960 cdns_mhdp_reg_write(mhdp, CDNS_DP_STREAM_CONFIG_2(0),
1961 CDNS_DP_SC2_TU_VS_DIFF((tu_size - vs > 3) ?
1964 cdns_mhdp_configure_video(mhdp, mode);
/*
 * cdns_mhdp_atomic_enable() - drm_bridge .atomic_enable: bring the link
 * up if needed, enable the VIF clock, optionally kick HDCP, then enable
 * SST for the adjusted mode. On failure the modeset-retry worker is
 * scheduled (see the schedule_work() at the end of the error path).
 */
1967 static void cdns_mhdp_atomic_enable(struct drm_bridge *bridge,
1968 struct drm_bridge_state *bridge_state)
1970 struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
1971 struct drm_atomic_state *state = bridge_state->base.state;
1972 struct cdns_mhdp_bridge_state *mhdp_state;
1973 struct drm_crtc_state *crtc_state;
1974 struct drm_connector *connector;
1975 struct drm_connector_state *conn_state;
1976 struct drm_bridge_state *new_state;
1977 const struct drm_display_mode *mode;
1981 dev_dbg(mhdp->dev, "bridge enable\n");
1983 mutex_lock(&mhdp->link_mutex);
/* Train the link only if a sink is plugged and the link is down. */
1985 if (mhdp->plugged && !mhdp->link_up) {
1986 ret = cdns_mhdp_link_up(mhdp);
/* Platform-specific enable hook (e.g. J721e wrapper). */
1991 if (mhdp->info && mhdp->info->ops && mhdp->info->ops->enable)
1992 mhdp->info->ops->enable(mhdp);
1994 /* Enable VIF clock for stream 0 */
1995 ret = cdns_mhdp_reg_read(mhdp, CDNS_DPTX_CAR, &resp);
1997 dev_err(mhdp->dev, "Failed to read CDNS_DPTX_CAR %d\n", ret);
2001 cdns_mhdp_reg_write(mhdp, CDNS_DPTX_CAR,
2002 resp | CDNS_VIF_CLK_EN | CDNS_VIF_CLK_RSTN);
2004 connector = drm_atomic_get_new_connector_for_encoder(state,
2006 if (WARN_ON(!connector))
2009 conn_state = drm_atomic_get_new_connector_state(state, connector);
2010 if (WARN_ON(!conn_state))
/* HDCP enable may sleep/do AUX work; drop link_mutex around it. */
2013 if (mhdp->hdcp_supported &&
2014 mhdp->hw_state == MHDP_HW_READY &&
2015 conn_state->content_protection ==
2016 DRM_MODE_CONTENT_PROTECTION_DESIRED) {
2017 mutex_unlock(&mhdp->link_mutex);
2018 cdns_mhdp_hdcp_enable(mhdp, conn_state->hdcp_content_type);
2019 mutex_lock(&mhdp->link_mutex);
2022 crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc);
2023 if (WARN_ON(!crtc_state))
2026 mode = &crtc_state->adjusted_mode;
2028 new_state = drm_atomic_get_new_bridge_state(state, bridge);
2029 if (WARN_ON(!new_state))
/* Re-verify bandwidth against the possibly re-trained link. */
2032 if (!cdns_mhdp_bandwidth_ok(mhdp, mode, mhdp->link.num_lanes,
2038 cdns_mhdp_sst_enable(mhdp, mode);
2040 mhdp_state = to_cdns_mhdp_bridge_state(new_state);
/* Keep a copy of the mode for link-status re-enable after HPD. */
2042 mhdp_state->current_mode = drm_mode_duplicate(bridge->dev, mode);
2043 drm_mode_set_name(mhdp_state->current_mode);
2045 dev_dbg(mhdp->dev, "%s: Enabling mode %s\n", __func__, mode->name);
2047 mhdp->bridge_enabled = true;
2050 mutex_unlock(&mhdp->link_mutex);
/* Error path: ask userspace to retry the modeset. */
2052 schedule_work(&mhdp->modeset_retry_work);
/*
 * cdns_mhdp_atomic_disable() - drm_bridge .atomic_disable: stop HDCP,
 * disable the framer, power the link down and gate the VIF clock.
 */
2055 static void cdns_mhdp_atomic_disable(struct drm_bridge *bridge,
2056 struct drm_bridge_state *bridge_state)
2058 struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
2061 dev_dbg(mhdp->dev, "%s\n", __func__);
2063 mutex_lock(&mhdp->link_mutex);
2065 if (mhdp->hdcp_supported)
2066 cdns_mhdp_hdcp_disable(mhdp);
2068 mhdp->bridge_enabled = false;
/* Stop the framer and enter "no video" mode. */
2069 cdns_mhdp_reg_read(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, &resp);
2070 resp &= ~CDNS_DP_FRAMER_EN;
2071 resp |= CDNS_DP_NO_VIDEO_MODE;
2072 cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, resp);
2074 cdns_mhdp_link_down(mhdp);
2076 /* Disable VIF clock for stream 0 */
2077 cdns_mhdp_reg_read(mhdp, CDNS_DPTX_CAR, &resp);
2078 cdns_mhdp_reg_write(mhdp, CDNS_DPTX_CAR,
2079 resp & ~(CDNS_VIF_CLK_EN | CDNS_VIF_CLK_RSTN));
/* Platform-specific disable hook. */
2081 if (mhdp->info && mhdp->info->ops && mhdp->info->ops->disable)
2082 mhdp->info->ops->disable(mhdp);
2084 mutex_unlock(&mhdp->link_mutex);
/*
 * cdns_mhdp_detach() - drm_bridge .detach: unregister AUX, mark the
 * bridge detached (so the IRQ path stops scheduling hotplug work) and
 * mask all APB interrupts.
 */
2087 static void cdns_mhdp_detach(struct drm_bridge *bridge)
2089 struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
2091 dev_dbg(mhdp->dev, "%s\n", __func__);
2093 drm_dp_aux_unregister(&mhdp->aux);
2095 spin_lock(&mhdp->start_lock);
2097 mhdp->bridge_attached = false;
2099 spin_unlock(&mhdp->start_lock);
/* Mask every interrupt source. */
2101 writel(~0, mhdp->regs + CDNS_APB_INT_MASK);
/*
 * Duplicate bridge state. Note: current_mode is intentionally not
 * copied from the old state; it is (re)set in atomic_enable.
 */
2104 static struct drm_bridge_state *
2105 cdns_mhdp_bridge_atomic_duplicate_state(struct drm_bridge *bridge)
2107 struct cdns_mhdp_bridge_state *state;
2109 state = kzalloc(sizeof(*state), GFP_KERNEL);
2113 __drm_atomic_helper_bridge_duplicate_state(bridge, &state->base);
2115 return &state->base;
/* Destroy bridge state, releasing the duplicated current_mode if set. */
2119 cdns_mhdp_bridge_atomic_destroy_state(struct drm_bridge *bridge,
2120 struct drm_bridge_state *state)
2122 struct cdns_mhdp_bridge_state *cdns_mhdp_state;
2124 cdns_mhdp_state = to_cdns_mhdp_bridge_state(state);
2126 if (cdns_mhdp_state->current_mode) {
2127 drm_mode_destroy(bridge->dev, cdns_mhdp_state->current_mode);
2128 cdns_mhdp_state->current_mode = NULL;
2131 kfree(cdns_mhdp_state);
/* Allocate and reset a fresh (zeroed) bridge state. */
2134 static struct drm_bridge_state *
2135 cdns_mhdp_bridge_atomic_reset(struct drm_bridge *bridge)
2137 struct cdns_mhdp_bridge_state *cdns_mhdp_state;
2139 cdns_mhdp_state = kzalloc(sizeof(*cdns_mhdp_state), GFP_KERNEL);
2140 if (!cdns_mhdp_state)
2143 __drm_atomic_helper_bridge_reset(bridge, &cdns_mhdp_state->base);
2145 return &cdns_mhdp_state->base;
/*
 * cdns_mhdp_atomic_check() - drm_bridge .atomic_check: reject the commit
 * if the adjusted mode exceeds the current link bandwidth.
 */
2148 static int cdns_mhdp_atomic_check(struct drm_bridge *bridge,
2149 struct drm_bridge_state *bridge_state,
2150 struct drm_crtc_state *crtc_state,
2151 struct drm_connector_state *conn_state)
2153 struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
2154 const struct drm_display_mode *mode = &crtc_state->adjusted_mode;
2156 mutex_lock(&mhdp->link_mutex);
2158 if (!cdns_mhdp_bandwidth_ok(mhdp, mode, mhdp->link.num_lanes,
/* link.rate is in 10kb/s units; /100 prints Mbps. */
2160 dev_err(mhdp->dev, "%s: Not enough BW for %s (%u lanes at %u Mbps)\n",
2161 __func__, mode->name, mhdp->link.num_lanes,
2162 mhdp->link.rate / 100);
2163 mutex_unlock(&mhdp->link_mutex);
2167 mutex_unlock(&mhdp->link_mutex);
/* Bridge .detect hook: delegate to the common detect routine. */
2171 static enum drm_connector_status cdns_mhdp_bridge_detect(struct drm_bridge *bridge)
2173 struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
2175 return cdns_mhdp_detect(mhdp);
/* Bridge .get_edid hook: delegate to the common EDID reader. */
2178 static struct edid *cdns_mhdp_bridge_get_edid(struct drm_bridge *bridge,
2179 struct drm_connector *connector)
2181 struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
2183 return cdns_mhdp_get_edid(mhdp, connector);
/* Unmask the SW-event (HPD) interrupt, but only once attached. */
2186 static void cdns_mhdp_bridge_hpd_enable(struct drm_bridge *bridge)
2188 struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
2190 /* Enable SW event interrupts */
2191 if (mhdp->bridge_attached)
2192 writel(~(u32)CDNS_APB_INT_MASK_SW_EVENT_INT,
2193 mhdp->regs + CDNS_APB_INT_MASK);
/* Mask the SW-event (HPD) interrupt. */
2196 static void cdns_mhdp_bridge_hpd_disable(struct drm_bridge *bridge)
2198 struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
2200 writel(CDNS_APB_INT_MASK_SW_EVENT_INT, mhdp->regs + CDNS_APB_INT_MASK);
/* drm_bridge operations vtable for the MHDP8546 DP bridge. */
2203 static const struct drm_bridge_funcs cdns_mhdp_bridge_funcs = {
2204 .atomic_enable = cdns_mhdp_atomic_enable,
2205 .atomic_disable = cdns_mhdp_atomic_disable,
2206 .atomic_check = cdns_mhdp_atomic_check,
2207 .attach = cdns_mhdp_attach,
2208 .detach = cdns_mhdp_detach,
2209 .atomic_duplicate_state = cdns_mhdp_bridge_atomic_duplicate_state,
2210 .atomic_destroy_state = cdns_mhdp_bridge_atomic_destroy_state,
2211 .atomic_reset = cdns_mhdp_bridge_atomic_reset,
2212 .detect = cdns_mhdp_bridge_detect,
2213 .get_edid = cdns_mhdp_bridge_get_edid,
2214 .hpd_enable = cdns_mhdp_bridge_hpd_enable,
2215 .hpd_disable = cdns_mhdp_bridge_hpd_disable,
/*
 * cdns_mhdp_detect_hpd() - query the firmware for HPD event and status.
 * Sets *hpd_pulse when the event bits contain an HPD pulse; returns the
 * current plugged state (false on any firmware query failure).
 */
2218 static bool cdns_mhdp_detect_hpd(struct cdns_mhdp_device *mhdp, bool *hpd_pulse)
2220 int hpd_event, hpd_status;
2224 hpd_event = cdns_mhdp_read_hpd_event(mhdp);
2226 /* Getting event bits failed, bail out */
2227 if (hpd_event < 0) {
2228 dev_warn(mhdp->dev, "%s: read event failed: %d\n",
2229 __func__, hpd_event);
2233 hpd_status = cdns_mhdp_get_hpd_status(mhdp);
2234 if (hpd_status < 0) {
2235 dev_warn(mhdp->dev, "%s: get hpd status failed: %d\n",
2236 __func__, hpd_status);
2240 if (hpd_event & DPTX_READ_EVENT_HPD_PULSE)
2243 return !!hpd_status;
/*
 * cdns_mhdp_update_link_status() - HPD worker core: re-check plug state
 * and link health. On unplug, take the link down and restore host link
 * defaults; on a bad link, retrain and re-enable the last committed
 * mode. Returns a status the caller uses to decide whether to schedule
 * the modeset-retry worker.
 */
2246 static int cdns_mhdp_update_link_status(struct cdns_mhdp_device *mhdp)
2248 struct cdns_mhdp_bridge_state *cdns_bridge_state;
2249 struct drm_display_mode *current_mode;
2250 bool old_plugged = mhdp->plugged;
2251 struct drm_bridge_state *state;
2252 u8 status[DP_LINK_STATUS_SIZE];
2256 mutex_lock(&mhdp->link_mutex);
2258 mhdp->plugged = cdns_mhdp_detect_hpd(mhdp, &hpd_pulse);
2260 if (!mhdp->plugged) {
2261 cdns_mhdp_link_down(mhdp);
/* Reset link params to host maxima for the next connect. */
2262 mhdp->link.rate = mhdp->host.link_rate;
2263 mhdp->link.num_lanes = mhdp->host.lanes_cnt;
2268 * If we get a HPD pulse event and we were and still are connected,
2269 * check the link status. If link status is ok, there's nothing to do
2270 * as we don't handle DP interrupts. If link status is bad, continue
2271 * with full link setup.
2273 if (hpd_pulse && old_plugged == mhdp->plugged) {
2274 ret = drm_dp_dpcd_read_link_status(&mhdp->aux, status);
2277 * If everything looks fine, just return, as we don't handle
2281 drm_dp_channel_eq_ok(status, mhdp->link.num_lanes) &&
2282 drm_dp_clock_recovery_ok(status, mhdp->link.num_lanes))
2285 /* If link is bad, mark link as down so that we do a new LT */
2286 mhdp->link_up = false;
2289 if (!mhdp->link_up) {
2290 ret = cdns_mhdp_link_up(mhdp);
/* If video was already enabled, re-apply the committed mode. */
2295 if (mhdp->bridge_enabled) {
2296 state = drm_priv_to_bridge_state(mhdp->bridge.base.state);
2302 cdns_bridge_state = to_cdns_mhdp_bridge_state(state);
2303 if (!cdns_bridge_state) {
2308 current_mode = cdns_bridge_state->current_mode;
2309 if (!current_mode) {
/* Retrained link may have fewer lanes/lower rate: re-check BW. */
2314 if (!cdns_mhdp_bandwidth_ok(mhdp, current_mode, mhdp->link.num_lanes,
2320 dev_dbg(mhdp->dev, "%s: Enabling mode %s\n", __func__,
2321 current_mode->name);
2323 cdns_mhdp_sst_enable(mhdp, current_mode);
2326 mutex_unlock(&mhdp->link_mutex);
/*
 * cdns_mhdp_modeset_retry_fn() - worker that flags the connector link
 * as BAD and emits a hotplug uevent so userspace performs a new modeset
 * (e.g. after link training failed in atomic_enable).
 */
2330 static void cdns_mhdp_modeset_retry_fn(struct work_struct *work)
2332 struct cdns_mhdp_device *mhdp;
2333 struct drm_connector *conn;
2335 mhdp = container_of(work, typeof(*mhdp), modeset_retry_work);
2337 conn = &mhdp->connector;
2339 /* Grab the locks before changing connector property */
2340 mutex_lock(&conn->dev->mode_config.mutex);
2343 * Set connector link status to BAD and send a Uevent to notify
2344 * userspace to do a modeset.
2346 drm_connector_set_link_status_property(conn, DRM_MODE_LINK_STATUS_BAD);
2347 mutex_unlock(&conn->dev->mode_config.mutex);
2349 /* Send Hotplug uevent so userspace can reprobe */
2350 drm_kms_helper_hotplug_event(mhdp->bridge.dev);
/*
 * cdns_mhdp_irq_handler() - threaded IRQ handler. Dispatches HPD events
 * to the hpd worker (only when a bridge is attached) and records other
 * firmware SW events for cdns_mhdp_wait_for_sw_event() waiters.
 */
2353 static irqreturn_t cdns_mhdp_irq_handler(int irq, void *data)
2355 struct cdns_mhdp_device *mhdp = data;
2356 u32 apb_stat, sw_ev0;
2357 bool bridge_attached;
2359 apb_stat = readl(mhdp->regs + CDNS_APB_INT_STATUS);
2360 if (!(apb_stat & CDNS_APB_INT_MASK_SW_EVENT_INT))
2363 sw_ev0 = readl(mhdp->regs + CDNS_SW_EVENT0);
2366 * Calling drm_kms_helper_hotplug_event() when not attached
2367 * to drm device causes an oops because the drm_bridge->dev
2368 * is NULL. See cdns_mhdp_fw_cb() comments for details about the
2369 * problems related drm_kms_helper_hotplug_event() call.
2371 spin_lock(&mhdp->start_lock);
2372 bridge_attached = mhdp->bridge_attached;
2373 spin_unlock(&mhdp->start_lock);
2375 if (bridge_attached && (sw_ev0 & CDNS_DPTX_HPD)) {
2376 schedule_work(&mhdp->hpd_work);
/* Non-HPD events: latch and wake any synchronous waiter. */
2379 if (sw_ev0 & ~CDNS_DPTX_HPD) {
2380 mhdp->sw_events |= (sw_ev0 & ~CDNS_DPTX_HPD);
2381 wake_up(&mhdp->sw_events_wq);
/*
 * cdns_mhdp_wait_for_sw_event() - block (up to 500 ms) until the IRQ
 * handler latches @event in mhdp->sw_events; returns the latched events
 * and clears @event from the pending set.
 */
2387 u32 cdns_mhdp_wait_for_sw_event(struct cdns_mhdp_device *mhdp, u32 event)
2391 ret = wait_event_timeout(mhdp->sw_events_wq,
2392 mhdp->sw_events & event,
2393 msecs_to_jiffies(500));
2395 dev_dbg(mhdp->dev, "SW event 0x%x timeout\n", event);
2399 ret = mhdp->sw_events;
2400 mhdp->sw_events &= ~event;
/*
 * cdns_mhdp_hpd_work() - HPD worker: refresh the link status, then
 * notify: via hotplug uevent (own connector) or drm_bridge_hpd_notify
 * (connector owned by the display controller). On failure, schedule
 * the modeset-retry worker.
 */
2406 static void cdns_mhdp_hpd_work(struct work_struct *work)
2408 struct cdns_mhdp_device *mhdp = container_of(work,
2409 struct cdns_mhdp_device,
2413 ret = cdns_mhdp_update_link_status(mhdp);
2414 if (mhdp->connector.dev) {
2416 schedule_work(&mhdp->modeset_retry_work);
2418 drm_kms_helper_hotplug_event(mhdp->bridge.dev);
2420 drm_bridge_hpd_notify(&mhdp->bridge, cdns_mhdp_detect(mhdp));
/*
 * cdns_mhdp_probe() - platform driver probe: map registers, get clock
 * and PHY, set up runtime PM, configure host capabilities and default
 * display format, request the IRQ, load firmware asynchronously and
 * register the drm_bridge.
 */
2424 static int cdns_mhdp_probe(struct platform_device *pdev)
2426 struct device *dev = &pdev->dev;
2427 struct cdns_mhdp_device *mhdp;
2433 mhdp = devm_kzalloc(dev, sizeof(*mhdp), GFP_KERNEL);
2437 clk = devm_clk_get(dev, NULL);
2439 dev_err(dev, "couldn't get clk: %ld\n", PTR_ERR(clk));
2440 return PTR_ERR(clk);
2445 mutex_init(&mhdp->mbox_mutex);
2446 mutex_init(&mhdp->link_mutex);
2447 spin_lock_init(&mhdp->start_lock);
2449 drm_dp_aux_init(&mhdp->aux);
2450 mhdp->aux.dev = dev;
2451 mhdp->aux.transfer = cdns_mhdp_transfer;
2453 mhdp->regs = devm_platform_ioremap_resource(pdev, 0);
2454 if (IS_ERR(mhdp->regs)) {
2455 dev_err(dev, "Failed to get memory resource\n");
2456 return PTR_ERR(mhdp->regs);
/* The secure APB region is optional; without it HDCP is disabled. */
2459 mhdp->sapb_regs = devm_platform_ioremap_resource_byname(pdev, "mhdptx-sapb");
2460 if (IS_ERR(mhdp->sapb_regs)) {
2461 mhdp->hdcp_supported = false;
2463 "Failed to get SAPB memory resource, HDCP not supported\n");
2465 mhdp->hdcp_supported = true;
2468 mhdp->phy = devm_of_phy_get_by_index(dev, pdev->dev.of_node, 0);
2469 if (IS_ERR(mhdp->phy)) {
2470 dev_err(dev, "no PHY configured\n");
2471 return PTR_ERR(mhdp->phy);
2474 platform_set_drvdata(pdev, mhdp);
2476 mhdp->info = of_device_get_match_data(dev);
2478 clk_prepare_enable(clk);
2480 pm_runtime_enable(dev);
2481 ret = pm_runtime_get_sync(dev);
2483 dev_err(dev, "pm_runtime_get_sync failed\n");
2484 pm_runtime_disable(dev);
/* Optional platform-specific init (e.g. TI J721e wrapper). */
2488 if (mhdp->info && mhdp->info->ops && mhdp->info->ops->init) {
2489 ret = mhdp->info->ops->init(mhdp);
2491 dev_err(dev, "MHDP platform initialization failed: %d\n",
/* Tell the firmware the functional clock rate (MHz + remainder Hz). */
2497 rate = clk_get_rate(clk);
2498 writel(rate % 1000000, mhdp->regs + CDNS_SW_CLK_L);
2499 writel(rate / 1000000, mhdp->regs + CDNS_SW_CLK_H);
2501 dev_dbg(dev, "func clk rate %lu Hz\n", rate);
/* Mask all interrupts until the bridge is attached. */
2503 writel(~0, mhdp->regs + CDNS_APB_INT_MASK);
2505 irq = platform_get_irq(pdev, 0);
2506 ret = devm_request_threaded_irq(mhdp->dev, irq, NULL,
2507 cdns_mhdp_irq_handler, IRQF_ONESHOT,
2510 dev_err(dev, "cannot install IRQ %d\n", irq);
2515 cdns_mhdp_fill_host_caps(mhdp);
2517 /* Initialize link rate and num of lanes to host values */
2518 mhdp->link.rate = mhdp->host.link_rate;
2519 mhdp->link.num_lanes = mhdp->host.lanes_cnt;
2521 /* The only currently supported format */
2522 mhdp->display_fmt.y_only = false;
2523 mhdp->display_fmt.color_format = DRM_COLOR_FORMAT_RGB444;
2524 mhdp->display_fmt.bpc = 8;
2526 mhdp->bridge.of_node = pdev->dev.of_node;
2527 mhdp->bridge.funcs = &cdns_mhdp_bridge_funcs;
2528 mhdp->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID |
2530 mhdp->bridge.type = DRM_MODE_CONNECTOR_DisplayPort;
2532 mhdp->bridge.timings = mhdp->info->timings;
2534 ret = phy_init(mhdp->phy);
2536 dev_err(mhdp->dev, "Failed to initialize PHY: %d\n", ret);
2540 /* Initialize the work for modeset in case of link train failure */
2541 INIT_WORK(&mhdp->modeset_retry_work, cdns_mhdp_modeset_retry_fn);
2542 INIT_WORK(&mhdp->hpd_work, cdns_mhdp_hpd_work);
2544 init_waitqueue_head(&mhdp->fw_load_wq);
2545 init_waitqueue_head(&mhdp->sw_events_wq);
/* Firmware loads asynchronously; see the TODO list at file top. */
2547 ret = cdns_mhdp_load_firmware(mhdp);
2551 if (mhdp->hdcp_supported)
2552 cdns_mhdp_hdcp_init(mhdp);
2554 drm_bridge_add(&mhdp->bridge);
/* Error unwind: PHY, platform exit hook, runtime PM, clock. */
2559 phy_exit(mhdp->phy);
2561 if (mhdp->info && mhdp->info->ops && mhdp->info->ops->exit)
2562 mhdp->info->ops->exit(mhdp);
2564 pm_runtime_put_sync(dev);
2565 pm_runtime_disable(dev);
2567 clk_disable_unprepare(mhdp->clk);
/*
 * cdns_mhdp_remove() - platform driver remove: unregister the bridge,
 * wait for any in-flight firmware load to finish, deactivate the
 * firmware, and release PHY / PM / clock resources.
 */
2572 static int cdns_mhdp_remove(struct platform_device *pdev)
2574 struct cdns_mhdp_device *mhdp = platform_get_drvdata(pdev);
2575 unsigned long timeout = msecs_to_jiffies(100);
2576 bool stop_fw = false;
2579 drm_bridge_remove(&mhdp->bridge);
/* Firmware load may still be in progress; wait (bounded) for it. */
2581 ret = wait_event_timeout(mhdp->fw_load_wq,
2582 mhdp->hw_state == MHDP_HW_READY,
2585 dev_err(mhdp->dev, "%s: Timeout waiting for fw loading\n",
2590 spin_lock(&mhdp->start_lock);
2591 mhdp->hw_state = MHDP_HW_STOPPED;
2592 spin_unlock(&mhdp->start_lock);
2595 ret = cdns_mhdp_set_firmware_active(mhdp, false);
2597 phy_exit(mhdp->phy);
2599 if (mhdp->info && mhdp->info->ops && mhdp->info->ops->exit)
2600 mhdp->info->ops->exit(mhdp);
2602 pm_runtime_put_sync(&pdev->dev);
2603 pm_runtime_disable(&pdev->dev);
/* Make sure the retry/HPD workers are not still running. */
2605 cancel_work_sync(&mhdp->modeset_retry_work);
2606 flush_scheduled_work();
2608 clk_disable_unprepare(mhdp->clk);
/* OF match table; the TI J721e variant carries wrapper-specific ops. */
2613 static const struct of_device_id mhdp_ids[] = {
2614 { .compatible = "cdns,mhdp8546", },
2615 #ifdef CONFIG_DRM_CDNS_MHDP8546_J721E
2616 { .compatible = "ti,j721e-mhdp8546",
2617 .data = &(const struct cdns_mhdp_platform_info) {
2618 .timings = &mhdp_ti_j721e_bridge_timings,
2619 .ops = &mhdp_ti_j721e_ops,
2625 MODULE_DEVICE_TABLE(of, mhdp_ids);
/* Platform driver registration. */
2627 static struct platform_driver mhdp_driver = {
2629 .name = "cdns-mhdp8546",
2630 .of_match_table = of_match_ptr(mhdp_ids),
2632 .probe = cdns_mhdp_probe,
2633 .remove = cdns_mhdp_remove,
2635 module_platform_driver(mhdp_driver);
2637 MODULE_FIRMWARE(FW_NAME);
2639 MODULE_AUTHOR("Quentin Schulz <quentin.schulz@free-electrons.com>");
2640 MODULE_AUTHOR("Swapnil Jakhade <sjakhade@cadence.com>");
2641 MODULE_AUTHOR("Yuti Amonkar <yamonkar@cadence.com>");
2642 MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>");
2643 MODULE_AUTHOR("Jyri Sarha <jsarha@ti.com>");
2644 MODULE_DESCRIPTION("Cadence MHDP8546 DP bridge driver");
2645 MODULE_LICENSE("GPL");
2646 MODULE_ALIAS("platform:cdns-mhdp8546");