/*
 * Copyright © 2014 Red Hat
 *
 * Permission to use, copy, modify, distribute, and sell this software and its
 * documentation for any purpose is hereby granted without fee, provided that
 * the above copyright notice appear in all copies and that both that copyright
 * notice and this permission notice appear in supporting documentation, and
 * that the name of the copyright holders not be used in advertising or
 * publicity pertaining to distribution of the software without specific,
 * written prior permission. The copyright holders make no representations
 * about the suitability of this software for any purpose. It is provided "as
 * is" without express or implied warranty.
 *
 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
 * OF THIS SOFTWARE.
 */
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/iopoll.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fixed.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>

#include "drm_crtc_helper_internal.h"
/**
 * DOC: dp mst helper
 *
 * These functions contain parts of the DisplayPort 1.2a MultiStream Transport
 * protocol. The helpers contain a topology manager and bandwidth manager.
 * The helpers encapsulate the sending and receiving of sideband msgs.
 */
static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
				  char *buf);
static int test_calc_pbn_mode(void);

static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port);

static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
				     int id,
				     struct drm_dp_payload *payload);

static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_mst_port *port,
				 int offset, int size, u8 *bytes);
static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
				  struct drm_dp_mst_port *port,
				  int offset, int size, u8 *bytes);

static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
				     struct drm_dp_mst_branch *mstb);
static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
					   struct drm_dp_mst_branch *mstb,
					   struct drm_dp_mst_port *port);
static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
				 u8 *guid);

static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux);
static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux);
static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr);
#define DP_STR(x) [DP_ ## x] = #x

static const char *drm_dp_mst_req_type_str(u8 req_type)
{
	static const char * const req_type_str[] = {
		DP_STR(GET_MSG_TRANSACTION_VERSION),
		DP_STR(CONNECTION_STATUS_NOTIFY),
		DP_STR(ENUM_PATH_RESOURCES),
		DP_STR(ALLOCATE_PAYLOAD),
		DP_STR(QUERY_PAYLOAD),
		DP_STR(RESOURCE_STATUS_NOTIFY),
		DP_STR(CLEAR_PAYLOAD_ID_TABLE),
		DP_STR(REMOTE_DPCD_READ),
		DP_STR(REMOTE_DPCD_WRITE),
		DP_STR(REMOTE_I2C_READ),
		DP_STR(REMOTE_I2C_WRITE),
		DP_STR(POWER_DOWN_PHY),
		DP_STR(SINK_EVENT_NOTIFY),
		DP_STR(QUERY_STREAM_ENC_STATUS),
	};

	if (req_type >= ARRAY_SIZE(req_type_str) ||
	    !req_type_str[req_type])
		return "unknown";

	return req_type_str[req_type];
}
#undef DP_STR
#define DP_STR(x) [DP_NAK_ ## x] = #x

static const char *drm_dp_mst_nak_reason_str(u8 nak_reason)
{
	static const char * const nak_reason_str[] = {
		DP_STR(WRITE_FAILURE),
		DP_STR(INVALID_READ),
		DP_STR(LINK_FAILURE),
		DP_STR(NO_RESOURCES),
		DP_STR(ALLOCATE_FAIL),
	};

	if (nak_reason >= ARRAY_SIZE(nak_reason_str) ||
	    !nak_reason_str[nak_reason])
		return "unknown";

	return nak_reason_str[nak_reason];
}
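/*
 * Note on the DP_STR tables above: the macro combines a C99 designated array
 * initializer with the # stringizing operator, so DP_STR(ENUM_PATH_RESOURCES)
 * expands to [DP_ENUM_PATH_RESOURCES] = "ENUM_PATH_RESOURCES". Request values
 * without a named entry are left NULL, which is why the lookups check both
 * the array bound and the slot itself before returning a string.
 */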
/* sideband msg handling */
static u8 drm_dp_msg_header_crc4(const uint8_t *data, size_t num_nibbles)
	int number_of_bits = num_nibbles * 4;

	while (number_of_bits != 0) {
		remainder |= (data[array_index] & bitmask) >> bitshift;
		if ((remainder & 0x10) == 0x10)

	while (number_of_bits != 0) {
		if ((remainder & 0x10) != 0)

static u8 drm_dp_msg_data_crc4(const uint8_t *data, u8 number_of_bytes)
	int number_of_bits = number_of_bytes * 8;

	while (number_of_bits != 0) {
		remainder |= (data[array_index] & bitmask) >> bitshift;
		if ((remainder & 0x100) == 0x100)

	while (number_of_bits != 0) {
		if ((remainder & 0x100) != 0)

	return remainder & 0xff;

static inline u8 drm_dp_calc_sb_hdr_size(struct drm_dp_sideband_msg_hdr *hdr)
{
	u8 size = 3;

	size += (hdr->lct / 2);
	return size;
}

static void drm_dp_encode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
					   u8 *buf, int *len)
	buf[idx++] = ((hdr->lct & 0xf) << 4) | (hdr->lcr & 0xf);
	for (i = 0; i < (hdr->lct / 2); i++)
		buf[idx++] = hdr->rad[i];
	buf[idx++] = (hdr->broadcast << 7) | (hdr->path_msg << 6) |
		     (hdr->msg_len & 0x3f);
	buf[idx++] = (hdr->somt << 7) | (hdr->eomt << 6) | (hdr->seqno << 4);

	crc4 = drm_dp_msg_header_crc4(buf, (idx * 2) - 1);
	buf[idx - 1] |= (crc4 & 0xf);
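/*
 * Worked example of the header layout produced above, for a branch attached
 * directly to the source (lct = 1, lcr = 0): the encoded header is 3 bytes.
 * buf[0] is 0x10 (lct in the high nibble, lcr in the low nibble), no RAD
 * bytes follow, buf[1] packs broadcast/path_msg/msg_len, and buf[2] packs
 * somt/eomt/seqno with the 4-bit CRC in its low nibble. The CRC is computed
 * over (idx * 2) - 1 nibbles, i.e. every header nibble except the CRC slot
 * itself.
 */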
static bool drm_dp_decode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
					   u8 *buf, int buflen, u8 *hdrlen)
	len += ((buf[0] & 0xf0) >> 4) / 2;

	crc4 = drm_dp_msg_header_crc4(buf, (len * 2) - 1);
	if ((crc4 & 0xf) != (buf[len - 1] & 0xf)) {
		DRM_DEBUG_KMS("crc4 mismatch 0x%x 0x%x\n", crc4, buf[len - 1]);

	hdr->lct = (buf[0] & 0xf0) >> 4;
	hdr->lcr = (buf[0] & 0xf);
	for (i = 0; i < (hdr->lct / 2); i++)
		hdr->rad[i] = buf[idx++];
	hdr->broadcast = (buf[idx] >> 7) & 0x1;
	hdr->path_msg = (buf[idx] >> 6) & 0x1;
	hdr->msg_len = buf[idx] & 0x3f;
	hdr->somt = (buf[idx] >> 7) & 0x1;
	hdr->eomt = (buf[idx] >> 6) & 0x1;
	hdr->seqno = (buf[idx] >> 4) & 0x1;

static void drm_dp_encode_sideband_req(struct drm_dp_sideband_msg_req_body *req,
				       struct drm_dp_sideband_msg_tx *raw)
	buf[idx++] = req->req_type & 0x7f;

	switch (req->req_type) {
	case DP_ENUM_PATH_RESOURCES:
		buf[idx] = (req->u.port_num.port_number & 0xf) << 4;
	case DP_ALLOCATE_PAYLOAD:
		buf[idx] = (req->u.allocate_payload.port_number & 0xf) << 4 |
			   (req->u.allocate_payload.number_sdp_streams & 0xf);
		buf[idx] = (req->u.allocate_payload.vcpi & 0x7f);
		buf[idx] = (req->u.allocate_payload.pbn >> 8);
		buf[idx] = (req->u.allocate_payload.pbn & 0xff);
		for (i = 0; i < req->u.allocate_payload.number_sdp_streams / 2; i++) {
			buf[idx] = ((req->u.allocate_payload.sdp_stream_sink[i * 2] & 0xf) << 4) |
				   (req->u.allocate_payload.sdp_stream_sink[i * 2 + 1] & 0xf);
		if (req->u.allocate_payload.number_sdp_streams & 1) {
			i = req->u.allocate_payload.number_sdp_streams - 1;
			buf[idx] = (req->u.allocate_payload.sdp_stream_sink[i] & 0xf) << 4;
	case DP_QUERY_PAYLOAD:
		buf[idx] = (req->u.query_payload.port_number & 0xf) << 4;
		buf[idx] = (req->u.query_payload.vcpi & 0x7f);
	case DP_REMOTE_DPCD_READ:
		buf[idx] = (req->u.dpcd_read.port_number & 0xf) << 4;
		buf[idx] |= ((req->u.dpcd_read.dpcd_address & 0xf0000) >> 16) & 0xf;
		buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff00) >> 8;
		buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff);
		buf[idx] = (req->u.dpcd_read.num_bytes);
	case DP_REMOTE_DPCD_WRITE:
		buf[idx] = (req->u.dpcd_write.port_number & 0xf) << 4;
		buf[idx] |= ((req->u.dpcd_write.dpcd_address & 0xf0000) >> 16) & 0xf;
		buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff00) >> 8;
		buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff);
		buf[idx] = (req->u.dpcd_write.num_bytes);
		memcpy(&buf[idx], req->u.dpcd_write.bytes, req->u.dpcd_write.num_bytes);
		idx += req->u.dpcd_write.num_bytes;
	case DP_REMOTE_I2C_READ:
		buf[idx] = (req->u.i2c_read.port_number & 0xf) << 4;
		buf[idx] |= (req->u.i2c_read.num_transactions & 0x3);
		for (i = 0; i < (req->u.i2c_read.num_transactions & 0x3); i++) {
			buf[idx] = req->u.i2c_read.transactions[i].i2c_dev_id & 0x7f;
			buf[idx] = req->u.i2c_read.transactions[i].num_bytes;
			memcpy(&buf[idx], req->u.i2c_read.transactions[i].bytes, req->u.i2c_read.transactions[i].num_bytes);
			idx += req->u.i2c_read.transactions[i].num_bytes;
			buf[idx] = (req->u.i2c_read.transactions[i].no_stop_bit & 0x1) << 4;
			buf[idx] |= (req->u.i2c_read.transactions[i].i2c_transaction_delay & 0xf);
		buf[idx] = (req->u.i2c_read.read_i2c_device_id) & 0x7f;
		buf[idx] = (req->u.i2c_read.num_bytes_read);
	case DP_REMOTE_I2C_WRITE:
		buf[idx] = (req->u.i2c_write.port_number & 0xf) << 4;
		buf[idx] = (req->u.i2c_write.write_i2c_device_id) & 0x7f;
		buf[idx] = (req->u.i2c_write.num_bytes);
		memcpy(&buf[idx], req->u.i2c_write.bytes, req->u.i2c_write.num_bytes);
		idx += req->u.i2c_write.num_bytes;
	case DP_POWER_DOWN_PHY:
	case DP_POWER_UP_PHY:
		buf[idx] = (req->u.port_num.port_number & 0xf) << 4;

static void drm_dp_crc_sideband_chunk_req(u8 *msg, u8 len)
	crc4 = drm_dp_msg_data_crc4(msg, len);

static void drm_dp_encode_sideband_reply(struct drm_dp_sideband_msg_reply_body *rep,
					 struct drm_dp_sideband_msg_tx *raw)
	buf[idx++] = (rep->reply_type & 0x1) << 7 | (rep->req_type & 0x7f);
/* this adds a chunk of msg to the builder to get the final msg */
static bool drm_dp_sideband_msg_build(struct drm_dp_sideband_msg_rx *msg,
				      u8 *replybuf, u8 replybuflen, bool hdr)
		struct drm_dp_sideband_msg_hdr recv_hdr;
		ret = drm_dp_decode_sideband_msg_hdr(&recv_hdr, replybuf, replybuflen, &hdrlen);
			print_hex_dump(KERN_DEBUG, "failed hdr", DUMP_PREFIX_NONE, 16, 1, replybuf, replybuflen, false);

		/*
		 * ignore out-of-order messages or messages that are part of a
		 * failed transaction
		 */
		if (!recv_hdr.somt && !msg->have_somt)
			return false;

		/* get length contained in this portion */
		msg->curchunk_len = recv_hdr.msg_len;
		msg->curchunk_hdrlen = hdrlen;

		/* we have already gotten an somt - don't bother parsing */
		if (recv_hdr.somt && msg->have_somt)
			return false;

		memcpy(&msg->initial_hdr, &recv_hdr, sizeof(struct drm_dp_sideband_msg_hdr));
		msg->have_somt = true;
		msg->have_eomt = true;

		/* copy the bytes for the remainder of this header chunk */
		msg->curchunk_idx = min(msg->curchunk_len, (u8)(replybuflen - hdrlen));
		memcpy(&msg->chunk[0], replybuf + hdrlen, msg->curchunk_idx);
		memcpy(&msg->chunk[msg->curchunk_idx], replybuf, replybuflen);
		msg->curchunk_idx += replybuflen;

	if (msg->curchunk_idx >= msg->curchunk_len) {
		crc4 = drm_dp_msg_data_crc4(msg->chunk, msg->curchunk_len - 1);
		/* copy chunk into bigger msg */
		memcpy(&msg->msg[msg->curlen], msg->chunk, msg->curchunk_len - 1);
		msg->curlen += msg->curchunk_len - 1;
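/*
 * Illustration of the reassembly above: a reply too large for one AUX
 * transfer arrives as multiple chunks. The first chunk has somt set, the
 * final chunk has eomt set, and every chunk ends in a data CRC byte, which
 * is why only curchunk_len - 1 bytes are appended to the assembled message.
 */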
static bool drm_dp_sideband_parse_link_address(struct drm_dp_sideband_msg_rx *raw,
					       struct drm_dp_sideband_msg_reply_body *repmsg)
	memcpy(repmsg->u.link_addr.guid, &raw->msg[idx], 16);
	repmsg->u.link_addr.nports = raw->msg[idx] & 0xf;
	if (idx > raw->curlen)
	for (i = 0; i < repmsg->u.link_addr.nports; i++) {
		if (raw->msg[idx] & 0x80)
			repmsg->u.link_addr.ports[i].input_port = 1;

		repmsg->u.link_addr.ports[i].peer_device_type = (raw->msg[idx] >> 4) & 0x7;
		repmsg->u.link_addr.ports[i].port_number = (raw->msg[idx] & 0xf);
		if (idx > raw->curlen)
		repmsg->u.link_addr.ports[i].mcs = (raw->msg[idx] >> 7) & 0x1;
		repmsg->u.link_addr.ports[i].ddps = (raw->msg[idx] >> 6) & 0x1;
		if (repmsg->u.link_addr.ports[i].input_port == 0)
			repmsg->u.link_addr.ports[i].legacy_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
		if (idx > raw->curlen)
		if (repmsg->u.link_addr.ports[i].input_port == 0) {
			repmsg->u.link_addr.ports[i].dpcd_revision = (raw->msg[idx]);
			if (idx > raw->curlen)
			memcpy(repmsg->u.link_addr.ports[i].peer_guid, &raw->msg[idx], 16);
			if (idx > raw->curlen)
			repmsg->u.link_addr.ports[i].num_sdp_streams = (raw->msg[idx] >> 4) & 0xf;
			repmsg->u.link_addr.ports[i].num_sdp_stream_sinks = (raw->msg[idx] & 0xf);
		if (idx > raw->curlen)

	DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen);
static bool drm_dp_sideband_parse_remote_dpcd_read(struct drm_dp_sideband_msg_rx *raw,
						   struct drm_dp_sideband_msg_reply_body *repmsg)
	repmsg->u.remote_dpcd_read_ack.port_number = raw->msg[idx] & 0xf;
	if (idx > raw->curlen)
	repmsg->u.remote_dpcd_read_ack.num_bytes = raw->msg[idx];
	if (idx > raw->curlen)
	memcpy(repmsg->u.remote_dpcd_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_dpcd_read_ack.num_bytes);

	DRM_DEBUG_KMS("dpcd read reply parse length fail %d %d\n", idx, raw->curlen);
static bool drm_dp_sideband_parse_remote_dpcd_write(struct drm_dp_sideband_msg_rx *raw,
						    struct drm_dp_sideband_msg_reply_body *repmsg)
	repmsg->u.remote_dpcd_write_ack.port_number = raw->msg[idx] & 0xf;
	if (idx > raw->curlen)

	DRM_DEBUG_KMS("parse length fail %d %d\n", idx, raw->curlen);

static bool drm_dp_sideband_parse_remote_i2c_read_ack(struct drm_dp_sideband_msg_rx *raw,
						      struct drm_dp_sideband_msg_reply_body *repmsg)
	repmsg->u.remote_i2c_read_ack.port_number = (raw->msg[idx] & 0xf);
	if (idx > raw->curlen)
	repmsg->u.remote_i2c_read_ack.num_bytes = raw->msg[idx];
	memcpy(repmsg->u.remote_i2c_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_i2c_read_ack.num_bytes);

	DRM_DEBUG_KMS("remote i2c reply parse length fail %d %d\n", idx, raw->curlen);

static bool drm_dp_sideband_parse_enum_path_resources_ack(struct drm_dp_sideband_msg_rx *raw,
							  struct drm_dp_sideband_msg_reply_body *repmsg)
	repmsg->u.path_resources.port_number = (raw->msg[idx] >> 4) & 0xf;
	if (idx > raw->curlen)
	repmsg->u.path_resources.full_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
	if (idx > raw->curlen)
	repmsg->u.path_resources.avail_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
	if (idx > raw->curlen)

	DRM_DEBUG_KMS("enum resource parse length fail %d %d\n", idx, raw->curlen);

static bool drm_dp_sideband_parse_allocate_payload_ack(struct drm_dp_sideband_msg_rx *raw,
						       struct drm_dp_sideband_msg_reply_body *repmsg)
	repmsg->u.allocate_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
	if (idx > raw->curlen)
	repmsg->u.allocate_payload.vcpi = raw->msg[idx];
	if (idx > raw->curlen)
	repmsg->u.allocate_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
	if (idx > raw->curlen)

	DRM_DEBUG_KMS("allocate payload parse length fail %d %d\n", idx, raw->curlen);

static bool drm_dp_sideband_parse_query_payload_ack(struct drm_dp_sideband_msg_rx *raw,
						    struct drm_dp_sideband_msg_reply_body *repmsg)
	repmsg->u.query_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
	if (idx > raw->curlen)
	repmsg->u.query_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
	if (idx > raw->curlen)

	DRM_DEBUG_KMS("query payload parse length fail %d %d\n", idx, raw->curlen);

static bool drm_dp_sideband_parse_power_updown_phy_ack(struct drm_dp_sideband_msg_rx *raw,
						       struct drm_dp_sideband_msg_reply_body *repmsg)
	repmsg->u.port_number.port_number = (raw->msg[idx] >> 4) & 0xf;
	if (idx > raw->curlen) {
		DRM_DEBUG_KMS("power up/down phy parse length fail %d %d\n",
			      idx, raw->curlen);

static bool drm_dp_sideband_parse_reply(struct drm_dp_sideband_msg_rx *raw,
					struct drm_dp_sideband_msg_reply_body *msg)
	memset(msg, 0, sizeof(*msg));
	msg->reply_type = (raw->msg[0] & 0x80) >> 7;
	msg->req_type = (raw->msg[0] & 0x7f);

	if (msg->reply_type == DP_SIDEBAND_REPLY_NAK) {
		memcpy(msg->u.nak.guid, &raw->msg[1], 16);
		msg->u.nak.reason = raw->msg[17];
		msg->u.nak.nak_data = raw->msg[18];

	switch (msg->req_type) {
	case DP_LINK_ADDRESS:
		return drm_dp_sideband_parse_link_address(raw, msg);
	case DP_QUERY_PAYLOAD:
		return drm_dp_sideband_parse_query_payload_ack(raw, msg);
	case DP_REMOTE_DPCD_READ:
		return drm_dp_sideband_parse_remote_dpcd_read(raw, msg);
	case DP_REMOTE_DPCD_WRITE:
		return drm_dp_sideband_parse_remote_dpcd_write(raw, msg);
	case DP_REMOTE_I2C_READ:
		return drm_dp_sideband_parse_remote_i2c_read_ack(raw, msg);
	case DP_ENUM_PATH_RESOURCES:
		return drm_dp_sideband_parse_enum_path_resources_ack(raw, msg);
	case DP_ALLOCATE_PAYLOAD:
		return drm_dp_sideband_parse_allocate_payload_ack(raw, msg);
	case DP_POWER_DOWN_PHY:
	case DP_POWER_UP_PHY:
		return drm_dp_sideband_parse_power_updown_phy_ack(raw, msg);
	default:
		DRM_ERROR("Got unknown reply 0x%02x (%s)\n", msg->req_type,
			  drm_dp_mst_req_type_str(msg->req_type));

static bool drm_dp_sideband_parse_connection_status_notify(struct drm_dp_sideband_msg_rx *raw,
							   struct drm_dp_sideband_msg_req_body *msg)
	msg->u.conn_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
	if (idx > raw->curlen)
	memcpy(msg->u.conn_stat.guid, &raw->msg[idx], 16);
	if (idx > raw->curlen)
	msg->u.conn_stat.legacy_device_plug_status = (raw->msg[idx] >> 6) & 0x1;
	msg->u.conn_stat.displayport_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
	msg->u.conn_stat.message_capability_status = (raw->msg[idx] >> 4) & 0x1;
	msg->u.conn_stat.input_port = (raw->msg[idx] >> 3) & 0x1;
	msg->u.conn_stat.peer_device_type = (raw->msg[idx] & 0x7);

	DRM_DEBUG_KMS("connection status reply parse length fail %d %d\n", idx, raw->curlen);

static bool drm_dp_sideband_parse_resource_status_notify(struct drm_dp_sideband_msg_rx *raw,
							 struct drm_dp_sideband_msg_req_body *msg)
	msg->u.resource_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
	if (idx > raw->curlen)
	memcpy(msg->u.resource_stat.guid, &raw->msg[idx], 16);
	if (idx > raw->curlen)
	msg->u.resource_stat.available_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);

	DRM_DEBUG_KMS("resource status reply parse length fail %d %d\n", idx, raw->curlen);

static bool drm_dp_sideband_parse_req(struct drm_dp_sideband_msg_rx *raw,
				      struct drm_dp_sideband_msg_req_body *msg)
	memset(msg, 0, sizeof(*msg));
	msg->req_type = (raw->msg[0] & 0x7f);

	switch (msg->req_type) {
	case DP_CONNECTION_STATUS_NOTIFY:
		return drm_dp_sideband_parse_connection_status_notify(raw, msg);
	case DP_RESOURCE_STATUS_NOTIFY:
		return drm_dp_sideband_parse_resource_status_notify(raw, msg);
	default:
		DRM_ERROR("Got unknown request 0x%02x (%s)\n", msg->req_type,
			  drm_dp_mst_req_type_str(msg->req_type));

static int build_dpcd_write(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes, u8 *bytes)
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_REMOTE_DPCD_WRITE;
	req.u.dpcd_write.port_number = port_num;
	req.u.dpcd_write.dpcd_address = offset;
	req.u.dpcd_write.num_bytes = num_bytes;
	req.u.dpcd_write.bytes = bytes;
	drm_dp_encode_sideband_req(&req, msg);

static int build_link_address(struct drm_dp_sideband_msg_tx *msg)
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_LINK_ADDRESS;
	drm_dp_encode_sideband_req(&req, msg);

static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg, int port_num)
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_ENUM_PATH_RESOURCES;
	req.u.port_num.port_number = port_num;
	drm_dp_encode_sideband_req(&req, msg);
	msg->path_msg = true;
static int build_allocate_payload(struct drm_dp_sideband_msg_tx *msg, int port_num,
				  u8 vcpi, uint16_t pbn,
				  u8 number_sdp_streams,
				  u8 *sdp_stream_sink)
	struct drm_dp_sideband_msg_req_body req;

	memset(&req, 0, sizeof(req));
	req.req_type = DP_ALLOCATE_PAYLOAD;
	req.u.allocate_payload.port_number = port_num;
	req.u.allocate_payload.vcpi = vcpi;
	req.u.allocate_payload.pbn = pbn;
	req.u.allocate_payload.number_sdp_streams = number_sdp_streams;
	memcpy(req.u.allocate_payload.sdp_stream_sink, sdp_stream_sink,
	       number_sdp_streams);
	drm_dp_encode_sideband_req(&req, msg);
	msg->path_msg = true;
static int build_power_updown_phy(struct drm_dp_sideband_msg_tx *msg,
				  int port_num, bool power_up)
	struct drm_dp_sideband_msg_req_body req;

	if (power_up)
		req.req_type = DP_POWER_UP_PHY;
	else
		req.req_type = DP_POWER_DOWN_PHY;

	req.u.port_num.port_number = port_num;
	drm_dp_encode_sideband_req(&req, msg);
	msg->path_msg = true;
static int drm_dp_mst_assign_payload_id(struct drm_dp_mst_topology_mgr *mgr,
					struct drm_dp_vcpi *vcpi)
	mutex_lock(&mgr->payload_lock);
	ret = find_first_zero_bit(&mgr->payload_mask, mgr->max_payloads + 1);
	if (ret > mgr->max_payloads) {
		DRM_DEBUG_KMS("out of payload ids %d\n", ret);

	vcpi_ret = find_first_zero_bit(&mgr->vcpi_mask, mgr->max_payloads + 1);
	if (vcpi_ret > mgr->max_payloads) {
		DRM_DEBUG_KMS("out of vcpi ids %d\n", ret);

	set_bit(ret, &mgr->payload_mask);
	set_bit(vcpi_ret, &mgr->vcpi_mask);
	vcpi->vcpi = vcpi_ret + 1;
	mgr->proposed_vcpis[ret - 1] = vcpi;
	mutex_unlock(&mgr->payload_lock);

static void drm_dp_mst_put_payload_id(struct drm_dp_mst_topology_mgr *mgr,
	mutex_lock(&mgr->payload_lock);
	DRM_DEBUG_KMS("putting payload %d\n", vcpi);
	clear_bit(vcpi - 1, &mgr->vcpi_mask);
	for (i = 0; i < mgr->max_payloads; i++) {
		if (mgr->proposed_vcpis[i])
			if (mgr->proposed_vcpis[i]->vcpi == vcpi) {
				mgr->proposed_vcpis[i] = NULL;
				clear_bit(i + 1, &mgr->payload_mask);
	mutex_unlock(&mgr->payload_lock);
static bool check_txmsg_state(struct drm_dp_mst_topology_mgr *mgr,
			      struct drm_dp_sideband_msg_tx *txmsg)
{
	unsigned int state;

	/*
	 * All updates to txmsg->state are protected by mgr->qlock, and the two
	 * cases we check here are terminal states. For those the barriers
	 * provided by the wake_up/wait_event pair are enough.
	 */
	state = READ_ONCE(txmsg->state);
	return (state == DRM_DP_SIDEBAND_TX_RX ||
		state == DRM_DP_SIDEBAND_TX_TIMEOUT);
}

static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
				    struct drm_dp_sideband_msg_tx *txmsg)
	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;

	ret = wait_event_timeout(mgr->tx_waitq,
				 check_txmsg_state(mgr, txmsg),
	mutex_lock(&mstb->mgr->qlock);
	if (txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT) {
		DRM_DEBUG_KMS("timed out msg send %p %d %d\n", txmsg, txmsg->state, txmsg->seqno);

		/* dump some state */

		if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED ||
		    txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND) {
			list_del(&txmsg->next);

		if (txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND ||
		    txmsg->state == DRM_DP_SIDEBAND_TX_SENT) {
			mstb->tx_slots[txmsg->seqno] = NULL;
	mutex_unlock(&mgr->qlock);
static struct drm_dp_mst_branch *drm_dp_add_mst_branch_device(u8 lct, u8 *rad)
	struct drm_dp_mst_branch *mstb;

	mstb = kzalloc(sizeof(*mstb), GFP_KERNEL);
	memcpy(mstb->rad, rad, lct / 2);
	INIT_LIST_HEAD(&mstb->ports);
	kref_init(&mstb->topology_kref);
	kref_init(&mstb->malloc_kref);

static void drm_dp_free_mst_branch_device(struct kref *kref)
{
	struct drm_dp_mst_branch *mstb =
		container_of(kref, struct drm_dp_mst_branch, malloc_kref);

	if (mstb->port_parent)
		drm_dp_mst_put_port_malloc(mstb->port_parent);

	kfree(mstb);
}
/**
 * DOC: Branch device and port refcounting
 *
 * Topology refcount overview
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * The refcounting schemes for &struct drm_dp_mst_branch and &struct
 * drm_dp_mst_port are somewhat unusual. Both ports and branch devices have
 * two different kinds of refcounts: topology refcounts, and malloc refcounts.
 *
 * Topology refcounts are not exposed to drivers, and are handled internally
 * by the DP MST helpers. The helpers use them in order to prevent the
 * in-memory topology state from being changed in the middle of critical
 * operations like changing the internal state of payload allocations. This
 * means each branch and port will be considered to be connected to the rest
 * of the topology until its topology refcount reaches zero. Additionally,
 * for ports this means that their associated &struct drm_connector will stay
 * registered with userspace until the port's topology refcount reaches 0.
 *
 * Malloc refcount overview
 * ~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * Malloc references are used to keep a &struct drm_dp_mst_port or &struct
 * drm_dp_mst_branch allocated even after all of its topology references have
 * been dropped, so that the driver or MST helpers can safely access each
 * branch's last known state before it was disconnected from the topology.
 * When the malloc refcount of a port or branch reaches 0, the memory
 * allocation containing the &struct drm_dp_mst_branch or &struct
 * drm_dp_mst_port respectively will be freed.
 *
 * For &struct drm_dp_mst_branch, malloc refcounts are not currently exposed
 * to drivers. As of this writing, there are no drivers that have a usecase
 * for accessing &struct drm_dp_mst_branch outside of the MST helpers.
 * Exposing this API to drivers in a race-free manner would take more
 * tweaking of the refcounting scheme, however patches are welcome provided
 * there is a legitimate driver usecase for this.
 *
 * Refcount relationships in a topology
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * Let's take a look at why the relationship between topology and malloc
 * refcounts is designed the way it is.
 *
 * .. kernel-figure:: dp-mst/topology-figure-1.dot
 *
 *    An example of topology and malloc refs in a DP MST topology with two
 *    active payloads. Topology refcount increments are indicated by solid
 *    lines, and malloc refcount increments are indicated by dashed lines.
 *    Each starts from the branch which incremented the refcount, and ends
 *    at the branch to which the refcount belongs, i.e. the arrow points the
 *    same way as the C pointers used to reference a structure.
 *
 * As you can see in the above figure, every branch increments the topology
 * refcount of its children, and increments the malloc refcount of its
 * parent. Additionally, every payload increments the malloc refcount of its
 * assigned port by 1.
 *
 * So, what would happen if MSTB #3 from the above figure was unplugged from
 * the system, but the driver hadn't yet removed payload #2 from port #3? The
 * topology would start to look like the figure below.
 *
 * .. kernel-figure:: dp-mst/topology-figure-2.dot
 *
 *    Ports and branch devices which have been released from memory are
 *    colored grey, and references which have been removed are colored red.
 *
 * Whenever a port or branch device's topology refcount reaches zero, it will
 * decrement the topology refcounts of all its children, the malloc refcount
 * of its parent, and finally its own malloc refcount. For MSTB #4 and port
 * #4, this means they both have been disconnected from the topology and freed
 * from memory. But, because payload #2 is still holding a reference to port
 * #3, port #3 is removed from the topology but its &struct drm_dp_mst_port
 * is still accessible from memory. This also means port #3 has not yet
 * decremented the malloc refcount of MSTB #3, so its &struct
 * drm_dp_mst_branch will also stay allocated in memory until port #3's
 * malloc refcount reaches 0.
 *
 * This relationship is necessary because in order to release payload #2, we
 * need to be able to figure out the last relative of port #3 that's still
 * connected to the topology. In this case, we would travel up the topology
 * as shown below.
 *
 * .. kernel-figure:: dp-mst/topology-figure-3.dot
 *
 * And finally, remove payload #2 by communicating with port #2 through
 * sideband transactions.
 */
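/*
 * A minimal driver-side sketch of the malloc refcounting contract described
 * above. The "example_" names are hypothetical; the helper calls are the
 * drm_dp_mst_get_port_malloc()/drm_dp_mst_put_port_malloc() helpers exported
 * below. Grab a malloc reference to the port when creating a connector for
 * it, and drop it once the connector is destroyed:
 *
 *	static struct drm_connector *
 *	example_add_connector(struct drm_dp_mst_topology_mgr *mgr,
 *			      struct drm_dp_mst_port *port, const char *path)
 *	{
 *		struct drm_connector *connector = example_create_connector();
 *
 *		drm_dp_mst_get_port_malloc(port);
 *		return connector;
 *	}
 *
 *	static void example_connector_destroy(struct drm_connector *connector)
 *	{
 *		drm_dp_mst_put_port_malloc(example_connector_to_port(connector));
 *	}
 */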
/**
 * drm_dp_mst_get_mstb_malloc() - Increment the malloc refcount of a branch
 * device
 * @mstb: The &struct drm_dp_mst_branch to increment the malloc refcount of
 *
 * Increments &drm_dp_mst_branch.malloc_kref. When
 * &drm_dp_mst_branch.malloc_kref reaches 0, the memory allocation for @mstb
 * will be released and @mstb may no longer be used.
 *
 * See also: drm_dp_mst_put_mstb_malloc()
 */
static void
drm_dp_mst_get_mstb_malloc(struct drm_dp_mst_branch *mstb)
{
	kref_get(&mstb->malloc_kref);
	DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref));
}

/**
 * drm_dp_mst_put_mstb_malloc() - Decrement the malloc refcount of a branch
 * device
 * @mstb: The &struct drm_dp_mst_branch to decrement the malloc refcount of
 *
 * Decrements &drm_dp_mst_branch.malloc_kref. When
 * &drm_dp_mst_branch.malloc_kref reaches 0, the memory allocation for @mstb
 * will be released and @mstb may no longer be used.
 *
 * See also: drm_dp_mst_get_mstb_malloc()
 */
static void
drm_dp_mst_put_mstb_malloc(struct drm_dp_mst_branch *mstb)
{
	DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref) - 1);
	kref_put(&mstb->malloc_kref, drm_dp_free_mst_branch_device);
}
static void drm_dp_free_mst_port(struct kref *kref)
{
	struct drm_dp_mst_port *port =
		container_of(kref, struct drm_dp_mst_port, malloc_kref);

	drm_dp_mst_put_mstb_malloc(port->parent);
	kfree(port);
}

/**
 * drm_dp_mst_get_port_malloc() - Increment the malloc refcount of an MST port
 * @port: The &struct drm_dp_mst_port to increment the malloc refcount of
 *
 * Increments &drm_dp_mst_port.malloc_kref. When &drm_dp_mst_port.malloc_kref
 * reaches 0, the memory allocation for @port will be released and @port may
 * no longer be used.
 *
 * Because @port could potentially be freed at any time by the DP MST helpers
 * if &drm_dp_mst_port.malloc_kref reaches 0, including during a call to this
 * function, drivers that wish to make use of &struct drm_dp_mst_port should
 * ensure that they grab at least one main malloc reference to their MST ports
 * in &drm_dp_mst_topology_cbs.add_connector. This callback is called before
 * there is any chance for &drm_dp_mst_port.malloc_kref to reach 0.
 *
 * See also: drm_dp_mst_put_port_malloc()
 */
void
drm_dp_mst_get_port_malloc(struct drm_dp_mst_port *port)
{
	kref_get(&port->malloc_kref);
	DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->malloc_kref));
}
EXPORT_SYMBOL(drm_dp_mst_get_port_malloc);

/**
 * drm_dp_mst_put_port_malloc() - Decrement the malloc refcount of an MST port
 * @port: The &struct drm_dp_mst_port to decrement the malloc refcount of
 *
 * Decrements &drm_dp_mst_port.malloc_kref. When &drm_dp_mst_port.malloc_kref
 * reaches 0, the memory allocation for @port will be released and @port may
 * no longer be used.
 *
 * See also: drm_dp_mst_get_port_malloc()
 */
void
drm_dp_mst_put_port_malloc(struct drm_dp_mst_port *port)
{
	DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->malloc_kref) - 1);
	kref_put(&port->malloc_kref, drm_dp_free_mst_port);
}
EXPORT_SYMBOL(drm_dp_mst_put_port_malloc);
static void drm_dp_destroy_mst_branch_device(struct kref *kref)
{
	struct drm_dp_mst_branch *mstb =
		container_of(kref, struct drm_dp_mst_branch, topology_kref);
	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
	struct drm_dp_mst_port *port, *tmp;
	bool wake_tx = false;

	mutex_lock(&mgr->lock);
	list_for_each_entry_safe(port, tmp, &mstb->ports, next) {
		list_del(&port->next);
		drm_dp_mst_topology_put_port(port);
	}
	mutex_unlock(&mgr->lock);

	/* drop any tx slots msg */
	mutex_lock(&mstb->mgr->qlock);
	if (mstb->tx_slots[0]) {
		mstb->tx_slots[0]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
		mstb->tx_slots[0] = NULL;
		wake_tx = true;
	}
	if (mstb->tx_slots[1]) {
		mstb->tx_slots[1]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
		mstb->tx_slots[1] = NULL;
		wake_tx = true;
	}
	mutex_unlock(&mstb->mgr->qlock);

	if (wake_tx)
		wake_up_all(&mstb->mgr->tx_waitq);

	drm_dp_mst_put_mstb_malloc(mstb);
}
/**
 * drm_dp_mst_topology_try_get_mstb() - Increment the topology refcount of a
 * branch device unless it's zero
 * @mstb: &struct drm_dp_mst_branch to increment the topology refcount of
 *
 * Attempts to grab a topology reference to @mstb, if it hasn't yet been
 * removed from the topology (e.g. &drm_dp_mst_branch.topology_kref has
 * reached 0). Holding a topology reference implies that a malloc reference
 * will be held to @mstb as long as the user holds the topology reference.
 *
 * Care should be taken to ensure that the user has at least one malloc
 * reference to @mstb. If you already have a topology reference to @mstb, you
 * should use drm_dp_mst_topology_get_mstb() instead.
 *
 * See also:
 * drm_dp_mst_topology_get_mstb()
 * drm_dp_mst_topology_put_mstb()
 *
 * Returns:
 * * 1: A topology reference was grabbed successfully
 * * 0: @mstb is no longer in the topology, no reference was grabbed
 */
static int __must_check
drm_dp_mst_topology_try_get_mstb(struct drm_dp_mst_branch *mstb)
{
	int ret = kref_get_unless_zero(&mstb->topology_kref);

	if (ret)
		DRM_DEBUG("mstb %p (%d)\n", mstb,
			  kref_read(&mstb->topology_kref));

	return ret;
}
/**
 * drm_dp_mst_topology_get_mstb() - Increment the topology refcount of a
 * branch device
 * @mstb: The &struct drm_dp_mst_branch to increment the topology refcount of
 *
 * Increments &drm_dp_mst_branch.topology_kref without checking whether or
 * not it's already reached 0. This is only valid to use in scenarios where
 * you are already guaranteed to have at least one active topology reference
 * to @mstb. Otherwise, drm_dp_mst_topology_try_get_mstb() must be used.
 *
 * See also:
 * drm_dp_mst_topology_try_get_mstb()
 * drm_dp_mst_topology_put_mstb()
 */
static void drm_dp_mst_topology_get_mstb(struct drm_dp_mst_branch *mstb)
{
	WARN_ON(kref_read(&mstb->topology_kref) == 0);
	kref_get(&mstb->topology_kref);
	DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref));
}

/**
 * drm_dp_mst_topology_put_mstb() - release a topology reference to a branch
 * device
 * @mstb: The &struct drm_dp_mst_branch to release the topology reference from
 *
 * Releases a topology reference from @mstb by decrementing
 * &drm_dp_mst_branch.topology_kref.
 *
 * See also:
 * drm_dp_mst_topology_try_get_mstb()
 * drm_dp_mst_topology_get_mstb()
 */
static void
drm_dp_mst_topology_put_mstb(struct drm_dp_mst_branch *mstb)
{
	DRM_DEBUG("mstb %p (%d)\n",
		  mstb, kref_read(&mstb->topology_kref) - 1);
	kref_put(&mstb->topology_kref, drm_dp_destroy_mst_branch_device);
}
static void drm_dp_port_teardown_pdt(struct drm_dp_mst_port *port, int old_pdt)
{
	struct drm_dp_mst_branch *mstb;

	switch (old_pdt) {
	case DP_PEER_DEVICE_DP_LEGACY_CONV:
	case DP_PEER_DEVICE_SST_SINK:
		/* remove i2c over sideband */
		drm_dp_mst_unregister_i2c_bus(&port->aux);
		break;
	case DP_PEER_DEVICE_MST_BRANCHING:
		mstb = port->mstb;
		port->mstb = NULL;
		drm_dp_mst_topology_put_mstb(mstb);
		break;
	}
}

static void drm_dp_destroy_port(struct kref *kref)
{
	struct drm_dp_mst_port *port =
		container_of(kref, struct drm_dp_mst_port, topology_kref);
	struct drm_dp_mst_topology_mgr *mgr = port->mgr;

	kfree(port->cached_edid);

	/*
	 * The only time we don't have a connector
	 * on an output port is if the connector init
	 * fails.
	 */
	if (port->connector) {
		/* we can't destroy the connector here, as
		 * we might be holding the mode_config.mutex
		 * from an EDID retrieval */
		mutex_lock(&mgr->destroy_connector_lock);
		list_add(&port->next, &mgr->destroy_connector_list);
		mutex_unlock(&mgr->destroy_connector_lock);
		schedule_work(&mgr->destroy_connector_work);
		return;
	}
	/* no need to clean up the vcpi here,
	 * as with no connector we never set one up */
	drm_dp_port_teardown_pdt(port, port->pdt);
	port->pdt = DP_PEER_DEVICE_NONE;

	drm_dp_mst_put_port_malloc(port);
}
/**
 * drm_dp_mst_topology_try_get_port() - Increment the topology refcount of a
 * port unless it's zero
 * @port: &struct drm_dp_mst_port to increment the topology refcount of
 *
 * Attempts to grab a topology reference to @port, if it hasn't yet been
 * removed from the topology (e.g. &drm_dp_mst_port.topology_kref has reached
 * 0). Holding a topology reference implies that a malloc reference will be
 * held to @port as long as the user holds the topology reference.
 *
 * Care should be taken to ensure that the user has at least one malloc
 * reference to @port. If you already have a topology reference to @port, you
 * should use drm_dp_mst_topology_get_port() instead.
 *
 * See also:
 * drm_dp_mst_topology_get_port()
 * drm_dp_mst_topology_put_port()
 *
 * Returns:
 * * 1: A topology reference was grabbed successfully
 * * 0: @port is no longer in the topology, no reference was grabbed
 */
static int __must_check
drm_dp_mst_topology_try_get_port(struct drm_dp_mst_port *port)
{
	int ret = kref_get_unless_zero(&port->topology_kref);

	if (ret)
		DRM_DEBUG("port %p (%d)\n", port,
			  kref_read(&port->topology_kref));

	return ret;
}

/**
 * drm_dp_mst_topology_get_port() - Increment the topology refcount of a port
 * @port: The &struct drm_dp_mst_port to increment the topology refcount of
 *
 * Increments &drm_dp_mst_port.topology_kref without checking whether or
 * not it's already reached 0. This is only valid to use in scenarios where
 * you are already guaranteed to have at least one active topology reference
 * to @port. Otherwise, drm_dp_mst_topology_try_get_port() must be used.
 *
 * See also:
 * drm_dp_mst_topology_try_get_port()
 * drm_dp_mst_topology_put_port()
 */
static void drm_dp_mst_topology_get_port(struct drm_dp_mst_port *port)
{
	WARN_ON(kref_read(&port->topology_kref) == 0);
	kref_get(&port->topology_kref);
	DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->topology_kref));
}

/**
 * drm_dp_mst_topology_put_port() - release a topology reference to a port
 * @port: The &struct drm_dp_mst_port to release the topology reference from
 *
 * Releases a topology reference from @port by decrementing
 * &drm_dp_mst_port.topology_kref.
 *
 * See also:
 * drm_dp_mst_topology_try_get_port()
 * drm_dp_mst_topology_get_port()
 */
static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port)
{
	DRM_DEBUG("port %p (%d)\n",
		  port, kref_read(&port->topology_kref) - 1);
	kref_put(&port->topology_kref, drm_dp_destroy_port);
}
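/*
 * The canonical usage pattern for the topology reference helpers above, and
 * what the validated-lookup helpers below do internally: try to grab a
 * topology reference, and only touch the port while holding it.
 *
 *	if (drm_dp_mst_topology_try_get_port(port)) {
 *		... the port is guaranteed to stay in the topology ...
 *		drm_dp_mst_topology_put_port(port);
 *	}
 */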
static struct drm_dp_mst_branch *
drm_dp_mst_topology_get_mstb_validated_locked(struct drm_dp_mst_branch *mstb,
					      struct drm_dp_mst_branch *to_find)
{
	struct drm_dp_mst_port *port;
	struct drm_dp_mst_branch *rmstb;

	if (to_find == mstb)
		return mstb;

	list_for_each_entry(port, &mstb->ports, next) {
		if (port->mstb) {
			rmstb = drm_dp_mst_topology_get_mstb_validated_locked(
			    port->mstb, to_find);
			if (rmstb)
				return rmstb;
		}
	}
	return NULL;
}

static struct drm_dp_mst_branch *
drm_dp_mst_topology_get_mstb_validated(struct drm_dp_mst_topology_mgr *mgr,
				       struct drm_dp_mst_branch *mstb)
{
	struct drm_dp_mst_branch *rmstb = NULL;

	mutex_lock(&mgr->lock);
	if (mgr->mst_primary) {
		rmstb = drm_dp_mst_topology_get_mstb_validated_locked(
		    mgr->mst_primary, mstb);
		if (rmstb && !drm_dp_mst_topology_try_get_mstb(rmstb))
			rmstb = NULL;
	}
	mutex_unlock(&mgr->lock);
	return rmstb;
}
static struct drm_dp_mst_port *
drm_dp_mst_topology_get_port_validated_locked(struct drm_dp_mst_branch *mstb,
					      struct drm_dp_mst_port *to_find)
{
	struct drm_dp_mst_port *port, *mport;

	list_for_each_entry(port, &mstb->ports, next) {
		if (port == to_find)
			return port;

		if (port->mstb) {
			mport = drm_dp_mst_topology_get_port_validated_locked(
			    port->mstb, to_find);
			if (mport)
				return mport;
		}
	}
	return NULL;
}

static struct drm_dp_mst_port *
drm_dp_mst_topology_get_port_validated(struct drm_dp_mst_topology_mgr *mgr,
				       struct drm_dp_mst_port *port)
{
	struct drm_dp_mst_port *rport = NULL;

	mutex_lock(&mgr->lock);
	if (mgr->mst_primary) {
		rport = drm_dp_mst_topology_get_port_validated_locked(
		    mgr->mst_primary, port);
		if (rport && !drm_dp_mst_topology_try_get_port(rport))
			rport = NULL;
	}
	mutex_unlock(&mgr->lock);
	return rport;
}

static struct drm_dp_mst_port *drm_dp_get_port(struct drm_dp_mst_branch *mstb, u8 port_num)
{
	struct drm_dp_mst_port *port;
	int ret;

	list_for_each_entry(port, &mstb->ports, next) {
		if (port->port_num == port_num) {
			ret = drm_dp_mst_topology_try_get_port(port);
			return ret ? port : NULL;
		}
	}
	return NULL;
}
/*
 * calculate a new RAD for this MST branch device
 * if parent has an LCT of 2 then it has 1 nibble of RAD,
 * if parent has an LCT of 3 then it has 2 nibbles of RAD,
 */
static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port,
			       u8 *rad)
	int parent_lct = port->parent->lct;
	int idx = (parent_lct - 1) / 2;

	if (parent_lct > 1) {
		memcpy(rad, port->parent->rad, idx + 1);
		shift = (parent_lct % 2) ? 4 : 0;
	}

	rad[idx] |= port->port_num << shift;
	return parent_lct + 1;
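/*
 * Worked example for the RAD packing above: a port one hop down from the
 * primary branch (parent lct 2) has idx 0 and shift 0, so its port number is
 * packed into the low nibble of rad[0] and the child's lct becomes 3. The
 * nibbles alternate high/low from hop to hop, matching the shift used when
 * walking a RAD back down in drm_dp_get_mst_branch_device() below.
 */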
/*
 * return whether a link address request should be sent for the new mstb
 */
static bool drm_dp_port_setup_pdt(struct drm_dp_mst_port *port)
{
	bool send_link = false;

	switch (port->pdt) {
	case DP_PEER_DEVICE_DP_LEGACY_CONV:
	case DP_PEER_DEVICE_SST_SINK:
		/* add i2c over sideband */
		ret = drm_dp_mst_register_i2c_bus(&port->aux);
		break;
	case DP_PEER_DEVICE_MST_BRANCHING:
		lct = drm_dp_calculate_rad(port, rad);
		port->mstb = drm_dp_add_mst_branch_device(lct, rad);
		if (port->mstb) {
			port->mstb->mgr = port->mgr;
			port->mstb->port_parent = port;
			/*
			 * Make sure this port's memory allocation stays
			 * around until its child MSTB releases it
			 */
			drm_dp_mst_get_port_malloc(port);

			send_link = true;
		}
		break;
	}
	return send_link;
}
/**
 * drm_dp_mst_dpcd_read() - read a series of bytes from the DPCD via sideband
 * @aux: Fake sideband AUX CH
 * @offset: address of the (first) register to read
 * @buffer: buffer to store the register values
 * @size: number of bytes in @buffer
 *
 * Performs the same functionality for remote devices via
 * sideband messaging as drm_dp_dpcd_read() does for local
 * devices via actual AUX CH.
 *
 * Return: Number of bytes read, or negative error code on failure.
 */
ssize_t drm_dp_mst_dpcd_read(struct drm_dp_aux *aux,
			     unsigned int offset, void *buffer, size_t size)
{
	struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port,
						    aux);

	return drm_dp_send_dpcd_read(port->mgr, port,
				     offset, size, buffer);
}
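/*
 * Illustrative use, a hedged sketch rather than a fixed contract: a driver
 * holding a reference to the port can read remote DPCD registers through the
 * fake AUX channel, e.g.
 *
 *	u8 rev;
 *	ssize_t ret = drm_dp_mst_dpcd_read(&port->aux, DP_DPCD_REV, &rev, 1);
 *
 * which is carried out as a REMOTE_DPCD_READ sideband transaction.
 */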
/**
 * drm_dp_mst_dpcd_write() - write a series of bytes to the DPCD via sideband
 * @aux: Fake sideband AUX CH
 * @offset: address of the (first) register to write
 * @buffer: buffer containing the values to write
 * @size: number of bytes in @buffer
 *
 * Performs the same functionality for remote devices via
 * sideband messaging as drm_dp_dpcd_write() does for local
 * devices via actual AUX CH.
 *
 * Return: 0 on success, negative error code on failure.
 */
ssize_t drm_dp_mst_dpcd_write(struct drm_dp_aux *aux,
			      unsigned int offset, void *buffer, size_t size)
{
	struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port,
						    aux);

	return drm_dp_send_dpcd_write(port->mgr, port,
				      offset, size, buffer);
}

static void drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, u8 *guid)
	memcpy(mstb->guid, guid, 16);

	if (!drm_dp_validate_guid(mstb->mgr, mstb->guid)) {
		if (mstb->port_parent) {
			ret = drm_dp_send_dpcd_write(
		} else {
			ret = drm_dp_dpcd_write(
static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb,
				int pnum,
				char *proppath,
				size_t proppath_size)
	snprintf(proppath, proppath_size, "mst:%d", mstb->mgr->conn_base_id);
	for (i = 0; i < (mstb->lct - 1); i++) {
		int shift = (i % 2) ? 0 : 4;
		int port_num = (mstb->rad[i / 2] >> shift) & 0xf;

		snprintf(temp, sizeof(temp), "-%d", port_num);
		strlcat(proppath, temp, proppath_size);
	}
	snprintf(temp, sizeof(temp), "-%d", pnum);
	strlcat(proppath, temp, proppath_size);
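/*
 * Example output: with a conn_base_id of 1 and a branch at lct 2 reached
 * through port 2 of the primary branch, a connector on port 8 gets the
 * property path "mst:1-2-8".
 */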
/**
 * drm_dp_mst_connector_late_register() - Late MST connector registration
 * @connector: The MST connector
 * @port: The MST port for this connector
 *
 * Helper to register the remote aux device for this MST port. Drivers should
 * call this from their mst connector's late_register hook to enable MST aux
 * devices.
 *
 * Return: 0 on success, negative error code on failure.
 */
int drm_dp_mst_connector_late_register(struct drm_connector *connector,
				       struct drm_dp_mst_port *port)
{
	DRM_DEBUG_KMS("registering %s remote bus for %s\n",
		      port->aux.name, connector->kdev->kobj.name);

	port->aux.dev = connector->kdev;
	return drm_dp_aux_register_devnode(&port->aux);
}
EXPORT_SYMBOL(drm_dp_mst_connector_late_register);

/**
 * drm_dp_mst_connector_early_unregister() - Early MST connector unregistration
 * @connector: The MST connector
 * @port: The MST port for this connector
 *
 * Helper to unregister the remote aux device for this MST port, registered by
 * drm_dp_mst_connector_late_register(). Drivers should call this from their mst
 * connector's early_unregister hook.
 */
void drm_dp_mst_connector_early_unregister(struct drm_connector *connector,
					   struct drm_dp_mst_port *port)
{
	DRM_DEBUG_KMS("unregistering %s remote bus for %s\n",
		      port->aux.name, connector->kdev->kobj.name);
	drm_dp_aux_unregister_devnode(&port->aux);
}
EXPORT_SYMBOL(drm_dp_mst_connector_early_unregister);
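/*
 * A hedged sketch of how a driver typically wires these two helpers up. The
 * "example_" names are hypothetical; the hook points are the standard
 * &drm_connector_funcs ones:
 *
 *	static int example_mst_late_register(struct drm_connector *connector)
 *	{
 *		struct example_connector *c = to_example_connector(connector);
 *
 *		return drm_dp_mst_connector_late_register(connector, c->port);
 *	}
 *
 *	static void example_mst_early_unregister(struct drm_connector *connector)
 *	{
 *		struct example_connector *c = to_example_connector(connector);
 *
 *		drm_dp_mst_connector_early_unregister(connector, c->port);
 *	}
 */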
static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
			    struct drm_device *dev,
			    struct drm_dp_link_addr_reply_port *port_msg)
	struct drm_dp_mst_port *port;
	bool created = false;

	port = drm_dp_get_port(mstb, port_msg->port_number);
	if (!port) {
		port = kzalloc(sizeof(*port), GFP_KERNEL);
		kref_init(&port->topology_kref);
		kref_init(&port->malloc_kref);
		port->parent = mstb;
		port->port_num = port_msg->port_number;
		port->mgr = mstb->mgr;
		port->aux.name = "DPMST";
		port->aux.dev = dev->dev;
		port->aux.is_remote = true;

		/*
		 * Make sure the memory allocation for our parent branch stays
		 * around until our own memory allocation is released
		 */
		drm_dp_mst_get_mstb_malloc(mstb);

	old_pdt = port->pdt;
	old_ddps = port->ddps;

	port->pdt = port_msg->peer_device_type;
	port->input = port_msg->input_port;
	port->mcs = port_msg->mcs;
	port->ddps = port_msg->ddps;
	port->ldps = port_msg->legacy_device_plug_status;
	port->dpcd_rev = port_msg->dpcd_revision;
	port->num_sdp_streams = port_msg->num_sdp_streams;
	port->num_sdp_stream_sinks = port_msg->num_sdp_stream_sinks;

	/*
	 * manage mstb port lists with mgr lock - take a reference
	 * for this list
	 */
	mutex_lock(&mstb->mgr->lock);
	drm_dp_mst_topology_get_port(port);
	list_add(&port->next, &mstb->ports);
	mutex_unlock(&mstb->mgr->lock);
	if (old_ddps != port->ddps) {
		if (port->ddps) {
			drm_dp_send_enum_path_resources(mstb->mgr,
							mstb, port);
		} else {
			port->available_pbn = 0;
		}
	}

	if (old_pdt != port->pdt && !port->input) {
		drm_dp_port_teardown_pdt(port, old_pdt);
		ret = drm_dp_port_setup_pdt(port);
		if (ret)
			drm_dp_send_link_address(mstb->mgr, port->mstb);
	}

	if (created && !port->input) {
		char proppath[255];

		build_mst_prop_path(mstb, port->port_num, proppath,
				    sizeof(proppath));
		port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr,
								   port,
								   proppath);
		if (!port->connector) {
			/* remove it from the port list */
			mutex_lock(&mstb->mgr->lock);
			list_del(&port->next);
			mutex_unlock(&mstb->mgr->lock);
			/* drop port list reference */
			drm_dp_mst_topology_put_port(port);
		}

		if ((port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV ||
		     port->pdt == DP_PEER_DEVICE_SST_SINK) &&
		    port->port_num >= DP_MST_LOGICAL_PORT_0) {
			port->cached_edid = drm_get_edid(port->connector,
							 &port->aux.ddc);
			drm_connector_set_tile_property(port->connector);
		}

		(*mstb->mgr->cbs->register_connector)(port->connector);
	}

	/* put reference to this port */
	drm_dp_mst_topology_put_port(port);
static void drm_dp_update_port(struct drm_dp_mst_branch *mstb,
			       struct drm_dp_connection_status_notify *conn_stat)
	struct drm_dp_mst_port *port;
	bool dowork = false;

	port = drm_dp_get_port(mstb, conn_stat->port_number);
	if (!port)
		return;

	old_ddps = port->ddps;
	old_pdt = port->pdt;
	port->pdt = conn_stat->peer_device_type;
	port->mcs = conn_stat->message_capability_status;
	port->ldps = conn_stat->legacy_device_plug_status;
	port->ddps = conn_stat->displayport_device_plug_status;

	if (old_ddps != port->ddps) {
		dowork = true;
		if (!port->ddps)
			port->available_pbn = 0;
	}

	if (old_pdt != port->pdt && !port->input) {
		drm_dp_port_teardown_pdt(port, old_pdt);
		if (drm_dp_port_setup_pdt(port))
			dowork = true;
	}

	drm_dp_mst_topology_put_port(port);
	if (dowork)
		queue_work(system_long_wq, &mstb->mgr->work);

static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_topology_mgr *mgr,
							      u8 lct, u8 *rad)
	struct drm_dp_mst_branch *mstb;
	struct drm_dp_mst_port *port;

	/* find the port by iterating down */
	mutex_lock(&mgr->lock);
	mstb = mgr->mst_primary;

	for (i = 0; i < lct - 1; i++) {
		int shift = (i % 2) ? 0 : 4;
		int port_num = (rad[i / 2] >> shift) & 0xf;

		list_for_each_entry(port, &mstb->ports, next) {
			if (port->port_num == port_num) {
				mstb = port->mstb;
				if (!mstb) {
					DRM_ERROR("failed to lookup MSTB with lct %d, rad %02x\n", lct, rad[0]);

	ret = drm_dp_mst_topology_try_get_mstb(mstb);
	mutex_unlock(&mgr->lock);

static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper(
	struct drm_dp_mst_branch *mstb,
	const uint8_t *guid)
	struct drm_dp_mst_branch *found_mstb;
	struct drm_dp_mst_port *port;

	if (memcmp(mstb->guid, guid, 16) == 0)
		return mstb;

	list_for_each_entry(port, &mstb->ports, next) {
		found_mstb = get_mst_branch_device_by_guid_helper(port->mstb, guid);

static struct drm_dp_mst_branch *
drm_dp_get_mst_branch_device_by_guid(struct drm_dp_mst_topology_mgr *mgr,
				     const uint8_t *guid)
	struct drm_dp_mst_branch *mstb;

	/* find the port by iterating down */
	mutex_lock(&mgr->lock);
	mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid);
	if (mstb) {
		ret = drm_dp_mst_topology_try_get_mstb(mstb);
	mutex_unlock(&mgr->lock);

static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
					       struct drm_dp_mst_branch *mstb)
	struct drm_dp_mst_port *port;
	struct drm_dp_mst_branch *mstb_child;

	if (!mstb->link_address_sent)
		drm_dp_send_link_address(mgr, mstb);

	list_for_each_entry(port, &mstb->ports, next) {
		if (!port->available_pbn)
			drm_dp_send_enum_path_resources(mgr, mstb, port);

		mstb_child = drm_dp_mst_topology_get_mstb_validated(
		    mgr, port->mstb);
		if (mstb_child) {
			drm_dp_check_and_send_link_address(mgr, mstb_child);
			drm_dp_mst_topology_put_mstb(mstb_child);
		}
	}

static void drm_dp_mst_link_probe_work(struct work_struct *work)
	struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, work);
	struct drm_dp_mst_branch *mstb;

	mutex_lock(&mgr->lock);
	mstb = mgr->mst_primary;
	if (mstb) {
		ret = drm_dp_mst_topology_try_get_mstb(mstb);
		if (!ret)
			mstb = NULL;
	}
	mutex_unlock(&mgr->lock);

	if (mstb) {
		drm_dp_check_and_send_link_address(mgr, mstb);
		drm_dp_mst_topology_put_mstb(mstb);
	}
static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
				 u8 *guid)
{
	u64 salt;

	if (memchr_inv(guid, 0, 16))
		return true;

	salt = get_jiffies_64();

	memcpy(&guid[0], &salt, sizeof(u64));
	memcpy(&guid[8], &salt, sizeof(u64));

	return false;
}
static int build_dpcd_read(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes)
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_REMOTE_DPCD_READ;
	req.u.dpcd_read.port_number = port_num;
	req.u.dpcd_read.dpcd_address = offset;
	req.u.dpcd_read.num_bytes = num_bytes;
	drm_dp_encode_sideband_req(&req, msg);

static int drm_dp_send_sideband_msg(struct drm_dp_mst_topology_mgr *mgr,
				    bool up, u8 *msg, int len)
	int regbase = up ? DP_SIDEBAND_MSG_UP_REP_BASE : DP_SIDEBAND_MSG_DOWN_REQ_BASE;
	int tosend, total, offset;

		tosend = min3(mgr->max_dpcd_transaction_bytes, 16, total);

		ret = drm_dp_dpcd_write(mgr->aux, regbase + offset,
					&msg[offset], tosend);
		if (ret != tosend) {
			if (ret == -EIO && retries < 5) {

			DRM_DEBUG_KMS("failed to dpcd write %d %d\n", tosend, ret);

	} while (total > 0);

static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
				  struct drm_dp_sideband_msg_tx *txmsg)
	struct drm_dp_mst_branch *mstb = txmsg->dst;

	/* both msg slots are full */
	if (txmsg->seqno == -1) {
		if (mstb->tx_slots[0] && mstb->tx_slots[1]) {
			DRM_DEBUG_KMS("%s: failed to find slot\n", __func__);
		if (mstb->tx_slots[0] == NULL && mstb->tx_slots[1] == NULL) {
			txmsg->seqno = mstb->last_seqno;
			mstb->last_seqno ^= 1;
		} else if (mstb->tx_slots[0] == NULL)
			txmsg->seqno = 0;
		else
			txmsg->seqno = 1;
		mstb->tx_slots[txmsg->seqno] = txmsg;
	}

	req_type = txmsg->msg[0] & 0x7f;
	if (req_type == DP_CONNECTION_STATUS_NOTIFY ||
	    req_type == DP_RESOURCE_STATUS_NOTIFY)
		hdr->broadcast = 1;
	hdr->path_msg = txmsg->path_msg;
	hdr->lct = mstb->lct;
	hdr->lcr = mstb->lct - 1;
	memcpy(hdr->rad, mstb->rad, mstb->lct / 2);
	hdr->seqno = txmsg->seqno;
/*
 * process a single block of the next message in the sideband queue
 */
static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
				   struct drm_dp_sideband_msg_tx *txmsg,
				   bool up)
	struct drm_dp_sideband_msg_hdr hdr;
	int len, space, idx, tosend;
	u8 chunk[48];

	memset(&hdr, 0, sizeof(struct drm_dp_sideband_msg_hdr));

	if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED)
		txmsg->state = DRM_DP_SIDEBAND_TX_START_SEND;

	/*
	 * make hdr from dst mst - for replies use seqno
	 * otherwise assign one
	 */
	ret = set_hdr_from_dst_qlock(&hdr, txmsg);

	/* amount left to send in this message */
	len = txmsg->cur_len - txmsg->cur_offset;

	/* 48 - sideband msg size - 1 byte for data CRC, x header bytes */
	space = 48 - 1 - drm_dp_calc_sb_hdr_size(&hdr);

	tosend = min(len, space);
	if (len == txmsg->cur_len)
		hdr.somt = 1;

	hdr.msg_len = tosend + 1;
	drm_dp_encode_sideband_msg_hdr(&hdr, chunk, &idx);
	memcpy(&chunk[idx], &txmsg->msg[txmsg->cur_offset], tosend);
	/* add crc at end */
	drm_dp_crc_sideband_chunk_req(&chunk[idx], tosend);

	ret = drm_dp_send_sideband_msg(mgr, up, chunk, idx);
	if (ret) {
		DRM_DEBUG_KMS("sideband msg failed to send\n");
		return ret;
	}

	txmsg->cur_offset += tosend;
	if (txmsg->cur_offset == txmsg->cur_len) {
		txmsg->state = DRM_DP_SIDEBAND_TX_SENT;
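/*
 * Illustrative arithmetic for the chunking above: with a directly attached
 * branch (lct 1) the header is 3 bytes, so a 48-byte sideband chunk carries
 * at most 48 - 3 - 1 = 44 bytes of message body once the trailing data CRC
 * byte is reserved.
 */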
static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
{
	struct drm_dp_sideband_msg_tx *txmsg;
	int ret;

	WARN_ON(!mutex_is_locked(&mgr->qlock));

	/* construct a chunk from the first msg in the tx_msg queue */
	if (list_empty(&mgr->tx_msg_downq))
		return;

	txmsg = list_first_entry(&mgr->tx_msg_downq, struct drm_dp_sideband_msg_tx, next);
	ret = process_single_tx_qlock(mgr, txmsg, false);
	if (ret == 1) {
		/* txmsg is sent; it should be in the slots now */
		list_del(&txmsg->next);
	} else if (ret) {
		DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
		list_del(&txmsg->next);
		if (txmsg->seqno != -1)
			txmsg->dst->tx_slots[txmsg->seqno] = NULL;
		txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
		wake_up_all(&mgr->tx_waitq);
	}
}
/* called holding qlock */
static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
				       struct drm_dp_sideband_msg_tx *txmsg)
{
	int ret;

	/* construct a chunk from the txmsg we were handed */
	ret = process_single_tx_qlock(mgr, txmsg, true);

	if (ret != 1)
		DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);

	if (txmsg->seqno != -1) {
		WARN_ON((unsigned int)txmsg->seqno >
			ARRAY_SIZE(txmsg->dst->tx_slots));
		txmsg->dst->tx_slots[txmsg->seqno] = NULL;
	}
}

static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_sideband_msg_tx *txmsg)
{
	mutex_lock(&mgr->qlock);
	list_add_tail(&txmsg->next, &mgr->tx_msg_downq);
	if (list_is_singular(&mgr->tx_msg_downq))
		process_single_down_tx_qlock(mgr);
	mutex_unlock(&mgr->qlock);
}
static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
				     struct drm_dp_mst_branch *mstb)
	struct drm_dp_sideband_msg_tx *txmsg;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);

	len = build_link_address(txmsg);
	mstb->link_address_sent = true;
	drm_dp_queue_down_tx(mgr, txmsg);

	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
	if (ret > 0) {
		if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
			DRM_DEBUG_KMS("link address nak received\n");
		} else {
			DRM_DEBUG_KMS("link address reply: %d\n", txmsg->reply.u.link_addr.nports);
			for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
				DRM_DEBUG_KMS("port %d: input %d, pdt: %d, pn: %d, dpcd_rev: %02x, mcs: %d, ddps: %d, ldps %d, sdp %d/%d\n", i,
					      txmsg->reply.u.link_addr.ports[i].input_port,
					      txmsg->reply.u.link_addr.ports[i].peer_device_type,
					      txmsg->reply.u.link_addr.ports[i].port_number,
					      txmsg->reply.u.link_addr.ports[i].dpcd_revision,
					      txmsg->reply.u.link_addr.ports[i].mcs,
					      txmsg->reply.u.link_addr.ports[i].ddps,
					      txmsg->reply.u.link_addr.ports[i].legacy_device_plug_status,
					      txmsg->reply.u.link_addr.ports[i].num_sdp_streams,
					      txmsg->reply.u.link_addr.ports[i].num_sdp_stream_sinks);
			}

			drm_dp_check_mstb_guid(mstb, txmsg->reply.u.link_addr.guid);

			for (i = 0; i < txmsg->reply.u.link_addr.nports; i++)
				drm_dp_add_port(mstb, mgr->dev, &txmsg->reply.u.link_addr.ports[i]);

			drm_kms_helper_hotplug_event(mgr->dev);
		}
	} else {
		mstb->link_address_sent = false;
		DRM_DEBUG_KMS("link address failed %d\n", ret);
	}

static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
					   struct drm_dp_mst_branch *mstb,
					   struct drm_dp_mst_port *port)
	struct drm_dp_sideband_msg_tx *txmsg;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);

	len = build_enum_path_resources(txmsg, port->port_num);
	drm_dp_queue_down_tx(mgr, txmsg);

	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
	if (ret > 0) {
		if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
			DRM_DEBUG_KMS("enum path resources nak received\n");
		} else {
			if (port->port_num != txmsg->reply.u.path_resources.port_number)
				DRM_ERROR("got incorrect port in response\n");
			DRM_DEBUG_KMS("enum path resources %d: %d %d\n",
				      txmsg->reply.u.path_resources.port_number,
				      txmsg->reply.u.path_resources.full_payload_bw_number,
				      txmsg->reply.u.path_resources.avail_payload_bw_number);
			port->available_pbn = txmsg->reply.u.path_resources.avail_payload_bw_number;
		}
	}
2194 static struct drm_dp_mst_port *drm_dp_get_last_connected_port_to_mstb(struct drm_dp_mst_branch *mstb)
2196 if (!mstb->port_parent)
2197 return NULL;
2199 if (mstb->port_parent->mstb != mstb)
2200 return mstb->port_parent;
2202 return drm_dp_get_last_connected_port_to_mstb(mstb->port_parent->parent);
2206 * Searches upwards in the topology starting from mstb to try to find the
2207 * closest available parent of mstb that's still connected to the rest of the
2208 * topology. This can be used in order to perform operations like releasing
2209 * payloads, where the branch device which owned the payload may no longer be
2210 * around and thus would require that the payload on the last living relative
2211 * be removed instead.
2213 static struct drm_dp_mst_branch *
2214 drm_dp_get_last_connected_port_and_mstb(struct drm_dp_mst_topology_mgr *mgr,
2215 struct drm_dp_mst_branch *mstb,
2218 struct drm_dp_mst_branch *rmstb = NULL;
2219 struct drm_dp_mst_port *found_port;
2221 mutex_lock(&mgr->lock);
2222 if (!mgr->mst_primary)
2226 found_port = drm_dp_get_last_connected_port_to_mstb(mstb);
2230 if (drm_dp_mst_topology_try_get_mstb(found_port->parent)) {
2231 rmstb = found_port->parent;
2232 *port_num = found_port->port_num;
2234 /* Search again, starting from this parent */
2235 mstb = found_port->parent;
2239 mutex_unlock(&mgr->lock);
2243 static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
2244 struct drm_dp_mst_port *port,
2248 struct drm_dp_sideband_msg_tx *txmsg;
2249 struct drm_dp_mst_branch *mstb;
2250 int len, ret, port_num;
2251 u8 sinks[DRM_DP_MAX_SDP_STREAMS];
2254 port_num = port->port_num;
2255 mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
2257 mstb = drm_dp_get_last_connected_port_and_mstb(mgr,
2265 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
2271 for (i = 0; i < port->num_sdp_streams; i++)
2272 sinks[i] = i;
2275 len = build_allocate_payload(txmsg, port_num,
2277 pbn, port->num_sdp_streams, sinks);
2279 drm_dp_queue_down_tx(mgr, txmsg);
2282 * FIXME: there is a small chance that between getting the last
2283 * connected mstb and sending the payload message, the last connected
2284 * mstb could also be removed from the topology. In the future, this
2285 * needs to be fixed by restarting the
2286 * drm_dp_get_last_connected_port_and_mstb() search in the event of a
2287 * timeout if the topology is still connected to the system.
2289 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
2291 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
2298 drm_dp_mst_topology_put_mstb(mstb);
2302 int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr,
2303 struct drm_dp_mst_port *port, bool power_up)
2305 struct drm_dp_sideband_msg_tx *txmsg;
2308 port = drm_dp_mst_topology_get_port_validated(mgr, port);
2312 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
2314 drm_dp_mst_topology_put_port(port);
2318 txmsg->dst = port->parent;
2319 len = build_power_updown_phy(txmsg, port->port_num, power_up);
2320 drm_dp_queue_down_tx(mgr, txmsg);
2322 ret = drm_dp_mst_wait_tx_reply(port->parent, txmsg);
2324 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
2330 drm_dp_mst_topology_put_port(port);
2334 EXPORT_SYMBOL(drm_dp_send_power_updown_phy);
2336 static int drm_dp_create_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
2338 struct drm_dp_payload *payload)
2342 ret = drm_dp_dpcd_write_payload(mgr, id, payload);
2344 payload->payload_state = 0;
2347 payload->payload_state = DP_PAYLOAD_LOCAL;
2351 static int drm_dp_create_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
2352 struct drm_dp_mst_port *port,
2354 struct drm_dp_payload *payload)
2357 ret = drm_dp_payload_send_msg(mgr, port, id, port->vcpi.pbn);
2360 payload->payload_state = DP_PAYLOAD_REMOTE;
2364 static int drm_dp_destroy_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
2365 struct drm_dp_mst_port *port,
2367 struct drm_dp_payload *payload)
2369 DRM_DEBUG_KMS("\n");
2370 /* it's okay for these to fail */
2372 drm_dp_payload_send_msg(mgr, port, id, 0);
2375 drm_dp_dpcd_write_payload(mgr, id, payload);
2376 payload->payload_state = DP_PAYLOAD_DELETE_LOCAL;
2380 static int drm_dp_destroy_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
2382 struct drm_dp_payload *payload)
2384 payload->payload_state = 0;
2389 * drm_dp_update_payload_part1() - Execute payload update part 1
2390 * @mgr: manager to use.
2392 * This iterates over all proposed virtual channels, and tries to
2393 * allocate space in the link for them. For 0->slots transitions,
2394 * this step just writes the VCPI to the MST device. For slots->0
2395 * transitions, this writes the updated VCPIs and removes the
2396 * remote VC payloads.
2398 * After calling this, the driver should generate ACT and payload
2399 * packets.
2401 int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
2403 struct drm_dp_payload req_payload;
2404 struct drm_dp_mst_port *port;
2408 mutex_lock(&mgr->payload_lock);
2409 for (i = 0; i < mgr->max_payloads; i++) {
2410 struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i];
2411 struct drm_dp_payload *payload = &mgr->payloads[i];
2412 bool put_port = false;
2414 /* solve the current payloads - compare to the hw ones
2415 - update the hw view */
2416 req_payload.start_slot = cur_slots;
2418 port = container_of(vcpi, struct drm_dp_mst_port,
2421 /* Validated ports don't matter if we're releasing
2422 * VCPI
2423 */
2424 if (vcpi->num_slots) {
2425 port = drm_dp_mst_topology_get_port_validated(
2428 mutex_unlock(&mgr->payload_lock);
2434 req_payload.num_slots = vcpi->num_slots;
2435 req_payload.vcpi = vcpi->vcpi;
2438 req_payload.num_slots = 0;
2441 payload->start_slot = req_payload.start_slot;
2442 /* work out what is required to happen with this payload */
2443 if (payload->num_slots != req_payload.num_slots) {
2445 /* need to push an update for this payload */
2446 if (req_payload.num_slots) {
2447 drm_dp_create_payload_step1(mgr, vcpi->vcpi,
2449 payload->num_slots = req_payload.num_slots;
2450 payload->vcpi = req_payload.vcpi;
2452 } else if (payload->num_slots) {
2453 payload->num_slots = 0;
2454 drm_dp_destroy_payload_step1(mgr, port,
2457 req_payload.payload_state =
2458 payload->payload_state;
2459 payload->start_slot = 0;
2461 payload->payload_state = req_payload.payload_state;
2463 cur_slots += req_payload.num_slots;
2466 drm_dp_mst_topology_put_port(port);
2469 for (i = 0; i < mgr->max_payloads; /* do nothing */) {
2470 if (mgr->payloads[i].payload_state != DP_PAYLOAD_DELETE_LOCAL) {
2475 DRM_DEBUG_KMS("removing payload %d\n", i);
2476 for (j = i; j < mgr->max_payloads - 1; j++) {
2477 mgr->payloads[j] = mgr->payloads[j + 1];
2478 mgr->proposed_vcpis[j] = mgr->proposed_vcpis[j + 1];
2480 if (mgr->proposed_vcpis[j] &&
2481 mgr->proposed_vcpis[j]->num_slots) {
2482 set_bit(j + 1, &mgr->payload_mask);
2484 clear_bit(j + 1, &mgr->payload_mask);
2488 memset(&mgr->payloads[mgr->max_payloads - 1], 0,
2489 sizeof(struct drm_dp_payload));
2490 mgr->proposed_vcpis[mgr->max_payloads - 1] = NULL;
2491 clear_bit(mgr->max_payloads, &mgr->payload_mask);
2493 mutex_unlock(&mgr->payload_lock);
2497 EXPORT_SYMBOL(drm_dp_update_payload_part1);
2500 * drm_dp_update_payload_part2() - Execute payload update part 2
2501 * @mgr: manager to use.
2503 * This iterates over all proposed virtual channels, and tries to
2504 * allocate space in the link for them. For 0->slots transitions,
2505 * this step writes the remote VC payload commands. For slots->0
2506 * this just resets some internal state.
2508 int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr)
2510 struct drm_dp_mst_port *port;
2513 mutex_lock(&mgr->payload_lock);
2514 for (i = 0; i < mgr->max_payloads; i++) {
2516 if (!mgr->proposed_vcpis[i])
2519 port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
2521 DRM_DEBUG_KMS("payload %d %d\n", i, mgr->payloads[i].payload_state);
2522 if (mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL) {
2523 ret = drm_dp_create_payload_step2(mgr, port, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]);
2524 } else if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) {
2525 ret = drm_dp_destroy_payload_step2(mgr, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]);
2528 mutex_unlock(&mgr->payload_lock);
2532 mutex_unlock(&mgr->payload_lock);
2535 EXPORT_SYMBOL(drm_dp_update_payload_part2);
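/*
 * A minimal sketch of the full payload-update sequence a driver might
 * run during a modeset, per the kernel-doc above. Illustrative only:
 * error handling is elided and the example_ name is hypothetical.
 */
static void example_mst_commit_payloads(struct drm_dp_mst_topology_mgr *mgr)
{
	/* part 1: write VCPIs locally / remove remote payloads */
	drm_dp_update_payload_part1(mgr);

	/* the driver triggers ACT in hardware here, then polls for it */
	drm_dp_check_act_status(mgr);

	/* part 2: send the remote VC payload allocation commands */
	drm_dp_update_payload_part2(mgr);
}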
2537 static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
2538 struct drm_dp_mst_port *port,
2539 int offset, int size, u8 *bytes)
2543 struct drm_dp_sideband_msg_tx *txmsg;
2544 struct drm_dp_mst_branch *mstb;
2546 mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
2550 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
2556 len = build_dpcd_read(txmsg, port->port_num, offset, size);
2557 txmsg->dst = port->parent;
2559 drm_dp_queue_down_tx(mgr, txmsg);
2561 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
2565 /* DPCD read should never be NACKed */
2566 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
2567 DRM_ERROR("mstb %p port %d: DPCD read on addr 0x%x for %d bytes NAKed\n",
2568 mstb, port->port_num, offset, size);
2573 if (txmsg->reply.u.remote_dpcd_read_ack.num_bytes != size) {
2578 ret = min_t(size_t, txmsg->reply.u.remote_dpcd_read_ack.num_bytes,
2580 memcpy(bytes, txmsg->reply.u.remote_dpcd_read_ack.bytes, ret);
2585 drm_dp_mst_topology_put_mstb(mstb);
2590 static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
2591 struct drm_dp_mst_port *port,
2592 int offset, int size, u8 *bytes)
2596 struct drm_dp_sideband_msg_tx *txmsg;
2597 struct drm_dp_mst_branch *mstb;
2599 mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
2603 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
2609 len = build_dpcd_write(txmsg, port->port_num, offset, size, bytes);
2612 drm_dp_queue_down_tx(mgr, txmsg);
2614 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
2616 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
2623 drm_dp_mst_topology_put_mstb(mstb);
2627 static int drm_dp_encode_up_ack_reply(struct drm_dp_sideband_msg_tx *msg, u8 req_type)
2629 struct drm_dp_sideband_msg_reply_body reply;
2631 reply.reply_type = DP_SIDEBAND_REPLY_ACK;
2632 reply.req_type = req_type;
2633 drm_dp_encode_sideband_reply(&reply, msg);
2637 static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
2638 struct drm_dp_mst_branch *mstb,
2639 int req_type, int seqno, bool broadcast)
2641 struct drm_dp_sideband_msg_tx *txmsg;
2643 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
2648 txmsg->seqno = seqno;
2649 drm_dp_encode_up_ack_reply(txmsg, req_type);
2651 mutex_lock(&mgr->qlock);
2653 process_single_up_tx_qlock(mgr, txmsg);
2655 mutex_unlock(&mgr->qlock);
2661 static bool drm_dp_get_vc_payload_bw(int dp_link_bw,
2665 switch (dp_link_bw) {
2667 DRM_DEBUG_KMS("invalid link bandwidth in DPCD: %x (link count: %d)\n",
2668 dp_link_bw, dp_link_count);
2671 case DP_LINK_BW_1_62:
2672 *out = 3 * dp_link_count;
2674 case DP_LINK_BW_2_7:
2675 *out = 5 * dp_link_count;
2677 case DP_LINK_BW_5_4:
2678 *out = 10 * dp_link_count;
2680 case DP_LINK_BW_8_1:
2681 *out = 15 * dp_link_count;
2688 * drm_dp_mst_topology_mgr_set_mst() - Set the MST state for a topology manager
2689 * @mgr: manager to set state for
2690 * @mst_state: true to enable MST on this connector - false to disable.
2692 * This is called by the driver when it detects an MST capable device plugged
2693 * into a DP MST capable port, or when a DP MST capable device is unplugged.
2695 int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state)
2698 struct drm_dp_mst_branch *mstb = NULL;
2700 mutex_lock(&mgr->payload_lock);
2701 mutex_lock(&mgr->lock);
2702 if (mst_state == mgr->mst_state)
2705 mgr->mst_state = mst_state;
2706 /* set the device into MST mode */
2708 WARN_ON(mgr->mst_primary);
2711 ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
2712 if (ret != DP_RECEIVER_CAP_SIZE) {
2713 DRM_DEBUG_KMS("failed to read DPCD\n");
2717 if (!drm_dp_get_vc_payload_bw(mgr->dpcd[1],
2718 mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK,
2724 /* add initial branch device at LCT 1 */
2725 mstb = drm_dp_add_mst_branch_device(1, NULL);
2732 /* give this the main reference */
2733 mgr->mst_primary = mstb;
2734 drm_dp_mst_topology_get_mstb(mgr->mst_primary);
2736 ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
2737 DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
2743 struct drm_dp_payload reset_pay;
2744 reset_pay.start_slot = 0;
2745 reset_pay.num_slots = 0x3f;
2746 drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);
2749 queue_work(system_long_wq, &mgr->work);
2753 /* disable MST on the device */
2754 mstb = mgr->mst_primary;
2755 mgr->mst_primary = NULL;
2756 /* this can fail if the device is gone */
2757 drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0);
2759 memset(mgr->payloads, 0,
2760 mgr->max_payloads * sizeof(mgr->payloads[0]));
2761 memset(mgr->proposed_vcpis, 0,
2762 mgr->max_payloads * sizeof(mgr->proposed_vcpis[0]));
2763 mgr->payload_mask = 0;
2764 set_bit(0, &mgr->payload_mask);
2769 mutex_unlock(&mgr->lock);
2770 mutex_unlock(&mgr->payload_lock);
2772 drm_dp_mst_topology_put_mstb(mstb);
2776 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_set_mst);
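/*
 * A minimal usage sketch (hypothetical example_ naming): flip MST mode
 * on when an MST-capable sink is detected and off again on unplug.
 */
static void example_handle_mst_detect(struct drm_dp_mst_topology_mgr *mgr,
				      bool sink_is_mst)
{
	int ret = drm_dp_mst_topology_mgr_set_mst(mgr, sink_is_mst);

	if (ret < 0)
		DRM_DEBUG_KMS("failed to change MST state: %d\n", ret);
}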
2779 * drm_dp_mst_topology_mgr_suspend() - suspend the MST manager
2780 * @mgr: manager to suspend
2782 * This function tells the MST device that we can't handle UP messages
2783 * anymore. This should stop it from sending any since we are suspended.
2785 void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr)
2787 mutex_lock(&mgr->lock);
2788 drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
2789 DP_MST_EN | DP_UPSTREAM_IS_SRC);
2790 mutex_unlock(&mgr->lock);
2791 flush_work(&mgr->work);
2792 flush_work(&mgr->destroy_connector_work);
2794 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend);
2797 * drm_dp_mst_topology_mgr_resume() - resume the MST manager
2798 * @mgr: manager to resume
2800 * This fetches the DPCD to check whether the device is still there;
2801 * if it is, it rewrites the MSTM control bits and returns 0.
2803 * If the device is gone, this returns -1 and the driver should do
2804 * a full MST reprobe, in case we were undocked.
2806 int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr)
2810 mutex_lock(&mgr->lock);
2812 if (mgr->mst_primary) {
2816 sret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
2817 if (sret != DP_RECEIVER_CAP_SIZE) {
2818 DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
2823 ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
2824 DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
2826 DRM_DEBUG_KMS("mst write failed - undocked during suspend?\n");
2831 /* Some hubs forget their guids after they resume */
2832 sret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
2834 DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
2838 drm_dp_check_mstb_guid(mgr->mst_primary, guid);
2845 mutex_unlock(&mgr->lock);
2848 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume);
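/*
 * A minimal resume-path sketch (hypothetical example_ naming): a
 * negative return means the sink likely went away while suspended, so
 * tear MST down and let a hotplug event drive a full reprobe.
 */
static void example_mst_resume(struct drm_dp_mst_topology_mgr *mgr)
{
	if (drm_dp_mst_topology_mgr_resume(mgr) < 0) {
		drm_dp_mst_topology_mgr_set_mst(mgr, false);
		drm_kms_helper_hotplug_event(mgr->dev);
	}
}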
2850 static bool drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
2854 int replylen, origlen, curreply;
2856 struct drm_dp_sideband_msg_rx *msg;
2857 int basereg = up ? DP_SIDEBAND_MSG_UP_REQ_BASE : DP_SIDEBAND_MSG_DOWN_REP_BASE;
2858 msg = up ? &mgr->up_req_recv : &mgr->down_rep_recv;
2860 len = min(mgr->max_dpcd_transaction_bytes, 16);
2861 ret = drm_dp_dpcd_read(mgr->aux, basereg,
2864 DRM_DEBUG_KMS("failed to read DPCD down rep %d %d\n", len, ret);
2867 ret = drm_dp_sideband_msg_build(msg, replyblock, len, true);
2869 DRM_DEBUG_KMS("sideband msg build failed %d\n", replyblock[0]);
2872 replylen = msg->curchunk_len + msg->curchunk_hdrlen;
2877 while (replylen > 0) {
2878 len = min3(replylen, mgr->max_dpcd_transaction_bytes, 16);
2879 ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply,
2882 DRM_DEBUG_KMS("failed to read a chunk (len %d, ret %d)\n",
2887 ret = drm_dp_sideband_msg_build(msg, replyblock, len, false);
2889 DRM_DEBUG_KMS("failed to build sideband msg\n");
2899 static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
2903 if (!drm_dp_get_one_sb_msg(mgr, false)) {
2904 memset(&mgr->down_rep_recv, 0,
2905 sizeof(struct drm_dp_sideband_msg_rx));
2909 if (mgr->down_rep_recv.have_eomt) {
2910 struct drm_dp_sideband_msg_tx *txmsg;
2911 struct drm_dp_mst_branch *mstb;
2913 mstb = drm_dp_get_mst_branch_device(mgr,
2914 mgr->down_rep_recv.initial_hdr.lct,
2915 mgr->down_rep_recv.initial_hdr.rad);
2918 DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->down_rep_recv.initial_hdr.lct);
2919 memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
2923 /* find the message */
2924 slot = mgr->down_rep_recv.initial_hdr.seqno;
2925 mutex_lock(&mgr->qlock);
2926 txmsg = mstb->tx_slots[slot];
2927 /* remove from slots */
2928 mutex_unlock(&mgr->qlock);
2931 DRM_DEBUG_KMS("Got MST reply with no msg %p %d %d %02x %02x\n",
2933 mgr->down_rep_recv.initial_hdr.seqno,
2934 mgr->down_rep_recv.initial_hdr.lct,
2935 mgr->down_rep_recv.initial_hdr.rad[0],
2936 mgr->down_rep_recv.msg[0]);
2937 drm_dp_mst_topology_put_mstb(mstb);
2938 memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
2942 drm_dp_sideband_parse_reply(&mgr->down_rep_recv, &txmsg->reply);
2944 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
2945 DRM_DEBUG_KMS("Got NAK reply: req 0x%02x (%s), reason 0x%02x (%s), nak data 0x%02x\n",
2946 txmsg->reply.req_type,
2947 drm_dp_mst_req_type_str(txmsg->reply.req_type),
2948 txmsg->reply.u.nak.reason,
2949 drm_dp_mst_nak_reason_str(txmsg->reply.u.nak.reason),
2950 txmsg->reply.u.nak.nak_data);
2952 memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
2953 drm_dp_mst_topology_put_mstb(mstb);
2955 mutex_lock(&mgr->qlock);
2956 txmsg->state = DRM_DP_SIDEBAND_TX_RX;
2957 mstb->tx_slots[slot] = NULL;
2958 mutex_unlock(&mgr->qlock);
2960 wake_up_all(&mgr->tx_waitq);
2965 static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
2969 if (!drm_dp_get_one_sb_msg(mgr, true)) {
2970 memset(&mgr->up_req_recv, 0,
2971 sizeof(struct drm_dp_sideband_msg_rx));
2975 if (mgr->up_req_recv.have_eomt) {
2976 struct drm_dp_sideband_msg_req_body msg;
2977 struct drm_dp_mst_branch *mstb = NULL;
2980 if (!mgr->up_req_recv.initial_hdr.broadcast) {
2981 mstb = drm_dp_get_mst_branch_device(mgr,
2982 mgr->up_req_recv.initial_hdr.lct,
2983 mgr->up_req_recv.initial_hdr.rad);
2985 DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
2986 memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
2991 seqno = mgr->up_req_recv.initial_hdr.seqno;
2992 drm_dp_sideband_parse_req(&mgr->up_req_recv, &msg);
2994 if (msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
2995 drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false);
2998 mstb = drm_dp_get_mst_branch_device_by_guid(mgr, msg.u.conn_stat.guid);
3001 DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
3002 memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
3006 drm_dp_update_port(mstb, &msg.u.conn_stat);
3008 DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", msg.u.conn_stat.port_number, msg.u.conn_stat.legacy_device_plug_status, msg.u.conn_stat.displayport_device_plug_status, msg.u.conn_stat.message_capability_status, msg.u.conn_stat.input_port, msg.u.conn_stat.peer_device_type);
3009 drm_kms_helper_hotplug_event(mgr->dev);
3011 } else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
3012 drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false);
3014 mstb = drm_dp_get_mst_branch_device_by_guid(mgr, msg.u.resource_stat.guid);
3017 DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
3018 memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
3022 DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n", msg.u.resource_stat.port_number, msg.u.resource_stat.available_pbn);
3026 drm_dp_mst_topology_put_mstb(mstb);
3028 memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
3034 * drm_dp_mst_hpd_irq() - MST hotplug IRQ notify
3035 * @mgr: manager to notify irq for.
3036 * @esi: 4 bytes from SINK_COUNT_ESI
3037 * @handled: whether the hpd interrupt was consumed or not
3039 * This should be called from the driver when it detects a short IRQ,
3040 * along with the value of the DEVICE_SERVICE_IRQ_VECTOR_ESI0. The
3041 * topology manager will process the sideband messages received as a result
3042 * of this.
3044 int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled)
3051 if (sc != mgr->sink_count) {
3052 mgr->sink_count = sc;
3056 if (esi[1] & DP_DOWN_REP_MSG_RDY) {
3057 ret = drm_dp_mst_handle_down_rep(mgr);
3061 if (esi[1] & DP_UP_REQ_MSG_RDY) {
3062 ret |= drm_dp_mst_handle_up_req(mgr);
3066 drm_dp_mst_kick_tx(mgr);
3069 EXPORT_SYMBOL(drm_dp_mst_hpd_irq);
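/*
 * A minimal short-HPD handler sketch. The ESI read/ack offsets follow
 * the pattern used by existing drivers but should be treated as an
 * assumption here, as is the example_ naming.
 */
static void example_mst_short_hpd(struct drm_dp_mst_topology_mgr *mgr)
{
	u8 esi[4] = {};
	bool handled = false;

	if (drm_dp_dpcd_read(mgr->aux, DP_SINK_COUNT_ESI, esi, 4) != 4)
		return;

	drm_dp_mst_hpd_irq(mgr, esi, &handled);
	if (handled)
		/* ack the IRQ vector bytes we just serviced */
		drm_dp_dpcd_write(mgr->aux, DP_SINK_COUNT_ESI + 1,
				  &esi[1], 3);
}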
3072 * drm_dp_mst_detect_port() - get connection status for an MST port
3073 * @connector: DRM connector for this port
3074 * @mgr: manager for this port
3075 * @port: unverified pointer to a port
3077 * This returns the current connection state for a port. It validates the
3078 * port pointer still exists so the caller doesn't require a reference
3080 enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector,
3081 struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
3083 enum drm_connector_status status = connector_status_disconnected;
3085 /* we need to search for the port in the mgr in case it's gone */
3086 port = drm_dp_mst_topology_get_port_validated(mgr, port);
3088 return connector_status_disconnected;
3093 switch (port->pdt) {
3094 case DP_PEER_DEVICE_NONE:
3095 case DP_PEER_DEVICE_MST_BRANCHING:
3098 case DP_PEER_DEVICE_SST_SINK:
3099 status = connector_status_connected;
3100 /* for logical ports - cache the EDID */
3101 if (port->port_num >= 8 && !port->cached_edid) {
3102 port->cached_edid = drm_get_edid(connector, &port->aux.ddc);
3105 case DP_PEER_DEVICE_DP_LEGACY_CONV:
3107 status = connector_status_connected;
3111 drm_dp_mst_topology_put_port(port);
3114 EXPORT_SYMBOL(drm_dp_mst_detect_port);
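/*
 * A minimal ->detect() sketch, assuming a hypothetical wrapper struct
 * that stores the (unverified) MST port next to its connector; the
 * helper does the validation, so no extra reference is needed.
 */
struct example_mst_connector {
	struct drm_connector base;
	struct drm_dp_mst_topology_mgr *mgr;
	struct drm_dp_mst_port *port;
};

static enum drm_connector_status
example_connector_detect(struct example_mst_connector *conn)
{
	return drm_dp_mst_detect_port(&conn->base, conn->mgr, conn->port);
}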
3117 * drm_dp_mst_port_has_audio() - Check whether port has audio capability or not
3118 * @mgr: manager for this port
3119 * @port: unverified pointer to a port.
3121 * This returns whether the port supports audio or not.
3123 bool drm_dp_mst_port_has_audio(struct drm_dp_mst_topology_mgr *mgr,
3124 struct drm_dp_mst_port *port)
3128 port = drm_dp_mst_topology_get_port_validated(mgr, port);
3131 ret = port->has_audio;
3132 drm_dp_mst_topology_put_port(port);
3135 EXPORT_SYMBOL(drm_dp_mst_port_has_audio);
3138 * drm_dp_mst_get_edid() - get EDID for an MST port
3139 * @connector: toplevel connector to get EDID for
3140 * @mgr: manager for this port
3141 * @port: unverified pointer to a port.
3143 * This returns an EDID for the port connected to a connector. It
3144 * validates the pointer still exists so the caller doesn't require a
3145 * reference.
3147 struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
3149 struct edid *edid = NULL;
3151 /* we need to search for the port in the mgr in case it's gone */
3152 port = drm_dp_mst_topology_get_port_validated(mgr, port);
3156 if (port->cached_edid)
3157 edid = drm_edid_duplicate(port->cached_edid);
3159 edid = drm_get_edid(connector, &port->aux.ddc);
3161 port->has_audio = drm_detect_monitor_audio(edid);
3162 drm_dp_mst_topology_put_port(port);
3165 EXPORT_SYMBOL(drm_dp_mst_get_edid);
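/*
 * A minimal ->get_modes() style sketch built on the hypothetical
 * example_mst_connector wrapper above; the duplicated EDID belongs to
 * the caller and must be freed.
 */
static int example_connector_get_modes(struct example_mst_connector *conn)
{
	struct edid *edid;
	int count;

	edid = drm_dp_mst_get_edid(&conn->base, conn->mgr, conn->port);
	drm_connector_update_edid_property(&conn->base, edid);
	count = drm_add_edid_modes(&conn->base, edid);
	kfree(edid);
	return count;
}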
3168 * drm_dp_find_vcpi_slots() - Find VCPI slots for this PBN value
3169 * @mgr: manager to use
3170 * @pbn: payload bandwidth to convert into slots.
3172 * Calculate the number of VCPI slots that will be required for the given PBN
3173 * value. This function is deprecated, and should not be used in atomic
3174 * drivers.
3177 * The total slots required for this port, or error.
3179 int drm_dp_find_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr,
3184 num_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);
3186 /* max. time slots - one slot for MTP header */
3191 EXPORT_SYMBOL(drm_dp_find_vcpi_slots);
3193 static int drm_dp_init_vcpi(struct drm_dp_mst_topology_mgr *mgr,
3194 struct drm_dp_vcpi *vcpi, int pbn, int slots)
3198 /* max. time slots - one slot for MTP header */
3203 vcpi->aligned_pbn = slots * mgr->pbn_div;
3204 vcpi->num_slots = slots;
3206 ret = drm_dp_mst_assign_payload_id(mgr, vcpi);
3213 * drm_dp_atomic_find_vcpi_slots() - Find and add VCPI slots to the state
3214 * @state: global atomic state
3215 * @mgr: MST topology manager for the port
3216 * @port: port to find vcpi slots for
3217 * @pbn: bandwidth required for the mode in PBN
3219 * Allocates VCPI slots to @port, replacing any previous VCPI allocations it
3220 * may have had. Any atomic drivers which support MST must call this function
3221 * in their &drm_encoder_helper_funcs.atomic_check() callback to change the
3222 * current VCPI allocation for the new state, but only when
3223 * &drm_crtc_state.mode_changed or &drm_crtc_state.connectors_changed is set
3224 * to ensure compatibility with userspace applications that still use the
3225 * legacy modesetting UAPI.
3227 * Allocations set by this function are not checked against the bandwidth
3228 * restraints of @mgr until the driver calls drm_dp_mst_atomic_check().
3230 * Additionally, it is OK to call this function multiple times on the same
3231 * @port as needed. It is not OK however, to call this function and
3232 * drm_dp_atomic_release_vcpi_slots() in the same atomic check phase.
3235 * drm_dp_atomic_release_vcpi_slots()
3236 * drm_dp_mst_atomic_check()
3239 * Total slots in the atomic state assigned for this port, or a negative error
3240 * code if the port no longer exists
3242 int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
3243 struct drm_dp_mst_topology_mgr *mgr,
3244 struct drm_dp_mst_port *port, int pbn)
3246 struct drm_dp_mst_topology_state *topology_state;
3247 struct drm_dp_vcpi_allocation *pos, *vcpi = NULL;
3248 int prev_slots, req_slots, ret;
3250 topology_state = drm_atomic_get_mst_topology_state(state, mgr);
3251 if (IS_ERR(topology_state))
3252 return PTR_ERR(topology_state);
3254 /* Find the current allocation for this port, if any */
3255 list_for_each_entry(pos, &topology_state->vcpis, next) {
3256 if (pos->port == port) {
3258 prev_slots = vcpi->vcpi;
3261 * This should never happen, unless the driver tries
3262 * releasing and allocating the same VCPI allocation,
3263 * which is an error
3264 */
3265 if (WARN_ON(!prev_slots)) {
3266 DRM_ERROR("cannot allocate and release VCPI on [MST PORT:%p] in the same state\n",
3277 req_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);
3279 DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] [MST PORT:%p] VCPI %d -> %d\n",
3280 port->connector->base.id, port->connector->name,
3281 port, prev_slots, req_slots);
3283 /* Add the new allocation to the state */
3285 vcpi = kzalloc(sizeof(*vcpi), GFP_KERNEL);
3289 drm_dp_mst_get_port_malloc(port);
3291 list_add(&vcpi->next, &topology_state->vcpis);
3293 vcpi->vcpi = req_slots;
3298 EXPORT_SYMBOL(drm_dp_atomic_find_vcpi_slots);
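/*
 * A minimal atomic_check sketch for the allocation side (hypothetical
 * example_ naming, 24 bpp assumed): convert the mode to PBN, then ask
 * for slots in the topology state.
 */
static int example_encoder_atomic_check(struct drm_crtc_state *crtc_state,
					struct example_mst_connector *conn)
{
	int pbn, slots;

	pbn = drm_dp_calc_pbn_mode(crtc_state->adjusted_mode.crtc_clock, 24);
	slots = drm_dp_atomic_find_vcpi_slots(crtc_state->state, conn->mgr,
					      conn->port, pbn);
	return slots < 0 ? slots : 0;
}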
3301 * drm_dp_atomic_release_vcpi_slots() - Release allocated vcpi slots
3302 * @state: global atomic state
3303 * @mgr: MST topology manager for the port
3304 * @port: The port to release the VCPI slots from
3306 * Releases any VCPI slots that have been allocated to a port in the atomic
3307 * state. Any atomic drivers which support MST must call this function in
3308 * their &drm_connector_helper_funcs.atomic_check() callback when the
3309 * connector will no longer have VCPI allocated (e.g. because its CRTC was
3310 * removed) when it had VCPI allocated in the previous atomic state.
3312 * It is OK to call this even if @port has been removed from the system.
3313 * Additionally, it is OK to call this function multiple times on the same
3314 * @port as needed. It is not OK however, to call this function and
3315 * drm_dp_atomic_find_vcpi_slots() on the same @port in a single atomic check
3319 * drm_dp_atomic_find_vcpi_slots()
3320 * drm_dp_mst_atomic_check()
3323 * 0 if all slots for this port were added back to
3324 * &drm_dp_mst_topology_state.avail_slots or negative error code
3326 int drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state,
3327 struct drm_dp_mst_topology_mgr *mgr,
3328 struct drm_dp_mst_port *port)
3330 struct drm_dp_mst_topology_state *topology_state;
3331 struct drm_dp_vcpi_allocation *pos;
3334 topology_state = drm_atomic_get_mst_topology_state(state, mgr);
3335 if (IS_ERR(topology_state))
3336 return PTR_ERR(topology_state);
3338 list_for_each_entry(pos, &topology_state->vcpis, next) {
3339 if (pos->port == port) {
3344 if (WARN_ON(!found)) {
3345 DRM_ERROR("no VCPI for [MST PORT:%p] found in mst state %p\n",
3346 port, &topology_state->base);
3350 DRM_DEBUG_ATOMIC("[MST PORT:%p] VCPI %d -> 0\n", port, pos->vcpi);
3352 drm_dp_mst_put_port_malloc(port);
3358 EXPORT_SYMBOL(drm_dp_atomic_release_vcpi_slots);
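/*
 * The matching release-side sketch (same hypothetical wrapper): give
 * the slots back when the connector loses its CRTC in the new state.
 */
static int example_connector_atomic_check(struct example_mst_connector *conn,
					  struct drm_connector_state *new_state)
{
	if (new_state->crtc)
		return 0;

	return drm_dp_atomic_release_vcpi_slots(new_state->state,
						conn->mgr, conn->port);
}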
3361 * drm_dp_mst_allocate_vcpi() - Allocate a virtual channel
3362 * @mgr: manager for this port
3363 * @port: port to allocate a virtual channel for.
3364 * @pbn: payload bandwidth number to request
3365 * @slots: returned number of slots for this PBN.
3367 bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
3368 struct drm_dp_mst_port *port, int pbn, int slots)
3372 port = drm_dp_mst_topology_get_port_validated(mgr, port);
3379 if (port->vcpi.vcpi > 0) {
3380 DRM_DEBUG_KMS("payload: vcpi %d already allocated for pbn %d - requested pbn %d\n",
3381 port->vcpi.vcpi, port->vcpi.pbn, pbn);
3382 if (pbn == port->vcpi.pbn) {
3383 drm_dp_mst_topology_put_port(port);
3388 ret = drm_dp_init_vcpi(mgr, &port->vcpi, pbn, slots);
3390 DRM_DEBUG_KMS("failed to init vcpi slots=%d max=63 ret=%d\n",
3391 DIV_ROUND_UP(pbn, mgr->pbn_div), ret);
3394 DRM_DEBUG_KMS("initing vcpi for pbn=%d slots=%d\n",
3395 pbn, port->vcpi.num_slots);
3397 /* Keep port allocated until its payload has been removed */
3398 drm_dp_mst_get_port_malloc(port);
3399 drm_dp_mst_topology_put_port(port);
3404 EXPORT_SYMBOL(drm_dp_mst_allocate_vcpi);
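/*
 * A legacy (non-atomic) allocation sketch (hypothetical example_
 * naming, 24 bpp assumed); atomic drivers should use
 * drm_dp_atomic_find_vcpi_slots() instead.
 */
static bool example_enable_stream(struct drm_dp_mst_topology_mgr *mgr,
				  struct drm_dp_mst_port *port, int clock)
{
	int pbn = drm_dp_calc_pbn_mode(clock, 24);
	int slots = drm_dp_find_vcpi_slots(mgr, pbn);

	if (slots < 0)
		return false;

	return drm_dp_mst_allocate_vcpi(mgr, port, pbn, slots);
}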
3406 int drm_dp_mst_get_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
3409 port = drm_dp_mst_topology_get_port_validated(mgr, port);
3413 slots = port->vcpi.num_slots;
3414 drm_dp_mst_topology_put_port(port);
3417 EXPORT_SYMBOL(drm_dp_mst_get_vcpi_slots);
3420 * drm_dp_mst_reset_vcpi_slots() - Reset number of slots to 0 for VCPI
3421 * @mgr: manager for this port
3422 * @port: unverified pointer to a port.
3424 * This just resets the number of slots for the port's VCPI for later programming.
3426 void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
3429 * A port with VCPI will remain allocated until its VCPI is
3430 * released, no verified ref needed
3433 port->vcpi.num_slots = 0;
3435 EXPORT_SYMBOL(drm_dp_mst_reset_vcpi_slots);
3438 * drm_dp_mst_deallocate_vcpi() - deallocate a VCPI
3439 * @mgr: manager for this port
3440 * @port: port to deallocate vcpi for
3442 * This can be called unconditionally, regardless of whether
3443 * drm_dp_mst_allocate_vcpi() succeeded or not.
3445 void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
3446 struct drm_dp_mst_port *port)
3448 if (!port->vcpi.vcpi)
3451 drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
3452 port->vcpi.num_slots = 0;
3454 port->vcpi.aligned_pbn = 0;
3455 port->vcpi.vcpi = 0;
3456 drm_dp_mst_put_port_malloc(port);
3458 EXPORT_SYMBOL(drm_dp_mst_deallocate_vcpi);
3460 static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
3461 int id, struct drm_dp_payload *payload)
3463 u8 payload_alloc[3], status;
3467 drm_dp_dpcd_writeb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS,
3468 DP_PAYLOAD_TABLE_UPDATED);
3470 payload_alloc[0] = id;
3471 payload_alloc[1] = payload->start_slot;
3472 payload_alloc[2] = payload->num_slots;
3474 ret = drm_dp_dpcd_write(mgr->aux, DP_PAYLOAD_ALLOCATE_SET, payload_alloc, 3);
3476 DRM_DEBUG_KMS("failed to write payload allocation %d\n", ret);
3481 ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
3483 DRM_DEBUG_KMS("failed to read payload table status %d\n", ret);
3487 if (!(status & DP_PAYLOAD_TABLE_UPDATED)) {
3490 usleep_range(10000, 20000);
3493 DRM_DEBUG_KMS("status not set after read payload table status %d\n", status);
3502 static int do_get_act_status(struct drm_dp_aux *aux)
3507 ret = drm_dp_dpcd_readb(aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
3515 * drm_dp_check_act_status() - Check ACT handled status.
3516 * @mgr: manager to use
3518 * Check the payload status bits in the DPCD for ACT handled completion.
3520 int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr)
3523 * There doesn't seem to be any recommended retry count or timeout in
3524 * the MST specification. Since some hubs have been observed to take
3525 * over 1 second to update their payload allocations under certain
3526 * conditions, we use a rather large timeout value.
3528 const int timeout_ms = 3000;
3531 ret = readx_poll_timeout(do_get_act_status, mgr->aux, status,
3532 status & DP_PAYLOAD_ACT_HANDLED || status < 0,
3533 200, timeout_ms * USEC_PER_MSEC);
3534 if (ret < 0 && status >= 0) {
3535 DRM_DEBUG_KMS("Failed to get ACT after %dms, last status: %02x\n",
3536 timeout_ms, status);
3538 } else if (status < 0) {
3539 DRM_DEBUG_KMS("Failed to read payload table status: %d\n",
3546 EXPORT_SYMBOL(drm_dp_check_act_status);
3549 * drm_dp_calc_pbn_mode() - Calculate the PBN for a mode.
3550 * @clock: dot clock for the mode
3551 * @bpp: bpp for the mode.
3553 * This uses the formula in the spec to calculate the PBN value for a mode.
3555 int drm_dp_calc_pbn_mode(int clock, int bpp)
3565 * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006
3566 * The unit of 54/64Mbytes/sec is an arbitrary unit chosen based on
3567 * common multiplier to render an integer PBN for all link rate/lane
3568 * counts combinations
3570 * peak_kbps *= (1006/1000)
3571 * peak_kbps *= (64/54)
3572 * peak_kbps *= 8 convert to bytes
3575 numerator = 64 * 1006;
3576 denominator = 54 * 8 * 1000 * 1000;
3579 peak_kbps = drm_fixp_from_fraction(kbps, denominator);
3581 return drm_fixp2int_ceil(peak_kbps);
3583 EXPORT_SYMBOL(drm_dp_calc_pbn_mode);
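/*
 * A worked example of the formula above (illustrative): a 154 MHz dot
 * clock at 30 bpp gives kbps = 154000 * 30 = 4620000, so
 * PBN = ceil(4620000 * 64 * 1006 / (54 * 8 * 1000 * 1000)) = 689,
 * which is the first case the self-test below expects.
 */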
3585 static int test_calc_pbn_mode(void)
3588 ret = drm_dp_calc_pbn_mode(154000, 30);
3590 DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
3591 154000, 30, 689, ret);
3594 ret = drm_dp_calc_pbn_mode(234000, 30);
3596 DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
3597 234000, 30, 1047, ret);
3600 ret = drm_dp_calc_pbn_mode(297000, 24);
3602 DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
3603 297000, 24, 1063, ret);
3609 /* we want to kick the TX after we've acked the up/down IRQs. */
3610 static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr)
3612 queue_work(system_long_wq, &mgr->tx_work);
3615 static void drm_dp_mst_dump_mstb(struct seq_file *m,
3616 struct drm_dp_mst_branch *mstb)
3618 struct drm_dp_mst_port *port;
3619 int tabs = mstb->lct;
3623 for (i = 0; i < tabs; i++)
3627 seq_printf(m, "%smst: %p, %d\n", prefix, mstb, mstb->num_ports);
3628 list_for_each_entry(port, &mstb->ports, next) {
3629 seq_printf(m, "%sport: %d: input: %d: pdt: %d, ddps: %d ldps: %d, sdp: %d/%d, %p, conn: %p\n", prefix, port->port_num, port->input, port->pdt, port->ddps, port->ldps, port->num_sdp_streams, port->num_sdp_stream_sinks, port, port->connector);
3631 drm_dp_mst_dump_mstb(m, port->mstb);
3635 #define DP_PAYLOAD_TABLE_SIZE 64
3637 static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
3642 for (i = 0; i < DP_PAYLOAD_TABLE_SIZE; i += 16) {
3643 if (drm_dp_dpcd_read(mgr->aux,
3644 DP_PAYLOAD_TABLE_UPDATE_STATUS + i,
3651 static void fetch_monitor_name(struct drm_dp_mst_topology_mgr *mgr,
3652 struct drm_dp_mst_port *port, char *name,
3655 struct edid *mst_edid;
3657 mst_edid = drm_dp_mst_get_edid(port->connector, mgr, port);
3658 drm_edid_get_monitor_name(mst_edid, name, namelen);
3662 * drm_dp_mst_dump_topology(): dump topology to seq file.
3663 * @m: seq_file to dump output to
3664 * @mgr: manager to dump current topology for.
3666 * helper to dump MST topology to a seq file for debugfs.
3668 void drm_dp_mst_dump_topology(struct seq_file *m,
3669 struct drm_dp_mst_topology_mgr *mgr)
3672 struct drm_dp_mst_port *port;
3674 mutex_lock(&mgr->lock);
3675 if (mgr->mst_primary)
3676 drm_dp_mst_dump_mstb(m, mgr->mst_primary);
3679 mutex_unlock(&mgr->lock);
3681 mutex_lock(&mgr->payload_lock);
3682 seq_printf(m, "vcpi: %lx %lx %d\n", mgr->payload_mask, mgr->vcpi_mask,
3685 for (i = 0; i < mgr->max_payloads; i++) {
3686 if (mgr->proposed_vcpis[i]) {
3689 port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
3690 fetch_monitor_name(mgr, port, name, sizeof(name));
3691 seq_printf(m, "vcpi %d: %d %d %d sink name: %s\n", i,
3692 port->port_num, port->vcpi.vcpi,
3693 port->vcpi.num_slots,
3694 (*name != 0) ? name : "Unknown");
3696 seq_printf(m, "vcpi %d:unused\n", i);
3698 for (i = 0; i < mgr->max_payloads; i++) {
3699 seq_printf(m, "payload %d: %d, %d, %d\n",
3701 mgr->payloads[i].payload_state,
3702 mgr->payloads[i].start_slot,
3703 mgr->payloads[i].num_slots);
3707 mutex_unlock(&mgr->payload_lock);
3709 mutex_lock(&mgr->lock);
3710 if (mgr->mst_primary) {
3711 u8 buf[DP_PAYLOAD_TABLE_SIZE];
3714 ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, buf, DP_RECEIVER_CAP_SIZE);
3715 seq_printf(m, "dpcd: %*ph\n", DP_RECEIVER_CAP_SIZE, buf);
3716 ret = drm_dp_dpcd_read(mgr->aux, DP_FAUX_CAP, buf, 2);
3717 seq_printf(m, "faux/mst: %*ph\n", 2, buf);
3718 ret = drm_dp_dpcd_read(mgr->aux, DP_MSTM_CTRL, buf, 1);
3719 seq_printf(m, "mst ctrl: %*ph\n", 1, buf);
3721 /* dump the standard OUI branch header */
3722 ret = drm_dp_dpcd_read(mgr->aux, DP_BRANCH_OUI, buf, DP_BRANCH_OUI_HEADER_SIZE);
3723 seq_printf(m, "branch oui: %*phN devid: ", 3, buf);
3724 for (i = 0x3; i < 0x8 && buf[i]; i++)
3725 seq_printf(m, "%c", buf[i]);
3726 seq_printf(m, " revision: hw: %x.%x sw: %x.%x\n",
3727 buf[0x9] >> 4, buf[0x9] & 0xf, buf[0xa], buf[0xb]);
3728 if (dump_dp_payload_table(mgr, buf))
3729 seq_printf(m, "payload table: %*ph\n", DP_PAYLOAD_TABLE_SIZE, buf);
3732 mutex_unlock(&mgr->lock);
3735 EXPORT_SYMBOL(drm_dp_mst_dump_topology);
3737 static void drm_dp_tx_work(struct work_struct *work)
3739 struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work);
3741 mutex_lock(&mgr->qlock);
3742 if (!list_empty(&mgr->tx_msg_downq))
3743 process_single_down_tx_qlock(mgr);
3744 mutex_unlock(&mgr->qlock);
3747 static void drm_dp_destroy_connector_work(struct work_struct *work)
3749 struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work);
3750 struct drm_dp_mst_port *port;
3751 bool send_hotplug = false;
3753 * Not a regular list traverse as we have to drop the destroy
3754 * connector lock before destroying the connector, to avoid AB->BA
3755 * ordering between this lock and the config mutex.
3758 mutex_lock(&mgr->destroy_connector_lock);
3759 port = list_first_entry_or_null(&mgr->destroy_connector_list, struct drm_dp_mst_port, next);
3761 mutex_unlock(&mgr->destroy_connector_lock);
3764 list_del(&port->next);
3765 mutex_unlock(&mgr->destroy_connector_lock);
3767 INIT_LIST_HEAD(&port->next);
3769 mgr->cbs->destroy_connector(mgr, port->connector);
3771 drm_dp_port_teardown_pdt(port, port->pdt);
3772 port->pdt = DP_PEER_DEVICE_NONE;
3774 drm_dp_mst_put_port_malloc(port);
3775 send_hotplug = true;
3778 drm_kms_helper_hotplug_event(mgr->dev);
3781 static struct drm_private_state *
3782 drm_dp_mst_duplicate_state(struct drm_private_obj *obj)
3784 struct drm_dp_mst_topology_state *state, *old_state =
3785 to_dp_mst_topology_state(obj->state);
3786 struct drm_dp_vcpi_allocation *pos, *vcpi;
3788 state = kmemdup(old_state, sizeof(*state), GFP_KERNEL);
3792 __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
3794 INIT_LIST_HEAD(&state->vcpis);
3796 list_for_each_entry(pos, &old_state->vcpis, next) {
3797 /* Prune leftover freed VCPI allocations */
3801 vcpi = kmemdup(pos, sizeof(*vcpi), GFP_KERNEL);
3805 drm_dp_mst_get_port_malloc(vcpi->port);
3806 list_add(&vcpi->next, &state->vcpis);
3809 return &state->base;
3812 list_for_each_entry_safe(pos, vcpi, &state->vcpis, next) {
3813 drm_dp_mst_put_port_malloc(pos->port);
3821 static void drm_dp_mst_destroy_state(struct drm_private_obj *obj,
3822 struct drm_private_state *state)
3824 struct drm_dp_mst_topology_state *mst_state =
3825 to_dp_mst_topology_state(state);
3826 struct drm_dp_vcpi_allocation *pos, *tmp;
3828 list_for_each_entry_safe(pos, tmp, &mst_state->vcpis, next) {
3829 /* We only keep references to ports with non-zero VCPIs */
3831 drm_dp_mst_put_port_malloc(pos->port);
3839 drm_dp_mst_atomic_check_topology_state(struct drm_dp_mst_topology_mgr *mgr,
3840 struct drm_dp_mst_topology_state *mst_state)
3842 struct drm_dp_vcpi_allocation *vcpi;
3843 int avail_slots = 63, payload_count = 0;
3845 list_for_each_entry(vcpi, &mst_state->vcpis, next) {
3846 /* Releasing VCPI is always OK, even if the port is gone */
3848 DRM_DEBUG_ATOMIC("[MST PORT:%p] releases all VCPI slots\n",
3853 DRM_DEBUG_ATOMIC("[MST PORT:%p] requires %d vcpi slots\n",
3854 vcpi->port, vcpi->vcpi);
3856 avail_slots -= vcpi->vcpi;
3857 if (avail_slots < 0) {
3858 DRM_DEBUG_ATOMIC("[MST PORT:%p] not enough VCPI slots in mst state %p (avail=%d)\n",
3859 vcpi->port, mst_state,
3860 avail_slots + vcpi->vcpi);
3864 if (++payload_count > mgr->max_payloads) {
3865 DRM_DEBUG_ATOMIC("[MST MGR:%p] state %p has too many payloads (max=%d)\n",
3866 mgr, mst_state, mgr->max_payloads);
3870 DRM_DEBUG_ATOMIC("[MST MGR:%p] mst state %p VCPI avail=%d used=%d\n",
3871 mgr, mst_state, avail_slots,
3878 * drm_dp_mst_atomic_check - Check that the new state of an MST topology in an
3879 * atomic update is valid
3880 * @state: Pointer to the new &struct drm_dp_mst_topology_state
3882 * Checks the given topology state for an atomic update to ensure that it's
3883 * valid. This includes checking whether there's enough bandwidth to support
3884 * the new VCPI allocations in the atomic update.
3886 * Any atomic drivers supporting DP MST must make sure to call this after
3887 * checking the rest of their state in their
3888 * &drm_mode_config_funcs.atomic_check() callback.
3891 * drm_dp_atomic_find_vcpi_slots()
3892 * drm_dp_atomic_release_vcpi_slots()
3896 * 0 if the new state is valid, negative error code otherwise.
3898 int drm_dp_mst_atomic_check(struct drm_atomic_state *state)
3900 struct drm_dp_mst_topology_mgr *mgr;
3901 struct drm_dp_mst_topology_state *mst_state;
3904 for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
3905 ret = drm_dp_mst_atomic_check_topology_state(mgr, mst_state);
3912 EXPORT_SYMBOL(drm_dp_mst_atomic_check);
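/*
 * A minimal sketch of wiring the check into a driver's
 * &drm_mode_config_funcs.atomic_check, run after the core helper
 * validation (hypothetical example_ naming).
 */
static int example_atomic_check(struct drm_device *dev,
				struct drm_atomic_state *state)
{
	int ret = drm_atomic_helper_check(dev, state);

	if (ret)
		return ret;

	return drm_dp_mst_atomic_check(state);
}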
3914 const struct drm_private_state_funcs drm_dp_mst_topology_state_funcs = {
3915 .atomic_duplicate_state = drm_dp_mst_duplicate_state,
3916 .atomic_destroy_state = drm_dp_mst_destroy_state,
3918 EXPORT_SYMBOL(drm_dp_mst_topology_state_funcs);
3921 * drm_atomic_get_mst_topology_state: get MST topology state
3923 * @state: global atomic state
3924 * @mgr: MST topology manager, also the private object in this case
3926 * This function wraps drm_atomic_get_priv_obj_state() passing in the MST atomic
3927 * state vtable so that the private object state returned is that of a MST
3928 * topology object. Also, drm_atomic_get_private_obj_state() expects the caller
3929 * to take care of the locking, so warn if the connection_mutex is not held.
3933 * The MST topology state or error pointer.
3935 struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_atomic_state *state,
3936 struct drm_dp_mst_topology_mgr *mgr)
3938 struct drm_device *dev = mgr->dev;
3940 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
3941 return to_dp_mst_topology_state(drm_atomic_get_private_obj_state(state, &mgr->base));
3943 EXPORT_SYMBOL(drm_atomic_get_mst_topology_state);
3946 * drm_dp_mst_topology_mgr_init - initialise a topology manager
3947 * @mgr: manager struct to initialise
3948 * @dev: device providing this structure - for i2c addition.
3949 * @aux: DP helper aux channel to talk to this device
3950 * @max_dpcd_transaction_bytes: hw specific DPCD transaction limit
3951 * @max_payloads: maximum number of payloads this GPU can source
3952 * @conn_base_id: the connector object ID the MST device is connected to.
3954 * Return 0 for success, or negative error code on failure
3956 int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
3957 struct drm_device *dev, struct drm_dp_aux *aux,
3958 int max_dpcd_transaction_bytes,
3959 int max_payloads, int conn_base_id)
3961 struct drm_dp_mst_topology_state *mst_state;
3963 mutex_init(&mgr->lock);
3964 mutex_init(&mgr->qlock);
3965 mutex_init(&mgr->payload_lock);
3966 mutex_init(&mgr->destroy_connector_lock);
3967 INIT_LIST_HEAD(&mgr->tx_msg_downq);
3968 INIT_LIST_HEAD(&mgr->destroy_connector_list);
3969 INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work);
3970 INIT_WORK(&mgr->tx_work, drm_dp_tx_work);
3971 INIT_WORK(&mgr->destroy_connector_work, drm_dp_destroy_connector_work);
3972 init_waitqueue_head(&mgr->tx_waitq);
3975 mgr->max_dpcd_transaction_bytes = max_dpcd_transaction_bytes;
3976 mgr->max_payloads = max_payloads;
3977 mgr->conn_base_id = conn_base_id;
3978 if (max_payloads + 1 > sizeof(mgr->payload_mask) * 8 ||
3979 max_payloads + 1 > sizeof(mgr->vcpi_mask) * 8)
3981 mgr->payloads = kcalloc(max_payloads, sizeof(struct drm_dp_payload), GFP_KERNEL);
3984 mgr->proposed_vcpis = kcalloc(max_payloads, sizeof(struct drm_dp_vcpi *), GFP_KERNEL);
3985 if (!mgr->proposed_vcpis)
3987 set_bit(0, &mgr->payload_mask);
3988 if (test_calc_pbn_mode() < 0)
3989 DRM_ERROR("MST PBN self-test failed\n");
3991 mst_state = kzalloc(sizeof(*mst_state), GFP_KERNEL);
3992 if (mst_state == NULL)
3995 mst_state->mgr = mgr;
3996 INIT_LIST_HEAD(&mst_state->vcpis);
3998 drm_atomic_private_obj_init(dev, &mgr->base,
4000 &drm_dp_mst_topology_state_funcs);
4004 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init);
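/*
 * A minimal init sketch: one manager per MST-capable output. The
 * 16-byte transaction size and 3-payload limit are illustrative
 * hardware-specific values, not recommendations.
 */
static int example_mst_init(struct drm_dp_mst_topology_mgr *mgr,
			    struct drm_device *dev, struct drm_dp_aux *aux,
			    int conn_base_id)
{
	return drm_dp_mst_topology_mgr_init(mgr, dev, aux, 16, 3,
					    conn_base_id);
}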
4007 * drm_dp_mst_topology_mgr_destroy() - destroy topology manager.
4008 * @mgr: manager to destroy
4010 void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
4012 drm_dp_mst_topology_mgr_set_mst(mgr, false);
4013 flush_work(&mgr->work);
4014 flush_work(&mgr->destroy_connector_work);
4015 mutex_lock(&mgr->payload_lock);
4016 kfree(mgr->payloads);
4017 mgr->payloads = NULL;
4018 kfree(mgr->proposed_vcpis);
4019 mgr->proposed_vcpis = NULL;
4020 mutex_unlock(&mgr->payload_lock);
4023 drm_atomic_private_obj_fini(&mgr->base);
4026 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_destroy);
4028 static bool remote_i2c_read_ok(const struct i2c_msg msgs[], int num)
4032 if (num - 1 > DP_REMOTE_I2C_READ_MAX_TRANSACTIONS)
4035 for (i = 0; i < num - 1; i++) {
4036 if (msgs[i].flags & I2C_M_RD ||
4037 msgs[i].len > 0xff)
4038 return false;
4041 return msgs[num - 1].flags & I2C_M_RD &&
4042 msgs[num - 1].len <= 0xff;
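/*
 * For reference, the one transaction shape accepted above looks like an
 * EDID-style read, i.e. zero or more short writes followed by exactly
 * one read of at most 0xff bytes (a sketch, not original source):
 *
 *	u8 offset = 0;
 *	u8 edid[128];
 *	struct i2c_msg msgs[] = {
 *		{ .addr = 0x50, .flags = 0, .len = 1, .buf = &offset },
 *		{ .addr = 0x50, .flags = I2C_M_RD, .len = 128, .buf = edid },
 *	};
 */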
4046 static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
4049 struct drm_dp_aux *aux = adapter->algo_data;
4050 struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port, aux);
4051 struct drm_dp_mst_branch *mstb;
4052 struct drm_dp_mst_topology_mgr *mgr = port->mgr;
4054 struct drm_dp_sideband_msg_req_body msg;
4055 struct drm_dp_sideband_msg_tx *txmsg = NULL;
4058 mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
4062 if (!remote_i2c_read_ok(msgs, num)) {
4063 DRM_DEBUG_KMS("Unsupported I2C transaction for MST device\n");
4068 memset(&msg, 0, sizeof(msg));
4069 msg.req_type = DP_REMOTE_I2C_READ;
4070 msg.u.i2c_read.num_transactions = num - 1;
4071 msg.u.i2c_read.port_number = port->port_num;
4072 for (i = 0; i < num - 1; i++) {
4073 msg.u.i2c_read.transactions[i].i2c_dev_id = msgs[i].addr;
4074 msg.u.i2c_read.transactions[i].num_bytes = msgs[i].len;
4075 msg.u.i2c_read.transactions[i].bytes = msgs[i].buf;
4076 msg.u.i2c_read.transactions[i].no_stop_bit = !(msgs[i].flags & I2C_M_STOP);
4078 msg.u.i2c_read.read_i2c_device_id = msgs[num - 1].addr;
4079 msg.u.i2c_read.num_bytes_read = msgs[num - 1].len;
4081 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
4088 drm_dp_encode_sideband_req(&msg, txmsg);
4090 drm_dp_queue_down_tx(mgr, txmsg);
4092 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
4095 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
4099 if (txmsg->reply.u.remote_i2c_read_ack.num_bytes != msgs[num - 1].len) {
4103 memcpy(msgs[num - 1].buf, txmsg->reply.u.remote_i2c_read_ack.bytes, msgs[num - 1].len);
4108 drm_dp_mst_topology_put_mstb(mstb);
4112 static u32 drm_dp_mst_i2c_functionality(struct i2c_adapter *adapter)
4114 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
4115 I2C_FUNC_SMBUS_READ_BLOCK_DATA |
4116 I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
4117 I2C_FUNC_10BIT_ADDR;
4120 static const struct i2c_algorithm drm_dp_mst_i2c_algo = {
4121 .functionality = drm_dp_mst_i2c_functionality,
4122 .master_xfer = drm_dp_mst_i2c_xfer,
4126 * drm_dp_mst_register_i2c_bus() - register an I2C adapter for I2C-over-AUX
4127 * @aux: DisplayPort AUX channel
4129 * Returns 0 on success or a negative error code on failure.
4131 static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux)
4133 aux->ddc.algo = &drm_dp_mst_i2c_algo;
4134 aux->ddc.algo_data = aux;
4135 aux->ddc.retries = 3;
4137 aux->ddc.class = I2C_CLASS_DDC;
4138 aux->ddc.owner = THIS_MODULE;
4139 aux->ddc.dev.parent = aux->dev;
4140 aux->ddc.dev.of_node = aux->dev->of_node;
4142 strlcpy(aux->ddc.name, aux->name ? aux->name : dev_name(aux->dev),
4143 sizeof(aux->ddc.name));
4145 return i2c_add_adapter(&aux->ddc);
4149 * drm_dp_mst_unregister_i2c_bus() - unregister an I2C-over-AUX adapter
4150 * @aux: DisplayPort AUX channel
4152 static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux)
4154 i2c_del_adapter(&aux->ddc);