/*
 * Copyright © 2014 Red Hat.
 *
 * Permission to use, copy, modify, distribute, and sell this software and its
 * documentation for any purpose is hereby granted without fee, provided that
 * the above copyright notice appear in all copies and that both that copyright
 * notice and this permission notice appear in supporting documentation, and
 * that the name of the copyright holders not be used in advertising or
 * publicity pertaining to distribution of the software without specific,
 * written prior permission. The copyright holders make no representations
 * about the suitability of this software for any purpose. It is provided "as
 * is" without express or implied warranty.
 *
 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
 * OF THIS SOFTWARE.
 */
#ifndef _DRM_DP_MST_HELPER_H_
#define _DRM_DP_MST_HELPER_H_

#include <linux/types.h>
#include <drm/display/drm_dp_helper.h>
#include <drm/drm_atomic.h>
#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
#include <linux/stackdepot.h>
#include <linux/timekeeping.h>

enum drm_dp_mst_topology_ref_type {
	DRM_DP_MST_TOPOLOGY_REF_GET,
	DRM_DP_MST_TOPOLOGY_REF_PUT,
};

struct drm_dp_mst_topology_ref_history {
	struct drm_dp_mst_topology_ref_entry {
		enum drm_dp_mst_topology_ref_type type;
		int count;
		ktime_t ts_nsec;
		depot_stack_handle_t backtrace;
	} *entries;
	int len;
};
#endif /* IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS) */

struct drm_dp_mst_branch;

/**
 * struct drm_dp_mst_port - MST port
 * @port_num: port number
 * @input: if this port is an input port. Protected by
 * &drm_dp_mst_topology_mgr.base.lock.
 * @mcs: message capability status - DP 1.2 spec. Protected by
 * &drm_dp_mst_topology_mgr.base.lock.
 * @ddps: DisplayPort Device Plug Status - DP 1.2. Protected by
 * &drm_dp_mst_topology_mgr.base.lock.
 * @pdt: Peer Device Type. Protected by
 * &drm_dp_mst_topology_mgr.base.lock.
 * @ldps: Legacy Device Plug Status. Protected by
 * &drm_dp_mst_topology_mgr.base.lock.
 * @dpcd_rev: DPCD revision of device on this port. Protected by
 * &drm_dp_mst_topology_mgr.base.lock.
 * @num_sdp_streams: Number of simultaneous streams. Protected by
 * &drm_dp_mst_topology_mgr.base.lock.
 * @num_sdp_stream_sinks: Number of stream sinks. Protected by
 * &drm_dp_mst_topology_mgr.base.lock.
 * @full_pbn: Max possible bandwidth for this port. Protected by
 * &drm_dp_mst_topology_mgr.base.lock.
 * @next: link to next port on this branch device
 * @aux: i2c aux transport to talk to device connected to this port, protected
 * by &drm_dp_mst_topology_mgr.base.lock.
 * @passthrough_aux: parent aux to which DSC pass-through requests should be
 * sent, only set if DSC pass-through is possible.
 * @parent: branch device parent of this port
 * @vcpi: Virtual Channel Payload info for this port.
 * @connector: DRM connector this port is connected to. Protected by
 * &drm_dp_mst_topology_mgr.base.lock.
 * @mgr: topology manager this port lives under.
 *
 * This structure represents an MST port endpoint on a device somewhere
 * in the MST topology.
 */
struct drm_dp_mst_port {
	/**
	 * @topology_kref: refcount for this port's lifetime in the topology,
	 * only the DP MST helpers should need to touch this
	 */
	struct kref topology_kref;

	/**
	 * @malloc_kref: refcount for the memory allocation containing this
	 * structure. See drm_dp_mst_get_port_malloc() and
	 * drm_dp_mst_put_port_malloc().
	 */
	struct kref malloc_kref;

#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
	/**
	 * @topology_ref_history: A history of each topology
	 * reference/dereference. See CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS.
	 */
	struct drm_dp_mst_topology_ref_history topology_ref_history;
#endif

	u8 port_num;
	bool input;
	bool mcs;
	bool ddps;
	u8 pdt;
	bool ldps;
	u8 dpcd_rev;
	u8 num_sdp_streams;
	u8 num_sdp_stream_sinks;
	int full_pbn;
	struct list_head next;
	/**
	 * @mstb: the branch device connected to this port, if there is one.
	 * This should be considered protected for reading by
	 * &drm_dp_mst_topology_mgr.lock. There are two exceptions to this:
	 * &drm_dp_mst_topology_mgr.up_req_work and
	 * &drm_dp_mst_topology_mgr.work, which do not grab
	 * &drm_dp_mst_topology_mgr.lock during reads but are the only
	 * updaters of this list and are protected from writing concurrently
	 * by &drm_dp_mst_topology_mgr.probe_lock.
	 */
	struct drm_dp_mst_branch *mstb;
	struct drm_dp_aux aux; /* i2c bus for this port? */
	struct drm_dp_aux *passthrough_aux;
	struct drm_dp_mst_branch *parent;

	struct drm_connector *connector;
	struct drm_dp_mst_topology_mgr *mgr;

	/**
	 * @cached_edid: for DP logical ports - make tiling work by ensuring
	 * that the EDID for all connectors is read immediately.
	 */
	const struct drm_edid *cached_edid;

	/**
	 * @fec_capable: bool indicating if FEC can be supported up to that
	 * point in the MST topology.
	 */
	bool fec_capable;
};

/* sideband msg header - not bit struct */
struct drm_dp_sideband_msg_hdr {

struct drm_dp_sideband_msg_rx {
	u8 curchunk_idx; /* chunk we are parsing now */
	u8 curlen; /* total length of the msg */
	struct drm_dp_sideband_msg_hdr initial_hdr;

/**
 * struct drm_dp_mst_branch - MST branch device.
 * @rad: Relative Address to talk to this branch device.
 * @lct: Link count total to talk to this branch device.
 * @num_ports: number of ports on the branch.
 * @port_parent: pointer to the port parent, NULL if toplevel.
 * @mgr: topology manager for this branch device.
 * @link_address_sent: if a link address message has been sent to this device yet.
 * @guid: GUID for DP 1.2 branch device. Ports under this branch can be
 * identified by port number.
 *
 * This structure represents an MST branch device; there is one
 * primary branch device at the root, along with any other branches connected
 * to downstream ports of parent branches.
 */
struct drm_dp_mst_branch {
	/**
	 * @topology_kref: refcount for this branch device's lifetime in the
	 * topology, only the DP MST helpers should need to touch this
	 */
	struct kref topology_kref;

	/**
	 * @malloc_kref: refcount for the memory allocation containing this
	 * structure. See drm_dp_mst_get_mstb_malloc() and
	 * drm_dp_mst_put_mstb_malloc().
	 */
	struct kref malloc_kref;

#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
	/**
	 * @topology_ref_history: A history of each topology
	 * reference/dereference. See CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS.
	 */
	struct drm_dp_mst_topology_ref_history topology_ref_history;
#endif

	/**
	 * @destroy_next: linked-list entry used by
	 * drm_dp_delayed_destroy_work()
	 */
	struct list_head destroy_next;

	u8 rad[8];
	u8 lct;
	int num_ports;

	/**
	 * @ports: the list of ports on this branch device. This should be
	 * considered protected for reading by &drm_dp_mst_topology_mgr.lock.
	 * There are two exceptions to this:
	 * &drm_dp_mst_topology_mgr.up_req_work and
	 * &drm_dp_mst_topology_mgr.work, which do not grab
	 * &drm_dp_mst_topology_mgr.lock during reads but are the only
	 * updaters of this list and are protected from updating the list
	 * concurrently by &drm_dp_mst_topology_mgr.probe_lock.
	 */
	struct list_head ports;

	struct drm_dp_mst_port *port_parent;
	struct drm_dp_mst_topology_mgr *mgr;

	bool link_address_sent;

	/* global unique identifier to identify branch devices */
	u8 guid[16];
};

struct drm_dp_nak_reply {

struct drm_dp_link_address_ack_reply {
	struct drm_dp_link_addr_reply_port {
		bool legacy_device_plug_status;
		u8 num_sdp_stream_sinks;

struct drm_dp_remote_dpcd_read_ack_reply {

struct drm_dp_remote_dpcd_write_ack_reply {

struct drm_dp_remote_dpcd_write_nak_reply {
	u8 bytes_written_before_failure;

struct drm_dp_remote_i2c_read_ack_reply {

struct drm_dp_remote_i2c_read_nak_reply {
	u8 i2c_nak_transaction;

struct drm_dp_remote_i2c_write_ack_reply {

struct drm_dp_query_stream_enc_status_ack_reply {
	/* Bit[23:16]- Stream Id */

	/* Bit[15]- Signed */

	/* Bit[10:8]- Stream Output Sink Type */
	bool unauthorizable_device_present;
	bool legacy_device_present;
	bool query_capable_device_present;

	/* Bit[12:11]- Stream Output CP Type */
	bool hdcp_1x_device_present;
	bool hdcp_2x_device_present;

	/* Bit[4]- Stream Authentication */

	/* Bit[3]- Stream Encryption */
	bool encryption_enabled;

	/* Bit[2]- Stream Repeater Function Present */
	bool repeater_present;

	/* Bit[1:0]- Stream State */

#define DRM_DP_MAX_SDP_STREAMS 16
struct drm_dp_allocate_payload {
	u8 number_sdp_streams;
	u8 sdp_stream_sink[DRM_DP_MAX_SDP_STREAMS];

struct drm_dp_allocate_payload_ack_reply {

struct drm_dp_connection_status_notify {
	bool legacy_device_plug_status;
	bool displayport_device_plug_status;
	bool message_capability_status;

struct drm_dp_remote_dpcd_read {

struct drm_dp_remote_dpcd_write {

#define DP_REMOTE_I2C_READ_MAX_TRANSACTIONS 4
struct drm_dp_remote_i2c_read {
	struct drm_dp_remote_i2c_read_tx {
		u8 i2c_transaction_delay;
	} transactions[DP_REMOTE_I2C_READ_MAX_TRANSACTIONS];
	u8 read_i2c_device_id;

struct drm_dp_remote_i2c_write {
	u8 write_i2c_device_id;

struct drm_dp_query_stream_enc_status {
	u8 client_id[7]; /* 56-bit nonce */
	bool valid_stream_event;
	u8 valid_stream_behavior;

/* this covers ENUM_RESOURCES, POWER_DOWN_PHY, POWER_UP_PHY */
struct drm_dp_port_number_req {

struct drm_dp_enum_path_resources_ack_reply {
	u16 full_payload_bw_number;
	u16 avail_payload_bw_number;

/* covers POWER_DOWN_PHY, POWER_UP_PHY */
struct drm_dp_port_number_rep {

struct drm_dp_query_payload {

struct drm_dp_resource_status_notify {

struct drm_dp_query_payload_ack_reply {

struct drm_dp_sideband_msg_req_body {
	struct drm_dp_connection_status_notify conn_stat;
	struct drm_dp_port_number_req port_num;
	struct drm_dp_resource_status_notify resource_stat;

	struct drm_dp_query_payload query_payload;
	struct drm_dp_allocate_payload allocate_payload;

	struct drm_dp_remote_dpcd_read dpcd_read;
	struct drm_dp_remote_dpcd_write dpcd_write;

	struct drm_dp_remote_i2c_read i2c_read;
	struct drm_dp_remote_i2c_write i2c_write;

	struct drm_dp_query_stream_enc_status enc_status;

struct drm_dp_sideband_msg_reply_body {
	struct drm_dp_nak_reply nak;
	struct drm_dp_link_address_ack_reply link_addr;
	struct drm_dp_port_number_rep port_number;

	struct drm_dp_enum_path_resources_ack_reply path_resources;
	struct drm_dp_allocate_payload_ack_reply allocate_payload;
	struct drm_dp_query_payload_ack_reply query_payload;

	struct drm_dp_remote_dpcd_read_ack_reply remote_dpcd_read_ack;
	struct drm_dp_remote_dpcd_write_ack_reply remote_dpcd_write_ack;
	struct drm_dp_remote_dpcd_write_nak_reply remote_dpcd_write_nack;

	struct drm_dp_remote_i2c_read_ack_reply remote_i2c_read_ack;
	struct drm_dp_remote_i2c_read_nak_reply remote_i2c_read_nack;
	struct drm_dp_remote_i2c_write_ack_reply remote_i2c_write_ack;

	struct drm_dp_query_stream_enc_status_ack_reply enc_status;

/* msg is queued to be put into a slot */
#define DRM_DP_SIDEBAND_TX_QUEUED 0
/* msg has started transmitting on a slot - still on msgq */
#define DRM_DP_SIDEBAND_TX_START_SEND 1
/* msg has finished transmitting on a slot - removed from msgq only in slot */
#define DRM_DP_SIDEBAND_TX_SENT 2
/* msg has received a response - removed from slot */
#define DRM_DP_SIDEBAND_TX_RX 3
#define DRM_DP_SIDEBAND_TX_TIMEOUT 4

struct drm_dp_sideband_msg_tx {
	struct drm_dp_mst_branch *dst;
	struct list_head next;

	struct drm_dp_sideband_msg_reply_body reply;

/* sideband msg handler */
struct drm_dp_mst_topology_mgr;
struct drm_dp_mst_topology_cbs {
	/* create a connector for a port */
	struct drm_connector *(*add_connector)(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, const char *path);

	/*
	 * Checks for any pending MST interrupts, passing them to MST core for
	 * processing, the same way an HPD IRQ pulse handler would do this.
	 * If provided, MST core calls this callback from a poll-waiting loop
	 * when waiting for MST down message replies. The driver is expected
	 * to guard against a race between this callback and the driver's HPD
	 * IRQ pulse handler.
	 */
	void (*poll_hpd_irq)(struct drm_dp_mst_topology_mgr *mgr);
};
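
/*
 * Illustrative sketch (not part of the original header): a driver providing
 * &drm_dp_mst_topology_cbs.poll_hpd_irq would typically read the sink's ESI
 * registers and hand them to the MST core, much like its HPD IRQ handler
 * does. "my_read_esi" stands in for a hypothetical driver helper reading
 * DP_SINK_COUNT_ESI and the following ESI bytes over AUX; acking the handled
 * events back to the sink is likewise driver-specific and omitted here.
 *
 *	static void my_poll_hpd_irq(struct drm_dp_mst_topology_mgr *mgr)
 *	{
 *		u8 esi[4] = {}, ack[4] = {};
 *		bool handled = false;
 *
 *		if (my_read_esi(mgr->aux, esi) < 0)
 *			return;
 *
 *		drm_dp_mst_hpd_irq_handle_event(mgr, esi, ack, &handled);
 *		if (handled)
 *			drm_dp_mst_hpd_irq_send_new_request(mgr);
 *	}
 */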

#define to_dp_mst_topology_state(x) container_of(x, struct drm_dp_mst_topology_state, base)

/**
 * struct drm_dp_mst_atomic_payload - Atomic state struct for an MST payload
 *
 * The primary atomic state structure for a given MST payload. Stores information like current
 * bandwidth allocation, intended action for this payload, etc.
 */
struct drm_dp_mst_atomic_payload {
	/** @port: The MST port assigned to this payload */
	struct drm_dp_mst_port *port;

	/**
	 * @vc_start_slot: The time slot that this payload starts on. Because payload start slots
	 * can't be determined ahead of time, the contents of this value are UNDEFINED at atomic
	 * check time. This shouldn't usually matter, as the start slot should never be relevant for
	 * atomic state computations.
	 *
	 * Since this value is determined at commit time instead of check time, this value is
	 * protected by the MST helpers ensuring that async commits operating on the given topology
	 * never run in parallel. In the event that a driver does need to read this value (e.g. to
	 * inform hardware of the starting timeslot for a payload), the driver may either:
	 *
	 * * Read this field during the atomic commit after
	 *   drm_dp_mst_atomic_wait_for_dependencies() has been called, which will ensure the
	 *   previous MST state's payload start slots have been copied over to the new state. Note
	 *   that a new start slot won't be assigned/removed from this payload until
	 *   drm_dp_add_payload_part1()/drm_dp_remove_payload() have been called.
	 * * Acquire the MST modesetting lock, and then wait for any pending MST-related commits to
	 *   get committed to hardware by calling drm_crtc_commit_wait() on each of the
	 *   &drm_crtc_commit structs in &drm_dp_mst_topology_state.commit_deps.
	 *
	 * If neither of the two above solutions suffices (e.g. the driver needs to read the start
	 * slot in the middle of an atomic commit without waiting for some reason), then drivers
	 * should cache this value themselves after changing payloads. A sketch of the first
	 * option follows this struct definition.
	 */
	int vc_start_slot;
	/** @vcpi: The Virtual Channel Payload Identifier */
	int vcpi;
	/**
	 * @time_slots: The number of timeslots allocated to this payload from the source DP Tx to
	 * the immediate downstream DP Rx
	 */
	int time_slots;
	/** @pbn: The payload bandwidth for this payload */
	int pbn;

	/** @delete: Whether or not we intend to delete this payload during this atomic commit */
	bool delete : 1;
	/** @dsc_enabled: Whether or not this payload has DSC enabled */
	bool dsc_enabled : 1;

	/** @next: The list node for this payload */
	struct list_head next;
};
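
/*
 * Illustrative sketch (not part of the original header) of the first option
 * described for @vc_start_slot above - reading the start slot from a
 * driver's atomic commit tail once the MST dependencies have been waited on:
 *
 *	drm_dp_mst_atomic_wait_for_dependencies(state);
 *
 *	mst_state = drm_atomic_get_new_mst_topology_state(state, mgr);
 *	payload = drm_atomic_get_mst_payload_state(mst_state, port);
 *
 *	// Undefined until drm_dp_add_payload_part1() has run for this
 *	// payload.
 *	start_slot = payload->vc_start_slot;
 */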

/**
 * struct drm_dp_mst_topology_state - DisplayPort MST topology atomic state
 *
 * This struct represents the atomic state of the toplevel DisplayPort MST manager
 */
struct drm_dp_mst_topology_state {
	/** @base: Base private state for atomic */
	struct drm_private_state base;

	/** @mgr: The topology manager */
	struct drm_dp_mst_topology_mgr *mgr;

	/**
	 * @pending_crtc_mask: A bitmask of all CRTCs this topology state touches, drivers may
	 * modify this to add additional dependencies if needed.
	 */
	u32 pending_crtc_mask;
	/**
	 * @commit_deps: A list of all CRTC commits affecting this topology, this field isn't
	 * populated until drm_dp_mst_atomic_wait_for_dependencies() is called.
	 */
	struct drm_crtc_commit **commit_deps;
	/** @num_commit_deps: The number of CRTC commits in @commit_deps */
	size_t num_commit_deps;

	/** @payload_mask: A bitmask of allocated VCPIs, used for VCPI assignments */
	u32 payload_mask;
	/** @payloads: The list of payloads being created/destroyed in this state */
	struct list_head payloads;

	/** @total_avail_slots: The total number of slots this topology can handle (63 or 64) */
	u8 total_avail_slots;
	/** @start_slot: The first usable time slot in this topology (1 or 0) */
	u8 start_slot;

	/**
	 * @pbn_div: The current PBN divisor for this topology. The driver is expected to fill this
	 * out itself.
	 */
	int pbn_div;
};

#define to_dp_mst_topology_mgr(x) container_of(x, struct drm_dp_mst_topology_mgr, base)

/**
 * struct drm_dp_mst_topology_mgr - DisplayPort MST manager
 *
 * This struct represents the toplevel DisplayPort MST topology manager.
 * There should be one instance of this for every MST capable DP connector
 * on the GPU.
 */
struct drm_dp_mst_topology_mgr {
	/**
	 * @base: Base private object for atomic
	 */
	struct drm_private_obj base;

	/**
	 * @dev: device pointer for adding i2c devices etc.
	 */
	struct drm_device *dev;
	/**
	 * @cbs: callbacks for connector addition and destruction.
	 */
	const struct drm_dp_mst_topology_cbs *cbs;
	/**
	 * @max_dpcd_transaction_bytes: maximum number of bytes to read/write
	 * in one go.
	 */
	int max_dpcd_transaction_bytes;
	/**
	 * @aux: AUX channel for the DP MST connector this topology mgr is
	 * controlling.
	 */
	struct drm_dp_aux *aux;
	/**
	 * @max_payloads: maximum number of payloads the GPU can generate.
	 */
	int max_payloads;
	/**
	 * @conn_base_id: DRM connector ID this mgr is connected to. Only used
	 * to build the MST connector path value.
	 */
	int conn_base_id;

	/**
	 * @up_req_recv: Message receiver state for up requests.
	 */
	struct drm_dp_sideband_msg_rx up_req_recv;

	/**
	 * @down_rep_recv: Message receiver state for replies to down
	 * requests.
	 */
	struct drm_dp_sideband_msg_rx down_rep_recv;

	/**
	 * @lock: protects @mst_state, @mst_primary, @dpcd, and
	 * @payload_id_table_cleared.
	 */
	struct mutex lock;

	/**
	 * @probe_lock: Prevents @work and @up_req_work, the only writers of
	 * &drm_dp_mst_port.mstb and &drm_dp_mst_branch.ports, from racing
	 * while they update the topology.
	 */
	struct mutex probe_lock;

	/**
	 * @mst_state: If this manager is enabled for an MST capable port.
	 * False if no MST sink/branch devices are connected.
	 */
	bool mst_state : 1;

	/**
	 * @payload_id_table_cleared: Whether or not we've cleared the payload
	 * ID table for @mst_primary. Protected by @lock.
	 */
	bool payload_id_table_cleared : 1;

	/**
	 * @payload_count: The number of currently active payloads in hardware. This value is only
	 * intended to be used internally by MST helpers for payload tracking, and is only safe to
	 * read/write from the atomic commit (not check) context.
	 */
	u8 payload_count;

	/**
	 * @next_start_slot: The starting timeslot to use for new VC payloads. This value is used
	 * internally by MST helpers for payload tracking, and is only safe to read/write from the
	 * atomic commit (not check) context.
	 */
	u8 next_start_slot;

	/**
	 * @mst_primary: Pointer to the primary/first branch device.
	 */
	struct drm_dp_mst_branch *mst_primary;

	/**
	 * @dpcd: Cache of DPCD for primary port.
	 */
	u8 dpcd[DP_RECEIVER_CAP_SIZE];
	/**
	 * @sink_count: Sink count from DEVICE_SERVICE_IRQ_VECTOR_ESI0.
	 */
	u8 sink_count;

	/**
	 * @funcs: Atomic helper callbacks
	 */
	const struct drm_private_state_funcs *funcs;

	/**
	 * @qlock: protects @tx_msg_downq and &drm_dp_sideband_msg_tx.state
	 */
	struct mutex qlock;

	/**
	 * @tx_msg_downq: List of pending down requests
	 */
	struct list_head tx_msg_downq;

	/**
	 * @tx_waitq: Wait to queue stall for the tx worker.
	 */
	wait_queue_head_t tx_waitq;
	/**
	 * @work: Probe work.
	 */
	struct work_struct work;
	/**
	 * @tx_work: Sideband transmit worker. This can nest within the main
	 * @work worker for each transaction @work launches.
	 */
	struct work_struct tx_work;

	/**
	 * @destroy_port_list: List of connectors to be destroyed.
	 */
	struct list_head destroy_port_list;
	/**
	 * @destroy_branch_device_list: List of branch devices to be
	 * destroyed.
	 */
	struct list_head destroy_branch_device_list;
	/**
	 * @delayed_destroy_lock: Protects @destroy_port_list and
	 * @destroy_branch_device_list.
	 */
	struct mutex delayed_destroy_lock;

	/**
	 * @delayed_destroy_wq: Workqueue used for delayed_destroy_work items.
	 * A dedicated WQ makes it possible to drain any requeued work items
	 * on it.
	 */
	struct workqueue_struct *delayed_destroy_wq;

	/**
	 * @delayed_destroy_work: Work item to destroy MST port and branch
	 * devices, needed to avoid locking inversion.
	 */
	struct work_struct delayed_destroy_work;

	/**
	 * @up_req_list: List of pending up requests from the topology that
	 * need to be processed, in chronological order.
	 */
	struct list_head up_req_list;
	/**
	 * @up_req_lock: Protects @up_req_list
	 */
	struct mutex up_req_lock;
	/**
	 * @up_req_work: Work item to process up requests received from the
	 * topology. Needed to avoid blocking hotplug handling and sideband
	 * transmissions.
	 */
	struct work_struct up_req_work;

#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
	/**
	 * @topology_ref_history_lock: protects
	 * &drm_dp_mst_port.topology_ref_history and
	 * &drm_dp_mst_branch.topology_ref_history.
	 */
	struct mutex topology_ref_history_lock;
#endif
};

int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_device *dev, struct drm_dp_aux *aux,
				 int max_dpcd_transaction_bytes,
				 int max_payloads, int conn_base_id);

void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr);

bool drm_dp_read_mst_cap(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_CAP_SIZE]);
int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state);
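
/*
 * Illustrative sketch (not part of the original header): a driver typically
 * initializes the manager once when creating the connector, then enables MST
 * when a capable sink shows up. The "my_" names and the transaction/payload
 * limits are hypothetical:
 *
 *	ret = drm_dp_mst_topology_mgr_init(&my_conn->mst_mgr, dev,
 *					   &my_conn->aux, 16, 6,
 *					   my_conn->base.base.id);
 *
 *	// Later, on hotplug, with the sink's DPCD cached in dpcd[]:
 *	if (drm_dp_read_mst_cap(&my_conn->aux, dpcd))
 *		ret = drm_dp_mst_topology_mgr_set_mst(&my_conn->mst_mgr, true);
 */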

int drm_dp_mst_hpd_irq_handle_event(struct drm_dp_mst_topology_mgr *mgr,
				    const u8 *esi, u8 *ack, bool *handled);
void drm_dp_mst_hpd_irq_send_new_request(struct drm_dp_mst_topology_mgr *mgr);

int
drm_dp_mst_detect_port(struct drm_connector *connector,
		       struct drm_modeset_acquire_ctx *ctx,
		       struct drm_dp_mst_topology_mgr *mgr,
		       struct drm_dp_mst_port *port);

const struct drm_edid *drm_dp_mst_edid_read(struct drm_connector *connector,
					    struct drm_dp_mst_topology_mgr *mgr,
					    struct drm_dp_mst_port *port);
struct edid *drm_dp_mst_get_edid(struct drm_connector *connector,
				 struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_mst_port *port);

int drm_dp_get_vc_payload_bw(const struct drm_dp_mst_topology_mgr *mgr,
			     int link_rate, int link_lane_count);

int drm_dp_calc_pbn_mode(int clock, int bpp, bool dsc);
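
/*
 * Illustrative sketch (not part of the original header): computing the PBN
 * needed by a mode during atomic check - e.g. 1080p60 (148500 kHz pixel
 * clock) at 24 bpp, no DSC:
 *
 *	pbn = drm_dp_calc_pbn_mode(148500, 24, false);
 *
 * The result is what drm_dp_atomic_find_time_slots(), declared below, takes
 * as its @pbn argument.
 */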

void drm_dp_mst_update_slots(struct drm_dp_mst_topology_state *mst_state, uint8_t link_encoding_cap);

int drm_dp_add_payload_part1(struct drm_dp_mst_topology_mgr *mgr,
			     struct drm_dp_mst_topology_state *mst_state,
			     struct drm_dp_mst_atomic_payload *payload);
int drm_dp_add_payload_part2(struct drm_dp_mst_topology_mgr *mgr,
			     struct drm_atomic_state *state,
			     struct drm_dp_mst_atomic_payload *payload);
void drm_dp_remove_payload(struct drm_dp_mst_topology_mgr *mgr,
			   struct drm_dp_mst_topology_state *mst_state,
			   const struct drm_dp_mst_atomic_payload *old_payload,
			   struct drm_dp_mst_atomic_payload *new_payload);
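
/*
 * Illustrative sketch (not part of the original header) of how the payload
 * helpers above are sequenced in a driver's atomic commit: part 1 writes the
 * payload allocation before the stream is enabled, part 2 completes it once
 * the stream is running and the ACT is handled, and drm_dp_remove_payload()
 * undoes the allocation on disable:
 *
 *	// Enable:
 *	drm_dp_add_payload_part1(mgr, mst_state, payload);
 *	// ...driver enables the stream...
 *	drm_dp_check_act_status(mgr);
 *	drm_dp_add_payload_part2(mgr, state, payload);
 *
 *	// Disable:
 *	drm_dp_remove_payload(mgr, mst_state, old_payload, new_payload);
 */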

int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr);

void drm_dp_mst_dump_topology(struct seq_file *m,
			      struct drm_dp_mst_topology_mgr *mgr);

void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr);
int __must_check
drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr,
			       bool sync);

ssize_t drm_dp_mst_dpcd_read(struct drm_dp_aux *aux,
			     unsigned int offset, void *buffer, size_t size);
ssize_t drm_dp_mst_dpcd_write(struct drm_dp_aux *aux,
			      unsigned int offset, void *buffer, size_t size);

int drm_dp_mst_connector_late_register(struct drm_connector *connector,
				       struct drm_dp_mst_port *port);
void drm_dp_mst_connector_early_unregister(struct drm_connector *connector,
					   struct drm_dp_mst_port *port);

struct drm_dp_mst_topology_state *
drm_atomic_get_mst_topology_state(struct drm_atomic_state *state,
				  struct drm_dp_mst_topology_mgr *mgr);
struct drm_dp_mst_topology_state *
drm_atomic_get_old_mst_topology_state(struct drm_atomic_state *state,
				      struct drm_dp_mst_topology_mgr *mgr);
struct drm_dp_mst_topology_state *
drm_atomic_get_new_mst_topology_state(struct drm_atomic_state *state,
				      struct drm_dp_mst_topology_mgr *mgr);
struct drm_dp_mst_atomic_payload *
drm_atomic_get_mst_payload_state(struct drm_dp_mst_topology_state *state,
				 struct drm_dp_mst_port *port);
int __must_check
drm_dp_atomic_find_time_slots(struct drm_atomic_state *state,
			      struct drm_dp_mst_topology_mgr *mgr,
			      struct drm_dp_mst_port *port, int pbn);
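
/*
 * Illustrative sketch (not part of the original header): reserving time slots
 * from an encoder's atomic_check; "adjusted_mode" and "bpp" come from the
 * driver's CRTC state:
 *
 *	mst_state = drm_atomic_get_mst_topology_state(state, mgr);
 *	if (IS_ERR(mst_state))
 *		return PTR_ERR(mst_state);
 *
 *	pbn = drm_dp_calc_pbn_mode(adjusted_mode->clock, bpp, false);
 *	slots = drm_dp_atomic_find_time_slots(state, mgr, port, pbn);
 *	if (slots < 0)
 *		return slots;
 *
 * The matching drm_dp_atomic_release_time_slots() call, declared below,
 * belongs in the disable path of the same check.
 */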
int drm_dp_mst_atomic_enable_dsc(struct drm_atomic_state *state,
				 struct drm_dp_mst_port *port,
				 int pbn, bool enable);
int
drm_dp_mst_add_affected_dsc_crtcs(struct drm_atomic_state *state,
				  struct drm_dp_mst_topology_mgr *mgr);
int __must_check
drm_dp_atomic_release_time_slots(struct drm_atomic_state *state,
				 struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_mst_port *port);
void drm_dp_mst_atomic_wait_for_dependencies(struct drm_atomic_state *state);
int __must_check drm_dp_mst_atomic_setup_commit(struct drm_atomic_state *state);
int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_mst_port *port, bool power_up);
int drm_dp_send_query_stream_enc_status(struct drm_dp_mst_topology_mgr *mgr,
		struct drm_dp_mst_port *port,
		struct drm_dp_query_stream_enc_status_ack_reply *status);
int __must_check drm_dp_mst_atomic_check(struct drm_atomic_state *state);
int __must_check drm_dp_mst_root_conn_atomic_check(struct drm_connector_state *new_conn_state,
						   struct drm_dp_mst_topology_mgr *mgr);

void drm_dp_mst_get_port_malloc(struct drm_dp_mst_port *port);
void drm_dp_mst_put_port_malloc(struct drm_dp_mst_port *port);
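
/*
 * Illustrative sketch (not part of the original header): the malloc
 * references above keep the memory backing a port alive (not its presence in
 * the topology), which matters when stashing a port pointer for deferred
 * work; "my_work" is hypothetical:
 *
 *	drm_dp_mst_get_port_malloc(port);
 *	my_work->port = port;
 *
 *	// ...later, in the work function, when done with the pointer:
 *	drm_dp_mst_put_port_malloc(my_work->port);
 */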

struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port);

static inline struct drm_dp_mst_topology_state *
to_drm_dp_mst_topology_state(struct drm_private_state *state)
{
	return container_of(state, struct drm_dp_mst_topology_state, base);
}

extern const struct drm_private_state_funcs drm_dp_mst_topology_state_funcs;

/**
 * __drm_dp_mst_state_iter_get - private atomic state iterator function for
 * macro-internal use
 * @state: &struct drm_atomic_state pointer
 * @mgr: pointer to the &struct drm_dp_mst_topology_mgr iteration cursor
 * @old_state: optional pointer to the old &struct drm_dp_mst_topology_state
 * iteration cursor
 * @new_state: optional pointer to the new &struct drm_dp_mst_topology_state
 * iteration cursor
 * @i: int iteration cursor, for macro-internal use
 *
 * Used by for_each_oldnew_mst_mgr_in_state(),
 * for_each_old_mst_mgr_in_state(), and for_each_new_mst_mgr_in_state(). Don't
 * call this directly.
 *
 * Returns:
 * True if the current &struct drm_private_obj is a &struct
 * drm_dp_mst_topology_mgr, false otherwise.
 */
static inline bool
__drm_dp_mst_state_iter_get(struct drm_atomic_state *state,
			    struct drm_dp_mst_topology_mgr **mgr,
			    struct drm_dp_mst_topology_state **old_state,
			    struct drm_dp_mst_topology_state **new_state,
			    int i)
{
	struct __drm_private_objs_state *objs_state = &state->private_objs[i];

	if (objs_state->ptr->funcs != &drm_dp_mst_topology_state_funcs)
		return false;

	*mgr = to_dp_mst_topology_mgr(objs_state->ptr);
	if (old_state)
		*old_state = to_dp_mst_topology_state(objs_state->old_state);
	if (new_state)
		*new_state = to_dp_mst_topology_state(objs_state->new_state);

	return true;
}

/**
 * for_each_oldnew_mst_mgr_in_state - iterate over all DP MST topology
 * managers in an atomic update
 * @__state: &struct drm_atomic_state pointer
 * @mgr: &struct drm_dp_mst_topology_mgr iteration cursor
 * @old_state: &struct drm_dp_mst_topology_state iteration cursor for the old
 * state
 * @new_state: &struct drm_dp_mst_topology_state iteration cursor for the new
 * state
 * @__i: int iteration cursor, for macro-internal use
 *
 * This iterates over all DRM DP MST topology managers in an atomic update,
 * tracking both old and new state. This is useful in places where the state
 * delta needs to be considered, for example in atomic check functions.
 */
#define for_each_oldnew_mst_mgr_in_state(__state, mgr, old_state, new_state, __i) \
	for ((__i) = 0; (__i) < (__state)->num_private_objs; (__i)++) \
		for_each_if(__drm_dp_mst_state_iter_get((__state), &(mgr), &(old_state), &(new_state), (__i)))
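
/*
 * Illustrative sketch (not part of the original header): walking every MST
 * manager touched by an atomic update, e.g. from a driver's atomic check:
 *
 *	struct drm_dp_mst_topology_mgr *mgr;
 *	struct drm_dp_mst_topology_state *old_mst_state, *new_mst_state;
 *	int i;
 *
 *	for_each_oldnew_mst_mgr_in_state(state, mgr, old_mst_state,
 *					 new_mst_state, i) {
 *		// Diff old_mst_state->payloads against
 *		// new_mst_state->payloads here.
 *	}
 */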

/**
 * for_each_old_mst_mgr_in_state - iterate over all DP MST topology managers
 * in an atomic update
 * @__state: &struct drm_atomic_state pointer
 * @mgr: &struct drm_dp_mst_topology_mgr iteration cursor
 * @old_state: &struct drm_dp_mst_topology_state iteration cursor for the old
 * state
 * @__i: int iteration cursor, for macro-internal use
 *
 * This iterates over all DRM DP MST topology managers in an atomic update,
 * tracking only the old state. This is useful in disable functions, where we
 * need the old state the hardware is still in.
 */
#define for_each_old_mst_mgr_in_state(__state, mgr, old_state, __i) \
	for ((__i) = 0; (__i) < (__state)->num_private_objs; (__i)++) \
		for_each_if(__drm_dp_mst_state_iter_get((__state), &(mgr), &(old_state), NULL, (__i)))

/**
 * for_each_new_mst_mgr_in_state - iterate over all DP MST topology managers
 * in an atomic update
 * @__state: &struct drm_atomic_state pointer
 * @mgr: &struct drm_dp_mst_topology_mgr iteration cursor
 * @new_state: &struct drm_dp_mst_topology_state iteration cursor for the new
 * state
 * @__i: int iteration cursor, for macro-internal use
 *
 * This iterates over all DRM DP MST topology managers in an atomic update,
 * tracking only the new state. This is useful in enable functions, where we
 * need the new state the hardware should be in when the atomic commit
 * operation has completed.
 */
#define for_each_new_mst_mgr_in_state(__state, mgr, new_state, __i) \
	for ((__i) = 0; (__i) < (__state)->num_private_objs; (__i)++) \
		for_each_if(__drm_dp_mst_state_iter_get((__state), &(mgr), NULL, &(new_state), (__i)))

#endif /* _DRM_DP_MST_HELPER_H_ */