2 * Copyright 2012-15 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
26 #include <drm/display/drm_dp_helper.h>
27 #include <drm/display/drm_dp_mst_helper.h>
28 #include <drm/drm_atomic.h>
29 #include <drm/drm_atomic_helper.h>
30 #include "dm_services.h"
32 #include "amdgpu_dm.h"
33 #include "amdgpu_dm_mst_types.h"
35 #ifdef CONFIG_DRM_AMD_DC_HDCP
36 #include "amdgpu_dm_hdcp.h"
40 #include "dm_helpers.h"
42 #include "ddc_service_types.h"
43 #include "dpcd_defs.h"
46 #if defined(CONFIG_DEBUG_FS)
47 #include "amdgpu_dm_debugfs.h"
50 #include "dc/dcn20/dcn20_resource.h"
51 bool is_timing_changed(struct dc_stream_state *cur_stream,
52 struct dc_stream_state *new_stream);
53 #define PEAK_FACTOR_X1000 1006
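/* 1006/1000 = 1.006: the margin factor used in DP MST PBN conversions
 * (the same margin drm_dp_calc_pbn_mode() applies), used below when
 * converting between link bandwidth in kbps and PBN.
 */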
55 static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
56 struct drm_dp_aux_msg *msg)
59 struct aux_payload payload;
60 enum aux_return_code_type operation_result;
61 struct amdgpu_device *adev;
62 struct ddc_service *ddc;
64 if (WARN_ON(msg->size > 16))
67 payload.address = msg->address;
68 payload.data = msg->buffer;
69 payload.length = msg->size;
70 payload.reply = &msg->reply;
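/* Decode the DRM AUX request flags: the transfer is I2C-over-AUX when the
 * native bit is clear, a write when the read bit is clear, and the MOT and
 * write-status-update flags are carried over as-is.
 */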
71 payload.i2c_over_aux = (msg->request & DP_AUX_NATIVE_WRITE) == 0;
72 payload.write = (msg->request & DP_AUX_I2C_READ) == 0;
73 payload.mot = (msg->request & DP_AUX_I2C_MOT) != 0;
74 payload.write_status_update =
75 (msg->request & DP_AUX_I2C_WRITE_STATUS_UPDATE) != 0;
76 payload.defer_delay = 0;
78 result = dc_link_aux_transfer_raw(TO_DM_AUX(aux)->ddc_service, &payload,
82 * Workaround for certain Intel platforms where HPD unexpectedly pulls low
83 * during the 1st sideband message transaction and AUX_RET_ERROR_HPD_DISCON
84 * is returned. The AUX transaction actually succeeds in this case, so bypass the error
86 ddc = TO_DM_AUX(aux)->ddc_service;
87 adev = ddc->ctx->driver_context;
88 if (adev->dm.aux_hpd_discon_quirk) {
89 if (msg->address == DP_SIDEBAND_MSG_DOWN_REQ_BASE &&
90 operation_result == AUX_RET_ERROR_HPD_DISCON) {
92 operation_result = AUX_RET_SUCCESS;
96 if (payload.write && result >= 0)
100 switch (operation_result) {
101 case AUX_RET_SUCCESS:
103 case AUX_RET_ERROR_HPD_DISCON:
104 case AUX_RET_ERROR_UNKNOWN:
105 case AUX_RET_ERROR_INVALID_OPERATION:
106 case AUX_RET_ERROR_PROTOCOL_ERROR:
109 case AUX_RET_ERROR_INVALID_REPLY:
110 case AUX_RET_ERROR_ENGINE_ACQUIRE:
113 case AUX_RET_ERROR_TIMEOUT:
122 dm_dp_mst_connector_destroy(struct drm_connector *connector)
124 struct amdgpu_dm_connector *aconnector =
125 to_amdgpu_dm_connector(connector);
127 if (aconnector->dc_sink) {
128 dc_link_remove_remote_sink(aconnector->dc_link,
129 aconnector->dc_sink);
130 dc_sink_release(aconnector->dc_sink);
133 kfree(aconnector->edid);
135 drm_connector_cleanup(connector);
136 drm_dp_mst_put_port_malloc(aconnector->mst_output_port);
141 amdgpu_dm_mst_connector_late_register(struct drm_connector *connector)
143 struct amdgpu_dm_connector *amdgpu_dm_connector =
144 to_amdgpu_dm_connector(connector);
147 r = drm_dp_mst_connector_late_register(connector,
148 amdgpu_dm_connector->mst_output_port);
152 #if defined(CONFIG_DEBUG_FS)
153 connector_debugfs_init(amdgpu_dm_connector);
160 amdgpu_dm_mst_connector_early_unregister(struct drm_connector *connector)
162 struct amdgpu_dm_connector *aconnector =
163 to_amdgpu_dm_connector(connector);
164 struct drm_dp_mst_port *port = aconnector->mst_output_port;
165 struct amdgpu_dm_connector *root = aconnector->mst_root;
166 struct dc_link *dc_link = aconnector->dc_link;
167 struct dc_sink *dc_sink = aconnector->dc_sink;
169 drm_dp_mst_connector_early_unregister(connector, port);
172 * Release the dc_sink for a connector whose attached port is
173 * no longer in the MST topology
175 drm_modeset_lock(&root->mst_mgr.base.lock, NULL);
177 if (dc_link->sink_count)
178 dc_link_remove_remote_sink(dc_link, dc_sink);
180 DC_LOG_MST("DM_MST: remove remote sink 0x%p, %d remaining\n",
181 dc_sink, dc_link->sink_count);
183 dc_sink_release(dc_sink);
184 aconnector->dc_sink = NULL;
185 aconnector->edid = NULL;
188 aconnector->mst_status = MST_STATUS_DEFAULT;
189 drm_modeset_unlock(&root->mst_mgr.base.lock);
192 static const struct drm_connector_funcs dm_dp_mst_connector_funcs = {
193 .fill_modes = drm_helper_probe_single_connector_modes,
194 .destroy = dm_dp_mst_connector_destroy,
195 .reset = amdgpu_dm_connector_funcs_reset,
196 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
197 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
198 .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
199 .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
200 .late_register = amdgpu_dm_mst_connector_late_register,
201 .early_unregister = amdgpu_dm_mst_connector_early_unregister,
204 #if defined(CONFIG_DRM_AMD_DC_DCN)
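/* Heuristic (based on the checks below): a branch device with ID 0x90CC24
 * (Synaptics), DPCD revision 1.2/1.4 and more than one sink is assumed to
 * need the DSC-caps-from-the-hub workaround described in
 * validate_dsc_caps_on_connector().
 */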
205 bool needs_dsc_aux_workaround(struct dc_link *link)
207 if (link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 &&
208 (link->dpcd_caps.dpcd_rev.raw == DPCD_REV_14 || link->dpcd_caps.dpcd_rev.raw == DPCD_REV_12) &&
209 link->dpcd_caps.sink_count.bits.SINK_COUNT >= 2)
215 bool is_synaptics_cascaded_panamera(struct dc_link *link, struct drm_dp_mst_port *port)
217 u8 branch_vendor_data[4] = { 0 }; // Vendor data 0x50C ~ 0x50F
219 if (drm_dp_dpcd_read(port->mgr->aux, DP_BRANCH_VENDOR_SPECIFIC_START, &branch_vendor_data, 4) == 4) {
220 if (link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 &&
221 IS_SYNAPTICS_CASCADED_PANAMERA(link->dpcd_caps.branch_dev_name, branch_vendor_data)) {
222 DRM_INFO("Synaptics Cascaded MST hub\n");
230 static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnector)
232 struct dc_sink *dc_sink = aconnector->dc_sink;
233 struct drm_dp_mst_port *port = aconnector->mst_output_port;
234 u8 dsc_caps[16] = { 0 };
235 u8 dsc_branch_dec_caps_raw[3] = { 0 }; // DSC branch decoder caps 0xA0 ~ 0xA2
236 u8 *dsc_branch_dec_caps = NULL;
238 aconnector->dsc_aux = drm_dp_mst_dsc_aux_for_port(port);
241 * drm_dp_mst_dsc_aux_for_port() will return NULL for certain configs
242 * because it only checks the DSC/FEC caps of the "port variable" and not the dock
244 * This case will return NULL: a DSC-capable MST dock connected to a non-FEC/DSC-capable display
246 * Workaround: explicitly check the use case above and use the MST dock's aux as dsc_aux
249 if (!aconnector->dsc_aux && !port->parent->port_parent &&
250 needs_dsc_aux_workaround(aconnector->dc_link))
251 aconnector->dsc_aux = &aconnector->mst_root->dm_dp_aux.aux;
253 /* synaptics cascaded MST hub case */
254 if (!aconnector->dsc_aux && is_synaptics_cascaded_panamera(aconnector->dc_link, port))
255 aconnector->dsc_aux = port->mgr->aux;
257 if (!aconnector->dsc_aux)
260 if (drm_dp_dpcd_read(aconnector->dsc_aux, DP_DSC_SUPPORT, dsc_caps, 16) < 0)
263 if (drm_dp_dpcd_read(aconnector->dsc_aux,
264 DP_DSC_BRANCH_OVERALL_THROUGHPUT_0, dsc_branch_dec_caps_raw, 3) == 3)
265 dsc_branch_dec_caps = dsc_branch_dec_caps_raw;
267 if (!dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
268 dsc_caps, dsc_branch_dec_caps,
269 &dc_sink->dsc_caps.dsc_dec_caps))
275 static bool retrieve_downstream_port_device(struct amdgpu_dm_connector *aconnector)
277 union dp_downstream_port_present ds_port_present;
279 if (!aconnector->dsc_aux)
282 if (drm_dp_dpcd_read(aconnector->dsc_aux, DP_DOWNSTREAMPORT_PRESENT, &ds_port_present, 1) < 0) {
283 DRM_INFO("Failed to read downstream_port_present 0x05 from DFP of branch device\n");
287 aconnector->mst_downstream_port_present = ds_port_present;
288 DRM_INFO("Downstream port present %d, type %d\n",
289 ds_port_present.fields.PORT_PRESENT, ds_port_present.fields.PORT_TYPE);
295 static int dm_dp_mst_get_modes(struct drm_connector *connector)
297 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
301 return drm_add_edid_modes(connector, NULL);
303 if (!aconnector->edid) {
305 edid = drm_dp_mst_get_edid(connector, &aconnector->mst_root->mst_mgr, aconnector->mst_output_port);
308 amdgpu_dm_set_mst_status(&aconnector->mst_status,
309 MST_REMOTE_EDID, false);
311 drm_connector_update_edid_property(
315 DRM_DEBUG_KMS("Can't get EDID of %s. Add default remote sink.", connector->name);
316 if (!aconnector->dc_sink) {
317 struct dc_sink *dc_sink;
318 struct dc_sink_init_data init_params = {
319 .link = aconnector->dc_link,
320 .sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST };
322 dc_sink = dc_link_add_remote_sink(
329 DRM_ERROR("Unable to add a remote sink\n");
333 DC_LOG_MST("DM_MST: add remote sink 0x%p, %d remaining\n",
334 dc_sink, aconnector->dc_link->sink_count);
336 dc_sink->priv = aconnector;
337 aconnector->dc_sink = dc_sink;
343 aconnector->edid = edid;
344 amdgpu_dm_set_mst_status(&aconnector->mst_status,
345 MST_REMOTE_EDID, true);
348 if (aconnector->dc_sink && aconnector->dc_sink->sink_signal == SIGNAL_TYPE_VIRTUAL) {
349 dc_sink_release(aconnector->dc_sink);
350 aconnector->dc_sink = NULL;
353 if (!aconnector->dc_sink) {
354 struct dc_sink *dc_sink;
355 struct dc_sink_init_data init_params = {
356 .link = aconnector->dc_link,
357 .sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST };
358 dc_sink = dc_link_add_remote_sink(
360 (uint8_t *)aconnector->edid,
361 (aconnector->edid->extensions + 1) * EDID_LENGTH,
365 DRM_ERROR("Unable to add a remote sink\n");
369 DC_LOG_MST("DM_MST: add remote sink 0x%p, %d remaining\n",
370 dc_sink, aconnector->dc_link->sink_count);
372 dc_sink->priv = aconnector;
373 /* dc_link_add_remote_sink returns a new reference */
374 aconnector->dc_sink = dc_sink;
376 /* When a display is unplugged from the MST hub, the connector will be
377 * destroyed within dm_dp_mst_connector_destroy. Connector
378 * HDCP properties, like type, undesired, desired, enabled,
379 * will be lost. So, save HDCP properties into hdcp_work within
380 * amdgpu_dm_atomic_commit_tail. If the same display is
381 * plugged back with the same display index, its HDCP properties
382 * will be retrieved from hdcp_work within dm_dp_mst_get_modes
384 #ifdef CONFIG_DRM_AMD_DC_HDCP
385 if (aconnector->dc_sink && connector->state) {
386 struct drm_device *dev = connector->dev;
387 struct amdgpu_device *adev = drm_to_adev(dev);
388 struct hdcp_workqueue *hdcp_work = adev->dm.hdcp_workqueue;
389 struct hdcp_workqueue *hdcp_w = &hdcp_work[aconnector->dc_link->link_index];
391 connector->state->hdcp_content_type =
392 hdcp_w->hdcp_content_type[connector->index];
393 connector->state->content_protection =
394 hdcp_w->content_protection[connector->index];
398 if (aconnector->dc_sink) {
399 amdgpu_dm_update_freesync_caps(
400 connector, aconnector->edid);
402 #if defined(CONFIG_DRM_AMD_DC_DCN)
403 if (!validate_dsc_caps_on_connector(aconnector))
404 memset(&aconnector->dc_sink->dsc_caps,
405 0, sizeof(aconnector->dc_sink->dsc_caps));
407 if (!retrieve_downstream_port_device(aconnector))
408 memset(&aconnector->mst_downstream_port_present,
409 0, sizeof(aconnector->mst_downstream_port_present));
414 drm_connector_update_edid_property(
415 &aconnector->base, aconnector->edid);
417 ret = drm_add_edid_modes(connector, aconnector->edid);
422 static struct drm_encoder *
423 dm_mst_atomic_best_encoder(struct drm_connector *connector,
424 struct drm_atomic_state *state)
426 struct drm_connector_state *connector_state = drm_atomic_get_new_connector_state(state,
428 struct drm_device *dev = connector->dev;
429 struct amdgpu_device *adev = drm_to_adev(dev);
430 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(connector_state->crtc);
432 return &adev->dm.mst_encoders[acrtc->crtc_id].base;
436 dm_dp_mst_detect(struct drm_connector *connector,
437 struct drm_modeset_acquire_ctx *ctx, bool force)
439 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
440 struct amdgpu_dm_connector *master = aconnector->mst_root;
441 struct drm_dp_mst_port *port = aconnector->mst_output_port;
442 int connection_status;
444 if (drm_connector_is_unregistered(connector))
445 return connector_status_disconnected;
447 connection_status = drm_dp_mst_detect_port(connector, ctx, &master->mst_mgr,
448 aconnector->mst_output_port);
450 if (port->pdt != DP_PEER_DEVICE_NONE && !port->dpcd_rev) {
454 ret = drm_dp_dpcd_readb(&port->aux, DP_DP13_DPCD_REV, &dpcd_rev);
457 port->dpcd_rev = dpcd_rev;
459 /* Could be a DP 1.2 DP Rx case */
461 ret = drm_dp_dpcd_readb(&port->aux, DP_DPCD_REV, &dpcd_rev);
464 port->dpcd_rev = dpcd_rev;
468 DRM_DEBUG_KMS("Can't decide DPCD revision number!");
472 * Could be a legacy sink, logical port, etc. on DP 1.2.
473 * Will get a NACK in these cases when issuing remote
477 DRM_DEBUG_KMS("Can't access DPCD");
478 } else if (port->pdt == DP_PEER_DEVICE_NONE) {
483 * Release the dc_sink for a connector whose unplug event is notified by a CSN message
485 if (connection_status == connector_status_disconnected && aconnector->dc_sink) {
486 if (aconnector->dc_link->sink_count)
487 dc_link_remove_remote_sink(aconnector->dc_link, aconnector->dc_sink);
489 DC_LOG_MST("DM_MST: remove remote sink 0x%p, %d remaining\n",
490 aconnector->dc_sink, aconnector->dc_link->sink_count);
492 dc_sink_release(aconnector->dc_sink);
493 aconnector->dc_sink = NULL;
494 aconnector->edid = NULL;
496 amdgpu_dm_set_mst_status(&aconnector->mst_status,
497 MST_REMOTE_EDID | MST_ALLOCATE_NEW_PAYLOAD | MST_CLEAR_ALLOCATED_PAYLOAD,
501 return connection_status;
504 static int dm_dp_mst_atomic_check(struct drm_connector *connector,
505 struct drm_atomic_state *state)
507 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
508 struct drm_dp_mst_topology_mgr *mst_mgr = &aconnector->mst_root->mst_mgr;
509 struct drm_dp_mst_port *mst_port = aconnector->mst_output_port;
511 return drm_dp_atomic_release_time_slots(state, mst_mgr, mst_port);
514 static const struct drm_connector_helper_funcs dm_dp_mst_connector_helper_funcs = {
515 .get_modes = dm_dp_mst_get_modes,
516 .mode_valid = amdgpu_dm_connector_mode_valid,
517 .atomic_best_encoder = dm_mst_atomic_best_encoder,
518 .detect_ctx = dm_dp_mst_detect,
519 .atomic_check = dm_dp_mst_atomic_check,
522 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
524 drm_encoder_cleanup(encoder);
527 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
528 .destroy = amdgpu_dm_encoder_destroy,
532 dm_dp_create_fake_mst_encoders(struct amdgpu_device *adev)
534 struct drm_device *dev = adev_to_drm(adev);
537 for (i = 0; i < adev->dm.display_indexes_num; i++) {
538 struct amdgpu_encoder *amdgpu_encoder = &adev->dm.mst_encoders[i];
539 struct drm_encoder *encoder = &amdgpu_encoder->base;
541 encoder->possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
545 &amdgpu_encoder->base,
546 &amdgpu_dm_encoder_funcs,
547 DRM_MODE_ENCODER_DPMST,
550 drm_encoder_helper_add(encoder, &amdgpu_dm_encoder_helper_funcs);
554 static struct drm_connector *
555 dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
556 struct drm_dp_mst_port *port,
557 const char *pathprop)
559 struct amdgpu_dm_connector *master = container_of(mgr, struct amdgpu_dm_connector, mst_mgr);
560 struct drm_device *dev = master->base.dev;
561 struct amdgpu_device *adev = drm_to_adev(dev);
562 struct amdgpu_dm_connector *aconnector;
563 struct drm_connector *connector;
566 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
570 connector = &aconnector->base;
571 aconnector->mst_output_port = port;
572 aconnector->mst_root = master;
573 amdgpu_dm_set_mst_status(&aconnector->mst_status,
576 if (drm_connector_init(
579 &dm_dp_mst_connector_funcs,
580 DRM_MODE_CONNECTOR_DisplayPort)) {
584 drm_connector_helper_add(connector, &dm_dp_mst_connector_helper_funcs);
586 amdgpu_dm_connector_init_helper(
589 DRM_MODE_CONNECTOR_DisplayPort,
591 master->connector_id);
593 for (i = 0; i < adev->dm.display_indexes_num; i++) {
594 drm_connector_attach_encoder(&aconnector->base,
595 &adev->dm.mst_encoders[i].base);
598 connector->max_bpc_property = master->base.max_bpc_property;
599 if (connector->max_bpc_property)
600 drm_connector_attach_max_bpc_property(connector, 8, 16);
602 connector->vrr_capable_property = master->base.vrr_capable_property;
603 if (connector->vrr_capable_property)
604 drm_connector_attach_vrr_capable_property(connector);
606 drm_object_attach_property(
608 dev->mode_config.path_property,
610 drm_object_attach_property(
612 dev->mode_config.tile_property,
615 drm_connector_set_path_property(connector, pathprop);
618 * Initialize connector state before adding the connector to drm and
621 amdgpu_dm_connector_funcs_reset(connector);
623 drm_dp_mst_get_port_malloc(port);
628 static const struct drm_dp_mst_topology_cbs dm_mst_cbs = {
629 .add_connector = dm_dp_add_mst_connector,
632 void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
633 struct amdgpu_dm_connector *aconnector,
636 struct dc_link_settings max_link_enc_cap = {0};
638 aconnector->dm_dp_aux.aux.name =
639 kasprintf(GFP_KERNEL, "AMDGPU DM aux hw bus %d",
641 aconnector->dm_dp_aux.aux.transfer = dm_dp_aux_transfer;
642 aconnector->dm_dp_aux.aux.drm_dev = dm->ddev;
643 aconnector->dm_dp_aux.ddc_service = aconnector->dc_link->ddc;
645 drm_dp_aux_init(&aconnector->dm_dp_aux.aux);
646 drm_dp_cec_register_connector(&aconnector->dm_dp_aux.aux,
649 if (aconnector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
652 dc_link_dp_get_max_link_enc_cap(aconnector->dc_link, &max_link_enc_cap);
653 aconnector->mst_mgr.cbs = &dm_mst_cbs;
654 drm_dp_mst_topology_mgr_init(&aconnector->mst_mgr, adev_to_drm(dm->adev),
655 &aconnector->dm_dp_aux.aux, 16, 4, aconnector->connector_id);
657 drm_connector_attach_dp_subconnector_property(&aconnector->base);
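/* PBN available per MTP time slot: the total link bandwidth expressed in PBN
 * (1 PBN corresponds to 54/64 MBps) divided over the 64 time slots, which
 * reduces to link_kbps / (8 * 1000 * 54) as computed below.
 */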
660 int dm_mst_get_pbn_divider(struct dc_link *link)
665 return dc_link_bandwidth_kbps(link,
666 dc_link_get_link_cap(link)) / (8 * 1000 * 54);
669 #if defined(CONFIG_DRM_AMD_DC_DCN)
671 struct dsc_mst_fairness_params {
672 struct dc_crtc_timing *timing;
673 struct dc_sink *sink;
674 struct dc_dsc_bw_range bw_range;
675 bool compression_possible;
676 struct drm_dp_mst_port *port;
677 enum dsc_clock_force_state clock_force_enable;
678 uint32_t num_slices_h;
679 uint32_t num_slices_v;
680 uint32_t bpp_overwrite;
681 struct amdgpu_dm_connector *aconnector;
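/* Convert a peak bandwidth in kbps to PBN. 1 PBN corresponds to 54/64 MBps,
 * so this works out to roughly kbps * 64 / (54 * 8 * 1000), rounded up, with
 * a small safety margin (PEAK_FACTOR_X1000 / 1000 = 1.006) folded in.
 */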
684 static int kbps_to_peak_pbn(int kbps)
686 u64 peak_kbps = kbps;
689 peak_kbps = div_u64(peak_kbps, 1000);
690 return (int) DIV64_U64_ROUND_UP(peak_kbps * 64, (54 * 8 * 1000));
693 static void set_dsc_configs_from_fairness_vars(struct dsc_mst_fairness_params *params,
694 struct dsc_mst_fairness_vars *vars,
698 struct drm_connector *drm_connector;
701 for (i = 0; i < count; i++) {
702 drm_connector = ¶ms[i].aconnector->base;
704 memset(¶ms[i].timing->dsc_cfg, 0, sizeof(params[i].timing->dsc_cfg));
705 if (vars[i + k].dsc_enabled && dc_dsc_compute_config(
706 params[i].sink->ctx->dc->res_pool->dscs[0],
707 ¶ms[i].sink->dsc_caps.dsc_dec_caps,
708 params[i].sink->ctx->dc->debug.dsc_min_slice_height_override,
709 drm_connector->display_info.max_dsc_bpp,
712 ¶ms[i].timing->dsc_cfg)) {
713 params[i].timing->flags.DSC = 1;
715 if (params[i].bpp_overwrite)
716 params[i].timing->dsc_cfg.bits_per_pixel = params[i].bpp_overwrite;
718 params[i].timing->dsc_cfg.bits_per_pixel = vars[i + k].bpp_x16;
720 if (params[i].num_slices_h)
721 params[i].timing->dsc_cfg.num_slices_h = params[i].num_slices_h;
723 if (params[i].num_slices_v)
724 params[i].timing->dsc_cfg.num_slices_v = params[i].num_slices_v;
726 params[i].timing->flags.DSC = 0;
728 params[i].timing->dsc_cfg.mst_pbn = vars[i + k].pbn;
731 for (i = 0; i < count; i++) {
732 if (params[i].sink) {
733 if (params[i].sink->sink_signal != SIGNAL_TYPE_VIRTUAL &&
734 params[i].sink->sink_signal != SIGNAL_TYPE_NONE)
735 DRM_DEBUG_DRIVER("%s i=%d dispname=%s\n", __func__, i,
736 params[i].sink->edid_caps.display_name);
739 DRM_DEBUG_DRIVER("dsc=%d bits_per_pixel=%d pbn=%d\n",
740 params[i].timing->flags.DSC,
741 params[i].timing->dsc_cfg.bits_per_pixel,
746 static int bpp_x16_from_pbn(struct dsc_mst_fairness_params param, int pbn)
748 struct dc_dsc_config dsc_config;
751 struct drm_connector *drm_connector = ¶m.aconnector->base;
752 uint32_t max_dsc_target_bpp_limit_override =
753 drm_connector->display_info.max_dsc_bpp;
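/* Convert the PBN allocation back to kbps: 994 approximates 1000/1.006 and
 * removes the margin added by kbps_to_peak_pbn(), while 8 * 54 / 64 converts
 * PBN (54/64 MBps units) back to kilobits per second.
 */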
755 kbps = div_u64((u64)pbn * 994 * 8 * 54, 64);
756 dc_dsc_compute_config(
757 param.sink->ctx->dc->res_pool->dscs[0],
758 ¶m.sink->dsc_caps.dsc_dec_caps,
759 param.sink->ctx->dc->debug.dsc_min_slice_height_override,
760 max_dsc_target_bpp_limit_override,
761 (int) kbps, param.timing, &dsc_config);
763 return dsc_config.bits_per_pixel;
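/* Greedily hand spare link bandwidth back to DSC-enabled streams: the stream
 * with the least remaining slack (PBN at max_kbps minus its current PBN) gets
 * either a fair share of the unused time slots or its full remaining slack,
 * whichever is smaller, and the change is kept only if the MST atomic check
 * still passes.
 */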
766 static int increase_dsc_bpp(struct drm_atomic_state *state,
767 struct drm_dp_mst_topology_state *mst_state,
768 struct dc_link *dc_link,
769 struct dsc_mst_fairness_params *params,
770 struct dsc_mst_fairness_vars *vars,
775 bool bpp_increased[MAX_PIPES];
776 int initial_slack[MAX_PIPES];
777 int min_initial_slack;
779 int remaining_to_increase = 0;
780 int link_timeslots_used;
784 for (i = 0; i < count; i++) {
785 if (vars[i + k].dsc_enabled) {
787 kbps_to_peak_pbn(params[i].bw_range.max_kbps) - vars[i + k].pbn;
788 bpp_increased[i] = false;
789 remaining_to_increase += 1;
791 initial_slack[i] = 0;
792 bpp_increased[i] = true;
796 while (remaining_to_increase) {
798 min_initial_slack = -1;
799 for (i = 0; i < count; i++) {
800 if (!bpp_increased[i]) {
801 if (min_initial_slack == -1 || min_initial_slack > initial_slack[i]) {
802 min_initial_slack = initial_slack[i];
808 if (next_index == -1)
811 link_timeslots_used = 0;
813 for (i = 0; i < count; i++)
814 link_timeslots_used += DIV_ROUND_UP(vars[i + k].pbn, mst_state->pbn_div);
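/* Split the unused portion of the 63 usable time slots evenly among the
 * streams still waiting for a bpp increase, converted to PBN via pbn_div.
 */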
817 (63 - link_timeslots_used) / remaining_to_increase * mst_state->pbn_div;
819 if (initial_slack[next_index] > fair_pbn_alloc) {
820 vars[next_index].pbn += fair_pbn_alloc;
821 ret = drm_dp_atomic_find_time_slots(state,
822 params[next_index].port->mgr,
823 params[next_index].port,
824 vars[next_index].pbn);
828 ret = drm_dp_mst_atomic_check(state);
830 vars[next_index].bpp_x16 = bpp_x16_from_pbn(params[next_index], vars[next_index].pbn);
832 vars[next_index].pbn -= fair_pbn_alloc;
833 ret = drm_dp_atomic_find_time_slots(state,
834 params[next_index].port->mgr,
835 params[next_index].port,
836 vars[next_index].pbn);
841 vars[next_index].pbn += initial_slack[next_index];
842 ret = drm_dp_atomic_find_time_slots(state,
843 params[next_index].port->mgr,
844 params[next_index].port,
845 vars[next_index].pbn);
849 ret = drm_dp_mst_atomic_check(state);
851 vars[next_index].bpp_x16 = params[next_index].bw_range.max_target_bpp_x16;
853 vars[next_index].pbn -= initial_slack[next_index];
854 ret = drm_dp_atomic_find_time_slots(state,
855 params[next_index].port->mgr,
856 params[next_index].port,
857 vars[next_index].pbn);
863 bpp_increased[next_index] = true;
864 remaining_to_increase--;
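/* After bpp optimization, try turning DSC off entirely for streams that
 * reached their maximum target bpp: start with the stream that needs the
 * largest bandwidth increase to run uncompressed, and keep DSC disabled only
 * if the MST atomic check still succeeds.
 */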
869 static int try_disable_dsc(struct drm_atomic_state *state,
870 struct dc_link *dc_link,
871 struct dsc_mst_fairness_params *params,
872 struct dsc_mst_fairness_vars *vars,
877 bool tried[MAX_PIPES];
878 int kbps_increase[MAX_PIPES];
879 int max_kbps_increase;
881 int remaining_to_try = 0;
884 for (i = 0; i < count; i++) {
885 if (vars[i + k].dsc_enabled
886 && vars[i + k].bpp_x16 == params[i].bw_range.max_target_bpp_x16
887 && params[i].clock_force_enable == DSC_CLK_FORCE_DEFAULT) {
888 kbps_increase[i] = params[i].bw_range.stream_kbps - params[i].bw_range.max_kbps;
890 remaining_to_try += 1;
892 kbps_increase[i] = 0;
897 while (remaining_to_try) {
899 max_kbps_increase = -1;
900 for (i = 0; i < count; i++) {
902 if (max_kbps_increase == -1 || max_kbps_increase < kbps_increase[i]) {
903 max_kbps_increase = kbps_increase[i];
909 if (next_index == -1)
912 vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps);
913 ret = drm_dp_atomic_find_time_slots(state,
914 params[next_index].port->mgr,
915 params[next_index].port,
916 vars[next_index].pbn);
920 ret = drm_dp_mst_atomic_check(state);
922 vars[next_index].dsc_enabled = false;
923 vars[next_index].bpp_x16 = 0;
925 vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.max_kbps);
926 ret = drm_dp_atomic_find_time_slots(state,
927 params[next_index].port->mgr,
928 params[next_index].port,
929 vars[next_index].pbn);
934 tried[next_index] = true;
940 static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
941 struct dc_state *dc_state,
942 struct dc_link *dc_link,
943 struct dsc_mst_fairness_vars *vars,
944 struct drm_dp_mst_topology_mgr *mgr,
945 int *link_vars_start_index)
947 struct dc_stream_state *stream;
948 struct dsc_mst_fairness_params params[MAX_PIPES];
949 struct amdgpu_dm_connector *aconnector;
950 struct drm_dp_mst_topology_state *mst_state = drm_atomic_get_mst_topology_state(state, mgr);
953 bool debugfs_overwrite = false;
955 memset(params, 0, sizeof(params));
957 if (IS_ERR(mst_state))
958 return PTR_ERR(mst_state);
961 for (i = 0; i < dc_state->stream_count; i++) {
962 struct dc_dsc_policy dsc_policy = {0};
964 stream = dc_state->streams[i];
966 if (stream->link != dc_link)
969 aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
973 if (!aconnector->mst_output_port)
976 stream->timing.flags.DSC = 0;
978 params[count].timing = &stream->timing;
979 params[count].sink = stream->sink;
980 params[count].aconnector = aconnector;
981 params[count].port = aconnector->mst_output_port;
982 params[count].clock_force_enable = aconnector->dsc_settings.dsc_force_enable;
983 if (params[count].clock_force_enable == DSC_CLK_FORCE_ENABLE)
984 debugfs_overwrite = true;
985 params[count].num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
986 params[count].num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
987 params[count].bpp_overwrite = aconnector->dsc_settings.dsc_bits_per_pixel;
988 params[count].compression_possible = stream->sink->dsc_caps.dsc_dec_caps.is_dsc_supported;
989 dc_dsc_get_policy_for_timing(params[count].timing, 0, &dsc_policy);
990 if (!dc_dsc_compute_bandwidth_range(
991 stream->sink->ctx->dc->res_pool->dscs[0],
992 stream->sink->ctx->dc->debug.dsc_min_slice_height_override,
993 dsc_policy.min_target_bpp * 16,
994 dsc_policy.max_target_bpp * 16,
995 &stream->sink->dsc_caps.dsc_dec_caps,
996 &stream->timing, ¶ms[count].bw_range))
997 params[count].bw_range.stream_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing);
1007 /* k is start index of vars for current phy link used by mst hub */
1008 k = *link_vars_start_index;
1009 /* set vars start index for next mst hub phy link */
1010 *link_vars_start_index += count;
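/* Fairness strategy for this link: first try every stream uncompressed; if
 * that does not fit, force maximum compression on every DSC-capable stream,
 * then greedily raise bpp (increase_dsc_bpp) and finally attempt to disable
 * DSC again where possible (try_disable_dsc).
 */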
1012 /* Try no compression */
1013 for (i = 0; i < count; i++) {
1014 vars[i + k].aconnector = params[i].aconnector;
1015 vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps);
1016 vars[i + k].dsc_enabled = false;
1017 vars[i + k].bpp_x16 = 0;
1018 ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr, params[i].port,
1023 ret = drm_dp_mst_atomic_check(state);
1024 if (ret == 0 && !debugfs_overwrite) {
1025 set_dsc_configs_from_fairness_vars(params, vars, count, k);
1027 } else if (ret != -ENOSPC) {
1031 /* Try max compression */
1032 for (i = 0; i < count; i++) {
1033 if (params[i].compression_possible && params[i].clock_force_enable != DSC_CLK_FORCE_DISABLE) {
1034 vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps);
1035 vars[i + k].dsc_enabled = true;
1036 vars[i + k].bpp_x16 = params[i].bw_range.min_target_bpp_x16;
1037 ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr,
1038 params[i].port, vars[i + k].pbn);
1042 vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps);
1043 vars[i + k].dsc_enabled = false;
1044 vars[i + k].bpp_x16 = 0;
1045 ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr,
1046 params[i].port, vars[i + k].pbn);
1051 ret = drm_dp_mst_atomic_check(state);
1055 /* Optimize degree of compression */
1056 ret = increase_dsc_bpp(state, mst_state, dc_link, params, vars, count, k);
1060 ret = try_disable_dsc(state, dc_link, params, vars, count, k);
1064 set_dsc_configs_from_fairness_vars(params, vars, count, k);
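/* DSC needs to be recomputed for a link only when it is a DSC- or
 * DSC-passthrough-capable MST branch and either a stream on it changed mode
 * in the new atomic state or a stream from the current state is no longer
 * present in the new one.
 */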
1069 static bool is_dsc_need_re_compute(
1070 struct drm_atomic_state *state,
1071 struct dc_state *dc_state,
1072 struct dc_link *dc_link)
1075 bool is_dsc_need_re_compute = false;
1076 struct amdgpu_dm_connector *stream_on_link[MAX_PIPES];
1077 int new_stream_on_link_num = 0;
1078 struct amdgpu_dm_connector *aconnector;
1079 struct dc_stream_state *stream;
1080 const struct dc *dc = dc_link->dc;
1082 /* only check phy used by dsc mst branch */
1083 if (dc_link->type != dc_connection_mst_branch)
1086 if (!(dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT ||
1087 dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_PASSTHROUGH_SUPPORT))
1090 for (i = 0; i < MAX_PIPES; i++)
1091 stream_on_link[i] = NULL;
1093 /* check if there is mode change in new request */
1094 for (i = 0; i < dc_state->stream_count; i++) {
1095 struct drm_crtc_state *new_crtc_state;
1096 struct drm_connector_state *new_conn_state;
1098 stream = dc_state->streams[i];
1102 /* check if stream using the same link for mst */
1103 if (stream->link != dc_link)
1106 aconnector = (struct amdgpu_dm_connector *) stream->dm_stream_context;
1110 stream_on_link[new_stream_on_link_num] = aconnector;
1111 new_stream_on_link_num++;
1113 new_conn_state = drm_atomic_get_new_connector_state(state, &aconnector->base);
1114 if (!new_conn_state)
1117 if (IS_ERR(new_conn_state))
1120 if (!new_conn_state->crtc)
1123 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
1124 if (!new_crtc_state)
1127 if (IS_ERR(new_crtc_state))
1130 if (new_crtc_state->enable && new_crtc_state->active) {
1131 if (new_crtc_state->mode_changed || new_crtc_state->active_changed ||
1132 new_crtc_state->connectors_changed)
1137 /* check current_state for a stream on the link that is not in
1140 for (i = 0; i < dc->current_state->stream_count; i++) {
1141 stream = dc->current_state->streams[i];
1142 /* only check stream on the mst hub */
1143 if (stream->link != dc_link)
1146 aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
1150 for (j = 0; j < new_stream_on_link_num; j++) {
1151 if (stream_on_link[j]) {
1152 if (aconnector == stream_on_link[j])
1157 if (j == new_stream_on_link_num) {
1158 /* not in new state */
1159 is_dsc_need_re_compute = true;
1164 return is_dsc_need_re_compute;
1167 int compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
1168 struct dc_state *dc_state,
1169 struct dsc_mst_fairness_vars *vars)
1172 struct dc_stream_state *stream;
1173 bool computed_streams[MAX_PIPES];
1174 struct amdgpu_dm_connector *aconnector;
1175 struct drm_dp_mst_topology_mgr *mst_mgr;
1176 int link_vars_start_index = 0;
1179 for (i = 0; i < dc_state->stream_count; i++)
1180 computed_streams[i] = false;
1182 for (i = 0; i < dc_state->stream_count; i++) {
1183 stream = dc_state->streams[i];
1185 if (stream->signal != SIGNAL_TYPE_DISPLAY_PORT_MST)
1188 aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
1190 if (!aconnector || !aconnector->dc_sink || !aconnector->mst_output_port)
1193 if (!aconnector->dc_sink->dsc_caps.dsc_dec_caps.is_dsc_supported)
1196 if (computed_streams[i])
1199 if (dcn20_remove_stream_from_ctx(stream->ctx->dc, dc_state, stream) != DC_OK)
1202 if (!is_dsc_need_re_compute(state, dc_state, stream->link))
1205 mst_mgr = aconnector->mst_output_port->mgr;
1206 ret = compute_mst_dsc_configs_for_link(state, dc_state, stream->link, vars, mst_mgr,
1207 &link_vars_start_index);
1211 for (j = 0; j < dc_state->stream_count; j++) {
1212 if (dc_state->streams[j]->link == stream->link)
1213 computed_streams[j] = true;
1217 for (i = 0; i < dc_state->stream_count; i++) {
1218 stream = dc_state->streams[i];
1220 if (stream->timing.flags.DSC == 1)
1221 if (dc_stream_add_dsc_to_resource(stream->ctx->dc, dc_state, stream) != DC_OK)
1228 static int pre_compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
1229 struct dc_state *dc_state,
1230 struct dsc_mst_fairness_vars *vars)
1233 struct dc_stream_state *stream;
1234 bool computed_streams[MAX_PIPES];
1235 struct amdgpu_dm_connector *aconnector;
1236 struct drm_dp_mst_topology_mgr *mst_mgr;
1237 int link_vars_start_index = 0;
1240 for (i = 0; i < dc_state->stream_count; i++)
1241 computed_streams[i] = false;
1243 for (i = 0; i < dc_state->stream_count; i++) {
1244 stream = dc_state->streams[i];
1246 if (stream->signal != SIGNAL_TYPE_DISPLAY_PORT_MST)
1249 aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
1251 if (!aconnector || !aconnector->dc_sink || !aconnector->mst_output_port)
1254 if (!aconnector->dc_sink->dsc_caps.dsc_dec_caps.is_dsc_supported)
1257 if (computed_streams[i])
1260 if (!is_dsc_need_re_compute(state, dc_state, stream->link))
1263 mst_mgr = aconnector->mst_output_port->mgr;
1264 ret = compute_mst_dsc_configs_for_link(state, dc_state, stream->link, vars, mst_mgr,
1265 &link_vars_start_index);
1269 for (j = 0; j < dc_state->stream_count; j++) {
1270 if (dc_state->streams[j]->link == stream->link)
1271 computed_streams[j] = true;
1278 static int find_crtc_index_in_state_by_stream(struct drm_atomic_state *state,
1279 struct dc_stream_state *stream)
1282 struct drm_crtc *crtc;
1283 struct drm_crtc_state *new_state, *old_state;
1285 for_each_oldnew_crtc_in_state(state, crtc, old_state, new_state, i) {
1286 struct dm_crtc_state *dm_state = to_dm_crtc_state(new_state);
1288 if (dm_state->stream == stream)
1294 static bool is_link_to_dschub(struct dc_link *dc_link)
1296 union dpcd_dsc_basic_capabilities *dsc_caps =
1297 &dc_link->dpcd_caps.dsc_caps.dsc_basic_caps;
1299 /* only check phy used by dsc mst branch */
1300 if (dc_link->type != dc_connection_mst_branch)
1303 if (!(dsc_caps->fields.dsc_support.DSC_SUPPORT ||
1304 dsc_caps->fields.dsc_support.DSC_PASSTHROUGH_SUPPORT))
1309 static bool is_dsc_precompute_needed(struct drm_atomic_state *state)
1312 struct drm_crtc *crtc;
1313 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
1316 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
1317 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(new_crtc_state);
1319 if (!amdgpu_dm_find_first_crtc_matching_connector(state, crtc)) {
1323 if (dm_crtc_state->stream && dm_crtc_state->stream->link)
1324 if (is_link_to_dschub(dm_crtc_state->stream->link))
1330 int pre_validate_dsc(struct drm_atomic_state *state,
1331 struct dm_atomic_state **dm_state_ptr,
1332 struct dsc_mst_fairness_vars *vars)
1335 struct dm_atomic_state *dm_state;
1336 struct dc_state *local_dc_state = NULL;
1339 if (!is_dsc_precompute_needed(state)) {
1340 DRM_INFO_ONCE("DSC precompute is not needed.\n");
1343 ret = dm_atomic_get_state(state, dm_state_ptr);
1345 DRM_INFO_ONCE("dm_atomic_get_state() failed\n");
1348 dm_state = *dm_state_ptr;
1351 * Create a local variable for dc_state. Copy the content of the streams of dm_state->context
1352 * into the local variable, and make sure the stream pointers of the local variable are not the same as the streams
1353 * from dm_state->context.
1356 local_dc_state = kmemdup(dm_state->context, sizeof(struct dc_state), GFP_KERNEL);
1357 if (!local_dc_state)
1360 for (i = 0; i < local_dc_state->stream_count; i++) {
1361 struct dc_stream_state *stream = dm_state->context->streams[i];
1362 int ind = find_crtc_index_in_state_by_stream(state, stream);
1365 struct amdgpu_dm_connector *aconnector;
1366 struct drm_connector_state *drm_new_conn_state;
1367 struct dm_connector_state *dm_new_conn_state;
1368 struct dm_crtc_state *dm_old_crtc_state;
1371 amdgpu_dm_find_first_crtc_matching_connector(state,
1372 state->crtcs[ind].ptr);
1373 drm_new_conn_state =
1374 drm_atomic_get_new_connector_state(state,
1376 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
1377 dm_old_crtc_state = to_dm_crtc_state(state->crtcs[ind].old_state);
1379 local_dc_state->streams[i] =
1380 create_validate_stream_for_sink(aconnector,
1381 &state->crtcs[ind].new_state->mode,
1383 dm_old_crtc_state->stream);
1384 if (local_dc_state->streams[i] == NULL) {
1394 ret = pre_compute_mst_dsc_configs_for_state(state, local_dc_state, vars);
1396 DRM_INFO_ONCE("pre_compute_mst_dsc_configs_for_state() failed\n");
1401 * Compare the local stream timings with dm_state->context;
1402 * if they are the same, set crtc_state->mode_changed = 0.
1404 for (i = 0; i < local_dc_state->stream_count; i++) {
1405 struct dc_stream_state *stream = dm_state->context->streams[i];
1407 if (local_dc_state->streams[i] &&
1408 is_timing_changed(stream, local_dc_state->streams[i])) {
1409 DRM_INFO_ONCE("crtc[%d] needs mode_changed\n", i);
1411 int ind = find_crtc_index_in_state_by_stream(state, stream);
1414 state->crtcs[ind].new_state->mode_changed = 0;
1418 for (i = 0; i < local_dc_state->stream_count; i++) {
1419 struct dc_stream_state *stream = dm_state->context->streams[i];
1421 if (local_dc_state->streams[i] != stream)
1422 dc_stream_release(local_dc_state->streams[i]);
1425 kfree(local_dc_state);
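/* Inverse of kbps_to_peak_pbn(): convert a PBN value back to kbps, removing
 * the 1.006 margin via the 1000000 / PEAK_FACTOR_X1000 factor.
 */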
1430 static unsigned int kbps_from_pbn(unsigned int pbn)
1432 unsigned int kbps = pbn;
1434 kbps *= (1000000 / PEAK_FACTOR_X1000);
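/* Compute the DSC bandwidth range for the stream from the sink's decoder caps
 * and the DSC policy; a common config is considered possible when both the
 * minimum and maximum target bpp of the range are non-zero.
 */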
1442 static bool is_dsc_common_config_possible(struct dc_stream_state *stream,
1443 struct dc_dsc_bw_range *bw_range)
1445 struct dc_dsc_policy dsc_policy = {0};
1447 dc_dsc_get_policy_for_timing(&stream->timing, 0, &dsc_policy);
1448 dc_dsc_compute_bandwidth_range(stream->sink->ctx->dc->res_pool->dscs[0],
1449 stream->sink->ctx->dc->debug.dsc_min_slice_height_override,
1450 dsc_policy.min_target_bpp * 16,
1451 dsc_policy.max_target_bpp * 16,
1452 &stream->sink->dsc_caps.dsc_dec_caps,
1453 &stream->timing, bw_range);
1455 return bw_range->max_target_bpp_x16 && bw_range->min_target_bpp_x16;
1457 #endif /* CONFIG_DRM_AMD_DC_DCN */
1459 enum dc_status dm_dp_mst_is_port_support_mode(
1460 struct amdgpu_dm_connector *aconnector,
1461 struct dc_stream_state *stream)
1463 int bpp, pbn, branch_max_throughput_mps = 0;
1464 #if defined(CONFIG_DRM_AMD_DC_DCN)
1465 struct dc_link_settings cur_link_settings;
1466 unsigned int end_to_end_bw_in_kbps = 0;
1467 unsigned int upper_link_bw_in_kbps = 0, down_link_bw_in_kbps = 0;
1468 unsigned int max_compressed_bw_in_kbps = 0;
1469 struct dc_dsc_bw_range bw_range = {0};
1470 struct drm_dp_mst_topology_mgr *mst_mgr;
1473 * Check if the mode could be supported if DSC pass-through is supported,
1474 * AND check if there is enough bandwidth available to support the mode
1477 if (is_dsc_common_config_possible(stream, &bw_range) &&
1478 aconnector->mst_output_port->passthrough_aux) {
1479 mst_mgr = aconnector->mst_output_port->mgr;
1480 mutex_lock(&mst_mgr->lock);
1482 cur_link_settings = stream->link->verified_link_cap;
1484 upper_link_bw_in_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
1487 down_link_bw_in_kbps = kbps_from_pbn(aconnector->mst_output_port->full_pbn);
1489 /* pick the bottleneck */
1490 end_to_end_bw_in_kbps = min(upper_link_bw_in_kbps,
1491 down_link_bw_in_kbps);
1493 mutex_unlock(&mst_mgr->lock);
1496 * use the bandwidth at maximum DSC compression as the required
1497 * bandwidth for the mode
1499 max_compressed_bw_in_kbps = bw_range.min_kbps;
1501 if (end_to_end_bw_in_kbps < max_compressed_bw_in_kbps) {
1502 DRM_DEBUG_DRIVER("Mode does not fit into DSC pass-through bandwidth validation\n");
1503 return DC_FAIL_BANDWIDTH_VALIDATE;
1507 /* check if mode could be supported within full_pbn */
1508 bpp = convert_dc_color_depth_into_bpc(stream->timing.display_color_depth) * 3;
1509 pbn = drm_dp_calc_pbn_mode(stream->timing.pix_clk_100hz / 10, bpp, false);
1511 if (pbn > aconnector->mst_output_port->full_pbn)
1512 return DC_FAIL_BANDWIDTH_VALIDATE;
1513 #if defined(CONFIG_DRM_AMD_DC_DCN)
1517 /* check the MST DSC output bandwidth against branch_overall_throughput_*_mps */
1518 switch (stream->timing.pixel_encoding) {
1519 case PIXEL_ENCODING_RGB:
1520 case PIXEL_ENCODING_YCBCR444:
1521 branch_max_throughput_mps =
1522 aconnector->dc_sink->dsc_caps.dsc_dec_caps.branch_overall_throughput_0_mps;
1524 case PIXEL_ENCODING_YCBCR422:
1525 case PIXEL_ENCODING_YCBCR420:
1526 branch_max_throughput_mps =
1527 aconnector->dc_sink->dsc_caps.dsc_dec_caps.branch_overall_throughput_1_mps;
1533 if (branch_max_throughput_mps != 0 &&
1534 ((stream->timing.pix_clk_100hz / 10) > branch_max_throughput_mps * 1000))
1535 return DC_FAIL_BANDWIDTH_VALIDATE;