Merge tag 'drm-misc-next-2020-02-10' of git://anongit.freedesktop.org/drm/drm-misc...
[platform/kernel/linux-rpi.git] / drivers/gpu/drm/drm_dp_mst_topology.c
index 4104f15..a811247 100644
@@ -398,7 +398,7 @@ drm_dp_encode_sideband_req(const struct drm_dp_sideband_msg_req_body *req,
                        memcpy(&buf[idx], req->u.i2c_read.transactions[i].bytes, req->u.i2c_read.transactions[i].num_bytes);
                        idx += req->u.i2c_read.transactions[i].num_bytes;
 
-                       buf[idx] = (req->u.i2c_read.transactions[i].no_stop_bit & 0x1) << 5;
+                       buf[idx] = (req->u.i2c_read.transactions[i].no_stop_bit & 0x1) << 4;
                        buf[idx] |= (req->u.i2c_read.transactions[i].i2c_transaction_delay & 0xf);
                        idx++;
                }
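The corrected encoding places No_Stop_Bit in bit 4 of the trailing byte of each I2C_READ transaction, with I2C_Transaction_Delay in bits 3:0. A small sketch of the resulting layout (bit positions inferred from the corrected code above, not quoted from the DisplayPort spec):

        /*
         * Trailing byte of each DP_REMOTE_I2C_READ transaction, as encoded above:
         *   bit 4    - No_Stop_Bit
         *   bits 3:0 - I2C_Transaction_Delay
         *
         * e.g. no_stop_bit = 1, i2c_transaction_delay = 2:
         *   ((1 & 0x1) << 4) | (2 & 0xf) == 0x12
         */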
@@ -853,6 +853,7 @@ static bool drm_dp_sideband_parse_enum_path_resources_ack(struct drm_dp_sideband
 {
        int idx = 1;
        repmsg->u.path_resources.port_number = (raw->msg[idx] >> 4) & 0xf;
+       repmsg->u.path_resources.fec_capable = raw->msg[idx] & 0x1;
        idx++;
        if (idx > raw->curlen)
                goto fail_len;
@@ -1208,6 +1209,8 @@ static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
                    txmsg->state == DRM_DP_SIDEBAND_TX_SENT) {
                        mstb->tx_slots[txmsg->seqno] = NULL;
                }
+               mgr->is_waiting_for_dwn_reply = false;
+
        }
 out:
        if (unlikely(ret == -EIO) && drm_debug_enabled(DRM_UT_DP)) {
@@ -1217,6 +1220,7 @@ out:
        }
        mutex_unlock(&mgr->qlock);
 
+       drm_dp_mst_kick_tx(mgr);
        return ret;
 }
 
@@ -1931,73 +1935,90 @@ static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port,
        return parent_lct + 1;
 }
 
-static int drm_dp_port_set_pdt(struct drm_dp_mst_port *port, u8 new_pdt)
+static bool drm_dp_mst_is_dp_mst_end_device(u8 pdt, bool mcs)
+{
+       switch (pdt) {
+       case DP_PEER_DEVICE_DP_LEGACY_CONV:
+       case DP_PEER_DEVICE_SST_SINK:
+               return true;
+       case DP_PEER_DEVICE_MST_BRANCHING:
+               /* For sst branch device */
+               if (!mcs)
+                       return true;
+
+               return false;
+       }
+       return true;
+}
+
+static int
+drm_dp_port_set_pdt(struct drm_dp_mst_port *port, u8 new_pdt,
+                   bool new_mcs)
 {
        struct drm_dp_mst_topology_mgr *mgr = port->mgr;
        struct drm_dp_mst_branch *mstb;
        u8 rad[8], lct;
        int ret = 0;
 
-       if (port->pdt == new_pdt)
+       if (port->pdt == new_pdt && port->mcs == new_mcs)
                return 0;
 
        /* Teardown the old pdt, if there is one */
-       switch (port->pdt) {
-       case DP_PEER_DEVICE_DP_LEGACY_CONV:
-       case DP_PEER_DEVICE_SST_SINK:
-               /*
-                * If the new PDT would also have an i2c bus, don't bother
-                * with reregistering it
-                */
-               if (new_pdt == DP_PEER_DEVICE_DP_LEGACY_CONV ||
-                   new_pdt == DP_PEER_DEVICE_SST_SINK) {
-                       port->pdt = new_pdt;
-                       return 0;
-               }
+       if (port->pdt != DP_PEER_DEVICE_NONE) {
+               if (drm_dp_mst_is_dp_mst_end_device(port->pdt, port->mcs)) {
+                       /*
+                        * If the new PDT would also have an i2c bus,
+                        * don't bother with reregistering it
+                        */
+                       if (new_pdt != DP_PEER_DEVICE_NONE &&
+                           drm_dp_mst_is_dp_mst_end_device(new_pdt, new_mcs)) {
+                               port->pdt = new_pdt;
+                               port->mcs = new_mcs;
+                               return 0;
+                       }
 
-               /* remove i2c over sideband */
-               drm_dp_mst_unregister_i2c_bus(&port->aux);
-               break;
-       case DP_PEER_DEVICE_MST_BRANCHING:
-               mutex_lock(&mgr->lock);
-               drm_dp_mst_topology_put_mstb(port->mstb);
-               port->mstb = NULL;
-               mutex_unlock(&mgr->lock);
-               break;
+                       /* remove i2c over sideband */
+                       drm_dp_mst_unregister_i2c_bus(&port->aux);
+               } else {
+                       mutex_lock(&mgr->lock);
+                       drm_dp_mst_topology_put_mstb(port->mstb);
+                       port->mstb = NULL;
+                       mutex_unlock(&mgr->lock);
+               }
        }
 
        port->pdt = new_pdt;
-       switch (port->pdt) {
-       case DP_PEER_DEVICE_DP_LEGACY_CONV:
-       case DP_PEER_DEVICE_SST_SINK:
-               /* add i2c over sideband */
-               ret = drm_dp_mst_register_i2c_bus(&port->aux);
-               break;
+       port->mcs = new_mcs;
 
-       case DP_PEER_DEVICE_MST_BRANCHING:
-               lct = drm_dp_calculate_rad(port, rad);
-               mstb = drm_dp_add_mst_branch_device(lct, rad);
-               if (!mstb) {
-                       ret = -ENOMEM;
-                       DRM_ERROR("Failed to create MSTB for port %p", port);
-                       goto out;
-               }
+       if (port->pdt != DP_PEER_DEVICE_NONE) {
+               if (drm_dp_mst_is_dp_mst_end_device(port->pdt, port->mcs)) {
+                       /* add i2c over sideband */
+                       ret = drm_dp_mst_register_i2c_bus(&port->aux);
+               } else {
+                       lct = drm_dp_calculate_rad(port, rad);
+                       mstb = drm_dp_add_mst_branch_device(lct, rad);
+                       if (!mstb) {
+                               ret = -ENOMEM;
+                               DRM_ERROR("Failed to create MSTB for port %p",
+                                         port);
+                               goto out;
+                       }
 
-               mutex_lock(&mgr->lock);
-               port->mstb = mstb;
-               mstb->mgr = port->mgr;
-               mstb->port_parent = port;
+                       mutex_lock(&mgr->lock);
+                       port->mstb = mstb;
+                       mstb->mgr = port->mgr;
+                       mstb->port_parent = port;
 
-               /*
-                * Make sure this port's memory allocation stays
-                * around until its child MSTB releases it
-                */
-               drm_dp_mst_get_port_malloc(port);
-               mutex_unlock(&mgr->lock);
+                       /*
+                        * Make sure this port's memory allocation stays
+                        * around until its child MSTB releases it
+                        */
+                       drm_dp_mst_get_port_malloc(port);
+                       mutex_unlock(&mgr->lock);
 
-               /* And make sure we send a link address for this */
-               ret = 1;
-               break;
+                       /* And make sure we send a link address for this */
+                       ret = 1;
+               }
        }
 
 out:
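The new drm_dp_mst_is_dp_mst_end_device() helper treats a port as an end device unless it is a true MST branch, i.e. DP_PEER_DEVICE_MST_BRANCHING with the message capability status (mcs) bit set. A sketch of the classification it performs for the values handled above:

        /*
         * drm_dp_mst_is_dp_mst_end_device(pdt, mcs):
         *   DP_PEER_DEVICE_DP_LEGACY_CONV, any mcs   -> true  (i2c over sideband)
         *   DP_PEER_DEVICE_SST_SINK,       any mcs   -> true  (i2c over sideband)
         *   DP_PEER_DEVICE_MST_BRANCHING,  mcs == 0  -> true  (SST branch device)
         *   DP_PEER_DEVICE_MST_BRANCHING,  mcs == 1  -> false (real MST branch, gets an mstb)
         */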
@@ -2150,9 +2171,8 @@ drm_dp_mst_port_add_connector(struct drm_dp_mst_branch *mstb,
                goto error;
        }
 
-       if ((port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV ||
-            port->pdt == DP_PEER_DEVICE_SST_SINK) &&
-           port->port_num >= DP_MST_LOGICAL_PORT_0) {
+       if (port->pdt != DP_PEER_DEVICE_NONE &&
+           drm_dp_mst_is_dp_mst_end_device(port->pdt, port->mcs)) {
                port->cached_edid = drm_get_edid(port->connector,
                                                 &port->aux.ddc);
                drm_connector_set_tile_property(port->connector);
@@ -2174,6 +2194,7 @@ drm_dp_mst_topology_unlink_port(struct drm_dp_mst_topology_mgr *mgr,
                                struct drm_dp_mst_port *port)
 {
        mutex_lock(&mgr->lock);
+       port->parent->num_ports--;
        list_del(&port->next);
        mutex_unlock(&mgr->lock);
        drm_dp_mst_topology_put_port(port);
@@ -2198,6 +2219,9 @@ drm_dp_mst_add_port(struct drm_device *dev,
        port->aux.dev = dev->dev;
        port->aux.is_remote = true;
 
+       /* initialize the MST downstream port's AUX crc work queue */
+       drm_dp_remote_aux_init(&port->aux);
+
        /*
         * Make sure the memory allocation for our parent branch stays
         * around until our own memory allocation is released
@@ -2216,6 +2240,7 @@ drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb,
        struct drm_dp_mst_port *port;
        int old_ddps = 0, ret;
        u8 new_pdt = DP_PEER_DEVICE_NONE;
+       bool new_mcs = 0;
        bool created = false, send_link_addr = false, changed = false;
 
        port = drm_dp_get_port(mstb, port_msg->port_number);
@@ -2260,7 +2285,7 @@ drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb,
        port->input = port_msg->input_port;
        if (!port->input)
                new_pdt = port_msg->peer_device_type;
-       port->mcs = port_msg->mcs;
+       new_mcs = port_msg->mcs;
        port->ddps = port_msg->ddps;
        port->ldps = port_msg->legacy_device_plug_status;
        port->dpcd_rev = port_msg->dpcd_revision;
@@ -2273,6 +2298,7 @@ drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb,
                mutex_lock(&mgr->lock);
                drm_dp_mst_topology_get_port(port);
                list_add(&port->next, &mstb->ports);
+               mstb->num_ports++;
                mutex_unlock(&mgr->lock);
        }
 
@@ -2287,7 +2313,7 @@ drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb,
                }
        }
 
-       ret = drm_dp_port_set_pdt(port, new_pdt);
+       ret = drm_dp_port_set_pdt(port, new_pdt, new_mcs);
        if (ret == 1) {
                send_link_addr = true;
        } else if (ret < 0) {
@@ -2301,7 +2327,8 @@ drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb,
         * we're coming out of suspend. In this case, always resend the link
         * address if there's an MSTB on this port
         */
-       if (!created && port->pdt == DP_PEER_DEVICE_MST_BRANCHING)
+       if (!created && port->pdt == DP_PEER_DEVICE_MST_BRANCHING &&
+           port->mcs)
                send_link_addr = true;
 
        if (port->connector)
@@ -2336,8 +2363,9 @@ drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb,
 {
        struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
        struct drm_dp_mst_port *port;
-       int old_ddps, ret;
+       int old_ddps, old_input, ret, i;
        u8 new_pdt;
+       bool new_mcs;
        bool dowork = false, create_connector = false;
 
        port = drm_dp_get_port(mstb, conn_stat->port_number);
@@ -2367,8 +2395,8 @@ drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb,
        }
 
        old_ddps = port->ddps;
+       old_input = port->input;
        port->input = conn_stat->input_port;
-       port->mcs = conn_stat->message_capability_status;
        port->ldps = conn_stat->legacy_device_plug_status;
        port->ddps = conn_stat->displayport_device_plug_status;
 
@@ -2381,8 +2409,8 @@ drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb,
        }
 
        new_pdt = port->input ? DP_PEER_DEVICE_NONE : conn_stat->peer_device_type;
-
-       ret = drm_dp_port_set_pdt(port, new_pdt);
+       new_mcs = conn_stat->message_capability_status;
+       ret = drm_dp_port_set_pdt(port, new_pdt, new_mcs);
        if (ret == 1) {
                dowork = true;
        } else if (ret < 0) {
@@ -2391,6 +2419,28 @@ drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb,
                dowork = false;
        }
 
+       if (!old_input && old_ddps != port->ddps && !port->ddps) {
+               for (i = 0; i < mgr->max_payloads; i++) {
+                       struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i];
+                       struct drm_dp_mst_port *port_validated;
+
+                       if (!vcpi)
+                               continue;
+
+                       port_validated =
+                               container_of(vcpi, struct drm_dp_mst_port, vcpi);
+                       port_validated =
+                               drm_dp_mst_topology_get_port_validated(mgr, port_validated);
+                       if (!port_validated) {
+                               mutex_lock(&mgr->payload_lock);
+                               vcpi->num_slots = 0;
+                               mutex_unlock(&mgr->payload_lock);
+                       } else {
+                               drm_dp_mst_topology_put_port(port_validated);
+                       }
+               }
+       }
+
        if (port->connector)
                drm_modeset_unlock(&mgr->base.lock);
        else if (create_connector)
@@ -2753,9 +2803,11 @@ static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
        ret = process_single_tx_qlock(mgr, txmsg, false);
        if (ret == 1) {
                /* txmsg is sent it should be in the slots now */
+               mgr->is_waiting_for_dwn_reply = true;
                list_del(&txmsg->next);
        } else if (ret) {
                DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
+               mgr->is_waiting_for_dwn_reply = false;
                list_del(&txmsg->next);
                if (txmsg->seqno != -1)
                        txmsg->dst->tx_slots[txmsg->seqno] = NULL;
@@ -2795,7 +2847,8 @@ static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
                drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
        }
 
-       if (list_is_singular(&mgr->tx_msg_downq))
+       if (list_is_singular(&mgr->tx_msg_downq) &&
+           !mgr->is_waiting_for_dwn_reply)
                process_single_down_tx_qlock(mgr);
        mutex_unlock(&mgr->qlock);
 }
@@ -2951,6 +3004,7 @@ drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
                                      path_res->avail_payload_bw_number);
                        port->available_pbn =
                                path_res->avail_payload_bw_number;
+                       port->fec_capable = path_res->fec_capable;
                }
        }
 
@@ -3740,6 +3794,7 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
        mutex_lock(&mgr->qlock);
        txmsg->state = DRM_DP_SIDEBAND_TX_RX;
        mstb->tx_slots[slot] = NULL;
+       mgr->is_waiting_for_dwn_reply = false;
        mutex_unlock(&mgr->qlock);
 
        wake_up_all(&mgr->tx_waitq);
@@ -3749,6 +3804,9 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
 no_msg:
        drm_dp_mst_topology_put_mstb(mstb);
 clear_down_rep_recv:
+       mutex_lock(&mgr->qlock);
+       mgr->is_waiting_for_dwn_reply = false;
+       mutex_unlock(&mgr->qlock);
        memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
 
        return 0;
@@ -3771,7 +3829,8 @@ drm_dp_mst_process_up_req(struct drm_dp_mst_topology_mgr *mgr,
                else if (msg->req_type == DP_RESOURCE_STATUS_NOTIFY)
                        guid = msg->u.resource_stat.guid;
 
-               mstb = drm_dp_get_mst_branch_device_by_guid(mgr, guid);
+               if (guid)
+                       mstb = drm_dp_get_mst_branch_device_by_guid(mgr, guid);
        } else {
                mstb = drm_dp_get_mst_branch_device(mgr, hdr->lct, hdr->rad);
        }
@@ -3958,6 +4017,8 @@ drm_dp_mst_detect_port(struct drm_connector *connector,
        switch (port->pdt) {
        case DP_PEER_DEVICE_NONE:
        case DP_PEER_DEVICE_MST_BRANCHING:
+               if (!port->mcs)
+                       ret = connector_status_connected;
                break;
 
        case DP_PEER_DEVICE_SST_SINK:
@@ -4080,6 +4141,7 @@ static int drm_dp_init_vcpi(struct drm_dp_mst_topology_mgr *mgr,
  * @mgr: MST topology manager for the port
  * @port: port to find vcpi slots for
  * @pbn: bandwidth required for the mode in PBN
+ * @pbn_div: divider for DSC mode that takes FEC into account
  *
  * Allocates VCPI slots to @port, replacing any previous VCPI allocations it
  * may have had. Any atomic drivers which support MST must call this function
@@ -4106,11 +4168,12 @@ static int drm_dp_init_vcpi(struct drm_dp_mst_topology_mgr *mgr,
  */
 int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
                                  struct drm_dp_mst_topology_mgr *mgr,
-                                 struct drm_dp_mst_port *port, int pbn)
+                                 struct drm_dp_mst_port *port, int pbn,
+                                 int pbn_div)
 {
        struct drm_dp_mst_topology_state *topology_state;
        struct drm_dp_vcpi_allocation *pos, *vcpi = NULL;
-       int prev_slots, req_slots;
+       int prev_slots, prev_bw, req_slots;
 
        topology_state = drm_atomic_get_mst_topology_state(state, mgr);
        if (IS_ERR(topology_state))
@@ -4121,6 +4184,7 @@ int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
                if (pos->port == port) {
                        vcpi = pos;
                        prev_slots = vcpi->vcpi;
+                       prev_bw = vcpi->pbn;
 
                        /*
                         * This should never happen, unless the driver tries
@@ -4136,14 +4200,22 @@ int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
                        break;
                }
        }
-       if (!vcpi)
+       if (!vcpi) {
                prev_slots = 0;
+               prev_bw = 0;
+       }
 
-       req_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);
+       if (pbn_div <= 0)
+               pbn_div = mgr->pbn_div;
+
+       req_slots = DIV_ROUND_UP(pbn, pbn_div);
 
        DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] [MST PORT:%p] VCPI %d -> %d\n",
                         port->connector->base.id, port->connector->name,
                         port, prev_slots, req_slots);
+       DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] [MST PORT:%p] PBN %d -> %d\n",
+                        port->connector->base.id, port->connector->name,
+                        port, prev_bw, pbn);
 
        /* Add the new allocation to the state */
        if (!vcpi) {
@@ -4156,6 +4228,7 @@ int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
                list_add(&vcpi->next, &topology_state->vcpis);
        }
        vcpi->vcpi = req_slots;
+       vcpi->pbn = pbn;
 
        return req_slots;
 }
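With the new pbn_div argument, callers that do not use DSC can pass 0 (any value <= 0 falls back to mgr->pbn_div), while DSC-aware drivers pass the FEC-adjusted divider. A minimal sketch of a driver-side atomic check using the updated helpers (the function and its parameters are illustrative, not part of this patch):

        #include <drm/drm_atomic.h>
        #include <drm/drm_dp_mst_helper.h>
        #include <drm/drm_modes.h>

        /* Illustrative only: request VCPI slots for a mode in atomic_check. */
        static int example_mst_find_slots(struct drm_atomic_state *state,
                                          struct drm_dp_mst_topology_mgr *mgr,
                                          struct drm_dp_mst_port *port,
                                          const struct drm_display_mode *mode,
                                          bool dsc, int dsc_bpp_x16, int dsc_pbn_div)
        {
                int pbn, slots;

                /* bpp is passed in 1/16 bpp units when DSC is in use */
                pbn = drm_dp_calc_pbn_mode(mode->clock,
                                           dsc ? dsc_bpp_x16 : 24, dsc);

                /* pbn_div <= 0 keeps the pre-DSC behavior (mgr->pbn_div) */
                slots = drm_dp_atomic_find_vcpi_slots(state, mgr, port, pbn,
                                                      dsc ? dsc_pbn_div : 0);
                if (slots < 0)
                        return slots;

                return 0;
        }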
@@ -4406,10 +4479,11 @@ EXPORT_SYMBOL(drm_dp_check_act_status);
  * drm_dp_calc_pbn_mode() - Calculate the PBN for a mode.
  * @clock: dot clock for the mode
  * @bpp: bpp for the mode.
+ * @dsc: DSC mode. If true, bpp has units of 1/16 of a bit per pixel
  *
  * This uses the formula in the spec to calculate the PBN value for a mode.
  */
-int drm_dp_calc_pbn_mode(int clock, int bpp)
+int drm_dp_calc_pbn_mode(int clock, int bpp, bool dsc)
 {
        /*
         * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006
@@ -4420,7 +4494,16 @@ int drm_dp_calc_pbn_mode(int clock, int bpp)
         * peak_kbps *= (1006/1000)
         * peak_kbps *= (64/54)
         * peak_kbps *= 8    convert to bytes
+        *
+        * If the bpp is in units of 1/16, further divide by 16. Put this
+        * factor in the numerator rather than the denominator to avoid
+        * integer overflow
         */
+
+       if (dsc)
+               return DIV_ROUND_UP_ULL(mul_u32_u32(clock * (bpp / 16), 64 * 1006),
+                                       8 * 54 * 1000 * 1000);
+
        return DIV_ROUND_UP_ULL(mul_u32_u32(clock * bpp, 64 * 1006),
                                8 * 54 * 1000 * 1000);
 }
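A worked example of the formula, evaluated by hand for illustration:

        /*
         * Non-DSC: clock = 297000 kHz, bpp = 24
         *   PBN = DIV_ROUND_UP(297000 * 24 * 64 * 1006, 8 * 54 * 1000 * 1000)
         *       = DIV_ROUND_UP(458929152000, 432000000)
         *       = 1063
         *
         * DSC: the same effective 24 bpp is passed in 1/16 units, so bpp = 384
         * and the helper divides the 1/16 factor back out, giving 1063 again.
         */
        int pbn     = drm_dp_calc_pbn_mode(297000, 24, false);
        int pbn_dsc = drm_dp_calc_pbn_mode(297000, 24 * 16, true);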
@@ -4559,7 +4642,7 @@ static void drm_dp_tx_work(struct work_struct *work)
        struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work);
 
        mutex_lock(&mgr->qlock);
-       if (!list_empty(&mgr->tx_msg_downq))
+       if (!list_empty(&mgr->tx_msg_downq) && !mgr->is_waiting_for_dwn_reply)
                process_single_down_tx_qlock(mgr);
        mutex_unlock(&mgr->qlock);
 }
@@ -4570,7 +4653,7 @@ drm_dp_delayed_destroy_port(struct drm_dp_mst_port *port)
        if (port->connector)
                port->mgr->cbs->destroy_connector(port->mgr, port->connector);
 
-       drm_dp_port_set_pdt(port, DP_PEER_DEVICE_NONE);
+       drm_dp_port_set_pdt(port, DP_PEER_DEVICE_NONE, port->mcs);
        drm_dp_mst_put_port_malloc(port);
 }
 
@@ -4722,9 +4805,61 @@ static void drm_dp_mst_destroy_state(struct drm_private_obj *obj,
        kfree(mst_state);
 }
 
+static bool drm_dp_mst_port_downstream_of_branch(struct drm_dp_mst_port *port,
+                                                struct drm_dp_mst_branch *branch)
+{
+       while (port->parent) {
+               if (port->parent == branch)
+                       return true;
+
+               if (port->parent->port_parent)
+                       port = port->parent->port_parent;
+               else
+                       break;
+       }
+       return false;
+}
+
+static inline
+int drm_dp_mst_atomic_check_bw_limit(struct drm_dp_mst_branch *branch,
+                                    struct drm_dp_mst_topology_state *mst_state)
+{
+       struct drm_dp_mst_port *port;
+       struct drm_dp_vcpi_allocation *vcpi;
+       int pbn_limit = 0, pbn_used = 0;
+
+       list_for_each_entry(port, &branch->ports, next) {
+               if (port->mstb)
+                       if (drm_dp_mst_atomic_check_bw_limit(port->mstb, mst_state))
+                               return -ENOSPC;
+
+               if (port->available_pbn > 0)
+                       pbn_limit = port->available_pbn;
+       }
+       DRM_DEBUG_ATOMIC("[MST BRANCH:%p] branch has %d PBN available\n",
+                        branch, pbn_limit);
+
+       list_for_each_entry(vcpi, &mst_state->vcpis, next) {
+               if (!vcpi->pbn)
+                       continue;
+
+               if (drm_dp_mst_port_downstream_of_branch(vcpi->port, branch))
+                       pbn_used += vcpi->pbn;
+       }
+       DRM_DEBUG_ATOMIC("[MST BRANCH:%p] branch used %d PBN\n",
+                        branch, pbn_used);
+
+       if (pbn_used > pbn_limit) {
+               DRM_DEBUG_ATOMIC("[MST BRANCH:%p] No available bandwidth\n",
+                                branch);
+               return -ENOSPC;
+       }
+       return 0;
+}
+
 static inline int
-drm_dp_mst_atomic_check_topology_state(struct drm_dp_mst_topology_mgr *mgr,
-                                      struct drm_dp_mst_topology_state *mst_state)
+drm_dp_mst_atomic_check_vcpi_alloc_limit(struct drm_dp_mst_topology_mgr *mgr,
+                                        struct drm_dp_mst_topology_state *mst_state)
 {
        struct drm_dp_vcpi_allocation *vcpi;
        int avail_slots = 63, payload_count = 0;
@@ -4762,6 +4897,128 @@ drm_dp_mst_atomic_check_topology_state(struct drm_dp_mst_topology_mgr *mgr,
 }
 
 /**
+ * drm_dp_mst_add_affected_dsc_crtcs() - Add affected DSC CRTCs
+ * @state: Pointer to the new struct drm_dp_mst_topology_state
+ * @mgr: MST topology manager
+ *
+ * Whenever there is a change in MST topology, the DSC configuration
+ * has to be recalculated, so a modeset needs to be triggered on all
+ * CRTCs affected by that topology.
+ *
+ * See also:
+ * drm_dp_mst_atomic_enable_dsc()
+ */
+int drm_dp_mst_add_affected_dsc_crtcs(struct drm_atomic_state *state, struct drm_dp_mst_topology_mgr *mgr)
+{
+       struct drm_dp_mst_topology_state *mst_state;
+       struct drm_dp_vcpi_allocation *pos;
+       struct drm_connector *connector;
+       struct drm_connector_state *conn_state;
+       struct drm_crtc *crtc;
+       struct drm_crtc_state *crtc_state;
+
+       mst_state = drm_atomic_get_mst_topology_state(state, mgr);
+
+       if (IS_ERR(mst_state))
+               return -EINVAL;
+
+       list_for_each_entry(pos, &mst_state->vcpis, next) {
+
+               connector = pos->port->connector;
+
+               if (!connector)
+                       return -EINVAL;
+
+               conn_state = drm_atomic_get_connector_state(state, connector);
+
+               if (IS_ERR(conn_state))
+                       return PTR_ERR(conn_state);
+
+               crtc = conn_state->crtc;
+
+               if (WARN_ON(!crtc))
+                       return -EINVAL;
+
+               if (!drm_dp_mst_dsc_aux_for_port(pos->port))
+                       continue;
+
+               crtc_state = drm_atomic_get_crtc_state(mst_state->base.state, crtc);
+
+               if (IS_ERR(crtc_state))
+                       return PTR_ERR(crtc_state);
+
+               DRM_DEBUG_ATOMIC("[MST MGR:%p] Setting mode_changed flag on CRTC %p\n",
+                                mgr, crtc);
+
+               crtc_state->mode_changed = true;
+       }
+       return 0;
+}
+EXPORT_SYMBOL(drm_dp_mst_add_affected_dsc_crtcs);
+
+/**
+ * drm_dp_mst_atomic_enable_dsc() - Set DSC Enable Flag to On/Off
+ * @state: Pointer to the new drm_atomic_state
+ * @port: Pointer to the affected MST Port
+ * @pbn: Newly recalculated bw required for link with DSC enabled
+ * @pbn_div: Divider to calculate correct number of pbn per slot
+ * @enable: Boolean flag to enable or disable DSC on the port
+ *
+ * This function enables or disables DSC on the given port by
+ * recalculating its VCPI from the PBN provided, and sets the
+ * dsc_enabled flag to keep track of which ports have DSC enabled.
+ *
+ */
+int drm_dp_mst_atomic_enable_dsc(struct drm_atomic_state *state,
+                                struct drm_dp_mst_port *port,
+                                int pbn, int pbn_div,
+                                bool enable)
+{
+       struct drm_dp_mst_topology_state *mst_state;
+       struct drm_dp_vcpi_allocation *pos;
+       bool found = false;
+       int vcpi = 0;
+
+       mst_state = drm_atomic_get_mst_topology_state(state, port->mgr);
+
+       if (IS_ERR(mst_state))
+               return PTR_ERR(mst_state);
+
+       list_for_each_entry(pos, &mst_state->vcpis, next) {
+               if (pos->port == port) {
+                       found = true;
+                       break;
+               }
+       }
+
+       if (!found) {
+               DRM_DEBUG_ATOMIC("[MST PORT:%p] Couldn't find VCPI allocation in mst state %p\n",
+                                port, mst_state);
+               return -EINVAL;
+       }
+
+       if (pos->dsc_enabled == enable) {
+               DRM_DEBUG_ATOMIC("[MST PORT:%p] DSC flag is already set to %d, returning %d VCPI slots\n",
+                                port, enable, pos->vcpi);
+               vcpi = pos->vcpi;
+       }
+
+       if (enable) {
+               vcpi = drm_dp_atomic_find_vcpi_slots(state, port->mgr, port, pbn, pbn_div);
+               DRM_DEBUG_ATOMIC("[MST PORT:%p] Enabling DSC flag, reallocating %d VCPI slots on the port\n",
+                                port, vcpi);
+               if (vcpi < 0)
+                       return -EINVAL;
+       }
+
+       pos->dsc_enabled = enable;
+
+       return vcpi;
+}
+EXPORT_SYMBOL(drm_dp_mst_atomic_enable_dsc);
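Taken together, the two helpers above give DSC-aware drivers a simple atomic-check flow: force a modeset on every CRTC in a changed topology, then enable DSC on the port and reallocate its VCPI from the DSC-adjusted PBN. A minimal sketch (the function, its parameters, and the surrounding plumbing are hypothetical, not from this patch):

        #include <drm/drm_atomic.h>
        #include <drm/drm_dp_mst_helper.h>

        /* Illustrative only: DSC handling in a driver's atomic check. */
        static int example_dsc_atomic_check(struct drm_atomic_state *state,
                                            struct drm_dp_mst_topology_mgr *mgr,
                                            struct drm_dp_mst_port *port,
                                            int dsc_pbn, int dsc_pbn_div)
        {
                int slots, ret;

                /* Topology changed: DSC must be recomputed on affected CRTCs */
                ret = drm_dp_mst_add_affected_dsc_crtcs(state, mgr);
                if (ret)
                        return ret;

                /* Enable DSC on the port and reallocate its VCPI from dsc_pbn */
                slots = drm_dp_mst_atomic_enable_dsc(state, port, dsc_pbn,
                                                     dsc_pbn_div, true);
                if (slots < 0)
                        return slots;

                return 0;
        }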
+/**
  * drm_dp_mst_atomic_check - Check that the new state of an MST topology in an
  * atomic update is valid
  * @state: Pointer to the new &struct drm_dp_mst_topology_state
@@ -4789,7 +5046,13 @@ int drm_dp_mst_atomic_check(struct drm_atomic_state *state)
        int i, ret = 0;
 
        for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
-               ret = drm_dp_mst_atomic_check_topology_state(mgr, mst_state);
+               if (!mgr->mst_state)
+                       continue;
+
+               ret = drm_dp_mst_atomic_check_vcpi_alloc_limit(mgr, mst_state);
+               if (ret)
+                       break;
+               ret = drm_dp_mst_atomic_check_bw_limit(mgr->mst_primary, mst_state);
                if (ret)
                        break;
        }
@@ -5053,3 +5316,173 @@ static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux)
 {
        i2c_del_adapter(&aux->ddc);
 }
+
+/**
+ * drm_dp_mst_is_virtual_dpcd() - Is the given port a virtual DP Peer Device
+ * @port: The port to check
+ *
+ * A single physical MST hub object can be represented in the topology
+ * by multiple branches, with virtual ports between those branches.
+ *
+ * As of DP 1.4, an MST hub with internal (virtual) ports must expose
+ * certain DPCD registers over those ports. See sections 2.6.1.1.1
+ * and 2.6.1.1.2 of the DisplayPort specification v1.4 for details.
+ *
+ * May acquire mgr->lock
+ *
+ * Returns:
+ * true if the port is a virtual DP peer device, false otherwise
+ */
+static bool drm_dp_mst_is_virtual_dpcd(struct drm_dp_mst_port *port)
+{
+       struct drm_dp_mst_port *downstream_port;
+
+       if (!port || port->dpcd_rev < DP_DPCD_REV_14)
+               return false;
+
+       /* Virtual DP Sink (Internal Display Panel) */
+       if (port->port_num >= 8)
+               return true;
+
+       /* DP-to-HDMI Protocol Converter */
+       if (port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV &&
+           !port->mcs &&
+           port->ldps)
+               return true;
+
+       /* DP-to-DP */
+       mutex_lock(&port->mgr->lock);
+       if (port->pdt == DP_PEER_DEVICE_MST_BRANCHING &&
+           port->mstb &&
+           port->mstb->num_ports == 2) {
+               list_for_each_entry(downstream_port, &port->mstb->ports, next) {
+                       if (downstream_port->pdt == DP_PEER_DEVICE_SST_SINK &&
+                           !downstream_port->input) {
+                               mutex_unlock(&port->mgr->lock);
+                               return true;
+                       }
+               }
+       }
+       mutex_unlock(&port->mgr->lock);
+
+       return false;
+}
+
+/**
+ * drm_dp_mst_dsc_aux_for_port() - Find the correct aux for DSC
+ * @port: The port to check. A leaf of the MST tree with an attached display.
+ *
+ * Depending on the situation, DSC may be enabled via the endpoint aux,
+ * the immediately upstream aux, or the connector's physical aux.
+ *
+ * This is both the correct aux to read DSC_CAPABILITY and the
+ * correct aux to write DSC_ENABLED.
+ *
+ * This operation can be expensive (up to four aux reads), so
+ * the caller should cache the return.
+ *
+ * Returns:
+ * NULL if DSC cannot be enabled on this port, otherwise the aux device
+ */
+struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port)
+{
+       struct drm_dp_mst_port *immediate_upstream_port;
+       struct drm_dp_mst_port *fec_port;
+       struct drm_dp_desc desc = { 0 };
+       u8 endpoint_fec;
+       u8 endpoint_dsc;
+
+       if (!port)
+               return NULL;
+
+       if (port->parent->port_parent)
+               immediate_upstream_port = port->parent->port_parent;
+       else
+               immediate_upstream_port = NULL;
+
+       fec_port = immediate_upstream_port;
+       while (fec_port) {
+               /*
+                * Each physical link (i.e. not a virtual port) between the
+                * output and the primary device must support FEC
+                */
+               if (!drm_dp_mst_is_virtual_dpcd(fec_port) &&
+                   !fec_port->fec_capable)
+                       return NULL;
+
+               fec_port = fec_port->parent->port_parent;
+       }
+
+       /* DP-to-DP peer device */
+       if (drm_dp_mst_is_virtual_dpcd(immediate_upstream_port)) {
+               u8 upstream_dsc;
+
+               if (drm_dp_dpcd_read(&port->aux,
+                                    DP_DSC_SUPPORT, &endpoint_dsc, 1) != 1)
+                       return NULL;
+               if (drm_dp_dpcd_read(&port->aux,
+                                    DP_FEC_CAPABILITY, &endpoint_fec, 1) != 1)
+                       return NULL;
+               if (drm_dp_dpcd_read(&immediate_upstream_port->aux,
+                                    DP_DSC_SUPPORT, &upstream_dsc, 1) != 1)
+                       return NULL;
+
+               /* Endpoint decompression with DP-to-DP peer device */
+               if ((endpoint_dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED) &&
+                   (endpoint_fec & DP_FEC_CAPABLE) &&
+                   (upstream_dsc & 0x2) /* DSC passthrough */)
+                       return &port->aux;
+
+               /* Virtual DPCD decompression with DP-to-DP peer device */
+               return &immediate_upstream_port->aux;
+       }
+
+       /* Virtual DPCD decompression with DP-to-HDMI or Virtual DP Sink */
+       if (drm_dp_mst_is_virtual_dpcd(port))
+               return &port->aux;
+
+       /*
+        * Synaptics quirk
+        * Applies to ports for which:
+        * - Physical aux has Synaptics OUI
+        * - DPv1.4 or higher
+        * - Port is on primary branch device
+        * - Not a VGA adapter (DP_DWN_STRM_PORT_TYPE_ANALOG)
+        */
+       if (drm_dp_read_desc(port->mgr->aux, &desc, true))
+               return NULL;
+
+       if (drm_dp_has_quirk(&desc, DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD) &&
+           port->mgr->dpcd[DP_DPCD_REV] >= DP_DPCD_REV_14 &&
+           port->parent == port->mgr->mst_primary) {
+               u8 downstreamport;
+
+               if (drm_dp_dpcd_read(&port->aux, DP_DOWNSTREAMPORT_PRESENT,
+                                    &downstreamport, 1) < 0)
+                       return NULL;
+
+               if ((downstreamport & DP_DWN_STRM_PORT_PRESENT) &&
+                  ((downstreamport & DP_DWN_STRM_PORT_TYPE_MASK)
+                    != DP_DWN_STRM_PORT_TYPE_ANALOG))
+                       return port->mgr->aux;
+       }
+
+       /*
+        * The check below verifies if the MST sink
+        * connected to the GPU is capable of DSC -
+        * therefore the endpoint needs to be
+        * both DSC and FEC capable.
+        */
+       if (drm_dp_dpcd_read(&port->aux,
+          DP_DSC_SUPPORT, &endpoint_dsc, 1) != 1)
+               return NULL;
+       if (drm_dp_dpcd_read(&port->aux,
+          DP_FEC_CAPABILITY, &endpoint_fec, 1) != 1)
+               return NULL;
+       if ((endpoint_dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED) &&
+          (endpoint_fec & DP_FEC_CAPABLE))
+               return &port->aux;
+
+       return NULL;
+}
+EXPORT_SYMBOL(drm_dp_mst_dsc_aux_for_port);
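A minimal sketch of how a driver might consume the returned aux to switch on decompression at the selected device (the wrapper function and its error handling are illustrative; DP_DSC_ENABLE and DP_DECOMPRESSION_EN are the existing DPCD defines):

        #include <drm/drm_dp_helper.h>
        #include <drm/drm_dp_mst_helper.h>

        /* Illustrative only: enable decompression via whichever aux was selected. */
        static int example_enable_dsc_decompression(struct drm_dp_mst_port *port)
        {
                struct drm_dp_aux *dsc_aux = drm_dp_mst_dsc_aux_for_port(port);

                if (!dsc_aux)
                        return -ENODEV; /* DSC cannot be enabled on this port */

                if (drm_dp_dpcd_writeb(dsc_aux, DP_DSC_ENABLE,
                                       DP_DECOMPRESSION_EN) < 0)
                        return -EIO;

                return 0;
        }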