// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - Tunneling support
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2019, Intel Corporation
 */
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/list.h>

#include "tunnel.h"
#include "tb.h"
/* PCIe adapters always use HopID 8 for both directions */
#define TB_PCI_HOPID			8

#define TB_PCI_PATH_DOWN		0
#define TB_PCI_PATH_UP			1

/* USB3 adapters always use HopID 8 for both directions */
#define TB_USB3_HOPID			8

#define TB_USB3_PATH_DOWN		0
#define TB_USB3_PATH_UP			1

/* DP adapters use HopID 8 for AUX and 9 for Video */
#define TB_DP_AUX_TX_HOPID		8
#define TB_DP_AUX_RX_HOPID		8
#define TB_DP_VIDEO_HOPID		9

#define TB_DP_VIDEO_PATH_OUT		0
#define TB_DP_AUX_PATH_OUT		1
#define TB_DP_AUX_PATH_IN		2

/* Minimum number of credits needed for PCIe path */
#define TB_MIN_PCIE_CREDITS		6U
/*
 * Number of credits we try to allocate for each DMA path if not limited
 * by the host router baMaxHI.
 */
#define TB_DMA_CREDITS			14U
/* Minimum number of credits for DMA path */
#define TB_MIN_DMA_CREDITS		1U
static const char * const tb_tunnel_names[] = { "PCI", "DP", "DMA", "USB3" };

#define __TB_TUNNEL_PRINT(level, tunnel, fmt, arg...)                   \
	do {                                                            \
		struct tb_tunnel *__tunnel = (tunnel);                  \
		level(__tunnel->tb, "%llx:%x <-> %llx:%x (%s): " fmt,   \
		      tb_route(__tunnel->src_port->sw),                 \
		      __tunnel->src_port->port,                         \
		      tb_route(__tunnel->dst_port->sw),                 \
		      __tunnel->dst_port->port,                         \
		      tb_tunnel_names[__tunnel->type],                  \
		      ## arg);                                          \
	} while (0)

#define tb_tunnel_WARN(tunnel, fmt, arg...) \
	__TB_TUNNEL_PRINT(tb_WARN, tunnel, fmt, ##arg)
#define tb_tunnel_warn(tunnel, fmt, arg...) \
	__TB_TUNNEL_PRINT(tb_warn, tunnel, fmt, ##arg)
#define tb_tunnel_info(tunnel, fmt, arg...) \
	__TB_TUNNEL_PRINT(tb_info, tunnel, fmt, ##arg)
#define tb_tunnel_dbg(tunnel, fmt, arg...) \
	__TB_TUNNEL_PRINT(tb_dbg, tunnel, fmt, ##arg)
static inline unsigned int tb_usable_credits(const struct tb_port *port)
{
	return port->total_credits - port->ctl_credits;
}
/**
 * tb_available_credits() - Available credits for PCIe and DMA
 * @port: Lane adapter to check
 * @max_dp_streams: If non-%NULL stores maximum number of simultaneous DP
 *		    streams possible through this lane adapter
 */
static unsigned int tb_available_credits(const struct tb_port *port,
					 size_t *max_dp_streams)
{
	const struct tb_switch *sw = port->sw;
	int credits, usb3, pcie, spare;
	size_t ndp;

	usb3 = tb_acpi_may_tunnel_usb3() ? sw->max_usb3_credits : 0;
	pcie = tb_acpi_may_tunnel_pcie() ? sw->max_pcie_credits : 0;

	if (tb_acpi_is_xdomain_allowed()) {
		spare = min_not_zero(sw->max_dma_credits, TB_DMA_CREDITS);
		/* Add some credits for potential second DMA tunnel */
		spare += TB_MIN_DMA_CREDITS;
	} else {
		spare = 0;
	}

	credits = tb_usable_credits(port);
	if (tb_acpi_may_tunnel_dp()) {
		/*
		 * Maximum number of DP streams possible through the
		 * lane adapter.
		 */
		ndp = (credits - (usb3 + pcie + spare)) /
		      (sw->min_dp_aux_credits + sw->min_dp_main_credits);
	} else {
		ndp = 0;
	}

	credits -= ndp * (sw->min_dp_aux_credits + sw->min_dp_main_credits);
	credits -= usb3;

	if (max_dp_streams)
		*max_dp_streams = ndp;

	return credits > 0 ? credits : 0;
}
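/*
 * Illustrative example with hypothetical numbers (not taken from real
 * hardware): with 60 total credits and 2 control credits the adapter has
 * 58 usable credits. If USB3 may take 32 credits, PCIe 6 and the XDomain
 * spare is 14 + 1, and a DP stream needs 1 AUX + 12 main credits, then
 * ndp = (58 - 53) / 13 = 0 DP streams fit and 58 - 0 - 32 = 26 credits
 * remain available for PCIe and DMA on this lane adapter.
 */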
119 static struct tb_tunnel *tb_tunnel_alloc(struct tb *tb, size_t npaths,
120 enum tb_tunnel_type type)
122 struct tb_tunnel *tunnel;
124 tunnel = kzalloc(sizeof(*tunnel), GFP_KERNEL);
128 tunnel->paths = kcalloc(npaths, sizeof(tunnel->paths[0]), GFP_KERNEL);
129 if (!tunnel->paths) {
130 tb_tunnel_free(tunnel);
134 INIT_LIST_HEAD(&tunnel->list);
136 tunnel->npaths = npaths;
142 static int tb_pci_activate(struct tb_tunnel *tunnel, bool activate)
146 res = tb_pci_port_enable(tunnel->src_port, activate);
150 if (tb_port_is_pcie_up(tunnel->dst_port))
151 return tb_pci_port_enable(tunnel->dst_port, activate);
156 static int tb_pci_init_credits(struct tb_path_hop *hop)
158 struct tb_port *port = hop->in_port;
159 struct tb_switch *sw = port->sw;
160 unsigned int credits;
162 if (tb_port_use_credit_allocation(port)) {
163 unsigned int available;
165 available = tb_available_credits(port, NULL);
166 credits = min(sw->max_pcie_credits, available);
		if (credits < TB_MIN_PCIE_CREDITS)
			return -ENOSPC;

		credits = max(TB_MIN_PCIE_CREDITS, credits);
	} else {
		if (tb_port_is_null(port))
			credits = port->bonded ? 32 : 16;
		else
			credits = 7;
	}

	hop->initial_credits = credits;
	return 0;
}
183 static int tb_pci_init_path(struct tb_path *path)
185 struct tb_path_hop *hop;
187 path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
188 path->egress_shared_buffer = TB_PATH_NONE;
189 path->ingress_fc_enable = TB_PATH_ALL;
190 path->ingress_shared_buffer = TB_PATH_NONE;
193 path->drop_packages = 0;
195 tb_path_for_each_hop(path, hop) {
198 ret = tb_pci_init_credits(hop);
/**
 * tb_tunnel_discover_pci() - Discover existing PCIe tunnels
 * @tb: Pointer to the domain structure
 * @down: PCIe downstream adapter
 *
 * If @down adapter is active, follows the tunnel to the PCIe upstream
 * adapter and back. Returns the discovered tunnel or %NULL if there was
 * no tunnel.
 */
215 struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down)
217 struct tb_tunnel *tunnel;
218 struct tb_path *path;
220 if (!tb_pci_port_is_enabled(down))
223 tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_PCI);
227 tunnel->activate = tb_pci_activate;
228 tunnel->src_port = down;
	/*
	 * Discover both paths even if they are not complete. We will
	 * clean them up by calling tb_tunnel_deactivate() below in that
	 * case.
	 */
235 path = tb_path_discover(down, TB_PCI_HOPID, NULL, -1,
236 &tunnel->dst_port, "PCIe Up");
238 /* Just disable the downstream port */
239 tb_pci_port_enable(down, false);
242 tunnel->paths[TB_PCI_PATH_UP] = path;
243 if (tb_pci_init_path(tunnel->paths[TB_PCI_PATH_UP]))
246 path = tb_path_discover(tunnel->dst_port, -1, down, TB_PCI_HOPID, NULL,
250 tunnel->paths[TB_PCI_PATH_DOWN] = path;
251 if (tb_pci_init_path(tunnel->paths[TB_PCI_PATH_DOWN]))
254 /* Validate that the tunnel is complete */
255 if (!tb_port_is_pcie_up(tunnel->dst_port)) {
256 tb_port_warn(tunnel->dst_port,
257 "path does not end on a PCIe adapter, cleaning up\n");
261 if (down != tunnel->src_port) {
262 tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
266 if (!tb_pci_port_is_enabled(tunnel->dst_port)) {
267 tb_tunnel_warn(tunnel,
268 "tunnel is not fully activated, cleaning up\n");
272 tb_tunnel_dbg(tunnel, "discovered\n");
276 tb_tunnel_deactivate(tunnel);
278 tb_tunnel_free(tunnel);
/**
 * tb_tunnel_alloc_pci() - allocate a pci tunnel
 * @tb: Pointer to the domain structure
 * @up: PCIe upstream adapter port
 * @down: PCIe downstream adapter port
 *
 * Allocate a PCI tunnel. The ports must be of type TB_TYPE_PCIE_UP and
 * TB_TYPE_PCIE_DOWN.
 *
 * Return: Returns a tb_tunnel on success or NULL on failure.
 */
294 struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
295 struct tb_port *down)
297 struct tb_tunnel *tunnel;
298 struct tb_path *path;
300 tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_PCI);
304 tunnel->activate = tb_pci_activate;
305 tunnel->src_port = down;
306 tunnel->dst_port = up;
308 path = tb_path_alloc(tb, down, TB_PCI_HOPID, up, TB_PCI_HOPID, 0,
312 tunnel->paths[TB_PCI_PATH_DOWN] = path;
313 if (tb_pci_init_path(path))
316 path = tb_path_alloc(tb, up, TB_PCI_HOPID, down, TB_PCI_HOPID, 0,
320 tunnel->paths[TB_PCI_PATH_UP] = path;
321 if (tb_pci_init_path(path))
327 tb_tunnel_free(tunnel);
static bool tb_dp_is_usb4(const struct tb_switch *sw)
{
	/* Titan Ridge DP adapters need the same treatment as USB4 */
	return tb_switch_is_usb4(sw) || tb_switch_is_titan_ridge(sw);
}
337 static int tb_dp_cm_handshake(struct tb_port *in, struct tb_port *out)
343 /* Both ends need to support this */
344 if (!tb_dp_is_usb4(in->sw) || !tb_dp_is_usb4(out->sw))
347 ret = tb_port_read(out, &val, TB_CFG_PORT,
348 out->cap_adap + DP_STATUS_CTRL, 1);
352 val |= DP_STATUS_CTRL_UF | DP_STATUS_CTRL_CMHS;
354 ret = tb_port_write(out, &val, TB_CFG_PORT,
355 out->cap_adap + DP_STATUS_CTRL, 1);
360 ret = tb_port_read(out, &val, TB_CFG_PORT,
361 out->cap_adap + DP_STATUS_CTRL, 1);
364 if (!(val & DP_STATUS_CTRL_CMHS))
366 usleep_range(10, 100);
372 static inline u32 tb_dp_cap_get_rate(u32 val)
374 u32 rate = (val & DP_COMMON_CAP_RATE_MASK) >> DP_COMMON_CAP_RATE_SHIFT;
377 case DP_COMMON_CAP_RATE_RBR:
379 case DP_COMMON_CAP_RATE_HBR:
381 case DP_COMMON_CAP_RATE_HBR2:
383 case DP_COMMON_CAP_RATE_HBR3:
390 static inline u32 tb_dp_cap_set_rate(u32 val, u32 rate)
392 val &= ~DP_COMMON_CAP_RATE_MASK;
395 WARN(1, "invalid rate %u passed, defaulting to 1620 MB/s\n", rate);
398 val |= DP_COMMON_CAP_RATE_RBR << DP_COMMON_CAP_RATE_SHIFT;
401 val |= DP_COMMON_CAP_RATE_HBR << DP_COMMON_CAP_RATE_SHIFT;
404 val |= DP_COMMON_CAP_RATE_HBR2 << DP_COMMON_CAP_RATE_SHIFT;
407 val |= DP_COMMON_CAP_RATE_HBR3 << DP_COMMON_CAP_RATE_SHIFT;
413 static inline u32 tb_dp_cap_get_lanes(u32 val)
415 u32 lanes = (val & DP_COMMON_CAP_LANES_MASK) >> DP_COMMON_CAP_LANES_SHIFT;
418 case DP_COMMON_CAP_1_LANE:
420 case DP_COMMON_CAP_2_LANES:
422 case DP_COMMON_CAP_4_LANES:
429 static inline u32 tb_dp_cap_set_lanes(u32 val, u32 lanes)
431 val &= ~DP_COMMON_CAP_LANES_MASK;
434 WARN(1, "invalid number of lanes %u passed, defaulting to 1\n",
438 val |= DP_COMMON_CAP_1_LANE << DP_COMMON_CAP_LANES_SHIFT;
441 val |= DP_COMMON_CAP_2_LANES << DP_COMMON_CAP_LANES_SHIFT;
444 val |= DP_COMMON_CAP_4_LANES << DP_COMMON_CAP_LANES_SHIFT;
static unsigned int tb_dp_bandwidth(unsigned int rate, unsigned int lanes)
{
	/* Tunneling removes the DP 8b/10b encoding */
	return rate * lanes * 8 / 10;
}
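/*
 * Example: an HBR2 x4 link is nominally 5400 * 4 = 21600 Mb/s; with the
 * 8b/10b encoding overhead removed by tunneling the usable payload is
 * 5400 * 4 * 8 / 10 = 17280 Mb/s, matching the dp_bw[] table below.
 */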
456 static int tb_dp_reduce_bandwidth(int max_bw, u32 in_rate, u32 in_lanes,
457 u32 out_rate, u32 out_lanes, u32 *new_rate,
460 static const u32 dp_bw[][2] = {
462 { 8100, 4 }, /* 25920 Mb/s */
463 { 5400, 4 }, /* 17280 Mb/s */
464 { 8100, 2 }, /* 12960 Mb/s */
465 { 2700, 4 }, /* 8640 Mb/s */
466 { 5400, 2 }, /* 8640 Mb/s */
467 { 8100, 1 }, /* 6480 Mb/s */
468 { 1620, 4 }, /* 5184 Mb/s */
469 { 5400, 1 }, /* 4320 Mb/s */
470 { 2700, 2 }, /* 4320 Mb/s */
471 { 1620, 2 }, /* 2592 Mb/s */
472 { 2700, 1 }, /* 2160 Mb/s */
		{ 1620, 1 }, /* 1296 Mb/s */
	};
	unsigned int i;

	/*
	 * Find a combination that can fit into max_bw and does not
	 * exceed the maximum rate and lanes supported by the DP OUT and
	 * DP IN adapters.
	 */
482 for (i = 0; i < ARRAY_SIZE(dp_bw); i++) {
		if (dp_bw[i][0] > out_rate || dp_bw[i][1] > out_lanes)
			continue;

		if (dp_bw[i][0] > in_rate || dp_bw[i][1] > in_lanes)
			continue;

		if (tb_dp_bandwidth(dp_bw[i][0], dp_bw[i][1]) <= max_bw) {
			*new_rate = dp_bw[i][0];
			*new_lanes = dp_bw[i][1];
			return 0;
		}
	}

	return -ENOSR;
}
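/*
 * Example walk of the dp_bw[] table above: with max_bw = 9000 Mb/s, a
 * DP IN advertising HBR3 x4 and a DP OUT advertising HBR2 x2, entries
 * exceeding the OUT capabilities are skipped and { 5400, 2 } is the
 * first remaining entry whose bandwidth (8640 Mb/s) fits, so the tunnel
 * is reduced to HBR2 x2.
 */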
499 static int tb_dp_xchg_caps(struct tb_tunnel *tunnel)
501 u32 out_dp_cap, out_rate, out_lanes, in_dp_cap, in_rate, in_lanes, bw;
502 struct tb_port *out = tunnel->dst_port;
503 struct tb_port *in = tunnel->src_port;
507 * Copy DP_LOCAL_CAP register to DP_REMOTE_CAP register for
508 * newer generation hardware.
510 if (in->sw->generation < 2 || out->sw->generation < 2)
514 * Perform connection manager handshake between IN and OUT ports
515 * before capabilities exchange can take place.
517 ret = tb_dp_cm_handshake(in, out);
521 /* Read both DP_LOCAL_CAP registers */
522 ret = tb_port_read(in, &in_dp_cap, TB_CFG_PORT,
523 in->cap_adap + DP_LOCAL_CAP, 1);
527 ret = tb_port_read(out, &out_dp_cap, TB_CFG_PORT,
528 out->cap_adap + DP_LOCAL_CAP, 1);
532 /* Write IN local caps to OUT remote caps */
533 ret = tb_port_write(out, &in_dp_cap, TB_CFG_PORT,
534 out->cap_adap + DP_REMOTE_CAP, 1);
538 in_rate = tb_dp_cap_get_rate(in_dp_cap);
539 in_lanes = tb_dp_cap_get_lanes(in_dp_cap);
540 tb_port_dbg(in, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
541 in_rate, in_lanes, tb_dp_bandwidth(in_rate, in_lanes));
544 * If the tunnel bandwidth is limited (max_bw is set) then see
545 * if we need to reduce bandwidth to fit there.
547 out_rate = tb_dp_cap_get_rate(out_dp_cap);
548 out_lanes = tb_dp_cap_get_lanes(out_dp_cap);
549 bw = tb_dp_bandwidth(out_rate, out_lanes);
550 tb_port_dbg(out, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
551 out_rate, out_lanes, bw);
553 if (in->sw->config.depth < out->sw->config.depth)
554 max_bw = tunnel->max_down;
556 max_bw = tunnel->max_up;
558 if (max_bw && bw > max_bw) {
559 u32 new_rate, new_lanes, new_bw;
561 ret = tb_dp_reduce_bandwidth(max_bw, in_rate, in_lanes,
562 out_rate, out_lanes, &new_rate,
565 tb_port_info(out, "not enough bandwidth for DP tunnel\n");
569 new_bw = tb_dp_bandwidth(new_rate, new_lanes);
570 tb_port_dbg(out, "bandwidth reduced to %u Mb/s x%u = %u Mb/s\n",
571 new_rate, new_lanes, new_bw);
574 * Set new rate and number of lanes before writing it to
575 * the IN port remote caps.
577 out_dp_cap = tb_dp_cap_set_rate(out_dp_cap, new_rate);
578 out_dp_cap = tb_dp_cap_set_lanes(out_dp_cap, new_lanes);
581 return tb_port_write(in, &out_dp_cap, TB_CFG_PORT,
582 in->cap_adap + DP_REMOTE_CAP, 1);
585 static int tb_dp_activate(struct tb_tunnel *tunnel, bool active)
590 struct tb_path **paths;
593 paths = tunnel->paths;
594 last = paths[TB_DP_VIDEO_PATH_OUT]->path_length - 1;
596 tb_dp_port_set_hops(tunnel->src_port,
597 paths[TB_DP_VIDEO_PATH_OUT]->hops[0].in_hop_index,
598 paths[TB_DP_AUX_PATH_OUT]->hops[0].in_hop_index,
599 paths[TB_DP_AUX_PATH_IN]->hops[last].next_hop_index);
601 tb_dp_port_set_hops(tunnel->dst_port,
602 paths[TB_DP_VIDEO_PATH_OUT]->hops[last].next_hop_index,
603 paths[TB_DP_AUX_PATH_IN]->hops[0].in_hop_index,
604 paths[TB_DP_AUX_PATH_OUT]->hops[last].next_hop_index);
606 tb_dp_port_hpd_clear(tunnel->src_port);
607 tb_dp_port_set_hops(tunnel->src_port, 0, 0, 0);
608 if (tb_port_is_dpout(tunnel->dst_port))
609 tb_dp_port_set_hops(tunnel->dst_port, 0, 0, 0);
612 ret = tb_dp_port_enable(tunnel->src_port, active);
616 if (tb_port_is_dpout(tunnel->dst_port))
617 return tb_dp_port_enable(tunnel->dst_port, active);
622 static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
625 struct tb_port *in = tunnel->src_port;
626 const struct tb_switch *sw = in->sw;
627 u32 val, rate = 0, lanes = 0;
630 if (tb_dp_is_usb4(sw)) {
		/*
		 * Wait for DPRX done. Normally it should be already set
		 * for active tunnel.
		 */
638 ret = tb_port_read(in, &val, TB_CFG_PORT,
639 in->cap_adap + DP_COMMON_CAP, 1);
643 if (val & DP_COMMON_CAP_DPRX_DONE) {
644 rate = tb_dp_cap_get_rate(val);
645 lanes = tb_dp_cap_get_lanes(val);
653 } else if (sw->generation >= 2) {
655 * Read from the copied remote cap so that we take into
656 * account if capabilities were reduced during exchange.
658 ret = tb_port_read(in, &val, TB_CFG_PORT,
659 in->cap_adap + DP_REMOTE_CAP, 1);
663 rate = tb_dp_cap_get_rate(val);
664 lanes = tb_dp_cap_get_lanes(val);
666 /* No bandwidth management for legacy devices */
672 if (in->sw->config.depth < tunnel->dst_port->sw->config.depth) {
674 *consumed_down = tb_dp_bandwidth(rate, lanes);
676 *consumed_up = tb_dp_bandwidth(rate, lanes);
683 static void tb_dp_init_aux_credits(struct tb_path_hop *hop)
685 struct tb_port *port = hop->in_port;
686 struct tb_switch *sw = port->sw;
688 if (tb_port_use_credit_allocation(port))
689 hop->initial_credits = sw->min_dp_aux_credits;
691 hop->initial_credits = 1;
694 static void tb_dp_init_aux_path(struct tb_path *path)
696 struct tb_path_hop *hop;
698 path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
699 path->egress_shared_buffer = TB_PATH_NONE;
700 path->ingress_fc_enable = TB_PATH_ALL;
701 path->ingress_shared_buffer = TB_PATH_NONE;
705 tb_path_for_each_hop(path, hop)
706 tb_dp_init_aux_credits(hop);
709 static int tb_dp_init_video_credits(struct tb_path_hop *hop)
711 struct tb_port *port = hop->in_port;
712 struct tb_switch *sw = port->sw;
714 if (tb_port_use_credit_allocation(port)) {
715 unsigned int nfc_credits;
716 size_t max_dp_streams;
718 tb_available_credits(port, &max_dp_streams);
720 * Read the number of currently allocated NFC credits
721 * from the lane adapter. Since we only use them for DP
722 * tunneling we can use that to figure out how many DP
723 * tunnels already go through the lane adapter.
725 nfc_credits = port->config.nfc_credits &
726 ADP_CS_4_NFC_BUFFERS_MASK;
		if (nfc_credits / sw->min_dp_main_credits > max_dp_streams)
			return -ENOSPC;

		hop->nfc_credits = sw->min_dp_main_credits;
	} else {
		hop->nfc_credits = min(port->total_credits - 2, 12U);
	}

	return 0;
}
738 static int tb_dp_init_video_path(struct tb_path *path)
740 struct tb_path_hop *hop;
742 path->egress_fc_enable = TB_PATH_NONE;
743 path->egress_shared_buffer = TB_PATH_NONE;
744 path->ingress_fc_enable = TB_PATH_NONE;
745 path->ingress_shared_buffer = TB_PATH_NONE;
749 tb_path_for_each_hop(path, hop) {
752 ret = tb_dp_init_video_credits(hop);
/**
 * tb_tunnel_discover_dp() - Discover existing Display Port tunnels
 * @tb: Pointer to the domain structure
 * @in: DP in adapter
 *
 * If @in adapter is active, follows the tunnel to the DP out adapter
 * and back. Returns the discovered tunnel or %NULL if there was no
 * tunnel.
 *
 * Return: DP tunnel or %NULL if no tunnel found.
 */
771 struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in)
773 struct tb_tunnel *tunnel;
774 struct tb_port *port;
775 struct tb_path *path;
777 if (!tb_dp_port_is_enabled(in))
780 tunnel = tb_tunnel_alloc(tb, 3, TB_TUNNEL_DP);
784 tunnel->init = tb_dp_xchg_caps;
785 tunnel->activate = tb_dp_activate;
786 tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth;
787 tunnel->src_port = in;
789 path = tb_path_discover(in, TB_DP_VIDEO_HOPID, NULL, -1,
790 &tunnel->dst_port, "Video");
792 /* Just disable the DP IN port */
793 tb_dp_port_enable(in, false);
796 tunnel->paths[TB_DP_VIDEO_PATH_OUT] = path;
797 if (tb_dp_init_video_path(tunnel->paths[TB_DP_VIDEO_PATH_OUT]))
800 path = tb_path_discover(in, TB_DP_AUX_TX_HOPID, NULL, -1, NULL, "AUX TX");
803 tunnel->paths[TB_DP_AUX_PATH_OUT] = path;
804 tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_OUT]);
	path = tb_path_discover(tunnel->dst_port, -1, in, TB_DP_AUX_RX_HOPID,
				&port, "AUX RX");
	if (!path)
		goto err_deactivate;
	tunnel->paths[TB_DP_AUX_PATH_IN] = path;
	tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_IN]);
813 /* Validate that the tunnel is complete */
	if (!tb_port_is_dpout(tunnel->dst_port)) {
		tb_port_warn(in, "path does not end on a DP adapter, cleaning up\n");
		goto err_deactivate;
	}

	if (!tb_dp_port_is_enabled(tunnel->dst_port))
		goto err_deactivate;

	if (!tb_dp_port_hpd_is_active(tunnel->dst_port))
		goto err_deactivate;
825 if (port != tunnel->src_port) {
826 tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
830 tb_tunnel_dbg(tunnel, "discovered\n");
834 tb_tunnel_deactivate(tunnel);
836 tb_tunnel_free(tunnel);
842 * tb_tunnel_alloc_dp() - allocate a Display Port tunnel
843 * @tb: Pointer to the domain structure
844 * @in: DP in adapter port
845 * @out: DP out adapter port
 * @max_up: Maximum available upstream bandwidth for the DP tunnel (%0
 *	    if not limited)
 * @max_down: Maximum available downstream bandwidth for the DP tunnel
 *	      (%0 if not limited)
 *
 * Allocates a tunnel between @in and @out that is capable of tunneling
 * Display Port traffic.
 *
 * Return: Returns a tb_tunnel on success or NULL on failure.
 */
856 struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
857 struct tb_port *out, int max_up,
860 struct tb_tunnel *tunnel;
861 struct tb_path **paths;
862 struct tb_path *path;
864 if (WARN_ON(!in->cap_adap || !out->cap_adap))
867 tunnel = tb_tunnel_alloc(tb, 3, TB_TUNNEL_DP);
871 tunnel->init = tb_dp_xchg_caps;
872 tunnel->activate = tb_dp_activate;
873 tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth;
874 tunnel->src_port = in;
875 tunnel->dst_port = out;
876 tunnel->max_up = max_up;
877 tunnel->max_down = max_down;
879 paths = tunnel->paths;
881 path = tb_path_alloc(tb, in, TB_DP_VIDEO_HOPID, out, TB_DP_VIDEO_HOPID,
885 tb_dp_init_video_path(path);
886 paths[TB_DP_VIDEO_PATH_OUT] = path;
888 path = tb_path_alloc(tb, in, TB_DP_AUX_TX_HOPID, out,
889 TB_DP_AUX_TX_HOPID, 1, "AUX TX");
892 tb_dp_init_aux_path(path);
893 paths[TB_DP_AUX_PATH_OUT] = path;
895 path = tb_path_alloc(tb, out, TB_DP_AUX_RX_HOPID, in,
896 TB_DP_AUX_RX_HOPID, 1, "AUX RX");
899 tb_dp_init_aux_path(path);
900 paths[TB_DP_AUX_PATH_IN] = path;
905 tb_tunnel_free(tunnel);
909 static unsigned int tb_dma_available_credits(const struct tb_port *port)
911 const struct tb_switch *sw = port->sw;
914 credits = tb_available_credits(port, NULL);
915 if (tb_acpi_may_tunnel_pcie())
916 credits -= sw->max_pcie_credits;
917 credits -= port->dma_credits;
919 return credits > 0 ? credits : 0;
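/*
 * Illustrative example (hypothetical numbers): if tb_available_credits()
 * reports 40 credits, PCIe tunneling is allowed with max_pcie_credits of
 * 32 and no credits are reserved for DMA yet, then 40 - 32 - 0 = 8
 * credits remain usable for a new DMA path through this adapter.
 */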
922 static int tb_dma_reserve_credits(struct tb_path_hop *hop, unsigned int credits)
924 struct tb_port *port = hop->in_port;
	if (tb_port_use_credit_allocation(port)) {
		unsigned int available = tb_dma_available_credits(port);

		/*
		 * Need to have at least TB_MIN_DMA_CREDITS, otherwise
		 * DMA path cannot be established.
		 */
		if (available < TB_MIN_DMA_CREDITS)
			return -ENOSPC;

		while (credits > available)
			credits--;

		tb_port_dbg(port, "reserving %u credits for DMA path\n",
			    credits);

		port->dma_credits += credits;
	} else {
		if (tb_port_is_null(port))
			credits = port->bonded ? 14 : 6;
		else
			credits = min(port->total_credits, credits);
	}

	hop->initial_credits = credits;
	return 0;
}
954 /* Path from lane adapter to NHI */
955 static int tb_dma_init_rx_path(struct tb_path *path, unsigned int credits)
957 struct tb_path_hop *hop;
960 path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
961 path->ingress_fc_enable = TB_PATH_ALL;
962 path->egress_shared_buffer = TB_PATH_NONE;
963 path->ingress_shared_buffer = TB_PATH_NONE;
966 path->clear_fc = true;
	/*
	 * First lane adapter is the one connected to the remote host.
	 * We don't tunnel other traffic over this link so we can use all
	 * the credits (except the ones reserved for control traffic).
	 */
973 hop = &path->hops[0];
974 tmp = min(tb_usable_credits(hop->in_port), credits);
975 hop->initial_credits = tmp;
976 hop->in_port->dma_credits += tmp;
978 for (i = 1; i < path->path_length; i++) {
981 ret = tb_dma_reserve_credits(&path->hops[i], credits);
989 /* Path from NHI to lane adapter */
990 static int tb_dma_init_tx_path(struct tb_path *path, unsigned int credits)
992 struct tb_path_hop *hop;
994 path->egress_fc_enable = TB_PATH_ALL;
995 path->ingress_fc_enable = TB_PATH_ALL;
996 path->egress_shared_buffer = TB_PATH_NONE;
997 path->ingress_shared_buffer = TB_PATH_NONE;
1000 path->clear_fc = true;
1002 tb_path_for_each_hop(path, hop) {
1005 ret = tb_dma_reserve_credits(hop, credits);
1013 static void tb_dma_release_credits(struct tb_path_hop *hop)
1015 struct tb_port *port = hop->in_port;
1017 if (tb_port_use_credit_allocation(port)) {
1018 port->dma_credits -= hop->initial_credits;
1020 tb_port_dbg(port, "released %u DMA path credits\n",
1021 hop->initial_credits);
1025 static void tb_dma_deinit_path(struct tb_path *path)
1027 struct tb_path_hop *hop;
1029 tb_path_for_each_hop(path, hop)
1030 tb_dma_release_credits(hop);
1033 static void tb_dma_deinit(struct tb_tunnel *tunnel)
1037 for (i = 0; i < tunnel->npaths; i++) {
1038 if (!tunnel->paths[i])
1040 tb_dma_deinit_path(tunnel->paths[i]);
1045 * tb_tunnel_alloc_dma() - allocate a DMA tunnel
1046 * @tb: Pointer to the domain structure
1047 * @nhi: Host controller port
1048 * @dst: Destination null port which the other domain is connected to
1049 * @transmit_path: HopID used for transmitting packets
1050 * @transmit_ring: NHI ring number used to send packets towards the
1051 * other domain. Set to %-1 if TX path is not needed.
1052 * @receive_path: HopID used for receiving packets
1053 * @receive_ring: NHI ring number used to receive packets from the
1054 * other domain. Set to %-1 if RX path is not needed.
 * Return: Returns a tb_tunnel on success or NULL on failure.
 */
1058 struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
1059 struct tb_port *dst, int transmit_path,
1060 int transmit_ring, int receive_path,
1063 struct tb_tunnel *tunnel;
1064 size_t npaths = 0, i = 0;
1065 struct tb_path *path;
1068 if (receive_ring > 0)
1070 if (transmit_ring > 0)
1073 if (WARN_ON(!npaths))
1076 tunnel = tb_tunnel_alloc(tb, npaths, TB_TUNNEL_DMA);
1080 tunnel->src_port = nhi;
1081 tunnel->dst_port = dst;
1082 tunnel->deinit = tb_dma_deinit;
1084 credits = min_not_zero(TB_DMA_CREDITS, nhi->sw->max_dma_credits);
1086 if (receive_ring > 0) {
1087 path = tb_path_alloc(tb, dst, receive_path, nhi, receive_ring, 0,
1091 tunnel->paths[i++] = path;
1092 if (tb_dma_init_rx_path(path, credits)) {
1093 tb_tunnel_dbg(tunnel, "not enough buffers for RX path\n");
1098 if (transmit_ring > 0) {
1099 path = tb_path_alloc(tb, nhi, transmit_ring, dst, transmit_path, 0,
1103 tunnel->paths[i++] = path;
1104 if (tb_dma_init_tx_path(path, credits)) {
1105 tb_tunnel_dbg(tunnel, "not enough buffers for TX path\n");
1113 tb_tunnel_free(tunnel);
1118 * tb_tunnel_match_dma() - Match DMA tunnel
1119 * @tunnel: Tunnel to match
1120 * @transmit_path: HopID used for transmitting packets. Pass %-1 to ignore.
1121 * @transmit_ring: NHI ring number used to send packets towards the
1122 * other domain. Pass %-1 to ignore.
1123 * @receive_path: HopID used for receiving packets. Pass %-1 to ignore.
1124 * @receive_ring: NHI ring number used to receive packets from the
1125 * other domain. Pass %-1 to ignore.
 * This function can be used to match a specific DMA tunnel, if there are
 * multiple DMA tunnels going through the same XDomain connection.
 * Returns true if there is a match and false otherwise.
 */
1131 bool tb_tunnel_match_dma(const struct tb_tunnel *tunnel, int transmit_path,
1132 int transmit_ring, int receive_path, int receive_ring)
1134 const struct tb_path *tx_path = NULL, *rx_path = NULL;
1137 if (!receive_ring || !transmit_ring)
1140 for (i = 0; i < tunnel->npaths; i++) {
1141 const struct tb_path *path = tunnel->paths[i];
1146 if (tb_port_is_nhi(path->hops[0].in_port))
1148 else if (tb_port_is_nhi(path->hops[path->path_length - 1].out_port))
1152 if (transmit_ring > 0 || transmit_path > 0) {
1155 if (transmit_ring > 0 &&
1156 (tx_path->hops[0].in_hop_index != transmit_ring))
1158 if (transmit_path > 0 &&
1159 (tx_path->hops[tx_path->path_length - 1].next_hop_index != transmit_path))
1163 if (receive_ring > 0 || receive_path > 0) {
1166 if (receive_path > 0 &&
1167 (rx_path->hops[0].in_hop_index != receive_path))
1169 if (receive_ring > 0 &&
1170 (rx_path->hops[rx_path->path_length - 1].next_hop_index != receive_ring))
1177 static int tb_usb3_max_link_rate(struct tb_port *up, struct tb_port *down)
1179 int ret, up_max_rate, down_max_rate;
1181 ret = usb4_usb3_port_max_link_rate(up);
1186 ret = usb4_usb3_port_max_link_rate(down);
1189 down_max_rate = ret;
1191 return min(up_max_rate, down_max_rate);
1194 static int tb_usb3_init(struct tb_tunnel *tunnel)
1196 tb_tunnel_dbg(tunnel, "allocating initial bandwidth %d/%d Mb/s\n",
1197 tunnel->allocated_up, tunnel->allocated_down);
1199 return usb4_usb3_port_allocate_bandwidth(tunnel->src_port,
1200 &tunnel->allocated_up,
1201 &tunnel->allocated_down);
1204 static int tb_usb3_activate(struct tb_tunnel *tunnel, bool activate)
1208 res = tb_usb3_port_enable(tunnel->src_port, activate);
1212 if (tb_port_is_usb3_up(tunnel->dst_port))
1213 return tb_usb3_port_enable(tunnel->dst_port, activate);
1218 static int tb_usb3_consumed_bandwidth(struct tb_tunnel *tunnel,
1219 int *consumed_up, int *consumed_down)
	int pcie_enabled = tb_acpi_may_tunnel_pcie();

	/*
	 * PCIe tunneling, if enabled, affects the USB3 bandwidth so
	 * take it into account here.
	 */
	*consumed_up = tunnel->allocated_up * (3 + pcie_enabled) / 3;
	*consumed_down = tunnel->allocated_down * (3 + pcie_enabled) / 3;

	return 0;
}
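/*
 * Example: with 900 Mb/s allocated in each direction this reports
 * 900 * 3 / 3 = 900 Mb/s when PCIe tunneling is disabled and
 * 900 * 4 / 3 = 1200 Mb/s when it is enabled, reserving headroom for
 * PCIe traffic sharing the link.
 */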
1232 static int tb_usb3_release_unused_bandwidth(struct tb_tunnel *tunnel)
1236 ret = usb4_usb3_port_release_bandwidth(tunnel->src_port,
1237 &tunnel->allocated_up,
1238 &tunnel->allocated_down);
1242 tb_tunnel_dbg(tunnel, "decreased bandwidth allocation to %d/%d Mb/s\n",
1243 tunnel->allocated_up, tunnel->allocated_down);
1247 static void tb_usb3_reclaim_available_bandwidth(struct tb_tunnel *tunnel,
1249 int *available_down)
1251 int ret, max_rate, allocate_up, allocate_down;
1253 ret = usb4_usb3_port_actual_link_rate(tunnel->src_port);
1255 tb_tunnel_warn(tunnel, "failed to read actual link rate\n");
1258 /* Use maximum link rate if the link valid is not set */
1259 ret = usb4_usb3_port_max_link_rate(tunnel->src_port);
1261 tb_tunnel_warn(tunnel, "failed to read maximum link rate\n");
	/*
	 * 90% of the max rate can be allocated for isochronous
	 * transfers.
	 */
	max_rate = ret * 90 / 100;
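	/*
	 * Example: assuming the link rate is reported as 10000 Mb/s
	 * (10 Gb/s), max_rate becomes 10000 * 90 / 100 = 9000 Mb/s
	 * available for isochronous transfers.
	 */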
1272 /* No need to reclaim if already at maximum */
1273 if (tunnel->allocated_up >= max_rate &&
1274 tunnel->allocated_down >= max_rate)
1277 /* Don't go lower than what is already allocated */
1278 allocate_up = min(max_rate, *available_up);
1279 if (allocate_up < tunnel->allocated_up)
1280 allocate_up = tunnel->allocated_up;
1282 allocate_down = min(max_rate, *available_down);
1283 if (allocate_down < tunnel->allocated_down)
1284 allocate_down = tunnel->allocated_down;
1286 /* If no changes no need to do more */
1287 if (allocate_up == tunnel->allocated_up &&
1288 allocate_down == tunnel->allocated_down)
1291 ret = usb4_usb3_port_allocate_bandwidth(tunnel->src_port, &allocate_up,
1294 tb_tunnel_info(tunnel, "failed to allocate bandwidth\n");
1298 tunnel->allocated_up = allocate_up;
1299 *available_up -= tunnel->allocated_up;
1301 tunnel->allocated_down = allocate_down;
1302 *available_down -= tunnel->allocated_down;
1304 tb_tunnel_dbg(tunnel, "increased bandwidth allocation to %d/%d Mb/s\n",
1305 tunnel->allocated_up, tunnel->allocated_down);
1308 static void tb_usb3_init_credits(struct tb_path_hop *hop)
1310 struct tb_port *port = hop->in_port;
1311 struct tb_switch *sw = port->sw;
1312 unsigned int credits;
	if (tb_port_use_credit_allocation(port)) {
		credits = sw->max_usb3_credits;
	} else {
		if (tb_port_is_null(port))
			credits = port->bonded ? 32 : 16;
		else
			credits = 7;
	}

	hop->initial_credits = credits;
}
1326 static void tb_usb3_init_path(struct tb_path *path)
1328 struct tb_path_hop *hop;
1330 path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
1331 path->egress_shared_buffer = TB_PATH_NONE;
1332 path->ingress_fc_enable = TB_PATH_ALL;
1333 path->ingress_shared_buffer = TB_PATH_NONE;
1336 path->drop_packages = 0;
1338 tb_path_for_each_hop(path, hop)
1339 tb_usb3_init_credits(hop);
/**
 * tb_tunnel_discover_usb3() - Discover existing USB3 tunnels
 * @tb: Pointer to the domain structure
 * @down: USB3 downstream adapter
 *
 * If @down adapter is active, follows the tunnel to the USB3 upstream
 * adapter and back. Returns the discovered tunnel or %NULL if there was
 * no tunnel.
 */
1351 struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down)
1353 struct tb_tunnel *tunnel;
1354 struct tb_path *path;
1356 if (!tb_usb3_port_is_enabled(down))
1359 tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_USB3);
1363 tunnel->activate = tb_usb3_activate;
1364 tunnel->src_port = down;
1367 * Discover both paths even if they are not complete. We will
1368 * clean them up by calling tb_tunnel_deactivate() below in that
1371 path = tb_path_discover(down, TB_USB3_HOPID, NULL, -1,
1372 &tunnel->dst_port, "USB3 Down");
1374 /* Just disable the downstream port */
1375 tb_usb3_port_enable(down, false);
1378 tunnel->paths[TB_USB3_PATH_DOWN] = path;
1379 tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_DOWN]);
1381 path = tb_path_discover(tunnel->dst_port, -1, down, TB_USB3_HOPID, NULL,
1384 goto err_deactivate;
1385 tunnel->paths[TB_USB3_PATH_UP] = path;
1386 tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_UP]);
1388 /* Validate that the tunnel is complete */
1389 if (!tb_port_is_usb3_up(tunnel->dst_port)) {
1390 tb_port_warn(tunnel->dst_port,
			     "path does not end on a USB3 adapter, cleaning up\n");
1392 goto err_deactivate;
1395 if (down != tunnel->src_port) {
1396 tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
1397 goto err_deactivate;
1400 if (!tb_usb3_port_is_enabled(tunnel->dst_port)) {
1401 tb_tunnel_warn(tunnel,
1402 "tunnel is not fully activated, cleaning up\n");
1403 goto err_deactivate;
1406 if (!tb_route(down->sw)) {
		/*
		 * Read the initial bandwidth allocation for the first
		 * hop tunnel.
		 */
		ret = usb4_usb3_port_allocated_bandwidth(down,
1414 &tunnel->allocated_up, &tunnel->allocated_down);
1416 goto err_deactivate;
1418 tb_tunnel_dbg(tunnel, "currently allocated bandwidth %d/%d Mb/s\n",
1419 tunnel->allocated_up, tunnel->allocated_down);
1421 tunnel->init = tb_usb3_init;
1422 tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth;
1423 tunnel->release_unused_bandwidth =
1424 tb_usb3_release_unused_bandwidth;
1425 tunnel->reclaim_available_bandwidth =
1426 tb_usb3_reclaim_available_bandwidth;
1429 tb_tunnel_dbg(tunnel, "discovered\n");
1433 tb_tunnel_deactivate(tunnel);
1435 tb_tunnel_free(tunnel);
1441 * tb_tunnel_alloc_usb3() - allocate a USB3 tunnel
1442 * @tb: Pointer to the domain structure
1443 * @up: USB3 upstream adapter port
1444 * @down: USB3 downstream adapter port
 * @max_up: Maximum available upstream bandwidth for the USB3 tunnel (%0
 *	    if not limited).
 * @max_down: Maximum available downstream bandwidth for the USB3 tunnel
 *	      (%0 if not limited).
 *
 * Allocate a USB3 tunnel. The ports must be of type @TB_TYPE_USB3_UP and
 * @TB_TYPE_USB3_DOWN.
 *
 * Return: Returns a tb_tunnel on success or %NULL on failure.
 */
1455 struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up,
1456 struct tb_port *down, int max_up,
1459 struct tb_tunnel *tunnel;
1460 struct tb_path *path;
	/*
	 * Check that we have enough bandwidth available for the new
	 * USB3 tunnel.
	 */
	if (max_up > 0 || max_down > 0) {
1468 max_rate = tb_usb3_max_link_rate(down, up);
1472 /* Only 90% can be allocated for USB3 isochronous transfers */
1473 max_rate = max_rate * 90 / 100;
		tb_port_dbg(up, "required bandwidth for USB3 tunnel %d Mb/s\n",
			    max_rate);

		if (max_rate > max_up || max_rate > max_down) {
1478 tb_port_warn(up, "not enough bandwidth for USB3 tunnel\n");
1483 tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_USB3);
1487 tunnel->activate = tb_usb3_activate;
1488 tunnel->src_port = down;
1489 tunnel->dst_port = up;
1490 tunnel->max_up = max_up;
1491 tunnel->max_down = max_down;
1493 path = tb_path_alloc(tb, down, TB_USB3_HOPID, up, TB_USB3_HOPID, 0,
1496 tb_tunnel_free(tunnel);
1499 tb_usb3_init_path(path);
1500 tunnel->paths[TB_USB3_PATH_DOWN] = path;
1502 path = tb_path_alloc(tb, up, TB_USB3_HOPID, down, TB_USB3_HOPID, 0,
1505 tb_tunnel_free(tunnel);
1508 tb_usb3_init_path(path);
1509 tunnel->paths[TB_USB3_PATH_UP] = path;
1511 if (!tb_route(down->sw)) {
1512 tunnel->allocated_up = max_rate;
1513 tunnel->allocated_down = max_rate;
1515 tunnel->init = tb_usb3_init;
1516 tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth;
1517 tunnel->release_unused_bandwidth =
1518 tb_usb3_release_unused_bandwidth;
1519 tunnel->reclaim_available_bandwidth =
1520 tb_usb3_reclaim_available_bandwidth;
1527 * tb_tunnel_free() - free a tunnel
1528 * @tunnel: Tunnel to be freed
1530 * Frees a tunnel. The tunnel does not need to be deactivated.
 */
void tb_tunnel_free(struct tb_tunnel *tunnel)
{
	int i;

	if (!tunnel)
		return;

	if (tunnel->deinit)
		tunnel->deinit(tunnel);
1542 for (i = 0; i < tunnel->npaths; i++) {
1543 if (tunnel->paths[i])
1544 tb_path_free(tunnel->paths[i]);
1547 kfree(tunnel->paths);
/**
 * tb_tunnel_is_invalid() - check whether an activated path is still valid
1553 * @tunnel: Tunnel to check
1555 bool tb_tunnel_is_invalid(struct tb_tunnel *tunnel)
1559 for (i = 0; i < tunnel->npaths; i++) {
1560 WARN_ON(!tunnel->paths[i]->activated);
1561 if (tb_path_is_invalid(tunnel->paths[i]))
1569 * tb_tunnel_restart() - activate a tunnel after a hardware reset
1570 * @tunnel: Tunnel to restart
 * Return: 0 on success and negative errno in case of failure
 */
1574 int tb_tunnel_restart(struct tb_tunnel *tunnel)
1578 tb_tunnel_dbg(tunnel, "activating\n");
1581 * Make sure all paths are properly disabled before enabling
1584 for (i = 0; i < tunnel->npaths; i++) {
1585 if (tunnel->paths[i]->activated) {
1586 tb_path_deactivate(tunnel->paths[i]);
1587 tunnel->paths[i]->activated = false;
1592 res = tunnel->init(tunnel);
1597 for (i = 0; i < tunnel->npaths; i++) {
1598 res = tb_path_activate(tunnel->paths[i]);
1603 if (tunnel->activate) {
1604 res = tunnel->activate(tunnel, true);
1612 tb_tunnel_warn(tunnel, "activation failed\n");
1613 tb_tunnel_deactivate(tunnel);
1618 * tb_tunnel_activate() - activate a tunnel
1619 * @tunnel: Tunnel to activate
1621 * Return: Returns 0 on success or an error code on failure.
1623 int tb_tunnel_activate(struct tb_tunnel *tunnel)
1627 for (i = 0; i < tunnel->npaths; i++) {
1628 if (tunnel->paths[i]->activated) {
1629 tb_tunnel_WARN(tunnel,
1630 "trying to activate an already activated tunnel\n");
1635 return tb_tunnel_restart(tunnel);
1639 * tb_tunnel_deactivate() - deactivate a tunnel
1640 * @tunnel: Tunnel to deactivate
1642 void tb_tunnel_deactivate(struct tb_tunnel *tunnel)
1646 tb_tunnel_dbg(tunnel, "deactivating\n");
1648 if (tunnel->activate)
1649 tunnel->activate(tunnel, false);
1651 for (i = 0; i < tunnel->npaths; i++) {
1652 if (tunnel->paths[i] && tunnel->paths[i]->activated)
1653 tb_path_deactivate(tunnel->paths[i]);
1658 * tb_tunnel_port_on_path() - Does the tunnel go through port
1659 * @tunnel: Tunnel to check
1660 * @port: Port to check
 * Returns true if @tunnel goes through @port (direction does not matter),
 * false otherwise.
 */
1665 bool tb_tunnel_port_on_path(const struct tb_tunnel *tunnel,
1666 const struct tb_port *port)
1670 for (i = 0; i < tunnel->npaths; i++) {
1671 if (!tunnel->paths[i])
1674 if (tb_path_port_on_path(tunnel->paths[i], port))
1681 static bool tb_tunnel_is_active(const struct tb_tunnel *tunnel)
1685 for (i = 0; i < tunnel->npaths; i++) {
1686 if (!tunnel->paths[i])
1688 if (!tunnel->paths[i]->activated)
1696 * tb_tunnel_consumed_bandwidth() - Return bandwidth consumed by the tunnel
1697 * @tunnel: Tunnel to check
 * @consumed_up: Consumed bandwidth in Mb/s from @dst_port to @src_port.
 *		 Can be %NULL.
 * @consumed_down: Consumed bandwidth in Mb/s from @src_port to @dst_port.
 *		   Can be %NULL.
 *
1703 * Stores the amount of isochronous bandwidth @tunnel consumes in
1704 * @consumed_up and @consumed_down. In case of success returns %0,
1705 * negative errno otherwise.
1707 int tb_tunnel_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
1710 int up_bw = 0, down_bw = 0;
1712 if (!tb_tunnel_is_active(tunnel))
1715 if (tunnel->consumed_bandwidth) {
1718 ret = tunnel->consumed_bandwidth(tunnel, &up_bw, &down_bw);
1722 tb_tunnel_dbg(tunnel, "consumed bandwidth %d/%d Mb/s\n", up_bw,
1728 *consumed_up = up_bw;
1730 *consumed_down = down_bw;
1736 * tb_tunnel_release_unused_bandwidth() - Release unused bandwidth
1737 * @tunnel: Tunnel whose unused bandwidth to release
1739 * If tunnel supports dynamic bandwidth management (USB3 tunnels at the
 * moment) this function makes it release all the unused bandwidth.
 *
 * Returns %0 in case of success and negative errno otherwise.
 */
1744 int tb_tunnel_release_unused_bandwidth(struct tb_tunnel *tunnel)
1746 if (!tb_tunnel_is_active(tunnel))
1749 if (tunnel->release_unused_bandwidth) {
1752 ret = tunnel->release_unused_bandwidth(tunnel);
1761 * tb_tunnel_reclaim_available_bandwidth() - Reclaim available bandwidth
1762 * @tunnel: Tunnel reclaiming available bandwidth
1763 * @available_up: Available upstream bandwidth (in Mb/s)
1764 * @available_down: Available downstream bandwidth (in Mb/s)
1766 * Reclaims bandwidth from @available_up and @available_down and updates
 * the variables accordingly (e.g. decreases both according to what was
 * reclaimed by the tunnel). If nothing was reclaimed the values are
 * kept as is.
 */
1771 void tb_tunnel_reclaim_available_bandwidth(struct tb_tunnel *tunnel,
1773 int *available_down)
1775 if (!tb_tunnel_is_active(tunnel))
1778 if (tunnel->reclaim_available_bandwidth)
1779 tunnel->reclaim_available_bandwidth(tunnel, available_up,