// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - bus logic (NHI independent)
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2019, Intel Corporation
 */

#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/pm_runtime.h>
#include <linux/platform_data/x86/apple.h>

#include "tb.h"
#include "tb_regs.h"
#include "tunnel.h"

#define TB_TIMEOUT	100 /* ms */

/**
 * struct tb_cm - Simple Thunderbolt connection manager
 * @tunnel_list: List of active tunnels
 * @dp_resources: List of available DP resources for DP tunneling
 * @hotplug_active: tb_handle_hotplug will stop progressing plug
 *		    events and exit if this is not set (it needs to
 *		    acquire the lock one more time). Used to drain wq
 *		    after cfg has been paused.
 * @remove_work: Work used to remove any unplugged routers after
 *		 runtime resume
 */
struct tb_cm {
	struct list_head tunnel_list;
	struct list_head dp_resources;
	bool hotplug_active;
	struct delayed_work remove_work;
};

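/*
 * Note: tb_domain_alloc() places the connection manager private data
 * directly after struct tb, so tb_priv() returns the tb_cm and stepping
 * back sizeof(struct tb) bytes recovers the owning domain.
 */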
static inline struct tb *tcm_to_tb(struct tb_cm *tcm)
{
	return ((void *)tcm - sizeof(struct tb));
}

struct tb_hotplug_event {
	struct work_struct work;
	struct tb *tb;
	u64 route;
	u8 port;
	bool unplug;
};

static void tb_handle_hotplug(struct work_struct *work);

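/*
 * Queues a hotplug event for processing on tb->wq. The handler,
 * tb_handle_hotplug(), frees the event once it has run.
 */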
static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
{
	struct tb_hotplug_event *ev;

	ev = kmalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev)
		return;

	ev->tb = tb;
	ev->route = route;
	ev->port = port;
	ev->unplug = unplug;
	INIT_WORK(&ev->work, tb_handle_hotplug);
	queue_work(tb->wq, &ev->work);
}

/* enumeration & hot plug handling */

static void tb_add_dp_resources(struct tb_switch *sw)
{
	struct tb_cm *tcm = tb_priv(sw->tb);
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_is_dpin(port))
			continue;

		if (!tb_switch_query_dp_resource(sw, port))
			continue;

		list_add_tail(&port->list, &tcm->dp_resources);
		tb_port_dbg(port, "DP IN resource available\n");
	}
}

static void tb_remove_dp_resources(struct tb_switch *sw)
{
	struct tb_cm *tcm = tb_priv(sw->tb);
	struct tb_port *port, *tmp;

	/* Clear children resources first */
	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port))
			tb_remove_dp_resources(port->remote->sw);
	}

	list_for_each_entry_safe(port, tmp, &tcm->dp_resources, list) {
		if (port->sw == sw) {
			tb_port_dbg(port, "DP IN resource unavailable\n");
			list_del_init(&port->list);
		}
	}
}

static void tb_discover_dp_resource(struct tb *tb, struct tb_port *port)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *p;

	list_for_each_entry(p, &tcm->dp_resources, list) {
		if (p == port)
			return;
	}

	tb_port_dbg(port, "DP %s resource available discovered\n",
		    tb_port_is_dpin(port) ? "IN" : "OUT");
	list_add_tail(&port->list, &tcm->dp_resources);
}

static void tb_discover_dp_resources(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;

	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
		if (tb_tunnel_is_dp(tunnel))
			tb_discover_dp_resource(tb, tunnel->dst_port);
	}
}

static void tb_switch_discover_tunnels(struct tb_switch *sw,
				       struct list_head *list,
				       bool alloc_hopids)
{
	struct tb *tb = sw->tb;
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		struct tb_tunnel *tunnel = NULL;

		switch (port->config.type) {
		case TB_TYPE_DP_HDMI_IN:
			tunnel = tb_tunnel_discover_dp(tb, port, alloc_hopids);
			/*
			 * If a DP tunnel exists, change the host router's
			 * 1st children TMU mode to HiFi for CL0s to work.
			 */
			if (tunnel)
				tb_switch_enable_tmu_1st_child(tb->root_switch,
						TB_SWITCH_TMU_RATE_HIFI);
			break;

		case TB_TYPE_PCIE_DOWN:
			tunnel = tb_tunnel_discover_pci(tb, port, alloc_hopids);
			break;

		case TB_TYPE_USB3_DOWN:
			tunnel = tb_tunnel_discover_usb3(tb, port, alloc_hopids);
			break;

		default:
			break;
		}

		if (tunnel)
			list_add_tail(&tunnel->list, list);
	}

	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port)) {
			tb_switch_discover_tunnels(port->remote->sw, list,
						   alloc_hopids);
		}
	}
}

static void tb_discover_tunnels(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;

	tb_switch_discover_tunnels(tb->root_switch, &tcm->tunnel_list, true);

	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
		if (tb_tunnel_is_pci(tunnel)) {
			struct tb_switch *parent = tunnel->dst_port->sw;

			while (parent != tunnel->src_port->sw) {
				parent->boot = true;
				parent = tb_switch_parent(parent);
			}
		} else if (tb_tunnel_is_dp(tunnel)) {
			/* Keep the domain from powering down */
			pm_runtime_get_sync(&tunnel->src_port->sw->dev);
			pm_runtime_get_sync(&tunnel->dst_port->sw->dev);
		}
	}
}

static int tb_port_configure_xdomain(struct tb_port *port, struct tb_xdomain *xd)
{
	if (tb_switch_is_usb4(port->sw))
		return usb4_port_configure_xdomain(port, xd);
	return tb_lc_configure_xdomain(port);
}

static void tb_port_unconfigure_xdomain(struct tb_port *port)
{
	if (tb_switch_is_usb4(port->sw))
		usb4_port_unconfigure_xdomain(port);
	else
		tb_lc_unconfigure_xdomain(port);

	tb_port_enable(port->dual_link_port);
}

static void tb_scan_xdomain(struct tb_port *port)
{
	struct tb_switch *sw = port->sw;
	struct tb *tb = sw->tb;
	struct tb_xdomain *xd;
	u64 route;

	if (!tb_is_xdomain_enabled())
		return;

	route = tb_downstream_route(port);
	xd = tb_xdomain_find_by_route(tb, route);
	if (xd) {
		tb_xdomain_put(xd);
		return;
	}

	xd = tb_xdomain_alloc(tb, &sw->dev, route, tb->root_switch->uuid,
			      NULL);
	if (xd) {
		tb_port_at(route, sw)->xdomain = xd;
		tb_port_configure_xdomain(port, xd);
		tb_xdomain_add(xd);
	}
}

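/*
 * Enables TMU on @sw in the requested mode. If the mode needs to
 * change, the old configuration is torn down first.
 */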
static int tb_enable_tmu(struct tb_switch *sw)
{
	int ret;

	/* If it is already enabled in correct mode, don't touch it */
	if (tb_switch_tmu_is_enabled(sw, sw->tmu.unidirectional_request))
		return 0;

	ret = tb_switch_tmu_disable(sw);
	if (ret)
		return ret;

	ret = tb_switch_tmu_post_time(sw);
	if (ret)
		return ret;

	return tb_switch_tmu_enable(sw);
}

/**
 * tb_find_unused_port() - return the first inactive port on @sw
 * @sw: Switch to find the port on
 * @type: Port type to look for
 */
static struct tb_port *tb_find_unused_port(struct tb_switch *sw,
					   enum tb_port_type type)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (tb_is_upstream_port(port))
			continue;
		if (port->config.type != type)
			continue;
		if (!port->cap_adap)
			continue;
		if (tb_port_is_enabled(port))
			continue;
		return port;
	}
	return NULL;
}

static struct tb_port *tb_find_usb3_down(struct tb_switch *sw,
					 const struct tb_port *port)
{
	struct tb_port *down;

	down = usb4_switch_map_usb3_down(sw, port);
	if (down && !tb_usb3_port_is_enabled(down))
		return down;
	return NULL;
}

static struct tb_tunnel *tb_find_tunnel(struct tb *tb, enum tb_tunnel_type type,
					struct tb_port *src_port,
					struct tb_port *dst_port)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;

	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
		if (tunnel->type == type &&
		    ((src_port && src_port == tunnel->src_port) ||
		     (dst_port && dst_port == tunnel->dst_port))) {
			return tunnel;
		}
	}

	return NULL;
}

static struct tb_tunnel *tb_find_first_usb3_tunnel(struct tb *tb,
						   struct tb_port *src_port,
						   struct tb_port *dst_port)
{
	struct tb_port *port, *usb3_down;
	struct tb_switch *sw;

	/* Pick the router that is deepest in the topology */
	if (dst_port->sw->config.depth > src_port->sw->config.depth)
		sw = dst_port->sw;
	else
		sw = src_port->sw;

	/* Can't be the host router */
	if (sw == tb->root_switch)
		return NULL;

	/* Find the downstream USB4 port that leads to this router */
	port = tb_port_at(tb_route(sw), tb->root_switch);
	/* Find the corresponding host router USB3 downstream port */
	usb3_down = usb4_switch_map_usb3_down(tb->root_switch, port);
	if (!usb3_down)
		return NULL;

	return tb_find_tunnel(tb, TB_TUNNEL_USB3, usb3_down, NULL);
}

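/*
 * Computes the bandwidth still available between @src_port and
 * @dst_port: the capacity of the slowest link on the path minus what
 * the existing DP and USB3 tunnels already consume.
 */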
static int tb_available_bandwidth(struct tb *tb, struct tb_port *src_port,
				  struct tb_port *dst_port, int *available_up,
				  int *available_down)
{
	int usb3_consumed_up, usb3_consumed_down, ret;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;
	struct tb_port *port;

	tb_port_dbg(dst_port, "calculating available bandwidth\n");

	tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
	if (tunnel) {
		ret = tb_tunnel_consumed_bandwidth(tunnel, &usb3_consumed_up,
						   &usb3_consumed_down);
		if (ret)
			return ret;
	} else {
		usb3_consumed_up = 0;
		usb3_consumed_down = 0;
	}

	*available_up = *available_down = 40000;

	/* Find the minimum available bandwidth over all links */
	tb_for_each_port_on_path(src_port, dst_port, port) {
		int link_speed, link_width, up_bw, down_bw;

		if (!tb_port_is_null(port))
			continue;

		if (tb_is_upstream_port(port)) {
			link_speed = port->sw->link_speed;
		} else {
			link_speed = tb_port_get_link_speed(port);
			if (link_speed < 0)
				return link_speed;
		}

		link_width = port->bonded ? 2 : 1;

		up_bw = link_speed * link_width * 1000; /* Mb/s */
		/* Leave 10% guard band */
		up_bw -= up_bw / 10;
		down_bw = up_bw;

		tb_port_dbg(port, "link total bandwidth %d Mb/s\n", up_bw);
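
		/*
		 * Example: a bonded Gen3 link runs at 20 Gb/s per lane,
		 * so 20 * 2 * 1000 = 40000 Mb/s, and the 10% guard band
		 * leaves 36000 Mb/s before tunnel consumption is
		 * subtracted.
		 */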

		/*
		 * Find all DP tunnels that cross the port and reduce
		 * their consumed bandwidth from the available.
		 */
		list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
			int dp_consumed_up, dp_consumed_down;

			if (!tb_tunnel_is_dp(tunnel))
				continue;

			if (!tb_tunnel_port_on_path(tunnel, port))
				continue;

			ret = tb_tunnel_consumed_bandwidth(tunnel,
							   &dp_consumed_up,
							   &dp_consumed_down);
			if (ret)
				return ret;

			up_bw -= dp_consumed_up;
			down_bw -= dp_consumed_down;
		}

		/*
		 * If USB3 is tunneled from the host router down to the
		 * branch leading to port we need to take USB3 consumed
		 * bandwidth into account regardless of whether it
		 * actually crosses the port.
		 */
		up_bw -= usb3_consumed_up;
		down_bw -= usb3_consumed_down;

		if (up_bw < *available_up)
			*available_up = up_bw;
		if (down_bw < *available_down)
			*available_down = down_bw;
	}

	if (*available_up < 0)
		*available_up = 0;
	if (*available_down < 0)
		*available_down = 0;

	return 0;
}

static int tb_release_unused_usb3_bandwidth(struct tb *tb,
					    struct tb_port *src_port,
					    struct tb_port *dst_port)
{
	struct tb_tunnel *tunnel;

	tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
	return tunnel ? tb_tunnel_release_unused_bandwidth(tunnel) : 0;
}

static void tb_reclaim_usb3_bandwidth(struct tb *tb, struct tb_port *src_port,
				      struct tb_port *dst_port)
{
	int ret, available_up, available_down;
	struct tb_tunnel *tunnel;

	tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
	if (!tunnel)
		return;

	tb_dbg(tb, "reclaiming unused bandwidth for USB3\n");

	/*
	 * Calculate available bandwidth for the first hop USB3 tunnel.
	 * That determines the whole USB3 bandwidth for this branch.
	 */
	ret = tb_available_bandwidth(tb, tunnel->src_port, tunnel->dst_port,
				     &available_up, &available_down);
	if (ret) {
		tb_warn(tb, "failed to calculate available bandwidth\n");
		return;
	}

	tb_dbg(tb, "available bandwidth for USB3 %d/%d Mb/s\n",
	       available_up, available_down);

	tb_tunnel_reclaim_available_bandwidth(tunnel, &available_up,
					      &available_down);
}

static int tb_tunnel_usb3(struct tb *tb, struct tb_switch *sw)
{
	struct tb_switch *parent = tb_switch_parent(sw);
	int ret, available_up, available_down;
	struct tb_port *up, *down, *port;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;

	if (!tb_acpi_may_tunnel_usb3()) {
		tb_dbg(tb, "USB3 tunneling disabled, not creating tunnel\n");
		return 0;
	}

	up = tb_switch_find_port(sw, TB_TYPE_USB3_UP);
	if (!up)
		return 0;

	/*
	 * Look up available down port. Since we are chaining it should
	 * be found right above this switch.
	 */
	port = tb_port_at(tb_route(sw), parent);
	down = tb_find_usb3_down(parent, port);
	if (!down)
		return 0;

	if (tb_route(parent)) {
		struct tb_port *parent_up;
		/*
		 * Check first that the parent switch has its upstream USB3
		 * port enabled. Otherwise the chain is not complete and
		 * there is no point setting up a new tunnel.
		 */
		parent_up = tb_switch_find_port(parent, TB_TYPE_USB3_UP);
		if (!parent_up || !tb_port_is_enabled(parent_up))
			return 0;

		/* Make all unused bandwidth available for the new tunnel */
		ret = tb_release_unused_usb3_bandwidth(tb, down, up);
		if (ret)
			return ret;
	}

	ret = tb_available_bandwidth(tb, down, up, &available_up,
				     &available_down);
	if (ret)
		goto err_reclaim;

	tb_port_dbg(up, "available bandwidth for new USB3 tunnel %d/%d Mb/s\n",
		    available_up, available_down);

	tunnel = tb_tunnel_alloc_usb3(tb, up, down, available_up,
				      available_down);
	if (!tunnel) {
		ret = -ENOMEM;
		goto err_reclaim;
	}

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(up,
			     "USB3 tunnel activation failed, aborting\n");
		ret = -EIO;
		goto err_free;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	if (tb_route(parent))
		tb_reclaim_usb3_bandwidth(tb, down, up);

	return 0;

err_free:
	tb_tunnel_free(tunnel);
err_reclaim:
	if (tb_route(parent))
		tb_reclaim_usb3_bandwidth(tb, down, up);

	return ret;
}

static int tb_create_usb3_tunnels(struct tb_switch *sw)
{
	struct tb_port *port;
	int ret;

	if (!tb_acpi_may_tunnel_usb3())
		return 0;

	if (tb_route(sw)) {
		ret = tb_tunnel_usb3(sw->tb, sw);
		if (ret)
			return ret;
	}

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_has_remote(port))
			continue;
		ret = tb_create_usb3_tunnels(port->remote->sw);
		if (ret)
			return ret;
	}

	return 0;
}

static void tb_scan_port(struct tb_port *port);

/*
 * tb_scan_switch() - scan for and initialize downstream switches
 */
static void tb_scan_switch(struct tb_switch *sw)
{
	struct tb_port *port;

	pm_runtime_get_sync(&sw->dev);

	tb_switch_for_each_port(sw, port)
		tb_scan_port(port);

	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);
}

/*
 * tb_scan_port() - check for and initialize switches below port
 */
static void tb_scan_port(struct tb_port *port)
{
	struct tb_cm *tcm = tb_priv(port->sw->tb);
	struct tb_port *upstream_port;
	struct tb_switch *sw;
	int ret;

	if (tb_is_upstream_port(port))
		return;

	if (tb_port_is_dpout(port) && tb_dp_port_hpd_is_active(port) == 1 &&
	    !tb_dp_port_is_enabled(port)) {
		tb_port_dbg(port, "DP adapter HPD set, queuing hotplug\n");
		tb_queue_hotplug(port->sw->tb, tb_route(port->sw), port->port,
				 false);
		return;
	}

	if (port->config.type != TB_TYPE_PORT)
		return;
	if (port->dual_link_port && port->link_nr)
		return; /*
			 * Downstream switch is reachable through two ports.
			 * Only scan on the primary port (link_nr == 0).
			 */

	if (port->usb4)
		pm_runtime_get_sync(&port->usb4->dev);

	if (tb_wait_for_port(port, false) <= 0)
		goto out_rpm;
	if (port->remote) {
		tb_port_dbg(port, "port already has a remote\n");
		goto out_rpm;
	}

	tb_retimer_scan(port, true);

	sw = tb_switch_alloc(port->sw->tb, &port->sw->dev,
			     tb_downstream_route(port));
	if (IS_ERR(sw)) {
		/*
		 * If there is an error accessing the connected switch
		 * it may be connected to another domain. Also we allow
		 * the other domain to be connected to a max depth switch.
		 */
		if (PTR_ERR(sw) == -EIO || PTR_ERR(sw) == -EADDRNOTAVAIL)
			tb_scan_xdomain(port);
		goto out_rpm;
	}

	if (tb_switch_configure(sw)) {
		tb_switch_put(sw);
		goto out_rpm;
	}

	/*
	 * If there was previously another domain connected remove it
	 * first.
	 */
	if (port->xdomain) {
		tb_xdomain_remove(port->xdomain);
		tb_port_unconfigure_xdomain(port);
		port->xdomain = NULL;
	}

	/*
	 * Do not send uevents until we have discovered all existing
	 * tunnels and know which switches were authorized already by
	 * the boot firmware.
	 */
	if (!tcm->hotplug_active)
		dev_set_uevent_suppress(&sw->dev, true);

	/*
	 * At the moment runtime PM is only supported on Thunderbolt 2
	 * and beyond (devices with LC).
	 */
	sw->rpm = sw->generation > 1;

	if (tb_switch_add(sw)) {
		tb_switch_put(sw);
		goto out_rpm;
	}

	/* Link the switches using both links if available */
	upstream_port = tb_upstream_port(sw);
	port->remote = upstream_port;
	upstream_port->remote = port;
	if (port->dual_link_port && upstream_port->dual_link_port) {
		port->dual_link_port->remote = upstream_port->dual_link_port;
		upstream_port->dual_link_port->remote = port->dual_link_port;
	}

	/* Enable lane bonding if supported */
	tb_switch_lane_bonding_enable(sw);
	/* Set the link configured */
	tb_switch_configure_link(sw);
	/*
	 * CL0s and CL1 are enabled and supported together.
	 * Silently ignore CLx enabling in case CLx is not supported.
	 */
	ret = tb_switch_enable_clx(sw, TB_CL1);
	if (ret && ret != -EOPNOTSUPP)
		tb_sw_warn(sw, "failed to enable %s on upstream port\n",
			   tb_switch_clx_name(TB_CL1));

	if (tb_switch_is_clx_enabled(sw, TB_CL1))
		/*
		 * To support highest CLx state, we set router's TMU to
		 * Normal-Uni mode.
		 */
		tb_switch_tmu_configure(sw, TB_SWITCH_TMU_RATE_NORMAL, true);
	else
		/* If CLx disabled, configure router's TMU to HiFi-Bidir mode */
		tb_switch_tmu_configure(sw, TB_SWITCH_TMU_RATE_HIFI, false);

	if (tb_enable_tmu(sw))
		tb_sw_warn(sw, "failed to enable TMU\n");

	/* Scan upstream retimers */
	tb_retimer_scan(upstream_port, true);

	/*
	 * Create USB 3.x tunnels only when the switch is plugged to the
	 * domain. This is because we scan the domain also during discovery
	 * and want to discover existing USB 3.x tunnels before we create
	 * new ones.
	 */
	if (tcm->hotplug_active && tb_tunnel_usb3(sw->tb, sw))
		tb_sw_warn(sw, "USB3 tunnel creation failed\n");

	tb_add_dp_resources(sw);
	tb_scan_switch(sw);

out_rpm:
	if (port->usb4) {
		pm_runtime_mark_last_busy(&port->usb4->dev);
		pm_runtime_put_autosuspend(&port->usb4->dev);
	}
}

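/*
 * Deactivates @tunnel, drops it from the tunnel list and releases
 * whatever the tunnel type held: the DP IN resource and runtime PM
 * references for DP, reclaimed USB3 bandwidth for USB3.
 */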
static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)
{
	struct tb_port *src_port, *dst_port;
	struct tb *tb;

	if (!tunnel)
		return;

	tb_tunnel_deactivate(tunnel);
	list_del(&tunnel->list);

	tb = tunnel->tb;
	src_port = tunnel->src_port;
	dst_port = tunnel->dst_port;

	switch (tunnel->type) {
	case TB_TUNNEL_DP:
		/*
		 * In case of DP tunnel make sure the DP IN resource is
		 * deallocated properly.
		 */
		tb_switch_dealloc_dp_resource(src_port->sw, src_port);
		/* Now we can allow the domain to runtime suspend again */
		pm_runtime_mark_last_busy(&dst_port->sw->dev);
		pm_runtime_put_autosuspend(&dst_port->sw->dev);
		pm_runtime_mark_last_busy(&src_port->sw->dev);
		pm_runtime_put_autosuspend(&src_port->sw->dev);
		fallthrough;

	case TB_TUNNEL_USB3:
		tb_reclaim_usb3_bandwidth(tb, src_port, dst_port);
		break;

	default:
		/*
		 * PCIe and DMA tunnels do not consume guaranteed
		 * bandwidth.
		 */
		break;
	}

	tb_tunnel_free(tunnel);
}

/*
 * tb_free_invalid_tunnels() - destroy tunnels of devices that have gone away
 */
static void tb_free_invalid_tunnels(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;
	struct tb_tunnel *n;

	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
		if (tb_tunnel_is_invalid(tunnel))
			tb_deactivate_and_free_tunnel(tunnel);
	}
}

/*
 * tb_free_unplugged_children() - traverse hierarchy and free unplugged switches
 */
static void tb_free_unplugged_children(struct tb_switch *sw)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_has_remote(port))
			continue;

		if (port->remote->sw->is_unplugged) {
			tb_retimer_remove_all(port);
			tb_remove_dp_resources(port->remote->sw);
			tb_switch_unconfigure_link(port->remote->sw);
			tb_switch_lane_bonding_disable(port->remote->sw);
			tb_switch_remove(port->remote->sw);
			port->remote = NULL;
			if (port->dual_link_port)
				port->dual_link_port->remote = NULL;
		} else {
			tb_free_unplugged_children(port->remote->sw);
		}
	}
}

static struct tb_port *tb_find_pcie_down(struct tb_switch *sw,
					 const struct tb_port *port)
{
	struct tb_port *down = NULL;

	/*
	 * To keep plugging devices consistently in the same PCIe
	 * hierarchy, do mapping here for switch downstream PCIe ports.
	 */
	if (tb_switch_is_usb4(sw)) {
		down = usb4_switch_map_pcie_down(sw, port);
	} else if (!tb_route(sw)) {
		int phy_port = tb_phy_port_from_link(port->port);
		int index;

		/*
		 * Hard-coded Thunderbolt port to PCIe down port mapping
		 * per controller.
		 */
		if (tb_switch_is_cactus_ridge(sw) ||
		    tb_switch_is_alpine_ridge(sw))
			index = !phy_port ? 6 : 7;
		else if (tb_switch_is_falcon_ridge(sw))
			index = !phy_port ? 6 : 8;
		else if (tb_switch_is_titan_ridge(sw))
			index = !phy_port ? 8 : 9;
		else
			goto out;

		/* Validate the hard-coding */
		if (WARN_ON(index > sw->config.max_port_number))
			goto out;

		down = &sw->ports[index];
	}

	if (down) {
		if (WARN_ON(!tb_port_is_pcie_down(down)))
			goto out;
		if (tb_pci_port_is_enabled(down))
			goto out;

		return down;
	}

out:
	return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN);
}

static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in)
{
	struct tb_port *host_port, *port;
	struct tb_cm *tcm = tb_priv(tb);

	host_port = tb_route(in->sw) ?
		tb_port_at(tb_route(in->sw), tb->root_switch) : NULL;

	list_for_each_entry(port, &tcm->dp_resources, list) {
		if (!tb_port_is_dpout(port))
			continue;

		if (tb_port_is_enabled(port)) {
			tb_port_dbg(port, "in use\n");
			continue;
		}

		tb_port_dbg(port, "DP OUT available\n");

		/*
		 * Keep the DP tunnel under the topology starting from
		 * the same host router downstream port.
		 */
		if (host_port && tb_route(port->sw)) {
			struct tb_port *p;

			p = tb_port_at(tb_route(port->sw), tb->root_switch);
			if (p != host_port)
				continue;
		}

		return port;
	}

	return NULL;
}

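/*
 * Pairs a free DP IN adapter with a free DP OUT adapter and sets up a
 * DP tunnel between them, releasing unused USB3 bandwidth first and
 * reclaiming it once the tunnel is up.
 */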
static void tb_tunnel_dp(struct tb *tb)
{
	int available_up, available_down, ret, link_nr;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *port, *in, *out;
	struct tb_tunnel *tunnel;

	if (!tb_acpi_may_tunnel_dp()) {
		tb_dbg(tb, "DP tunneling disabled, not creating tunnel\n");
		return;
	}

	/*
	 * Find pair of inactive DP IN and DP OUT adapters and then
	 * establish a DP tunnel between them.
	 */
	tb_dbg(tb, "looking for DP IN <-> DP OUT pairs:\n");

	in = NULL;
	out = NULL;
	list_for_each_entry(port, &tcm->dp_resources, list) {
		if (!tb_port_is_dpin(port))
			continue;

		if (tb_port_is_enabled(port)) {
			tb_port_dbg(port, "in use\n");
			continue;
		}

		tb_port_dbg(port, "DP IN available\n");

		out = tb_find_dp_out(tb, port);
		if (out) {
			in = port;
			break;
		}
	}

	if (!in) {
		tb_dbg(tb, "no suitable DP IN adapter available, not tunneling\n");
		return;
	}
	if (!out) {
		tb_dbg(tb, "no suitable DP OUT adapter available, not tunneling\n");
		return;
	}

	/*
	 * This is only applicable to links that are not bonded (so
	 * when Thunderbolt 1 hardware is involved somewhere in the
	 * topology). For these try to share the DP bandwidth between
	 * the two lanes.
	 */
	link_nr = 1;
	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
		if (tb_tunnel_is_dp(tunnel)) {
			link_nr = 0;
			break;
		}
	}

	/*
	 * DP stream needs the domain to be active so runtime resume
	 * both ends of the tunnel.
	 *
	 * This should bring the routers in the middle active as well
	 * and keeps the domain from runtime suspending while the DP
	 * tunnel is active.
	 */
	pm_runtime_get_sync(&in->sw->dev);
	pm_runtime_get_sync(&out->sw->dev);

	if (tb_switch_alloc_dp_resource(in->sw, in)) {
		tb_port_dbg(in, "no resource available for DP IN, not tunneling\n");
		goto err_rpm_put;
	}

	/* Make all unused USB3 bandwidth available for the new DP tunnel */
	ret = tb_release_unused_usb3_bandwidth(tb, in, out);
	if (ret) {
		tb_warn(tb, "failed to release unused bandwidth\n");
		goto err_dealloc_dp;
	}

	ret = tb_available_bandwidth(tb, in, out, &available_up,
				     &available_down);
	if (ret)
		goto err_reclaim;

	tb_dbg(tb, "available bandwidth for new DP tunnel %u/%u Mb/s\n",
	       available_up, available_down);

	tunnel = tb_tunnel_alloc_dp(tb, in, out, link_nr, available_up,
				    available_down);
	if (!tunnel) {
		tb_port_dbg(out, "could not allocate DP tunnel\n");
		goto err_reclaim;
	}

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(out, "DP tunnel activation failed, aborting\n");
		goto err_free;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	tb_reclaim_usb3_bandwidth(tb, in, out);
	/*
	 * Now that the DP tunnel exists, change the host router's 1st
	 * children TMU mode to HiFi for CL0s to work.
	 */
	tb_switch_enable_tmu_1st_child(tb->root_switch, TB_SWITCH_TMU_RATE_HIFI);

	return;

err_free:
	tb_tunnel_free(tunnel);
err_reclaim:
	tb_reclaim_usb3_bandwidth(tb, in, out);
err_dealloc_dp:
	tb_switch_dealloc_dp_resource(in->sw, in);
err_rpm_put:
	pm_runtime_mark_last_busy(&out->sw->dev);
	pm_runtime_put_autosuspend(&out->sw->dev);
	pm_runtime_mark_last_busy(&in->sw->dev);
	pm_runtime_put_autosuspend(&in->sw->dev);
}

static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port)
{
	struct tb_port *in, *out;
	struct tb_tunnel *tunnel;

	if (tb_port_is_dpin(port)) {
		tb_port_dbg(port, "DP IN resource unavailable\n");
		in = port;
		out = NULL;
	} else {
		tb_port_dbg(port, "DP OUT resource unavailable\n");
		in = NULL;
		out = port;
	}

	tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, out);
	tb_deactivate_and_free_tunnel(tunnel);
	list_del_init(&port->list);

	/*
	 * See if there is another DP OUT port that can be used to
	 * create another tunnel.
	 */
	tb_tunnel_dp(tb);
}

static void tb_dp_resource_available(struct tb *tb, struct tb_port *port)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *p;

	if (tb_port_is_enabled(port))
		return;

	list_for_each_entry(p, &tcm->dp_resources, list) {
		if (p == port)
			return;
	}

	tb_port_dbg(port, "DP %s resource available\n",
		    tb_port_is_dpin(port) ? "IN" : "OUT");
	list_add_tail(&port->list, &tcm->dp_resources);

	/* Look for suitable DP IN <-> DP OUT pairs now */
	tb_tunnel_dp(tb);
}

static void tb_disconnect_and_release_dp(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel, *n;

	/*
	 * Tear down all DP tunnels and release their resources. They
	 * will be re-established after resume based on plug events.
	 */
	list_for_each_entry_safe_reverse(tunnel, n, &tcm->tunnel_list, list) {
		if (tb_tunnel_is_dp(tunnel))
			tb_deactivate_and_free_tunnel(tunnel);
	}

	while (!list_empty(&tcm->dp_resources)) {
		struct tb_port *port;

		port = list_first_entry(&tcm->dp_resources,
					struct tb_port, list);
		list_del_init(&port->list);
	}
}

static int tb_disconnect_pci(struct tb *tb, struct tb_switch *sw)
{
	struct tb_tunnel *tunnel;
	struct tb_port *up;

	up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
	if (WARN_ON(!up))
		return -ENODEV;

	tunnel = tb_find_tunnel(tb, TB_TUNNEL_PCI, NULL, up);
	if (WARN_ON(!tunnel))
		return -ENODEV;

	tb_switch_xhci_disconnect(sw);

	tb_tunnel_deactivate(tunnel);
	list_del(&tunnel->list);
	tb_tunnel_free(tunnel);
	return 0;
}

static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
{
	struct tb_port *up, *down, *port;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_switch *parent_sw;
	struct tb_tunnel *tunnel;

	up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
	if (!up)
		return 0;

	/*
	 * Look up available down port. Since we are chaining it should
	 * be found right above this switch.
	 */
	parent_sw = tb_to_switch(sw->dev.parent);
	port = tb_port_at(tb_route(sw), parent_sw);
	down = tb_find_pcie_down(parent_sw, port);
	if (!down)
		return 0;

	tunnel = tb_tunnel_alloc_pci(tb, up, down);
	if (!tunnel)
		return -ENOMEM;

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(up,
			     "PCIe tunnel activation failed, aborting\n");
		tb_tunnel_free(tunnel);
		return -EIO;
	}

	/*
	 * PCIe L1 is needed to enable CL0s for Titan Ridge so enable it
	 * here.
	 */
	if (tb_switch_pcie_l1_enable(sw))
		tb_sw_warn(sw, "failed to enable PCIe L1 for Titan Ridge\n");

	if (tb_switch_xhci_connect(sw))
		tb_sw_warn(sw, "failed to connect xHCI\n");

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	return 0;
}

static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
				    int transmit_path, int transmit_ring,
				    int receive_path, int receive_ring)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *nhi_port, *dst_port;
	struct tb_tunnel *tunnel;
	struct tb_switch *sw;

	sw = tb_to_switch(xd->dev.parent);
	dst_port = tb_port_at(xd->route, sw);
	nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);

	mutex_lock(&tb->lock);
	tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, transmit_path,
				     transmit_ring, receive_path, receive_ring);
	if (!tunnel) {
		mutex_unlock(&tb->lock);
		return -ENOMEM;
	}

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(nhi_port,
			     "DMA tunnel activation failed, aborting\n");
		tb_tunnel_free(tunnel);
		mutex_unlock(&tb->lock);
		return -EIO;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	mutex_unlock(&tb->lock);
	return 0;
}

static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
					  int transmit_path, int transmit_ring,
					  int receive_path, int receive_ring)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *nhi_port, *dst_port;
	struct tb_tunnel *tunnel, *n;
	struct tb_switch *sw;

	sw = tb_to_switch(xd->dev.parent);
	dst_port = tb_port_at(xd->route, sw);
	nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);

	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
		if (!tb_tunnel_is_dma(tunnel))
			continue;
		if (tunnel->src_port != nhi_port || tunnel->dst_port != dst_port)
			continue;

		if (tb_tunnel_match_dma(tunnel, transmit_path, transmit_ring,
					receive_path, receive_ring))
			tb_deactivate_and_free_tunnel(tunnel);
	}
}

static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
				       int transmit_path, int transmit_ring,
				       int receive_path, int receive_ring)
{
	if (!xd->is_unplugged) {
		mutex_lock(&tb->lock);
		__tb_disconnect_xdomain_paths(tb, xd, transmit_path,
					      transmit_ring, receive_path,
					      receive_ring);
		mutex_unlock(&tb->lock);
	}
	return 0;
}

/* hotplug handling */

/*
 * tb_handle_hotplug() - handle hotplug event
 *
 * Executes on tb->wq.
 */
static void tb_handle_hotplug(struct work_struct *work)
{
	struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
	struct tb *tb = ev->tb;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_switch *sw;
	struct tb_port *port;

	/* Bring the domain back from sleep if it was suspended */
	pm_runtime_get_sync(&tb->dev);

	mutex_lock(&tb->lock);
	if (!tcm->hotplug_active)
		goto out; /* during init, suspend or shutdown */

	sw = tb_switch_find_by_route(tb, ev->route);
	if (!sw) {
		tb_warn(tb,
			"hotplug event from non existent switch %llx:%x (unplug: %d)\n",
			ev->route, ev->port, ev->unplug);
		goto out;
	}
	if (ev->port > sw->config.max_port_number) {
		tb_warn(tb,
			"hotplug event from non existent port %llx:%x (unplug: %d)\n",
			ev->route, ev->port, ev->unplug);
		goto put_sw;
	}
	port = &sw->ports[ev->port];
	if (tb_is_upstream_port(port)) {
		tb_dbg(tb, "hotplug event for upstream port %llx:%x (unplug: %d)\n",
		       ev->route, ev->port, ev->unplug);
		goto put_sw;
	}

	pm_runtime_get_sync(&sw->dev);

	if (ev->unplug) {
		tb_retimer_remove_all(port);

		if (tb_port_has_remote(port)) {
			tb_port_dbg(port, "switch unplugged\n");
			tb_sw_set_unplugged(port->remote->sw);
			tb_free_invalid_tunnels(tb);
			tb_remove_dp_resources(port->remote->sw);
			tb_switch_tmu_disable(port->remote->sw);
			tb_switch_unconfigure_link(port->remote->sw);
			tb_switch_lane_bonding_disable(port->remote->sw);
			tb_switch_remove(port->remote->sw);
			port->remote = NULL;
			if (port->dual_link_port)
				port->dual_link_port->remote = NULL;
			/* Maybe we can create another DP tunnel */
			tb_tunnel_dp(tb);
		} else if (port->xdomain) {
			struct tb_xdomain *xd = tb_xdomain_get(port->xdomain);

			tb_port_dbg(port, "xdomain unplugged\n");
			/*
			 * Service drivers are unbound during
			 * tb_xdomain_remove() so setting XDomain as
			 * unplugged here prevents deadlock if they call
			 * tb_xdomain_disable_paths(). We will tear down
			 * all the tunnels below.
			 */
			xd->is_unplugged = true;
			tb_xdomain_remove(xd);
			port->xdomain = NULL;
			__tb_disconnect_xdomain_paths(tb, xd, -1, -1, -1, -1);
			tb_xdomain_put(xd);
			tb_port_unconfigure_xdomain(port);
		} else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
			tb_dp_resource_unavailable(tb, port);
		} else if (!port->port) {
			tb_sw_dbg(sw, "xHCI disconnect request\n");
			tb_switch_xhci_disconnect(sw);
		} else {
			tb_port_dbg(port,
				    "got unplug event for disconnected port, ignoring\n");
		}
	} else if (port->remote) {
		tb_port_dbg(port, "got plug event for connected port, ignoring\n");
	} else if (!port->port && sw->authorized) {
		tb_sw_dbg(sw, "xHCI connect request\n");
		tb_switch_xhci_connect(sw);
	} else {
		if (tb_port_is_null(port)) {
			tb_port_dbg(port, "hotplug: scanning\n");
			tb_scan_port(port);
			if (!port->remote)
				tb_port_dbg(port, "hotplug: no switch found\n");
		} else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
			tb_dp_resource_available(tb, port);
		}
	}

	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);

put_sw:
	tb_switch_put(sw);
out:
	mutex_unlock(&tb->lock);

	pm_runtime_mark_last_busy(&tb->dev);
	pm_runtime_put_autosuspend(&tb->dev);

	kfree(ev);
}

/*
 * tb_handle_event() - callback function for the control channel
 *
 * Delegates to tb_handle_hotplug.
 */
static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
			    const void *buf, size_t size)
{
	const struct cfg_event_pkg *pkg = buf;
	u64 route;

	if (type != TB_CFG_PKG_EVENT) {
		tb_warn(tb, "unexpected event %#x, ignoring\n", type);
		return;
	}

	route = tb_cfg_get_route(&pkg->header);

	if (tb_cfg_ack_plug(tb->ctl, route, pkg->port, pkg->unplug)) {
		tb_warn(tb, "could not ack plug event on %llx:%x\n", route,
			pkg->port);
	}

	tb_queue_hotplug(tb, route, pkg->port, pkg->unplug);
}

static void tb_stop(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;
	struct tb_tunnel *n;

	cancel_delayed_work(&tcm->remove_work);
	/* tunnels are only present after everything has been initialized */
	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
		/*
		 * DMA tunnels require the driver to be functional so we
		 * tear them down. Other protocol tunnels can be left
		 * intact.
		 */
		if (tb_tunnel_is_dma(tunnel))
			tb_tunnel_deactivate(tunnel);
		tb_tunnel_free(tunnel);
	}
	tb_switch_remove(tb->root_switch);
	tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
}

static int tb_scan_finalize_switch(struct device *dev, void *data)
{
	if (tb_is_switch(dev)) {
		struct tb_switch *sw = tb_to_switch(dev);

		/*
		 * If we found that the switch was already set up by the
		 * boot firmware, mark it as authorized now before we
		 * send uevent to userspace.
		 */
		if (sw->boot)
			sw->authorized = 1;

		dev_set_uevent_suppress(dev, false);
		kobject_uevent(&dev->kobj, KOBJ_ADD);
		device_for_each_child(dev, NULL, tb_scan_finalize_switch);
	}

	return 0;
}

static int tb_start(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	int ret;

	tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
	if (IS_ERR(tb->root_switch))
		return PTR_ERR(tb->root_switch);

	/*
	 * ICM firmware upgrade needs running firmware and in native
	 * mode that is not available so disable firmware upgrade of the
	 * root switch.
	 *
	 * However, USB4 routers support NVM firmware upgrade if they
	 * implement the necessary router operations.
	 */
	tb->root_switch->no_nvm_upgrade = !tb_switch_is_usb4(tb->root_switch);
	/* All USB4 routers support runtime PM */
	tb->root_switch->rpm = tb_switch_is_usb4(tb->root_switch);

	ret = tb_switch_configure(tb->root_switch);
	if (ret) {
		tb_switch_put(tb->root_switch);
		return ret;
	}

	/* Announce the switch to the world */
	ret = tb_switch_add(tb->root_switch);
	if (ret) {
		tb_switch_put(tb->root_switch);
		return ret;
	}

	/*
	 * To support highest CLx state, we set host router's TMU to
	 * Normal mode.
	 */
	tb_switch_tmu_configure(tb->root_switch, TB_SWITCH_TMU_RATE_NORMAL,
				false);
	/* Enable TMU if it is off */
	tb_switch_tmu_enable(tb->root_switch);
	/* Full scan to discover devices added before the driver was loaded. */
	tb_scan_switch(tb->root_switch);
	/* Find out tunnels created by the boot firmware */
	tb_discover_tunnels(tb);
	/* Add DP resources from the DP tunnels created by the boot firmware */
	tb_discover_dp_resources(tb);
	/*
	 * If the boot firmware did not create USB 3.x tunnels create them
	 * now for the whole topology.
	 */
	tb_create_usb3_tunnels(tb->root_switch);
	/* Add DP IN resources for the root switch */
	tb_add_dp_resources(tb->root_switch);
	/* Make the discovered switches available to the userspace */
	device_for_each_child(&tb->root_switch->dev, NULL,
			      tb_scan_finalize_switch);

	/* Allow tb_handle_hotplug to progress events */
	tcm->hotplug_active = true;
	return 0;
}

static int tb_suspend_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);

	tb_dbg(tb, "suspending...\n");
	tb_disconnect_and_release_dp(tb);
	tb_switch_suspend(tb->root_switch, false);
	tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
	tb_dbg(tb, "suspend finished\n");

	return 0;
}

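/*
 * Restores CLx, TMU, lane bonding and link configuration for @sw and,
 * recursively, for every router below it after resume. XDomain
 * connections are re-configured as well.
 */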
static void tb_restore_children(struct tb_switch *sw)
{
	struct tb_port *port;
	int ret;

	/* No need to restore if the router is already unplugged */
	if (sw->is_unplugged)
		return;

	/*
	 * CL0s and CL1 are enabled and supported together.
	 * Silently ignore CLx re-enabling in case CLx is not supported.
	 */
	ret = tb_switch_enable_clx(sw, TB_CL1);
	if (ret && ret != -EOPNOTSUPP)
		tb_sw_warn(sw, "failed to re-enable %s on upstream port\n",
			   tb_switch_clx_name(TB_CL1));

	if (tb_switch_is_clx_enabled(sw, TB_CL1))
		/*
		 * To support highest CLx state, we set router's TMU to
		 * Normal-Uni mode.
		 */
		tb_switch_tmu_configure(sw, TB_SWITCH_TMU_RATE_NORMAL, true);
	else
		/* If CLx disabled, configure router's TMU to HiFi-Bidir mode */
		tb_switch_tmu_configure(sw, TB_SWITCH_TMU_RATE_HIFI, false);

	if (tb_enable_tmu(sw))
		tb_sw_warn(sw, "failed to restore TMU configuration\n");

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_has_remote(port) && !port->xdomain)
			continue;

		if (port->remote) {
			tb_switch_lane_bonding_enable(port->remote->sw);
			tb_switch_configure_link(port->remote->sw);

			tb_restore_children(port->remote->sw);
		} else if (port->xdomain) {
			tb_port_configure_xdomain(port, port->xdomain);
		}
	}
}

static int tb_resume_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel, *n;
	unsigned int usb3_delay = 0;
	LIST_HEAD(tunnels);

	tb_dbg(tb, "resuming...\n");

	/* remove any pci devices the firmware might have set up */
	tb_switch_reset(tb->root_switch);

	tb_switch_resume(tb->root_switch);
	tb_free_invalid_tunnels(tb);
	tb_free_unplugged_children(tb->root_switch);
	tb_restore_children(tb->root_switch);

	/*
	 * If we get here from suspend to disk the boot firmware or the
	 * restore kernel might have created tunnels of its own. Since
	 * we cannot be sure they are usable for us we find and tear
	 * them down.
	 */
	tb_switch_discover_tunnels(tb->root_switch, &tunnels, false);
	list_for_each_entry_safe_reverse(tunnel, n, &tunnels, list) {
		if (tb_tunnel_is_usb3(tunnel))
			usb3_delay = 500;
		tb_tunnel_deactivate(tunnel);
		tb_tunnel_free(tunnel);
	}

	/* Re-create our tunnels now */
	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
		/* USB3 requires delay before it can be re-activated */
		if (tb_tunnel_is_usb3(tunnel)) {
			msleep(usb3_delay);
			/* Only need to do it once */
			usb3_delay = 0;
		}
		tb_tunnel_restart(tunnel);
	}
	if (!list_empty(&tcm->tunnel_list)) {
		/*
		 * The PCIe links need some time to get going.
		 * 100ms works for me...
		 */
		tb_dbg(tb, "tunnels restarted, sleeping for 100ms\n");
		msleep(100);
	}
	/* Allow tb_handle_hotplug to progress events */
	tcm->hotplug_active = true;
	tb_dbg(tb, "resume finished\n");

	return 0;
}

static int tb_free_unplugged_xdomains(struct tb_switch *sw)
{
	struct tb_port *port;
	int ret = 0;

	tb_switch_for_each_port(sw, port) {
		if (tb_is_upstream_port(port))
			continue;
		if (port->xdomain && port->xdomain->is_unplugged) {
			tb_retimer_remove_all(port);
			tb_xdomain_remove(port->xdomain);
			tb_port_unconfigure_xdomain(port);
			port->xdomain = NULL;
			ret++;
		} else if (port->remote) {
			ret += tb_free_unplugged_xdomains(port->remote->sw);
		}
	}

	return ret;
}

static int tb_freeze_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);

	tcm->hotplug_active = false;
	return 0;
}

static int tb_thaw_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);

	tcm->hotplug_active = true;
	return 0;
}

static void tb_complete(struct tb *tb)
{
	/*
	 * Release any unplugged XDomains. If another domain was swapped
	 * in place of an unplugged XDomain we need to run another
	 * rescan.
	 */
	mutex_lock(&tb->lock);
	if (tb_free_unplugged_xdomains(tb->root_switch))
		tb_scan_switch(tb->root_switch);
	mutex_unlock(&tb->lock);
}

static int tb_runtime_suspend(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);

	mutex_lock(&tb->lock);
	tb_switch_suspend(tb->root_switch, true);
	tcm->hotplug_active = false;
	mutex_unlock(&tb->lock);

	return 0;
}

static void tb_remove_work(struct work_struct *work)
{
	struct tb_cm *tcm = container_of(work, struct tb_cm, remove_work.work);
	struct tb *tb = tcm_to_tb(tcm);

	mutex_lock(&tb->lock);
	if (tb->root_switch) {
		tb_free_unplugged_children(tb->root_switch);
		tb_free_unplugged_xdomains(tb->root_switch);
	}
	mutex_unlock(&tb->lock);
}

static int tb_runtime_resume(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel, *n;

	mutex_lock(&tb->lock);
	tb_switch_resume(tb->root_switch);
	tb_free_invalid_tunnels(tb);
	tb_restore_children(tb->root_switch);
	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
		tb_tunnel_restart(tunnel);
	tcm->hotplug_active = true;
	mutex_unlock(&tb->lock);

	/*
	 * Schedule cleanup of any unplugged devices. Run this in a
	 * separate thread to avoid possible deadlock if the device
	 * removal runtime resumes the unplugged device.
	 */
	queue_delayed_work(tb->wq, &tcm->remove_work, msecs_to_jiffies(50));
	return 0;
}

static const struct tb_cm_ops tb_cm_ops = {
	.start = tb_start,
	.stop = tb_stop,
	.suspend_noirq = tb_suspend_noirq,
	.resume_noirq = tb_resume_noirq,
	.freeze_noirq = tb_freeze_noirq,
	.thaw_noirq = tb_thaw_noirq,
	.complete = tb_complete,
	.runtime_suspend = tb_runtime_suspend,
	.runtime_resume = tb_runtime_resume,
	.handle_event = tb_handle_event,
	.disapprove_switch = tb_disconnect_pci,
	.approve_switch = tb_tunnel_pci,
	.approve_xdomain_paths = tb_approve_xdomain_paths,
	.disconnect_xdomain_paths = tb_disconnect_xdomain_paths,
};

/*
 * During suspend the Thunderbolt controller is reset and all PCIe
 * tunnels are lost. The NHI driver will try to reestablish all tunnels
 * during resume. This adds device links between the tunneled PCIe
 * downstream ports and the NHI so that the device core will make sure
 * NHI is resumed first before the rest.
 */
static void tb_apple_add_links(struct tb_nhi *nhi)
{
	struct pci_dev *upstream, *pdev;

	if (!x86_apple_machine)
		return;

	switch (nhi->pdev->device) {
	case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
	case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI:
	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI:
		break;
	default:
		return;
	}

	upstream = pci_upstream_bridge(nhi->pdev);
	while (upstream) {
		if (!pci_is_pcie(upstream))
			return;
		if (pci_pcie_type(upstream) == PCI_EXP_TYPE_UPSTREAM)
			break;
		upstream = pci_upstream_bridge(upstream);
	}

	if (!upstream)
		return;

	/*
	 * For each hotplug downstream port, add a device link back to
	 * NHI so that PCIe tunnels can be re-established after sleep.
	 */
	for_each_pci_bridge(pdev, upstream->subordinate) {
		const struct device_link *link;

		if (!pci_is_pcie(pdev))
			continue;
		if (pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM ||
		    !pdev->is_hotplug_bridge)
			continue;

		link = device_link_add(&pdev->dev, &nhi->pdev->dev,
				       DL_FLAG_AUTOREMOVE_SUPPLIER |
				       DL_FLAG_PM_RUNTIME);
		if (link) {
			dev_dbg(&nhi->pdev->dev, "created link from %s\n",
				dev_name(&pdev->dev));
		} else {
			dev_warn(&nhi->pdev->dev, "device link creation from %s failed\n",
				 dev_name(&pdev->dev));
		}
	}
}

struct tb *tb_probe(struct tb_nhi *nhi)
{
	struct tb_cm *tcm;
	struct tb *tb;

	tb = tb_domain_alloc(nhi, TB_TIMEOUT, sizeof(*tcm));
	if (!tb)
		return NULL;

	if (tb_acpi_may_tunnel_pcie())
		tb->security_level = TB_SECURITY_USER;
	else
		tb->security_level = TB_SECURITY_NOPCIE;

	tb->cm_ops = &tb_cm_ops;

	tcm = tb_priv(tb);
	INIT_LIST_HEAD(&tcm->tunnel_list);
	INIT_LIST_HEAD(&tcm->dp_resources);
	INIT_DELAYED_WORK(&tcm->remove_work, tb_remove_work);

	tb_dbg(tb, "using software connection manager\n");

	tb_apple_add_links(nhi);
	tb_acpi_add_links(nhi);

	return tb;
}