drivers/thunderbolt/tb.c [platform/kernel/linux-starfive.git]
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Thunderbolt driver - bus logic (NHI independent)
4  *
5  * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
6  * Copyright (C) 2019, Intel Corporation
7  */
8
9 #include <linux/slab.h>
10 #include <linux/errno.h>
11 #include <linux/delay.h>
12 #include <linux/pm_runtime.h>
13 #include <linux/platform_data/x86/apple.h>
14
15 #include "tb.h"
16 #include "tb_regs.h"
17 #include "tunnel.h"
18
19 #define TB_TIMEOUT      100     /* ms */
20 #define MAX_GROUPS      7       /* max Group_ID is 7 */
21
22 /**
23  * struct tb_cm - Simple Thunderbolt connection manager
24  * @tunnel_list: List of active tunnels
25  * @dp_resources: List of available DP resources for DP tunneling
26  * @hotplug_active: tb_handle_hotplug will stop progressing plug
27  *                  events and exit if this is not set (it needs to
28  *                  acquire the lock one more time). Used to drain wq
29  *                  after cfg has been paused.
30  * @remove_work: Work used to remove any unplugged routers after
31  *               runtime resume
32  * @groups: Bandwidth groups used in this domain.
33  */
34 struct tb_cm {
35         struct list_head tunnel_list;
36         struct list_head dp_resources;
37         bool hotplug_active;
38         struct delayed_work remove_work;
39         struct tb_bandwidth_group groups[MAX_GROUPS];
40 };
41
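/*
 * The connection manager private data lives in the privdata area that
 * immediately follows struct tb (see tb_priv()), so stepping back by
 * sizeof(struct tb) recovers the owning domain.
 */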
42 static inline struct tb *tcm_to_tb(struct tb_cm *tcm)
43 {
44         return ((void *)tcm - sizeof(struct tb));
45 }
46
47 struct tb_hotplug_event {
48         struct work_struct work;
49         struct tb *tb;
50         u64 route;
51         u8 port;
52         bool unplug;
53 };
54
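/*
 * Initialize the fixed set of bandwidth groups: indices run from 1 to
 * MAX_GROUPS and each group starts with an empty list of attached ports.
 */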
55 static void tb_init_bandwidth_groups(struct tb_cm *tcm)
56 {
57         int i;
58
59         for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
60                 struct tb_bandwidth_group *group = &tcm->groups[i];
61
62                 group->tb = tcm_to_tb(tcm);
63                 group->index = i + 1;
64                 INIT_LIST_HEAD(&group->ports);
65         }
66 }
67
68 static void tb_bandwidth_group_attach_port(struct tb_bandwidth_group *group,
69                                            struct tb_port *in)
70 {
71         if (!group || WARN_ON(in->group))
72                 return;
73
74         in->group = group;
75         list_add_tail(&in->group_list, &group->ports);
76
77         tb_port_dbg(in, "attached to bandwidth group %d\n", group->index);
78 }
79
80 static struct tb_bandwidth_group *tb_find_free_bandwidth_group(struct tb_cm *tcm)
81 {
82         int i;
83
84         for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
85                 struct tb_bandwidth_group *group = &tcm->groups[i];
86
87                 if (list_empty(&group->ports))
88                         return group;
89         }
90
91         return NULL;
92 }
93
94 static struct tb_bandwidth_group *
95 tb_attach_bandwidth_group(struct tb_cm *tcm, struct tb_port *in,
96                           struct tb_port *out)
97 {
98         struct tb_bandwidth_group *group;
99         struct tb_tunnel *tunnel;
100
101         /*
102          * Find all DP tunnels that go through all the same USB4 links
103          * as this one. Because we always set up tunnels the same way, we
104          * can just check the routers at both ends of the tunnels; if
105          * they are the same we have a match.
106          */
107         list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
108                 if (!tb_tunnel_is_dp(tunnel))
109                         continue;
110
111                 if (tunnel->src_port->sw == in->sw &&
112                     tunnel->dst_port->sw == out->sw) {
113                         group = tunnel->src_port->group;
114                         if (group) {
115                                 tb_bandwidth_group_attach_port(group, in);
116                                 return group;
117                         }
118                 }
119         }
120
121         /* Otherwise pick the next available group */
122         group = tb_find_free_bandwidth_group(tcm);
123         if (group)
124                 tb_bandwidth_group_attach_port(group, in);
125         else
126                 tb_port_warn(in, "no available bandwidth groups\n");
127
128         return group;
129 }
130
131 static void tb_discover_bandwidth_group(struct tb_cm *tcm, struct tb_port *in,
132                                         struct tb_port *out)
133 {
134         if (usb4_dp_port_bw_mode_enabled(in)) {
135                 int index, i;
136
137                 index = usb4_dp_port_group_id(in);
138                 for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
139                         if (tcm->groups[i].index == index) {
140                                 tb_bandwidth_group_attach_port(&tcm->groups[i], in);
141                                 return;
142                         }
143                 }
144         }
145
146         tb_attach_bandwidth_group(tcm, in, out);
147 }
148
149 static void tb_detach_bandwidth_group(struct tb_port *in)
150 {
151         struct tb_bandwidth_group *group = in->group;
152
153         if (group) {
154                 in->group = NULL;
155                 list_del_init(&in->group_list);
156
157                 tb_port_dbg(in, "detached from bandwidth group %d\n", group->index);
158         }
159 }
160
161 static void tb_handle_hotplug(struct work_struct *work);
162
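/*
 * Allocate a hotplug event and queue it for processing on the domain
 * workqueue. If the allocation fails the event is silently dropped.
 */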
163 static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
164 {
165         struct tb_hotplug_event *ev;
166
167         ev = kmalloc(sizeof(*ev), GFP_KERNEL);
168         if (!ev)
169                 return;
170
171         ev->tb = tb;
172         ev->route = route;
173         ev->port = port;
174         ev->unplug = unplug;
175         INIT_WORK(&ev->work, tb_handle_hotplug);
176         queue_work(tb->wq, &ev->work);
177 }
178
179 /* enumeration & hot plug handling */
180
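/* Add the DP IN adapters of @sw that report an available DP resource */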
181 static void tb_add_dp_resources(struct tb_switch *sw)
182 {
183         struct tb_cm *tcm = tb_priv(sw->tb);
184         struct tb_port *port;
185
186         tb_switch_for_each_port(sw, port) {
187                 if (!tb_port_is_dpin(port))
188                         continue;
189
190                 if (!tb_switch_query_dp_resource(sw, port))
191                         continue;
192
193                 list_add_tail(&port->list, &tcm->dp_resources);
194                 tb_port_dbg(port, "DP IN resource available\n");
195         }
196 }
197
198 static void tb_remove_dp_resources(struct tb_switch *sw)
199 {
200         struct tb_cm *tcm = tb_priv(sw->tb);
201         struct tb_port *port, *tmp;
202
203         /* Clear children resources first */
204         tb_switch_for_each_port(sw, port) {
205                 if (tb_port_has_remote(port))
206                         tb_remove_dp_resources(port->remote->sw);
207         }
208
209         list_for_each_entry_safe(port, tmp, &tcm->dp_resources, list) {
210                 if (port->sw == sw) {
211                         tb_port_dbg(port, "DP OUT resource unavailable\n");
212                         list_del_init(&port->list);
213                 }
214         }
215 }
216
217 static void tb_discover_dp_resource(struct tb *tb, struct tb_port *port)
218 {
219         struct tb_cm *tcm = tb_priv(tb);
220         struct tb_port *p;
221
222         list_for_each_entry(p, &tcm->dp_resources, list) {
223                 if (p == port)
224                         return;
225         }
226
227         tb_port_dbg(port, "DP %s resource available discovered\n",
228                     tb_port_is_dpin(port) ? "IN" : "OUT");
229         list_add_tail(&port->list, &tcm->dp_resources);
230 }
231
232 static void tb_discover_dp_resources(struct tb *tb)
233 {
234         struct tb_cm *tcm = tb_priv(tb);
235         struct tb_tunnel *tunnel;
236
237         list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
238                 if (tb_tunnel_is_dp(tunnel))
239                         tb_discover_dp_resource(tb, tunnel->dst_port);
240         }
241 }
242
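/*
 * Discover the tunnels that are already up below @sw (e.g. set up by the
 * boot firmware), recursing into all child routers, and add them to @list.
 */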
243 static void tb_switch_discover_tunnels(struct tb_switch *sw,
244                                        struct list_head *list,
245                                        bool alloc_hopids)
246 {
247         struct tb *tb = sw->tb;
248         struct tb_port *port;
249
250         tb_switch_for_each_port(sw, port) {
251                 struct tb_tunnel *tunnel = NULL;
252
253                 switch (port->config.type) {
254                 case TB_TYPE_DP_HDMI_IN:
255                         tunnel = tb_tunnel_discover_dp(tb, port, alloc_hopids);
256                         /*
257                          * If a DP tunnel exists, change the TMU mode of the
258                          * host router's first-depth children to HiFi for CL0s to work.
259                          */
260                         if (tunnel)
261                                 tb_switch_enable_tmu_1st_child(tb->root_switch,
262                                                 TB_SWITCH_TMU_RATE_HIFI);
263                         break;
264
265                 case TB_TYPE_PCIE_DOWN:
266                         tunnel = tb_tunnel_discover_pci(tb, port, alloc_hopids);
267                         break;
268
269                 case TB_TYPE_USB3_DOWN:
270                         tunnel = tb_tunnel_discover_usb3(tb, port, alloc_hopids);
271                         break;
272
273                 default:
274                         break;
275                 }
276
277                 if (tunnel)
278                         list_add_tail(&tunnel->list, list);
279         }
280
281         tb_switch_for_each_port(sw, port) {
282                 if (tb_port_has_remote(port)) {
283                         tb_switch_discover_tunnels(port->remote->sw, list,
284                                                    alloc_hopids);
285                 }
286         }
287 }
288
289 static void tb_discover_tunnels(struct tb *tb)
290 {
291         struct tb_cm *tcm = tb_priv(tb);
292         struct tb_tunnel *tunnel;
293
294         tb_switch_discover_tunnels(tb->root_switch, &tcm->tunnel_list, true);
295
296         list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
297                 if (tb_tunnel_is_pci(tunnel)) {
298                         struct tb_switch *parent = tunnel->dst_port->sw;
299
300                         while (parent != tunnel->src_port->sw) {
301                                 parent->boot = true;
302                                 parent = tb_switch_parent(parent);
303                         }
304                 } else if (tb_tunnel_is_dp(tunnel)) {
305                         struct tb_port *in = tunnel->src_port;
306                         struct tb_port *out = tunnel->dst_port;
307
308                         /* Keep the domain from powering down */
309                         pm_runtime_get_sync(&in->sw->dev);
310                         pm_runtime_get_sync(&out->sw->dev);
311
312                         tb_discover_bandwidth_group(tcm, in, out);
313                 }
314         }
315 }
316
317 static int tb_port_configure_xdomain(struct tb_port *port, struct tb_xdomain *xd)
318 {
319         if (tb_switch_is_usb4(port->sw))
320                 return usb4_port_configure_xdomain(port, xd);
321         return tb_lc_configure_xdomain(port);
322 }
323
324 static void tb_port_unconfigure_xdomain(struct tb_port *port)
325 {
326         if (tb_switch_is_usb4(port->sw))
327                 usb4_port_unconfigure_xdomain(port);
328         else
329                 tb_lc_unconfigure_xdomain(port);
330
331         tb_port_enable(port->dual_link_port);
332 }
333
334 static void tb_scan_xdomain(struct tb_port *port)
335 {
336         struct tb_switch *sw = port->sw;
337         struct tb *tb = sw->tb;
338         struct tb_xdomain *xd;
339         u64 route;
340
341         if (!tb_is_xdomain_enabled())
342                 return;
343
344         route = tb_downstream_route(port);
345         xd = tb_xdomain_find_by_route(tb, route);
346         if (xd) {
347                 tb_xdomain_put(xd);
348                 return;
349         }
350
351         xd = tb_xdomain_alloc(tb, &sw->dev, route, tb->root_switch->uuid,
352                               NULL);
353         if (xd) {
354                 tb_port_at(route, sw)->xdomain = xd;
355                 tb_port_configure_xdomain(port, xd);
356                 tb_xdomain_add(xd);
357         }
358 }
359
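/*
 * Enable TMU of @sw in the requested mode. If it is currently running in a
 * different mode, disable it, post the time again and then re-enable it.
 */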
360 static int tb_enable_tmu(struct tb_switch *sw)
361 {
362         int ret;
363
364         /* If it is already enabled in correct mode, don't touch it */
365         if (tb_switch_tmu_is_enabled(sw, sw->tmu.unidirectional_request))
366                 return 0;
367
368         ret = tb_switch_tmu_disable(sw);
369         if (ret)
370                 return ret;
371
372         ret = tb_switch_tmu_post_time(sw);
373         if (ret)
374                 return ret;
375
376         return tb_switch_tmu_enable(sw);
377 }
378
379 /**
380  * tb_find_unused_port() - return the first inactive port on @sw
381  * @sw: Switch to find the port on
382  * @type: Port type to look for
383  */
384 static struct tb_port *tb_find_unused_port(struct tb_switch *sw,
385                                            enum tb_port_type type)
386 {
387         struct tb_port *port;
388
389         tb_switch_for_each_port(sw, port) {
390                 if (tb_is_upstream_port(port))
391                         continue;
392                 if (port->config.type != type)
393                         continue;
394                 if (!port->cap_adap)
395                         continue;
396                 if (tb_port_is_enabled(port))
397                         continue;
398                 return port;
399         }
400         return NULL;
401 }
402
403 static struct tb_port *tb_find_usb3_down(struct tb_switch *sw,
404                                          const struct tb_port *port)
405 {
406         struct tb_port *down;
407
408         down = usb4_switch_map_usb3_down(sw, port);
409         if (down && !tb_usb3_port_is_enabled(down))
410                 return down;
411         return NULL;
412 }
413
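/*
 * Find an active tunnel of @type. Either @src_port or @dst_port may be
 * NULL in which case only the other end is matched.
 */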
414 static struct tb_tunnel *tb_find_tunnel(struct tb *tb, enum tb_tunnel_type type,
415                                         struct tb_port *src_port,
416                                         struct tb_port *dst_port)
417 {
418         struct tb_cm *tcm = tb_priv(tb);
419         struct tb_tunnel *tunnel;
420
421         list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
422                 if (tunnel->type == type &&
423                     ((src_port && src_port == tunnel->src_port) ||
424                      (dst_port && dst_port == tunnel->dst_port))) {
425                         return tunnel;
426                 }
427         }
428
429         return NULL;
430 }
431
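/*
 * Return the USB3 tunnel that starts from the host router downstream port
 * leading toward @src_port/@dst_port, or NULL if there is none.
 */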
432 static struct tb_tunnel *tb_find_first_usb3_tunnel(struct tb *tb,
433                                                    struct tb_port *src_port,
434                                                    struct tb_port *dst_port)
435 {
436         struct tb_port *port, *usb3_down;
437         struct tb_switch *sw;
438
439         /* Pick the router that is deepest in the topology */
440         if (dst_port->sw->config.depth > src_port->sw->config.depth)
441                 sw = dst_port->sw;
442         else
443                 sw = src_port->sw;
444
445         /* Can't be the host router */
446         if (sw == tb->root_switch)
447                 return NULL;
448
449         /* Find the downstream USB4 port that leads to this router */
450         port = tb_port_at(tb_route(sw), tb->root_switch);
451         /* Find the corresponding host router USB3 downstream port */
452         usb3_down = usb4_switch_map_usb3_down(tb->root_switch, port);
453         if (!usb3_down)
454                 return NULL;
455
456         return tb_find_tunnel(tb, TB_TUNNEL_USB3, usb3_down, NULL);
457 }
458
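/*
 * Compute the minimum bandwidth still available on the links between
 * @src_port and @dst_port, subtracting what the existing DP tunnels (and
 * the first hop USB3 tunnel, if any) already consume and leaving a 10%
 * guard band on each link.
 */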
459 static int tb_available_bandwidth(struct tb *tb, struct tb_port *src_port,
460         struct tb_port *dst_port, int *available_up, int *available_down)
461 {
462         int usb3_consumed_up, usb3_consumed_down, ret;
463         struct tb_cm *tcm = tb_priv(tb);
464         struct tb_tunnel *tunnel;
465         struct tb_port *port;
466
467         tb_dbg(tb, "calculating available bandwidth between %llx:%u <-> %llx:%u\n",
468                tb_route(src_port->sw), src_port->port, tb_route(dst_port->sw),
469                dst_port->port);
470
471         tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
472         if (tunnel && tunnel->src_port != src_port &&
473             tunnel->dst_port != dst_port) {
474                 ret = tb_tunnel_consumed_bandwidth(tunnel, &usb3_consumed_up,
475                                                    &usb3_consumed_down);
476                 if (ret)
477                         return ret;
478         } else {
479                 usb3_consumed_up = 0;
480                 usb3_consumed_down = 0;
481         }
482
483         *available_up = *available_down = 40000;
484
485         /* Find the minimum available bandwidth over all links */
486         tb_for_each_port_on_path(src_port, dst_port, port) {
487                 int link_speed, link_width, up_bw, down_bw;
488
489                 if (!tb_port_is_null(port))
490                         continue;
491
492                 if (tb_is_upstream_port(port)) {
493                         link_speed = port->sw->link_speed;
494                 } else {
495                         link_speed = tb_port_get_link_speed(port);
496                         if (link_speed < 0)
497                                 return link_speed;
498                 }
499
500                 link_width = port->bonded ? 2 : 1;
501
502                 up_bw = link_speed * link_width * 1000; /* Mb/s */
503                 /* Leave 10% guard band */
504                 up_bw -= up_bw / 10;
505                 down_bw = up_bw;
506
507                 tb_port_dbg(port, "link total bandwidth %d/%d Mb/s\n", up_bw,
508                             down_bw);
509
510                 /*
511                  * Find all DP tunnels that cross the port and subtract
512                  * their consumed bandwidth from the available bandwidth.
513                  */
514                 list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
515                         int dp_consumed_up, dp_consumed_down;
516
517                         if (tb_tunnel_is_invalid(tunnel))
518                                 continue;
519
520                         if (!tb_tunnel_is_dp(tunnel))
521                                 continue;
522
523                         if (!tb_tunnel_port_on_path(tunnel, port))
524                                 continue;
525
526                         /*
527                          * Ignore the DP tunnel between src_port and
528                          * dst_port because it is the same tunnel and we
529                          * may be re-calculating estimated bandwidth.
530                          */
531                         if (tunnel->src_port == src_port &&
532                             tunnel->dst_port == dst_port)
533                                 continue;
534
535                         ret = tb_tunnel_consumed_bandwidth(tunnel,
536                                                            &dp_consumed_up,
537                                                            &dp_consumed_down);
538                         if (ret)
539                                 return ret;
540
541                         up_bw -= dp_consumed_up;
542                         down_bw -= dp_consumed_down;
543                 }
544
545                 /*
546                  * If USB3 is tunneled from the host router down to the
547                  * branch leading to the port, we need to take the consumed
548                  * USB3 bandwidth into account regardless of whether it
549                  * actually crosses the port.
550                  */
551                 up_bw -= usb3_consumed_up;
552                 down_bw -= usb3_consumed_down;
553
554                 if (up_bw < *available_up)
555                         *available_up = up_bw;
556                 if (down_bw < *available_down)
557                         *available_down = down_bw;
558         }
559
560         if (*available_up < 0)
561                 *available_up = 0;
562         if (*available_down < 0)
563                 *available_down = 0;
564
565         return 0;
566 }
567
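/*
 * Ask the first hop USB3 tunnel to release bandwidth it has allocated but
 * does not currently use so it can be offered to a new tunnel. The
 * counterpart tb_reclaim_usb3_bandwidth() hands the leftover back.
 */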
568 static int tb_release_unused_usb3_bandwidth(struct tb *tb,
569                                             struct tb_port *src_port,
570                                             struct tb_port *dst_port)
571 {
572         struct tb_tunnel *tunnel;
573
574         tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
575         return tunnel ? tb_tunnel_release_unused_bandwidth(tunnel) : 0;
576 }
577
578 static void tb_reclaim_usb3_bandwidth(struct tb *tb, struct tb_port *src_port,
579                                       struct tb_port *dst_port)
580 {
581         int ret, available_up, available_down;
582         struct tb_tunnel *tunnel;
583
584         tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
585         if (!tunnel)
586                 return;
587
588         tb_dbg(tb, "reclaiming unused bandwidth for USB3\n");
589
590         /*
591          * Calculate available bandwidth for the first hop USB3 tunnel.
592          * That determines the whole USB3 bandwidth for this branch.
593          */
594         ret = tb_available_bandwidth(tb, tunnel->src_port, tunnel->dst_port,
595                                      &available_up, &available_down);
596         if (ret) {
597                 tb_warn(tb, "failed to calculate available bandwidth\n");
598                 return;
599         }
600
601         tb_dbg(tb, "available bandwidth for USB3 %d/%d Mb/s\n",
602                available_up, available_down);
603
604         tb_tunnel_reclaim_available_bandwidth(tunnel, &available_up, &available_down);
605 }
606
607 static int tb_tunnel_usb3(struct tb *tb, struct tb_switch *sw)
608 {
609         struct tb_switch *parent = tb_switch_parent(sw);
610         int ret, available_up, available_down;
611         struct tb_port *up, *down, *port;
612         struct tb_cm *tcm = tb_priv(tb);
613         struct tb_tunnel *tunnel;
614
615         if (!tb_acpi_may_tunnel_usb3()) {
616                 tb_dbg(tb, "USB3 tunneling disabled, not creating tunnel\n");
617                 return 0;
618         }
619
620         up = tb_switch_find_port(sw, TB_TYPE_USB3_UP);
621         if (!up)
622                 return 0;
623
624         if (!sw->link_usb4)
625                 return 0;
626
627         /*
628          * Look up the available down port. Since we are chaining, it should
629          * be found right above this switch.
630          */
631         port = tb_port_at(tb_route(sw), parent);
632         down = tb_find_usb3_down(parent, port);
633         if (!down)
634                 return 0;
635
636         if (tb_route(parent)) {
637                 struct tb_port *parent_up;
638                 /*
639                  * Check first that the parent switch has its upstream USB3
640                  * port enabled. Otherwise the chain is not complete and
641                  * there is no point setting up a new tunnel.
642                  */
643                 parent_up = tb_switch_find_port(parent, TB_TYPE_USB3_UP);
644                 if (!parent_up || !tb_port_is_enabled(parent_up))
645                         return 0;
646
647                 /* Make all unused bandwidth available for the new tunnel */
648                 ret = tb_release_unused_usb3_bandwidth(tb, down, up);
649                 if (ret)
650                         return ret;
651         }
652
653         ret = tb_available_bandwidth(tb, down, up, &available_up,
654                                      &available_down);
655         if (ret)
656                 goto err_reclaim;
657
658         tb_port_dbg(up, "available bandwidth for new USB3 tunnel %d/%d Mb/s\n",
659                     available_up, available_down);
660
661         tunnel = tb_tunnel_alloc_usb3(tb, up, down, available_up,
662                                       available_down);
663         if (!tunnel) {
664                 ret = -ENOMEM;
665                 goto err_reclaim;
666         }
667
668         if (tb_tunnel_activate(tunnel)) {
669                 tb_port_info(up,
670                              "USB3 tunnel activation failed, aborting\n");
671                 ret = -EIO;
672                 goto err_free;
673         }
674
675         list_add_tail(&tunnel->list, &tcm->tunnel_list);
676         if (tb_route(parent))
677                 tb_reclaim_usb3_bandwidth(tb, down, up);
678
679         return 0;
680
681 err_free:
682         tb_tunnel_free(tunnel);
683 err_reclaim:
684         if (tb_route(parent))
685                 tb_reclaim_usb3_bandwidth(tb, down, up);
686
687         return ret;
688 }
689
690 static int tb_create_usb3_tunnels(struct tb_switch *sw)
691 {
692         struct tb_port *port;
693         int ret;
694
695         if (!tb_acpi_may_tunnel_usb3())
696                 return 0;
697
698         if (tb_route(sw)) {
699                 ret = tb_tunnel_usb3(sw->tb, sw);
700                 if (ret)
701                         return ret;
702         }
703
704         tb_switch_for_each_port(sw, port) {
705                 if (!tb_port_has_remote(port))
706                         continue;
707                 ret = tb_create_usb3_tunnels(port->remote->sw);
708                 if (ret)
709                         return ret;
710         }
711
712         return 0;
713 }
714
715 static void tb_scan_port(struct tb_port *port);
716
717 /*
718  * tb_scan_switch() - scan for and initialize downstream switches
719  */
720 static void tb_scan_switch(struct tb_switch *sw)
721 {
722         struct tb_port *port;
723
724         pm_runtime_get_sync(&sw->dev);
725
726         tb_switch_for_each_port(sw, port)
727                 tb_scan_port(port);
728
729         pm_runtime_mark_last_busy(&sw->dev);
730         pm_runtime_put_autosuspend(&sw->dev);
731 }
732
733 /*
734  * tb_scan_port() - check for and initialize switches below port
735  */
736 static void tb_scan_port(struct tb_port *port)
737 {
738         struct tb_cm *tcm = tb_priv(port->sw->tb);
739         struct tb_port *upstream_port;
740         bool discovery = false;
741         struct tb_switch *sw;
742         int ret;
743
744         if (tb_is_upstream_port(port))
745                 return;
746
747         if (tb_port_is_dpout(port) && tb_dp_port_hpd_is_active(port) == 1 &&
748             !tb_dp_port_is_enabled(port)) {
749                 tb_port_dbg(port, "DP adapter HPD set, queuing hotplug\n");
750                 tb_queue_hotplug(port->sw->tb, tb_route(port->sw), port->port,
751                                  false);
752                 return;
753         }
754
755         if (port->config.type != TB_TYPE_PORT)
756                 return;
757         if (port->dual_link_port && port->link_nr)
758                 return; /*
759                          * Downstream switch is reachable through two ports.
760                          * Only scan on the primary port (link_nr == 0).
761                          */
762
763         if (port->usb4)
764                 pm_runtime_get_sync(&port->usb4->dev);
765
766         if (tb_wait_for_port(port, false) <= 0)
767                 goto out_rpm_put;
768         if (port->remote) {
769                 tb_port_dbg(port, "port already has a remote\n");
770                 goto out_rpm_put;
771         }
772
773         tb_retimer_scan(port, true);
774
775         sw = tb_switch_alloc(port->sw->tb, &port->sw->dev,
776                              tb_downstream_route(port));
777         if (IS_ERR(sw)) {
778                 /*
779                  * If there is an error accessing the connected switch,
780                  * it may be connected to another domain. We also allow
781                  * the other domain to be connected to a max depth switch.
782                  */
783                 if (PTR_ERR(sw) == -EIO || PTR_ERR(sw) == -EADDRNOTAVAIL)
784                         tb_scan_xdomain(port);
785                 goto out_rpm_put;
786         }
787
788         if (tb_switch_configure(sw)) {
789                 tb_switch_put(sw);
790                 goto out_rpm_put;
791         }
792
793         /*
794          * If there was previously another domain connected, remove it
795          * first.
796          */
797         if (port->xdomain) {
798                 tb_xdomain_remove(port->xdomain);
799                 tb_port_unconfigure_xdomain(port);
800                 port->xdomain = NULL;
801         }
802
803         /*
804          * Do not send uevents until we have discovered all existing
805          * tunnels and know which switches were authorized already by
806          * the boot firmware.
807          */
808         if (!tcm->hotplug_active) {
809                 dev_set_uevent_suppress(&sw->dev, true);
810                 discovery = true;
811         }
812
813         /*
814          * At the moment runtime PM is only supported on Thunderbolt 2
815          * and beyond (devices with an LC).
816          */
817         sw->rpm = sw->generation > 1;
818
819         if (tb_switch_add(sw)) {
820                 tb_switch_put(sw);
821                 goto out_rpm_put;
822         }
823
824         /* Link the switches using both links if available */
825         upstream_port = tb_upstream_port(sw);
826         port->remote = upstream_port;
827         upstream_port->remote = port;
828         if (port->dual_link_port && upstream_port->dual_link_port) {
829                 port->dual_link_port->remote = upstream_port->dual_link_port;
830                 upstream_port->dual_link_port->remote = port->dual_link_port;
831         }
832
833         /* Enable lane bonding if supported */
834         tb_switch_lane_bonding_enable(sw);
835         /* Set the link configured */
836         tb_switch_configure_link(sw);
837         /*
838          * CL0s and CL1 are enabled and supported together.
839          * Silently ignore CLx enabling in case CLx is not supported.
840          */
841         if (discovery) {
842                 tb_sw_dbg(sw, "discovery, not touching CL states\n");
843         } else {
844                 ret = tb_switch_enable_clx(sw, TB_CL1);
845                 if (ret && ret != -EOPNOTSUPP)
846                         tb_sw_warn(sw, "failed to enable %s on upstream port\n",
847                                    tb_switch_clx_name(TB_CL1));
848         }
849
850         if (tb_switch_is_clx_enabled(sw, TB_CL1))
851                 /*
852                  * To support the highest CLx state, we set the router's
853                  * TMU to Normal-Uni mode.
854                  */
855                 tb_switch_tmu_configure(sw, TB_SWITCH_TMU_RATE_NORMAL, true);
856         else
857                 /* If CLx is disabled, configure the router's TMU to HiFi-Bidir mode */
858                 tb_switch_tmu_configure(sw, TB_SWITCH_TMU_RATE_HIFI, false);
859
860         if (tb_enable_tmu(sw))
861                 tb_sw_warn(sw, "failed to enable TMU\n");
862
863         /* Scan upstream retimers */
864         tb_retimer_scan(upstream_port, true);
865
866         /*
867          * Create USB 3.x tunnels only when the switch is plugged into the
868          * domain. This is because we also scan the domain during discovery
869          * and want to discover the existing USB 3.x tunnels before we
870          * create any new ones.
871          */
872         if (tcm->hotplug_active && tb_tunnel_usb3(sw->tb, sw))
873                 tb_sw_warn(sw, "USB3 tunnel creation failed\n");
874
875         tb_add_dp_resources(sw);
876         tb_scan_switch(sw);
877
878 out_rpm_put:
879         if (port->usb4) {
880                 pm_runtime_mark_last_busy(&port->usb4->dev);
881                 pm_runtime_put_autosuspend(&port->usb4->dev);
882         }
883 }
884
885 static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)
886 {
887         struct tb_port *src_port, *dst_port;
888         struct tb *tb;
889
890         if (!tunnel)
891                 return;
892
893         tb_tunnel_deactivate(tunnel);
894         list_del(&tunnel->list);
895
896         tb = tunnel->tb;
897         src_port = tunnel->src_port;
898         dst_port = tunnel->dst_port;
899
900         switch (tunnel->type) {
901         case TB_TUNNEL_DP:
902                 tb_detach_bandwidth_group(src_port);
903                 /*
904                  * In case of DP tunnel make sure the DP IN resource is
905                  * deallocated properly.
906                  */
907                 tb_switch_dealloc_dp_resource(src_port->sw, src_port);
908                 /* Now we can allow the domain to runtime suspend again */
909                 pm_runtime_mark_last_busy(&dst_port->sw->dev);
910                 pm_runtime_put_autosuspend(&dst_port->sw->dev);
911                 pm_runtime_mark_last_busy(&src_port->sw->dev);
912                 pm_runtime_put_autosuspend(&src_port->sw->dev);
913                 fallthrough;
914
915         case TB_TUNNEL_USB3:
916                 tb_reclaim_usb3_bandwidth(tb, src_port, dst_port);
917                 break;
918
919         default:
920                 /*
921                  * PCIe and DMA tunnels do not consume guaranteed
922                  * bandwidth.
923                  */
924                 break;
925         }
926
927         tb_tunnel_free(tunnel);
928 }
929
930 /*
931  * tb_free_invalid_tunnels() - destroy tunnels of devices that have gone away
932  */
933 static void tb_free_invalid_tunnels(struct tb *tb)
934 {
935         struct tb_cm *tcm = tb_priv(tb);
936         struct tb_tunnel *tunnel;
937         struct tb_tunnel *n;
938
939         list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
940                 if (tb_tunnel_is_invalid(tunnel))
941                         tb_deactivate_and_free_tunnel(tunnel);
942         }
943 }
944
945 /*
946  * tb_free_unplugged_children() - traverse hierarchy and free unplugged switches
947  */
948 static void tb_free_unplugged_children(struct tb_switch *sw)
949 {
950         struct tb_port *port;
951
952         tb_switch_for_each_port(sw, port) {
953                 if (!tb_port_has_remote(port))
954                         continue;
955
956                 if (port->remote->sw->is_unplugged) {
957                         tb_retimer_remove_all(port);
958                         tb_remove_dp_resources(port->remote->sw);
959                         tb_switch_unconfigure_link(port->remote->sw);
960                         tb_switch_lane_bonding_disable(port->remote->sw);
961                         tb_switch_remove(port->remote->sw);
962                         port->remote = NULL;
963                         if (port->dual_link_port)
964                                 port->dual_link_port->remote = NULL;
965                 } else {
966                         tb_free_unplugged_children(port->remote->sw);
967                 }
968         }
969 }
970
971 static struct tb_port *tb_find_pcie_down(struct tb_switch *sw,
972                                          const struct tb_port *port)
973 {
974         struct tb_port *down = NULL;
975
976         /*
977          * To keep plugging devices consistently in the same PCIe
978          * hierarchy, do mapping here for switch downstream PCIe ports.
979          */
980         if (tb_switch_is_usb4(sw)) {
981                 down = usb4_switch_map_pcie_down(sw, port);
982         } else if (!tb_route(sw)) {
983                 int phy_port = tb_phy_port_from_link(port->port);
984                 int index;
985
986                 /*
987                  * Hard-coded Thunderbolt port to PCIe down port mapping
988                  * per controller.
989                  */
990                 if (tb_switch_is_cactus_ridge(sw) ||
991                     tb_switch_is_alpine_ridge(sw))
992                         index = !phy_port ? 6 : 7;
993                 else if (tb_switch_is_falcon_ridge(sw))
994                         index = !phy_port ? 6 : 8;
995                 else if (tb_switch_is_titan_ridge(sw))
996                         index = !phy_port ? 8 : 9;
997                 else
998                         goto out;
999
1000                 /* Validate the hard-coding */
1001                 if (WARN_ON(index > sw->config.max_port_number))
1002                         goto out;
1003
1004                 down = &sw->ports[index];
1005         }
1006
1007         if (down) {
1008                 if (WARN_ON(!tb_port_is_pcie_down(down)))
1009                         goto out;
1010                 if (tb_pci_port_is_enabled(down))
1011                         goto out;
1012
1013                 return down;
1014         }
1015
1016 out:
1017         return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN);
1018 }
1019
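/*
 * Re-calculate and program the estimated bandwidth for each DP IN adapter
 * in @group that has the bandwidth allocation mode enabled.
 */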
1020 static void
1021 tb_recalc_estimated_bandwidth_for_group(struct tb_bandwidth_group *group)
1022 {
1023         struct tb_tunnel *first_tunnel;
1024         struct tb *tb = group->tb;
1025         struct tb_port *in;
1026         int ret;
1027
1028         tb_dbg(tb, "re-calculating bandwidth estimation for group %u\n",
1029                group->index);
1030
1031         first_tunnel = NULL;
1032         list_for_each_entry(in, &group->ports, group_list) {
1033                 int estimated_bw, estimated_up, estimated_down;
1034                 struct tb_tunnel *tunnel;
1035                 struct tb_port *out;
1036
1037                 if (!usb4_dp_port_bw_mode_enabled(in))
1038                         continue;
1039
1040                 tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL);
1041                 if (WARN_ON(!tunnel))
1042                         break;
1043
1044                 if (!first_tunnel) {
1045                         /*
1046                          * Since USB3 bandwidth is shared by all DP
1047                          * tunnels under the host router USB4 port, even
1048                          * if they do not begin from the host router, we
1049                          * can release USB3 bandwidth just once and not
1050                          * for each tunnel separately.
1051                          */
1052                         first_tunnel = tunnel;
1053                         ret = tb_release_unused_usb3_bandwidth(tb,
1054                                 first_tunnel->src_port, first_tunnel->dst_port);
1055                         if (ret) {
1056                                 tb_port_warn(in,
1057                                         "failed to release unused bandwidth\n");
1058                                 break;
1059                         }
1060                 }
1061
1062                 out = tunnel->dst_port;
1063                 ret = tb_available_bandwidth(tb, in, out, &estimated_up,
1064                                              &estimated_down);
1065                 if (ret) {
1066                         tb_port_warn(in,
1067                                 "failed to re-calculate estimated bandwidth\n");
1068                         break;
1069                 }
1070
1071                 /*
1072                  * Estimated bandwidth includes:
1073                  *  - already allocated bandwidth for the DP tunnel
1074                  *  - available bandwidth along the path
1075                  *  - bandwidth allocated for USB 3.x but not used.
1076                  */
1077                 tb_port_dbg(in, "re-calculated estimated bandwidth %u/%u Mb/s\n",
1078                             estimated_up, estimated_down);
1079
1080                 if (in->sw->config.depth < out->sw->config.depth)
1081                         estimated_bw = estimated_down;
1082                 else
1083                         estimated_bw = estimated_up;
1084
1085                 if (usb4_dp_port_set_estimated_bw(in, estimated_bw))
1086                         tb_port_warn(in, "failed to update estimated bandwidth\n");
1087         }
1088
1089         if (first_tunnel)
1090                 tb_reclaim_usb3_bandwidth(tb, first_tunnel->src_port,
1091                                           first_tunnel->dst_port);
1092
1093         tb_dbg(tb, "bandwidth estimation for group %u done\n", group->index);
1094 }
1095
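/* Re-run the bandwidth estimation for every group that has ports attached */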
1096 static void tb_recalc_estimated_bandwidth(struct tb *tb)
1097 {
1098         struct tb_cm *tcm = tb_priv(tb);
1099         int i;
1100
1101         tb_dbg(tb, "bandwidth consumption changed, re-calculating estimated bandwidth\n");
1102
1103         for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
1104                 struct tb_bandwidth_group *group = &tcm->groups[i];
1105
1106                 if (!list_empty(&group->ports))
1107                         tb_recalc_estimated_bandwidth_for_group(group);
1108         }
1109
1110         tb_dbg(tb, "bandwidth re-calculation done\n");
1111 }
1112
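/*
 * Pick an unused DP OUT adapter for @in. When @in is not on the host
 * router, only adapters behind the same host router downstream port are
 * considered so that the tunnel stays within one branch of the topology.
 */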
1113 static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in)
1114 {
1115         struct tb_port *host_port, *port;
1116         struct tb_cm *tcm = tb_priv(tb);
1117
1118         host_port = tb_route(in->sw) ?
1119                 tb_port_at(tb_route(in->sw), tb->root_switch) : NULL;
1120
1121         list_for_each_entry(port, &tcm->dp_resources, list) {
1122                 if (!tb_port_is_dpout(port))
1123                         continue;
1124
1125                 if (tb_port_is_enabled(port)) {
1126                         tb_port_dbg(port, "DP OUT in use\n");
1127                         continue;
1128                 }
1129
1130                 tb_port_dbg(port, "DP OUT available\n");
1131
1132                 /*
1133                  * Keep the DP tunnel under the topology starting from
1134                  * the same host router downstream port.
1135                  */
1136                 if (host_port && tb_route(port->sw)) {
1137                         struct tb_port *p;
1138
1139                         p = tb_port_at(tb_route(port->sw), tb->root_switch);
1140                         if (p != host_port)
1141                                 continue;
1142                 }
1143
1144                 return port;
1145         }
1146
1147         return NULL;
1148 }
1149
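/*
 * Pair an inactive DP IN adapter with an available DP OUT adapter and
 * establish a DP tunnel between them, reserving bandwidth along the path.
 */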
1150 static void tb_tunnel_dp(struct tb *tb)
1151 {
1152         int available_up, available_down, ret, link_nr;
1153         struct tb_cm *tcm = tb_priv(tb);
1154         struct tb_port *port, *in, *out;
1155         struct tb_tunnel *tunnel;
1156
1157         if (!tb_acpi_may_tunnel_dp()) {
1158                 tb_dbg(tb, "DP tunneling disabled, not creating tunnel\n");
1159                 return;
1160         }
1161
1162         /*
1163          * Find pair of inactive DP IN and DP OUT adapters and then
1164          * establish a DP tunnel between them.
1165          */
1166         tb_dbg(tb, "looking for DP IN <-> DP OUT pairs:\n");
1167
1168         in = NULL;
1169         out = NULL;
1170         list_for_each_entry(port, &tcm->dp_resources, list) {
1171                 if (!tb_port_is_dpin(port))
1172                         continue;
1173
1174                 if (tb_port_is_enabled(port)) {
1175                         tb_port_dbg(port, "DP IN in use\n");
1176                         continue;
1177                 }
1178
1179                 tb_port_dbg(port, "DP IN available\n");
1180
1181                 out = tb_find_dp_out(tb, port);
1182                 if (out) {
1183                         in = port;
1184                         break;
1185                 }
1186         }
1187
1188         if (!in) {
1189                 tb_dbg(tb, "no suitable DP IN adapter available, not tunneling\n");
1190                 return;
1191         }
1192         if (!out) {
1193                 tb_dbg(tb, "no suitable DP OUT adapter available, not tunneling\n");
1194                 return;
1195         }
1196
1197         /*
1198          * This is only applicable to links that are not bonded (so
1199          * when Thunderbolt 1 hardware is involved somewhere in the
1200          * topology). For these try to share the DP bandwidth between
1201          * the two lanes.
1202          */
1203         link_nr = 1;
1204         list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
1205                 if (tb_tunnel_is_dp(tunnel)) {
1206                         link_nr = 0;
1207                         break;
1208                 }
1209         }
1210
1211         /*
1212          * DP stream needs the domain to be active so runtime resume
1213          * both ends of the tunnel.
1214          *
1215          * This should bring the routers in the middle active as well
1216          * and keeps the domain from runtime suspending while the DP
1217          * tunnel is active.
1218          */
1219         pm_runtime_get_sync(&in->sw->dev);
1220         pm_runtime_get_sync(&out->sw->dev);
1221
1222         if (tb_switch_alloc_dp_resource(in->sw, in)) {
1223                 tb_port_dbg(in, "no resource available for DP IN, not tunneling\n");
1224                 goto err_rpm_put;
1225         }
1226
1227         if (!tb_attach_bandwidth_group(tcm, in, out))
1228                 goto err_dealloc_dp;
1229
1230         /* Make all unused USB3 bandwidth available for the new DP tunnel */
1231         ret = tb_release_unused_usb3_bandwidth(tb, in, out);
1232         if (ret) {
1233                 tb_warn(tb, "failed to release unused bandwidth\n");
1234                 goto err_detach_group;
1235         }
1236
1237         ret = tb_available_bandwidth(tb, in, out, &available_up, &available_down);
1238         if (ret)
1239                 goto err_reclaim_usb;
1240
1241         tb_dbg(tb, "available bandwidth for new DP tunnel %u/%u Mb/s\n",
1242                available_up, available_down);
1243
1244         tunnel = tb_tunnel_alloc_dp(tb, in, out, link_nr, available_up,
1245                                     available_down);
1246         if (!tunnel) {
1247                 tb_port_dbg(out, "could not allocate DP tunnel\n");
1248                 goto err_reclaim_usb;
1249         }
1250
1251         if (tb_tunnel_activate(tunnel)) {
1252                 tb_port_info(out, "DP tunnel activation failed, aborting\n");
1253                 goto err_free;
1254         }
1255
1256         list_add_tail(&tunnel->list, &tcm->tunnel_list);
1257         tb_reclaim_usb3_bandwidth(tb, in, out);
1258
1259         /* Update the domain with the new bandwidth estimation */
1260         tb_recalc_estimated_bandwidth(tb);
1261
1262         /*
1263          * Now that a DP tunnel exists, change the TMU mode of the host
1264          * router's first-depth children to HiFi for CL0s to work.
1265          */
1266         tb_switch_enable_tmu_1st_child(tb->root_switch, TB_SWITCH_TMU_RATE_HIFI);
1267
1268         return;
1269
1270 err_free:
1271         tb_tunnel_free(tunnel);
1272 err_reclaim_usb:
1273         tb_reclaim_usb3_bandwidth(tb, in, out);
1274 err_detach_group:
1275         tb_detach_bandwidth_group(in);
1276 err_dealloc_dp:
1277         tb_switch_dealloc_dp_resource(in->sw, in);
1278 err_rpm_put:
1279         pm_runtime_mark_last_busy(&out->sw->dev);
1280         pm_runtime_put_autosuspend(&out->sw->dev);
1281         pm_runtime_mark_last_busy(&in->sw->dev);
1282         pm_runtime_put_autosuspend(&in->sw->dev);
1283 }
1284
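/*
 * A DP adapter is no longer available: tear down the tunnel using it,
 * drop it from the resource list and see whether the freed bandwidth
 * allows another DP tunnel to be created.
 */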
1285 static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port)
1286 {
1287         struct tb_port *in, *out;
1288         struct tb_tunnel *tunnel;
1289
1290         if (tb_port_is_dpin(port)) {
1291                 tb_port_dbg(port, "DP IN resource unavailable\n");
1292                 in = port;
1293                 out = NULL;
1294         } else {
1295                 tb_port_dbg(port, "DP OUT resource unavailable\n");
1296                 in = NULL;
1297                 out = port;
1298         }
1299
1300         tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, out);
1301         tb_deactivate_and_free_tunnel(tunnel);
1302         list_del_init(&port->list);
1303
1304         /*
1305          * See if there is another DP OUT port that can be used to
1306          * create another tunnel.
1307          */
1308         tb_recalc_estimated_bandwidth(tb);
1309         tb_tunnel_dp(tb);
1310 }
1311
1312 static void tb_dp_resource_available(struct tb *tb, struct tb_port *port)
1313 {
1314         struct tb_cm *tcm = tb_priv(tb);
1315         struct tb_port *p;
1316
1317         if (tb_port_is_enabled(port))
1318                 return;
1319
1320         list_for_each_entry(p, &tcm->dp_resources, list) {
1321                 if (p == port)
1322                         return;
1323         }
1324
1325         tb_port_dbg(port, "DP %s resource available\n",
1326                     tb_port_is_dpin(port) ? "IN" : "OUT");
1327         list_add_tail(&port->list, &tcm->dp_resources);
1328
1329         /* Look for suitable DP IN <-> DP OUT pairs now */
1330         tb_tunnel_dp(tb);
1331 }
1332
1333 static void tb_disconnect_and_release_dp(struct tb *tb)
1334 {
1335         struct tb_cm *tcm = tb_priv(tb);
1336         struct tb_tunnel *tunnel, *n;
1337
1338         /*
1339          * Tear down all DP tunnels and release their resources. They
1340          * will be re-established after resume based on plug events.
1341          */
1342         list_for_each_entry_safe_reverse(tunnel, n, &tcm->tunnel_list, list) {
1343                 if (tb_tunnel_is_dp(tunnel))
1344                         tb_deactivate_and_free_tunnel(tunnel);
1345         }
1346
1347         while (!list_empty(&tcm->dp_resources)) {
1348                 struct tb_port *port;
1349
1350                 port = list_first_entry(&tcm->dp_resources,
1351                                         struct tb_port, list);
1352                 list_del_init(&port->list);
1353         }
1354 }
1355
1356 static int tb_disconnect_pci(struct tb *tb, struct tb_switch *sw)
1357 {
1358         struct tb_tunnel *tunnel;
1359         struct tb_port *up;
1360
1361         up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
1362         if (WARN_ON(!up))
1363                 return -ENODEV;
1364
1365         tunnel = tb_find_tunnel(tb, TB_TUNNEL_PCI, NULL, up);
1366         if (WARN_ON(!tunnel))
1367                 return -ENODEV;
1368
1369         tb_switch_xhci_disconnect(sw);
1370
1371         tb_tunnel_deactivate(tunnel);
1372         list_del(&tunnel->list);
1373         tb_tunnel_free(tunnel);
1374         return 0;
1375 }
1376
1377 static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
1378 {
1379         struct tb_port *up, *down, *port;
1380         struct tb_cm *tcm = tb_priv(tb);
1381         struct tb_switch *parent_sw;
1382         struct tb_tunnel *tunnel;
1383
1384         up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
1385         if (!up)
1386                 return 0;
1387
1388         /*
1389          * Look up the available down port. Since we are chaining, it should
1390          * be found right above this switch.
1391          */
1392         parent_sw = tb_to_switch(sw->dev.parent);
1393         port = tb_port_at(tb_route(sw), parent_sw);
1394         down = tb_find_pcie_down(parent_sw, port);
1395         if (!down)
1396                 return 0;
1397
1398         tunnel = tb_tunnel_alloc_pci(tb, up, down);
1399         if (!tunnel)
1400                 return -ENOMEM;
1401
1402         if (tb_tunnel_activate(tunnel)) {
1403                 tb_port_info(up,
1404                              "PCIe tunnel activation failed, aborting\n");
1405                 tb_tunnel_free(tunnel);
1406                 return -EIO;
1407         }
1408
1409         /*
1410          * PCIe L1 is needed to enable CL0s for Titan Ridge so enable it
1411          * here.
1412          */
1413         if (tb_switch_pcie_l1_enable(sw))
1414                 tb_sw_warn(sw, "failed to enable PCIe L1 for Titan Ridge\n");
1415
1416         if (tb_switch_xhci_connect(sw))
1417                 tb_sw_warn(sw, "failed to connect xHCI\n");
1418
1419         list_add_tail(&tunnel->list, &tcm->tunnel_list);
1420         return 0;
1421 }
1422
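/*
 * Set up a DMA tunnel between the host NHI and the XDomain @xd using the
 * given transmit/receive rings and paths.
 */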
1423 static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
1424                                     int transmit_path, int transmit_ring,
1425                                     int receive_path, int receive_ring)
1426 {
1427         struct tb_cm *tcm = tb_priv(tb);
1428         struct tb_port *nhi_port, *dst_port;
1429         struct tb_tunnel *tunnel;
1430         struct tb_switch *sw;
1431
1432         sw = tb_to_switch(xd->dev.parent);
1433         dst_port = tb_port_at(xd->route, sw);
1434         nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);
1435
1436         mutex_lock(&tb->lock);
1437         tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, transmit_path,
1438                                      transmit_ring, receive_path, receive_ring);
1439         if (!tunnel) {
1440                 mutex_unlock(&tb->lock);
1441                 return -ENOMEM;
1442         }
1443
1444         if (tb_tunnel_activate(tunnel)) {
1445                 tb_port_info(nhi_port,
1446                              "DMA tunnel activation failed, aborting\n");
1447                 tb_tunnel_free(tunnel);
1448                 mutex_unlock(&tb->lock);
1449                 return -EIO;
1450         }
1451
1452         list_add_tail(&tunnel->list, &tcm->tunnel_list);
1453         mutex_unlock(&tb->lock);
1454         return 0;
1455 }
1456
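/*
 * Tear down the DMA tunnels toward @xd that match the given rings and
 * paths. Passing -1 for all of them, as done on unplug, removes every DMA
 * tunnel to that XDomain.
 */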
1457 static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
1458                                           int transmit_path, int transmit_ring,
1459                                           int receive_path, int receive_ring)
1460 {
1461         struct tb_cm *tcm = tb_priv(tb);
1462         struct tb_port *nhi_port, *dst_port;
1463         struct tb_tunnel *tunnel, *n;
1464         struct tb_switch *sw;
1465
1466         sw = tb_to_switch(xd->dev.parent);
1467         dst_port = tb_port_at(xd->route, sw);
1468         nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);
1469
1470         list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
1471                 if (!tb_tunnel_is_dma(tunnel))
1472                         continue;
1473                 if (tunnel->src_port != nhi_port || tunnel->dst_port != dst_port)
1474                         continue;
1475
1476                 if (tb_tunnel_match_dma(tunnel, transmit_path, transmit_ring,
1477                                         receive_path, receive_ring))
1478                         tb_deactivate_and_free_tunnel(tunnel);
1479         }
1480 }
1481
1482 static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
1483                                        int transmit_path, int transmit_ring,
1484                                        int receive_path, int receive_ring)
1485 {
1486         if (!xd->is_unplugged) {
1487                 mutex_lock(&tb->lock);
1488                 __tb_disconnect_xdomain_paths(tb, xd, transmit_path,
1489                                               transmit_ring, receive_path,
1490                                               receive_ring);
1491                 mutex_unlock(&tb->lock);
1492         }
1493         return 0;
1494 }
1495
1496 /* hotplug handling */
1497
1498 /*
1499  * tb_handle_hotplug() - handle hotplug event
1500  *
1501  * Executes on tb->wq.
1502  */
1503 static void tb_handle_hotplug(struct work_struct *work)
1504 {
1505         struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
1506         struct tb *tb = ev->tb;
1507         struct tb_cm *tcm = tb_priv(tb);
1508         struct tb_switch *sw;
1509         struct tb_port *port;
1510
1511         /* Bring the domain back from sleep if it was suspended */
1512         pm_runtime_get_sync(&tb->dev);
1513
1514         mutex_lock(&tb->lock);
1515         if (!tcm->hotplug_active)
1516                 goto out; /* during init, suspend or shutdown */
1517
1518         sw = tb_switch_find_by_route(tb, ev->route);
1519         if (!sw) {
1520                 tb_warn(tb,
1521                         "hotplug event from non existent switch %llx:%x (unplug: %d)\n",
1522                         ev->route, ev->port, ev->unplug);
1523                 goto out;
1524         }
1525         if (ev->port > sw->config.max_port_number) {
1526                 tb_warn(tb,
1527                         "hotplug event from non-existent port %llx:%x (unplug: %d)\n",
1528                         ev->route, ev->port, ev->unplug);
1529                 goto put_sw;
1530         }
1531         port = &sw->ports[ev->port];
1532         if (tb_is_upstream_port(port)) {
1533                 tb_dbg(tb, "hotplug event for upstream port %llx:%x (unplug: %d)\n",
1534                        ev->route, ev->port, ev->unplug);
1535                 goto put_sw;
1536         }
1537
1538         pm_runtime_get_sync(&sw->dev);
1539
1540         if (ev->unplug) {
1541                 tb_retimer_remove_all(port);
1542
1543                 if (tb_port_has_remote(port)) {
1544                         tb_port_dbg(port, "switch unplugged\n");
1545                         tb_sw_set_unplugged(port->remote->sw);
1546                         tb_free_invalid_tunnels(tb);
1547                         tb_remove_dp_resources(port->remote->sw);
1548                         tb_switch_tmu_disable(port->remote->sw);
1549                         tb_switch_unconfigure_link(port->remote->sw);
1550                         tb_switch_lane_bonding_disable(port->remote->sw);
1551                         tb_switch_remove(port->remote->sw);
1552                         port->remote = NULL;
1553                         if (port->dual_link_port)
1554                                 port->dual_link_port->remote = NULL;
1555                         /* Maybe we can create another DP tunnel */
1556                         tb_recalc_estimated_bandwidth(tb);
1557                         tb_tunnel_dp(tb);
1558                 } else if (port->xdomain) {
1559                         struct tb_xdomain *xd = tb_xdomain_get(port->xdomain);
1560
1561                         tb_port_dbg(port, "xdomain unplugged\n");
1562                         /*
1563                          * Service drivers are unbound during
1564                          * tb_xdomain_remove() so setting XDomain as
1565                          * unplugged here prevents deadlock if they call
1566                          * tb_xdomain_disable_paths(). We will tear down
1567                          * all the tunnels below.
1568                          */
1569                         xd->is_unplugged = true;
1570                         tb_xdomain_remove(xd);
1571                         port->xdomain = NULL;
1572                         __tb_disconnect_xdomain_paths(tb, xd, -1, -1, -1, -1);
1573                         tb_xdomain_put(xd);
1574                         tb_port_unconfigure_xdomain(port);
1575                 } else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
1576                         tb_dp_resource_unavailable(tb, port);
1577                 } else if (!port->port) {
1578                         tb_sw_dbg(sw, "xHCI disconnect request\n");
1579                         tb_switch_xhci_disconnect(sw);
1580                 } else {
1581                         tb_port_dbg(port,
1582                                    "got unplug event for disconnected port, ignoring\n");
1583                 }
1584         } else if (port->remote) {
1585                 tb_port_dbg(port, "got plug event for connected port, ignoring\n");
1586         } else if (!port->port && sw->authorized) {
1587                 tb_sw_dbg(sw, "xHCI connect request\n");
1588                 tb_switch_xhci_connect(sw);
1589         } else {
1590                 if (tb_port_is_null(port)) {
1591                         tb_port_dbg(port, "hotplug: scanning\n");
1592                         tb_scan_port(port);
1593                         if (!port->remote)
1594                                 tb_port_dbg(port, "hotplug: no switch found\n");
1595                 } else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
1596                         tb_dp_resource_available(tb, port);
1597                 }
1598         }
1599
1600         pm_runtime_mark_last_busy(&sw->dev);
1601         pm_runtime_put_autosuspend(&sw->dev);
1602
1603 put_sw:
1604         tb_switch_put(sw);
1605 out:
1606         mutex_unlock(&tb->lock);
1607
1608         pm_runtime_mark_last_busy(&tb->dev);
1609         pm_runtime_put_autosuspend(&tb->dev);
1610
1611         kfree(ev);
1612 }
1613
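/*
 * tb_alloc_dp_bandwidth() - Apply a DP bandwidth allocation request
 *
 * Descriptive summary of the flow below: read the currently allocated
 * bandwidth, correct the request against the DP IN granularity, and if
 * more bandwidth is needed than is currently reserved, release unused
 * USB3 bandwidth first, check what is available over the shared links
 * and reclaim the USB3 bandwidth afterwards. Returns %-ENOBUFS if the
 * request cannot be satisfied.
 */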
1614 static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up,
1615                                  int *requested_down)
1616 {
1617         int allocated_up, allocated_down, available_up, available_down, ret;
1618         int requested_up_corrected, requested_down_corrected, granularity;
1619         int max_up, max_down, max_up_rounded, max_down_rounded;
1620         struct tb *tb = tunnel->tb;
1621         struct tb_port *in, *out;
1622
1623         ret = tb_tunnel_allocated_bandwidth(tunnel, &allocated_up, &allocated_down);
1624         if (ret)
1625                 return ret;
1626
1627         in = tunnel->src_port;
1628         out = tunnel->dst_port;
1629
1630         tb_port_dbg(in, "bandwidth allocated currently %d/%d Mb/s\n",
1631                     allocated_up, allocated_down);
1632
1633         /*
1634          * If we get a rounded-up request from the graphics side, say HBR2 x 4
1635          * that is 17500 instead of 17280 (this is because of the
1636          * granularity), we allow it too. At this point graphics has already
1637          * negotiated with the DPRX the maximum possible rate (which is
1638          * 17280 in this case).
1639          *
1640          * Since the link cannot go higher than 17280 we use that in our
1641          * calculations, but the DP IN adapter Allocated BW write must be
1642          * the same value (17500), otherwise the adapter will mark it as
1643          * failed for graphics.
1644          */
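        /*
         * Worked example (illustrative, assuming the DP IN adapter
         * reports a granularity of 250 Mb/s): HBR2 x 4 is
         * 4 lanes * 5.4 Gb/s * 8b/10b = 17280 Mb/s, and
         * roundup(17280, 250) = 17500 Mb/s, which is what graphics
         * writes as its request.
         */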
1645         ret = tb_tunnel_maximum_bandwidth(tunnel, &max_up, &max_down);
1646         if (ret)
1647                 return ret;
1648
1649         ret = usb4_dp_port_granularity(in);
1650         if (ret < 0)
1651                 return ret;
1652         granularity = ret;
1653
1654         max_up_rounded = roundup(max_up, granularity);
1655         max_down_rounded = roundup(max_down, granularity);
1656
1657         /*
1658          * If the request equals the rounded-up maximum, "fix" it down
1659          * to the real maximum supported rate * lanes.
1660          */
1661         requested_up_corrected = *requested_up;
1662         if (requested_up_corrected == max_up_rounded)
1663                 requested_up_corrected = max_up;
1664         else if (requested_up_corrected < 0)
1665                 requested_up_corrected = 0;
1666         requested_down_corrected = *requested_down;
1667         if (requested_down_corrected == max_down_rounded)
1668                 requested_down_corrected = max_down;
1669         else if (requested_down_corrected < 0)
1670                 requested_down_corrected = 0;
1671
1672         tb_port_dbg(in, "corrected bandwidth request %d/%d Mb/s\n",
1673                     requested_up_corrected, requested_down_corrected);
1674
1675         if ((*requested_up >= 0 && requested_up_corrected > max_up_rounded) ||
1676             (*requested_down >= 0 && requested_down_corrected > max_down_rounded)) {
1677                 tb_port_dbg(in, "bandwidth request too high (%d/%d Mb/s > %d/%d Mb/s)\n",
1678                             requested_up_corrected, requested_down_corrected,
1679                             max_up_rounded, max_down_rounded);
1680                 return -ENOBUFS;
1681         }
1682
1683         if ((*requested_up >= 0 && requested_up_corrected <= allocated_up) ||
1684             (*requested_down >= 0 && requested_down_corrected <= allocated_down)) {
1685                 /*
1686                  * If the requested bandwidth is less than or equal to
1687                  * what is currently allocated to the tunnel, we simply
1688                  * change the reservation of the tunnel. Since all the
1689                  * tunnels going out from the same USB4 port are in the
1690                  * same group, the released bandwidth will be taken into
1691                  * account for the other tunnels automatically below.
1692                  */
1693                 return tb_tunnel_alloc_bandwidth(tunnel, requested_up,
1694                                                  requested_down);
1695         }
1696
1697         /*
1698          * More bandwidth is requested. Release all the potential
1699          * bandwidth from USB3 first.
1700          */
1701         ret = tb_release_unused_usb3_bandwidth(tb, in, out);
1702         if (ret)
1703                 return ret;
1704
1705         /*
1706          * Then go over all tunnels that cross the same USB4 ports (they
1707          * are also in the same group but we use the same function here
1708          * that we use with the normal bandwidth allocation).
1709          */
1710         ret = tb_available_bandwidth(tb, in, out, &available_up, &available_down);
1711         if (ret)
1712                 goto reclaim;
1713
1714         tb_port_dbg(in, "bandwidth available for allocation %d/%d Mb/s\n",
1715                     available_up, available_down);
1716
1717         if ((*requested_up >= 0 && available_up >= requested_up_corrected) ||
1718             (*requested_down >= 0 && available_down >= requested_down_corrected)) {
1719                 ret = tb_tunnel_alloc_bandwidth(tunnel, requested_up,
1720                                                 requested_down);
1721         } else {
1722                 ret = -ENOBUFS;
1723         }
1724
1725 reclaim:
1726         tb_reclaim_usb3_bandwidth(tb, in, out);
1727         return ret;
1728 }
1729
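/*
 * tb_handle_dp_bandwidth_request() - Handle a DP_BW notification work item
 *
 * Executes on tb->wq, queued from tb_queue_dp_bandwidth_request().
 */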
1730 static void tb_handle_dp_bandwidth_request(struct work_struct *work)
1731 {
1732         struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
1733         int requested_bw, requested_up, requested_down, ret;
1734         struct tb_port *in, *out;
1735         struct tb_tunnel *tunnel;
1736         struct tb *tb = ev->tb;
1737         struct tb_cm *tcm = tb_priv(tb);
1738         struct tb_switch *sw;
1739
1740         pm_runtime_get_sync(&tb->dev);
1741
1742         mutex_lock(&tb->lock);
1743         if (!tcm->hotplug_active)
1744                 goto unlock;
1745
1746         sw = tb_switch_find_by_route(tb, ev->route);
1747         if (!sw) {
1748                 tb_warn(tb, "bandwidth request from non-existent router %llx\n",
1749                         ev->route);
1750                 goto unlock;
1751         }
1752
1753         in = &sw->ports[ev->port];
1754         if (!tb_port_is_dpin(in)) {
1755                 tb_port_warn(in, "bandwidth request to non-DP IN adapter\n");
1756                 goto put_sw;
1757         }
1758
1759         tb_port_dbg(in, "handling bandwidth allocation request\n");
1760
1761         if (!usb4_dp_port_bw_mode_enabled(in)) {
1762                 tb_port_warn(in, "bandwidth allocation mode not enabled\n");
1763                 goto put_sw;
1764         }
1765
1766         ret = usb4_dp_port_requested_bw(in);
1767         if (ret < 0) {
1768                 if (ret == -ENODATA)
1769                         tb_port_dbg(in, "no bandwidth request active\n");
1770                 else
1771                         tb_port_warn(in, "failed to read requested bandwidth\n");
1772                 goto put_sw;
1773         }
1774         requested_bw = ret;
1775
1776         tb_port_dbg(in, "requested bandwidth %d Mb/s\n", requested_bw);
1777
1778         tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL);
1779         if (!tunnel) {
1780                 tb_port_warn(in, "failed to find tunnel\n");
1781                 goto put_sw;
1782         }
1783
1784         out = tunnel->dst_port;
1785
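        /*
         * The adapter with the smaller depth sits closer to the host,
         * so pick which direction of the path the request applies to;
         * the other direction is left untouched (-1).
         */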
1786         if (in->sw->config.depth < out->sw->config.depth) {
1787                 requested_up = -1;
1788                 requested_down = requested_bw;
1789         } else {
1790                 requested_up = requested_bw;
1791                 requested_down = -1;
1792         }
1793
1794         ret = tb_alloc_dp_bandwidth(tunnel, &requested_up, &requested_down);
1795         if (ret) {
1796                 if (ret == -ENOBUFS)
1797                         tb_port_warn(in, "not enough bandwidth available\n");
1798                 else
1799                         tb_port_warn(in, "failed to change bandwidth allocation\n");
1800         } else {
1801                 tb_port_dbg(in, "bandwidth allocation changed to %d/%d Mb/s\n",
1802                             requested_up, requested_down);
1803
1804                 /* Update other clients about the allocation change */
1805                 tb_recalc_estimated_bandwidth(tb);
1806         }
1807
1808 put_sw:
        tb_switch_put(sw);
unlock:
1809         mutex_unlock(&tb->lock);
1810
1811         pm_runtime_mark_last_busy(&tb->dev);
1812         pm_runtime_put_autosuspend(&tb->dev);
1813 }
1814
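/*
 * Queued from tb_handle_notification() when the router signals
 * TB_CFG_ERROR_DP_BW.
 */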
1815 static void tb_queue_dp_bandwidth_request(struct tb *tb, u64 route, u8 port)
1816 {
1817         struct tb_hotplug_event *ev;
1818
1819         ev = kmalloc(sizeof(*ev), GFP_KERNEL);
1820         if (!ev)
1821                 return;
1822
1823         ev->tb = tb;
1824         ev->route = route;
1825         ev->port = port;
1826         INIT_WORK(&ev->work, tb_handle_dp_bandwidth_request);
1827         queue_work(tb->wq, &ev->work);
1828 }
1829
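/*
 * Router notifications (TB_CFG_PKG_ERROR) are always acknowledged; only
 * the DP_BW notification needs further handling here.
 */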
1830 static void tb_handle_notification(struct tb *tb, u64 route,
1831                                    const struct cfg_error_pkg *error)
1832 {
1833         if (tb_cfg_ack_notification(tb->ctl, route, error))
1834                 tb_warn(tb, "could not ack notification on %llx\n", route);
1835
1836         switch (error->error) {
1837         case TB_CFG_ERROR_DP_BW:
1838                 tb_queue_dp_bandwidth_request(tb, route, error->port);
1839                 break;
1840
1841         default:
1842                 /* Ack is enough */
1843                 return;
1844         }
1845 }
1846
1847 /*
1848  * tb_handle_event() - callback function for the control channel
1849  *
1850  * Acks the event and delegates to tb_handle_notification()/tb_handle_hotplug().
1851  */
1852 static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
1853                             const void *buf, size_t size)
1854 {
1855         const struct cfg_event_pkg *pkg = buf;
1856         u64 route = tb_cfg_get_route(&pkg->header);
1857
1858         switch (type) {
1859         case TB_CFG_PKG_ERROR:
1860                 tb_handle_notification(tb, route, (const struct cfg_error_pkg *)buf);
1861                 return;
1862         case TB_CFG_PKG_EVENT:
1863                 break;
1864         default:
1865                 tb_warn(tb, "unexpected event %#x, ignoring\n", type);
1866                 return;
1867         }
1868
1869         if (tb_cfg_ack_plug(tb->ctl, route, pkg->port, pkg->unplug)) {
1870                 tb_warn(tb, "could not ack plug event on %llx:%x\n", route,
1871                         pkg->port);
1872         }
1873
1874         tb_queue_hotplug(tb, route, pkg->port, pkg->unplug);
1875 }
1876
1877 static void tb_stop(struct tb *tb)
1878 {
1879         struct tb_cm *tcm = tb_priv(tb);
1880         struct tb_tunnel *tunnel;
1881         struct tb_tunnel *n;
1882
1883         cancel_delayed_work(&tcm->remove_work);
1884         /* tunnels are only present after everything has been initialized */
1885         list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
1886                 /*
1887                  * DMA tunnels require the driver to be functional so we
1888                  * tear them down. Other protocol tunnels can be left
1889                  * intact.
1890                  */
1891                 if (tb_tunnel_is_dma(tunnel))
1892                         tb_tunnel_deactivate(tunnel);
1893                 tb_tunnel_free(tunnel);
1894         }
1895         tb_switch_remove(tb->root_switch);
1896         tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
1897 }
1898
1899 static int tb_scan_finalize_switch(struct device *dev, void *data)
1900 {
1901         if (tb_is_switch(dev)) {
1902                 struct tb_switch *sw = tb_to_switch(dev);
1903
1904                 /*
1905                  * If we found that the switch was already setup by the
1906                  * boot firmware, mark it as authorized now before we
1907                  * send uevent to userspace.
1908                  */
1909                 if (sw->boot)
1910                         sw->authorized = 1;
1911
1912                 dev_set_uevent_suppress(dev, false);
1913                 kobject_uevent(&dev->kobj, KOBJ_ADD);
1914                 device_for_each_child(dev, NULL, tb_scan_finalize_switch);
1915         }
1916
1917         return 0;
1918 }
1919
1920 static int tb_start(struct tb *tb)
1921 {
1922         struct tb_cm *tcm = tb_priv(tb);
1923         int ret;
1924
1925         tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
1926         if (IS_ERR(tb->root_switch))
1927                 return PTR_ERR(tb->root_switch);
1928
1929         /*
1930          * ICM firmware upgrade needs running firmware, which is not
1931          * available in native (software connection manager) mode, so
1932          * disable firmware upgrade of the root switch.
1933          *
1934          * However, USB4 routers support NVM firmware upgrade if they
1935          * implement the necessary router operations.
1936          */
1937         tb->root_switch->no_nvm_upgrade = !tb_switch_is_usb4(tb->root_switch);
1938         /* All USB4 routers support runtime PM */
1939         tb->root_switch->rpm = tb_switch_is_usb4(tb->root_switch);
1940
1941         ret = tb_switch_configure(tb->root_switch);
1942         if (ret) {
1943                 tb_switch_put(tb->root_switch);
1944                 return ret;
1945         }
1946
1947         /* Announce the switch to the world */
1948         ret = tb_switch_add(tb->root_switch);
1949         if (ret) {
1950                 tb_switch_put(tb->root_switch);
1951                 return ret;
1952         }
1953
1954         /*
1955          * To support the highest CLx state, we set the host router's
1956          * TMU to Normal mode.
1957          */
1958         tb_switch_tmu_configure(tb->root_switch, TB_SWITCH_TMU_RATE_NORMAL,
1959                                 false);
1960         /* Enable TMU if it is off */
1961         tb_switch_tmu_enable(tb->root_switch);
1962         /* Full scan to discover devices added before the driver was loaded. */
1963         tb_scan_switch(tb->root_switch);
1964         /* Find out tunnels created by the boot firmware */
1965         tb_discover_tunnels(tb);
1966         /* Add DP resources from the DP tunnels created by the boot firmware */
1967         tb_discover_dp_resources(tb);
1968         /*
1969          * If the boot firmware did not create USB 3.x tunnels create them
1970          * now for the whole topology.
1971          */
1972         tb_create_usb3_tunnels(tb->root_switch);
1973         /* Add DP IN resources for the root switch */
1974         tb_add_dp_resources(tb->root_switch);
1975         /* Make the discovered switches available to the userspace */
1976         device_for_each_child(&tb->root_switch->dev, NULL,
1977                               tb_scan_finalize_switch);
1978
1979         /* Allow tb_handle_hotplug to progress events */
1980         tcm->hotplug_active = true;
1981         return 0;
1982 }
1983
1984 static int tb_suspend_noirq(struct tb *tb)
1985 {
1986         struct tb_cm *tcm = tb_priv(tb);
1987
1988         tb_dbg(tb, "suspending...\n");
1989         tb_disconnect_and_release_dp(tb);
1990         tb_switch_suspend(tb->root_switch, false);
1991         tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
1992         tb_dbg(tb, "suspend finished\n");
1993
1994         return 0;
1995 }
1996
1997 static void tb_restore_children(struct tb_switch *sw)
1998 {
1999         struct tb_port *port;
2000         int ret;
2001
2002         /* No need to restore if the router is already unplugged */
2003         if (sw->is_unplugged)
2004                 return;
2005
2006         /*
2007          * CL0s and CL1 are enabled and supported together.
2008          * Silently ignore CLx re-enabling in case CLx is not supported.
2009          */
2010         ret = tb_switch_enable_clx(sw, TB_CL1);
2011         if (ret && ret != -EOPNOTSUPP)
2012                 tb_sw_warn(sw, "failed to re-enable %s on upstream port\n",
2013                            tb_switch_clx_name(TB_CL1));
2014
2015         if (tb_switch_is_clx_enabled(sw, TB_CL1))
2016                 /*
2017                  * To support the highest CLx state, we set the router's
2018                  * TMU to Normal-Uni mode.
2019                  */
2020                 tb_switch_tmu_configure(sw, TB_SWITCH_TMU_RATE_NORMAL, true);
2021         else
2022                 /* If CLx is disabled, configure the router's TMU to HiFi-Bidir mode */
2023                 tb_switch_tmu_configure(sw, TB_SWITCH_TMU_RATE_HIFI, false);
2024
2025         if (tb_enable_tmu(sw))
2026                 tb_sw_warn(sw, "failed to restore TMU configuration\n");
2027
2028         tb_switch_for_each_port(sw, port) {
2029                 if (!tb_port_has_remote(port) && !port->xdomain)
2030                         continue;
2031
2032                 if (port->remote) {
2033                         tb_switch_lane_bonding_enable(port->remote->sw);
2034                         tb_switch_configure_link(port->remote->sw);
2035
2036                         tb_restore_children(port->remote->sw);
2037                 } else if (port->xdomain) {
2038                         tb_port_configure_xdomain(port, port->xdomain);
2039                 }
2040         }
2041 }
2042
2043 static int tb_resume_noirq(struct tb *tb)
2044 {
2045         struct tb_cm *tcm = tb_priv(tb);
2046         struct tb_tunnel *tunnel, *n;
2047         unsigned int usb3_delay = 0;
2048         LIST_HEAD(tunnels);
2049
2050         tb_dbg(tb, "resuming...\n");
2051
2052         /* Remove any PCI devices the firmware might have set up */
2053         tb_switch_reset(tb->root_switch);
2054
2055         tb_switch_resume(tb->root_switch);
2056         tb_free_invalid_tunnels(tb);
2057         tb_free_unplugged_children(tb->root_switch);
2058         tb_restore_children(tb->root_switch);
2059
2060         /*
2061          * If we get here from suspend to disk, the boot firmware or the
2062          * restore kernel might have created tunnels of its own. Since we
2063          * cannot be sure they are usable for us, we find and tear them
2064          * down.
2065          */
2066         tb_switch_discover_tunnels(tb->root_switch, &tunnels, false);
2067         list_for_each_entry_safe_reverse(tunnel, n, &tunnels, list) {
2068                 if (tb_tunnel_is_usb3(tunnel))
2069                         usb3_delay = 500;
2070                 tb_tunnel_deactivate(tunnel);
2071                 tb_tunnel_free(tunnel);
2072         }
2073
2074         /* Re-create our tunnels now */
2075         list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
2076                 /* USB3 requires delay before it can be re-activated */
2077                 if (tb_tunnel_is_usb3(tunnel)) {
2078                         msleep(usb3_delay);
2079                         /* Only need to do it once */
2080                         usb3_delay = 0;
2081                 }
2082                 tb_tunnel_restart(tunnel);
2083         }
2084         if (!list_empty(&tcm->tunnel_list)) {
2085                 /*
2086                  * The PCIe links need some time to come back up;
2087                  * 100 ms has proven to be enough in practice.
2088                  */
2089                 tb_dbg(tb, "tunnels restarted, sleeping for 100ms\n");
2090                 msleep(100);
2091         }
2092         /* Allow tb_handle_hotplug to progress events */
2093         tcm->hotplug_active = true;
2094         tb_dbg(tb, "resume finished\n");
2095
2096         return 0;
2097 }
2098
2099 static int tb_free_unplugged_xdomains(struct tb_switch *sw)
2100 {
2101         struct tb_port *port;
2102         int ret = 0;
2103
2104         tb_switch_for_each_port(sw, port) {
2105                 if (tb_is_upstream_port(port))
2106                         continue;
2107                 if (port->xdomain && port->xdomain->is_unplugged) {
2108                         tb_retimer_remove_all(port);
2109                         tb_xdomain_remove(port->xdomain);
2110                         tb_port_unconfigure_xdomain(port);
2111                         port->xdomain = NULL;
2112                         ret++;
2113                 } else if (port->remote) {
2114                         ret += tb_free_unplugged_xdomains(port->remote->sw);
2115                 }
2116         }
2117
2118         return ret;
2119 }
2120
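/*
 * Hibernation freeze/thaw only need to gate hotplug event handling;
 * unlike suspend/resume, no tunnels are torn down or restored here.
 */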
2121 static int tb_freeze_noirq(struct tb *tb)
2122 {
2123         struct tb_cm *tcm = tb_priv(tb);
2124
2125         tcm->hotplug_active = false;
2126         return 0;
2127 }
2128
2129 static int tb_thaw_noirq(struct tb *tb)
2130 {
2131         struct tb_cm *tcm = tb_priv(tb);
2132
2133         tcm->hotplug_active = true;
2134         return 0;
2135 }
2136
2137 static void tb_complete(struct tb *tb)
2138 {
2139         /*
2140          * Release any unplugged XDomains and, in case another domain
2141          * has been swapped in place of an unplugged XDomain, run
2142          * another rescan.
2143          */
2144         mutex_lock(&tb->lock);
2145         if (tb_free_unplugged_xdomains(tb->root_switch))
2146                 tb_scan_switch(tb->root_switch);
2147         mutex_unlock(&tb->lock);
2148 }
2149
2150 static int tb_runtime_suspend(struct tb *tb)
2151 {
2152         struct tb_cm *tcm = tb_priv(tb);
2153
2154         mutex_lock(&tb->lock);
2155         tb_switch_suspend(tb->root_switch, true);
2156         tcm->hotplug_active = false;
2157         mutex_unlock(&tb->lock);
2158
2159         return 0;
2160 }
2161
2162 static void tb_remove_work(struct work_struct *work)
2163 {
2164         struct tb_cm *tcm = container_of(work, struct tb_cm, remove_work.work);
2165         struct tb *tb = tcm_to_tb(tcm);
2166
2167         mutex_lock(&tb->lock);
2168         if (tb->root_switch) {
2169                 tb_free_unplugged_children(tb->root_switch);
2170                 tb_free_unplugged_xdomains(tb->root_switch);
2171         }
2172         mutex_unlock(&tb->lock);
2173 }
2174
2175 static int tb_runtime_resume(struct tb *tb)
2176 {
2177         struct tb_cm *tcm = tb_priv(tb);
2178         struct tb_tunnel *tunnel, *n;
2179
2180         mutex_lock(&tb->lock);
2181         tb_switch_resume(tb->root_switch);
2182         tb_free_invalid_tunnels(tb);
2183         tb_restore_children(tb->root_switch);
2184         list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
2185                 tb_tunnel_restart(tunnel);
2186         tcm->hotplug_active = true;
2187         mutex_unlock(&tb->lock);
2188
2189         /*
2190          * Schedule cleanup of any unplugged devices. Run this in a
2191          * separate thread to avoid possible deadlock if the device
2192          * removal runtime resumes the unplugged device.
2193          */
2194         queue_delayed_work(tb->wq, &tcm->remove_work, msecs_to_jiffies(50));
2195         return 0;
2196 }
2197
2198 static const struct tb_cm_ops tb_cm_ops = {
2199         .start = tb_start,
2200         .stop = tb_stop,
2201         .suspend_noirq = tb_suspend_noirq,
2202         .resume_noirq = tb_resume_noirq,
2203         .freeze_noirq = tb_freeze_noirq,
2204         .thaw_noirq = tb_thaw_noirq,
2205         .complete = tb_complete,
2206         .runtime_suspend = tb_runtime_suspend,
2207         .runtime_resume = tb_runtime_resume,
2208         .handle_event = tb_handle_event,
2209         .disapprove_switch = tb_disconnect_pci,
2210         .approve_switch = tb_tunnel_pci,
2211         .approve_xdomain_paths = tb_approve_xdomain_paths,
2212         .disconnect_xdomain_paths = tb_disconnect_xdomain_paths,
2213 };
2214
2215 /*
2216  * During suspend the Thunderbolt controller is reset and all PCIe
2217  * tunnels are lost. The NHI driver will try to reestablish all tunnels
2218  * during resume. This adds device links between the tunneled PCIe
2219  * downstream ports and the NHI so that the device core makes sure the
2220  * NHI is resumed before the rest.
2221  */
2222 static void tb_apple_add_links(struct tb_nhi *nhi)
2223 {
2224         struct pci_dev *upstream, *pdev;
2225
2226         if (!x86_apple_machine)
2227                 return;
2228
2229         switch (nhi->pdev->device) {
2230         case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
2231         case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
2232         case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI:
2233         case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI:
2234                 break;
2235         default:
2236                 return;
2237         }
2238
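        /*
         * Walk up from the NHI until we reach the PCIe upstream port of
         * the Thunderbolt controller; the hotplug downstream ports on
         * its subordinate bus are the ones linked to the NHI below.
         */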
2239         upstream = pci_upstream_bridge(nhi->pdev);
2240         while (upstream) {
2241                 if (!pci_is_pcie(upstream))
2242                         return;
2243                 if (pci_pcie_type(upstream) == PCI_EXP_TYPE_UPSTREAM)
2244                         break;
2245                 upstream = pci_upstream_bridge(upstream);
2246         }
2247
2248         if (!upstream)
2249                 return;
2250
2251         /*
2252          * For each hotplug downstream port, add a device link back to
2253          * the NHI so that PCIe tunnels can be re-established after
2254          * sleep.
2255          */
2256         for_each_pci_bridge(pdev, upstream->subordinate) {
2257                 const struct device_link *link;
2258
2259                 if (!pci_is_pcie(pdev))
2260                         continue;
2261                 if (pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM ||
2262                     !pdev->is_hotplug_bridge)
2263                         continue;
2264
2265                 link = device_link_add(&pdev->dev, &nhi->pdev->dev,
2266                                        DL_FLAG_AUTOREMOVE_SUPPLIER |
2267                                        DL_FLAG_PM_RUNTIME);
2268                 if (link) {
2269                         dev_dbg(&nhi->pdev->dev, "created link from %s\n",
2270                                 dev_name(&pdev->dev));
2271                 } else {
2272                         dev_warn(&nhi->pdev->dev, "device link creation from %s failed\n",
2273                                  dev_name(&pdev->dev));
2274                 }
2275         }
2276 }
2277
2278 struct tb *tb_probe(struct tb_nhi *nhi)
2279 {
2280         struct tb_cm *tcm;
2281         struct tb *tb;
2282
2283         tb = tb_domain_alloc(nhi, TB_TIMEOUT, sizeof(*tcm));
2284         if (!tb)
2285                 return NULL;
2286
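        /*
         * If the ACPI platform policy does not allow PCIe tunneling,
         * disable it entirely; otherwise fall back to the user security
         * level where devices need to be approved before PCIe is
         * tunneled.
         */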
2287         if (tb_acpi_may_tunnel_pcie())
2288                 tb->security_level = TB_SECURITY_USER;
2289         else
2290                 tb->security_level = TB_SECURITY_NOPCIE;
2291
2292         tb->cm_ops = &tb_cm_ops;
2293
2294         tcm = tb_priv(tb);
2295         INIT_LIST_HEAD(&tcm->tunnel_list);
2296         INIT_LIST_HEAD(&tcm->dp_resources);
2297         INIT_DELAYED_WORK(&tcm->remove_work, tb_remove_work);
2298         tb_init_bandwidth_groups(tcm);
2299
2300         tb_dbg(tb, "using software connection manager\n");
2301
2302         tb_apple_add_links(nhi);
2303         tb_acpi_add_links(nhi);
2304
2305         return tb;
2306 }