// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - path/tunnel functionality
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2019, Intel Corporation
 */

#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/ktime.h>

#include "tb.h"
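
/* Dump the register contents of a single hop entry for debugging */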
static void tb_dump_hop(const struct tb_path_hop *hop, const struct tb_regs_hop *regs)
{
	const struct tb_port *port = hop->in_port;

	tb_port_dbg(port, " In HopID: %d => Out port: %d Out HopID: %d\n",
		    hop->in_hop_index, regs->out_port, regs->next_hop);
	tb_port_dbg(port, " Weight: %d Priority: %d Credits: %d Drop: %d\n",
		    regs->weight, regs->priority,
		    regs->initial_credits, regs->drop_packages);
	tb_port_dbg(port, " Counter enabled: %d Counter index: %d\n",
		    regs->counter_enable, regs->counter);
	tb_port_dbg(port, " Flow Control (In/Eg): %d/%d Shared Buffer (In/Eg): %d/%d\n",
		    regs->ingress_fc, regs->egress_fc,
		    regs->ingress_shared_buffer, regs->egress_shared_buffer);
	tb_port_dbg(port, " Unknown1: %#x Unknown2: %#x Unknown3: %#x\n",
		    regs->unknown1, regs->unknown2, regs->unknown3);
}
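
/*
 * Follow the chain of hop entries starting from @src / @src_hopid and
 * return the output port the chain ends on, or NULL if the walk fails
 * or the final HopID does not match @dst_hopid.
 */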
static struct tb_port *tb_path_find_dst_port(struct tb_port *src, int src_hopid,
					     int dst_hopid)
{
	struct tb_port *port, *out_port = NULL;
	struct tb_regs_hop hop;
	struct tb_switch *sw;
	int i, ret, hopid;

	hopid = src_hopid;
	port = src;

	for (i = 0; port && i < TB_PATH_MAX_HOPS; i++) {
		sw = port->sw;

		ret = tb_port_read(port, &hop, TB_CFG_HOPS, 2 * hopid, 2);
		if (ret) {
			tb_port_warn(port, "failed to read path at %d\n", hopid);
			return NULL;
		}

		if (!hop.enable)
			return NULL;

		out_port = &sw->ports[hop.out_port];
		hopid = hop.next_hop;
		port = out_port->remote;
	}

	return out_port && hopid == dst_hopid ? out_port : NULL;
}
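
/*
 * Walk over the input HopIDs of @src and return the first one that leads
 * to @dst with @dst_hopid, or 0 if no such HopID exists.
 */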
static int tb_path_find_src_hopid(struct tb_port *src,
	const struct tb_port *dst, int dst_hopid)
{
	struct tb_port *out;
	int i;

	for (i = TB_PATH_MIN_HOPID; i <= src->config.max_in_hop_id; i++) {
		out = tb_path_find_dst_port(src, i, dst_hopid);
		if (out == dst)
			return i;
	}

	return 0;
}

/**
 * tb_path_discover() - Discover a path
 * @src: First input port of a path
 * @src_hopid: Starting HopID of a path (%-1 if don't care)
 * @dst: Expected destination port of the path (%NULL if don't care)
 * @dst_hopid: HopID to the @dst (%-1 if don't care)
 * @last: Last port is filled here if not %NULL
 * @name: Name of the path
 * @alloc_hopid: Allocate HopIDs for the ports
 *
 * Follows a path starting from @src and @src_hopid to the last output
 * port of the path. Allocates HopIDs for the visited ports (if
 * @alloc_hopid is true). Call tb_path_free() to release the path and
 * allocated HopIDs when the path is not needed anymore.
 *
 * Note that this function also discovers incomplete paths, so the caller
 * should check that the @dst port is the expected one. If it is not, the
 * path can be cleaned up by calling tb_path_deactivate() before
 * tb_path_free().
 *
 * Return: Discovered path on success, %NULL in case of failure
 */
struct tb_path *tb_path_discover(struct tb_port *src, int src_hopid,
				 struct tb_port *dst, int dst_hopid,
				 struct tb_port **last, const char *name,
				 bool alloc_hopid)
{
	struct tb_port *out_port;
	struct tb_regs_hop hop;
	struct tb_path *path;
	struct tb_switch *sw;
	struct tb_port *p;
	size_t num_hops;
	int ret, i, h;

	if (src_hopid < 0 && dst) {
		/*
		 * For incomplete paths the intermediate HopID can be
		 * different from the one used by the protocol adapter
		 * so in that case find a path that ends on @dst with
		 * matching @dst_hopid. That should give us the correct
		 * HopID for the @src.
		 */
		src_hopid = tb_path_find_src_hopid(src, dst, dst_hopid);
		if (!src_hopid)
			return NULL;
	}

	p = src;
	h = src_hopid;
	num_hops = 0;

	for (i = 0; p && i < TB_PATH_MAX_HOPS; i++) {
		sw = p->sw;

		ret = tb_port_read(p, &hop, TB_CFG_HOPS, 2 * h, 2);
		if (ret) {
			tb_port_warn(p, "failed to read path at %d\n", h);
			return NULL;
		}

		/* If the hop is not enabled we got an incomplete path */
		if (!hop.enable)
			break;

		out_port = &sw->ports[hop.out_port];
		if (last)
			*last = out_port;

		h = hop.next_hop;
		p = out_port->remote;
		num_hops++;
	}

	path = kzalloc(sizeof(*path), GFP_KERNEL);
	if (!path)
		return NULL;

	path->name = name;
	path->tb = src->sw->tb;
	path->path_length = num_hops;
	path->activated = true;
	path->alloc_hopid = alloc_hopid;

	path->hops = kcalloc(num_hops, sizeof(*path->hops), GFP_KERNEL);
	if (!path->hops) {
		kfree(path);
		return NULL;
	}

	tb_dbg(path->tb, "discovering %s path starting from %llx:%u\n",
	       path->name, tb_route(src->sw), src->port);

	p = src;
	h = src_hopid;

	for (i = 0; i < num_hops; i++) {
		int next_hop;

		sw = p->sw;

		ret = tb_port_read(p, &hop, TB_CFG_HOPS, 2 * h, 2);
		if (ret) {
			tb_port_warn(p, "failed to read path at %d\n", h);
			goto err;
		}

		if (alloc_hopid && tb_port_alloc_in_hopid(p, h, h) < 0)
			goto err;

		out_port = &sw->ports[hop.out_port];
		next_hop = hop.next_hop;

		if (alloc_hopid &&
		    tb_port_alloc_out_hopid(out_port, next_hop, next_hop) < 0) {
			tb_port_release_in_hopid(p, h);
			goto err;
		}

		path->hops[i].in_port = p;
		path->hops[i].in_hop_index = h;
		path->hops[i].in_counter_index = -1;
		path->hops[i].out_port = out_port;
		path->hops[i].next_hop_index = next_hop;

		tb_dump_hop(&path->hops[i], &hop);

		h = next_hop;
		p = out_port->remote;
	}

	tb_dbg(path->tb, "path discovery complete\n");
	return path;

err:
	tb_port_warn(src, "failed to discover path starting at HopID %d\n",
		     src_hopid);
	tb_path_free(path);
	return NULL;
}
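
/*
 * Illustrative sketch only (the port variables, HopID value and error
 * handling below are assumptions, not code from this file): a caller that
 * wants to take over a path set up by boot firmware could use
 * tb_path_discover() roughly like this:
 *
 *	struct tb_port *last;
 *	struct tb_path *path;
 *
 *	path = tb_path_discover(down, 8, up, 8, &last, "PCIe Down", false);
 *	if (!path)
 *		return NULL;
 *	if (last != up) {
 *		tb_path_deactivate(path);
 *		tb_path_free(path);
 *		return NULL;
 *	}
 */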

/**
 * tb_path_alloc() - allocate a thunderbolt path between two ports
 * @tb: Domain pointer
 * @src: Source port of the path
 * @src_hopid: HopID used for the first ingress port in the path
 * @dst: Destination port of the path
 * @dst_hopid: HopID used for the last egress port in the path
 * @link_nr: Preferred link if there are dual links on the path
 * @name: Name of the path
 *
 * Creates a path between two ports starting with given @src_hopid. Reserves
 * HopIDs for each port (they can be different from @src_hopid depending on
 * how many HopIDs each port has already reserved). If there are dual
 * links on the path, prioritizes using @link_nr but takes into account
 * that the lanes may be bonded.
 *
 * Return: Returns a tb_path on success or NULL on failure.
 */
struct tb_path *tb_path_alloc(struct tb *tb, struct tb_port *src, int src_hopid,
			      struct tb_port *dst, int dst_hopid, int link_nr,
			      const char *name)
{
	struct tb_port *in_port, *out_port, *first_port, *last_port;
	int in_hopid, out_hopid;
	struct tb_path *path;
	size_t num_hops;
	int i, ret;

	path = kzalloc(sizeof(*path), GFP_KERNEL);
	if (!path)
		return NULL;

	first_port = last_port = NULL;
	i = 0;
	tb_for_each_port_on_path(src, dst, in_port) {
		if (!first_port)
			first_port = in_port;
		last_port = in_port;
		i++;
	}

	/* Check that src and dst are reachable */
	if (first_port != src || last_port != dst) {
		kfree(path);
		return NULL;
	}

	/* Each hop takes two ports */
	num_hops = i / 2;

	path->hops = kcalloc(num_hops, sizeof(*path->hops), GFP_KERNEL);
	if (!path->hops) {
		kfree(path);
		return NULL;
	}

	path->alloc_hopid = true;

	in_hopid = src_hopid;
	out_port = NULL;

	for (i = 0; i < num_hops; i++) {
		in_port = tb_next_port_on_path(src, dst, out_port);
		if (!in_port)
			goto err;

		/* When lanes are bonded primary link must be used */
		if (!in_port->bonded && in_port->dual_link_port &&
		    in_port->link_nr != link_nr)
			in_port = in_port->dual_link_port;

		ret = tb_port_alloc_in_hopid(in_port, in_hopid, in_hopid);
		if (ret < 0)
			goto err;
		in_hopid = ret;

		out_port = tb_next_port_on_path(src, dst, in_port);
		if (!out_port)
			goto err;

		/*
		 * Pick up right port when going from non-bonded to
		 * bonded or from bonded to non-bonded.
		 */
		if (out_port->dual_link_port) {
			if (!in_port->bonded && out_port->bonded &&
			    out_port->link_nr) {
				/*
				 * Use primary link when going from
				 * non-bonded to bonded.
				 */
				out_port = out_port->dual_link_port;
			} else if (!out_port->bonded &&
				   out_port->link_nr != link_nr) {
				/*
				 * If out port is not bonded follow
				 * link_nr.
				 */
				out_port = out_port->dual_link_port;
			}
		}

		if (i == num_hops - 1)
			ret = tb_port_alloc_out_hopid(out_port, dst_hopid,
						      dst_hopid);
		else
			ret = tb_port_alloc_out_hopid(out_port, -1, -1);
		if (ret < 0)
			goto err;
		out_hopid = ret;

		path->hops[i].in_hop_index = in_hopid;
		path->hops[i].in_port = in_port;
		path->hops[i].in_counter_index = -1;
		path->hops[i].out_port = out_port;
		path->hops[i].next_hop_index = out_hopid;

		in_hopid = out_hopid;
	}

	path->tb = tb;
	path->path_length = num_hops;
	path->name = name;

	return path;

err:
	tb_path_free(path);
	return NULL;
}
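
/*
 * Illustrative sketch only (the ports, HopIDs and field values are
 * assumptions, not code from this file): a tunnel implementation would
 * typically allocate a path, fill in the per-path fields and activate it:
 *
 *	path = tb_path_alloc(tb, down, 8, up, 8, 0, "PCIe Down");
 *	if (!path)
 *		return NULL;
 *	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
 *	path->priority = 3;
 *	path->weight = 1;
 *	if (tb_path_activate(path)) {
 *		tb_path_free(path);
 *		return NULL;
 *	}
 */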

/**
 * tb_path_free() - free a path
 * @path: Path to free
 *
 * Frees a path. The path does not need to be deactivated.
 */
void tb_path_free(struct tb_path *path)
{
	if (path->alloc_hopid) {
		int i;

		for (i = 0; i < path->path_length; i++) {
			const struct tb_path_hop *hop = &path->hops[i];

			if (hop->in_port)
				tb_port_release_in_hopid(hop->in_port,
							 hop->in_hop_index);
			if (hop->out_port)
				tb_port_release_out_hopid(hop->out_port,
							  hop->next_hop_index);
		}
	}

	kfree(path->hops);
	kfree(path);
}
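
/*
 * Return the non-flow-controlled credits added for hops @first_hop..end
 * back to their input ports.
 */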
static void __tb_path_deallocate_nfc(struct tb_path *path, int first_hop)
{
	int i, res;

	for (i = first_hop; i < path->path_length; i++) {
		res = tb_port_add_nfc_credits(path->hops[i].in_port,
					      -path->hops[i].nfc_credits);
		if (res)
			tb_port_warn(path->hops[i].in_port,
				     "nfc credits deallocation failed for hop %d\n",
				     i);
	}
}
static int __tb_path_deactivate_hop(struct tb_port *port, int hop_index,
				    bool clear_fc)
{
	struct tb_regs_hop hop;
	ktime_t timeout;
	int ret;

	/* Disable the path */
	ret = tb_port_read(port, &hop, TB_CFG_HOPS, 2 * hop_index, 2);
	if (ret)
		return ret;

	/* Already disabled */
	if (!hop.enable)
		return 0;

	hop.enable = 0;

	ret = tb_port_write(port, &hop, TB_CFG_HOPS, 2 * hop_index, 2);
	if (ret)
		return ret;

	/* Wait until it is drained */
	timeout = ktime_add_ms(ktime_get(), 500);
	do {
		ret = tb_port_read(port, &hop, TB_CFG_HOPS, 2 * hop_index, 2);
		if (ret)
			return ret;

		if (!hop.pending) {
			if (clear_fc) {
				/*
				 * Clear flow control. Protocol adapters
				 * IFC and ISE bits are vendor defined
				 * in the USB4 spec so we clear them
				 * only for pre-USB4 adapters.
				 */
				if (!tb_switch_is_usb4(port->sw)) {
					hop.ingress_fc = 0;
					hop.ingress_shared_buffer = 0;
					hop.egress_fc = 0;
					hop.egress_shared_buffer = 0;
				}

				return tb_port_write(port, &hop, TB_CFG_HOPS,
						     2 * hop_index, 2);
			}

			return 0;
		}

		usleep_range(10, 20);
	} while (ktime_before(ktime_get(), timeout));

	return -ETIMEDOUT;
}
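
/* Deactivate all hops of the path starting from @first_hop */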
static void __tb_path_deactivate_hops(struct tb_path *path, int first_hop)
{
	int i, res;

	for (i = first_hop; i < path->path_length; i++) {
		res = __tb_path_deactivate_hop(path->hops[i].in_port,
					       path->hops[i].in_hop_index,
					       path->clear_fc);
		if (res && res != -ENODEV)
			tb_port_warn(path->hops[i].in_port,
				     "hop deactivation failed for hop %d, index %d\n",
				     i, path->hops[i].in_hop_index);
	}
}
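
/*
 * Deactivate all hops on the path and return the non-flow-controlled
 * credits that were added when the path was activated.
 */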
void tb_path_deactivate(struct tb_path *path)
{
	if (!path->activated) {
		tb_WARN(path->tb, "trying to deactivate an inactive path\n");
		return;
	}
	tb_dbg(path->tb,
	       "deactivating %s path from %llx:%u to %llx:%u\n",
	       path->name, tb_route(path->hops[0].in_port->sw),
	       path->hops[0].in_port->port,
	       tb_route(path->hops[path->path_length - 1].out_port->sw),
	       path->hops[path->path_length - 1].out_port->port);
	__tb_path_deactivate_hops(path, 0);
	__tb_path_deallocate_nfc(path, 0);
	path->activated = false;
}

/**
 * tb_path_activate() - activate a path
 * @path: Path to activate
 *
 * Activate a path starting with the last hop and iterating backwards. The
 * caller must fill path->hops before calling tb_path_activate().
 *
 * Return: Returns 0 on success or an error code on failure.
 */
int tb_path_activate(struct tb_path *path)
{
	int i, res;
	enum tb_path_port out_mask, in_mask;
	if (path->activated) {
		tb_WARN(path->tb, "trying to activate already activated path\n");
		return -EINVAL;
	}

	tb_dbg(path->tb,
	       "activating %s path from %llx:%u to %llx:%u\n",
	       path->name, tb_route(path->hops[0].in_port->sw),
	       path->hops[0].in_port->port,
	       tb_route(path->hops[path->path_length - 1].out_port->sw),
	       path->hops[path->path_length - 1].out_port->port);

	/* Clear counters. */
	for (i = path->path_length - 1; i >= 0; i--) {
		if (path->hops[i].in_counter_index == -1)
			continue;
		res = tb_port_clear_counter(path->hops[i].in_port,
					    path->hops[i].in_counter_index);
		if (res)
			goto err;
	}

	/* Add non flow controlled credits. */
	for (i = path->path_length - 1; i >= 0; i--) {
		res = tb_port_add_nfc_credits(path->hops[i].in_port,
					      path->hops[i].nfc_credits);
		if (res) {
			__tb_path_deallocate_nfc(path, i);
			goto err;
		}
	}

	/* Activate hops. */
	for (i = path->path_length - 1; i >= 0; i--) {
		struct tb_regs_hop hop = { 0 };

		/* If it is left active deactivate it first */
		__tb_path_deactivate_hop(path->hops[i].in_port,
				path->hops[i].in_hop_index, path->clear_fc);

		/* dword 0 */
		hop.next_hop = path->hops[i].next_hop_index;
		hop.out_port = path->hops[i].out_port->port;
		hop.initial_credits = path->hops[i].initial_credits;
		hop.unknown1 = 0;
		hop.enable = 1;

		/* dword 1 */
		out_mask = (i == path->path_length - 1) ?
				TB_PATH_DESTINATION : TB_PATH_INTERNAL;
		in_mask = (i == 0) ? TB_PATH_SOURCE : TB_PATH_INTERNAL;
		hop.weight = path->weight;
		hop.unknown2 = 0;
		hop.priority = path->priority;
		hop.drop_packages = path->drop_packages;
		hop.counter = path->hops[i].in_counter_index;
		hop.counter_enable = path->hops[i].in_counter_index != -1;
		hop.ingress_fc = path->ingress_fc_enable & in_mask;
		hop.egress_fc = path->egress_fc_enable & out_mask;
		hop.ingress_shared_buffer = path->ingress_shared_buffer
					    & in_mask;
		hop.egress_shared_buffer = path->egress_shared_buffer
					    & out_mask;
		hop.unknown3 = 0;

		tb_port_dbg(path->hops[i].in_port, "Writing hop %d\n", i);
		tb_dump_hop(&path->hops[i], &hop);
		res = tb_port_write(path->hops[i].in_port, &hop, TB_CFG_HOPS,
				    2 * path->hops[i].in_hop_index, 2);
		if (res) {
			__tb_path_deactivate_hops(path, i);
			__tb_path_deallocate_nfc(path, 0);
			goto err;
		}
	}
	path->activated = true;
	tb_dbg(path->tb, "path activation complete\n");
	return 0;
err:
	tb_WARN(path->tb, "path activation failed\n");
	return res;
}

/**
 * tb_path_is_invalid() - check whether any ports on the path are invalid
 * @path: Path to check
 *
 * Return: Returns true if the path is invalid, false otherwise.
 */
bool tb_path_is_invalid(struct tb_path *path)
{
	int i;

	for (i = 0; i < path->path_length; i++) {
		if (path->hops[i].in_port->sw->is_unplugged)
			return true;
		if (path->hops[i].out_port->sw->is_unplugged)
			return true;
	}
	return false;
}

/**
 * tb_path_port_on_path() - Does the path go through certain port
 * @path: Path to check
 * @port: Port to check
 *
 * Goes over all hops on the path and checks if @port is any of them.
 * Direction does not matter.
 *
 * Return: Returns true if @port is part of the path, false otherwise.
 */
bool tb_path_port_on_path(const struct tb_path *path, const struct tb_port *port)
{
	int i;

	for (i = 0; i < path->path_length; i++) {
		if (path->hops[i].in_port == port ||
		    path->hops[i].out_port == port)
			return true;
	}

	return false;
}