1 // SPDX-License-Identifier: GPL-2.0
5 * Copyright (C) 2020, Intel Corporation
6 * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
9 #include <kunit/test.h>
10 #include <linux/idr.h>
/*
 * KUnit resource init callback. @context carries the struct ida to be
 * managed by the test resource framework.
 * NOTE(review): body is truncated in this view — presumably it calls
 * ida_init() and stores the ida in res->data; confirm against full file.
 */
15 static int __ida_init(struct kunit_resource *res, void *context)
17 	struct ida *ida = context;
/*
 * KUnit resource destroy callback, run automatically when the test ends.
 * Retrieves the ida stashed by __ida_init() from res->data.
 * NOTE(review): body truncated in this view — presumably ida_destroy();
 * confirm against full file.
 */
24 static void __ida_destroy(struct kunit_resource *res)
26 	struct ida *ida = res->data;
/*
 * Register @ida as a test-managed resource: __ida_init() runs now and
 * __ida_destroy() is invoked automatically on test teardown, so the
 * individual tests never need to clean up hop ID allocations themselves.
 */
31 static void kunit_ida_init(struct kunit *test, struct ida *ida)
33 	kunit_alloc_resource(test, __ida_init, __ida_destroy, GFP_KERNEL, ida);
/*
 * Allocate a bare mock tb_switch with test-managed memory.
 *
 * @route: Thunderbolt route string of the switch (0 for the host router)
 * @upstream_port: adapter number of the upstream-facing port
 * @max_port_number: highest adapter number; ports[0..max] are allocated
 *
 * All allocations use kunit_kzalloc() so they are freed automatically at
 * test exit. Port hop-ID IDAs are registered via kunit_ida_init() for the
 * same reason. Callers fill in per-port config afterwards.
 * NOTE(review): NULL-check and return lines are elided in this view.
 */
36 static struct tb_switch *alloc_switch(struct kunit *test, u64 route,
37 				       u8 upstream_port, u8 max_port_number)
43 	sw = kunit_kzalloc(test, sizeof(*sw), GFP_KERNEL);
47 	sw->config.upstream_port_number = upstream_port;
48 	sw->config.depth = tb_route_length(route);
49 	sw->config.route_hi = upper_32_bits(route);
50 	sw->config.route_lo = lower_32_bits(route);
51 	sw->config.enabled = 0;
52 	sw->config.max_port_number = max_port_number;
	/* Ports are numbered 0..max_port_number inclusive, hence +1. */
54 	size = (sw->config.max_port_number + 1) * sizeof(*sw->ports);
55 	sw->ports = kunit_kzalloc(test, size, GFP_KERNEL);
59 	for (i = 0; i <= sw->config.max_port_number; i++) {
61 		sw->ports[i].port = i;
62 		sw->ports[i].config.port_number = i;
		/* Hop ID allocators cleaned up automatically on test exit */
64 		kunit_ida_init(test, &sw->ports[i].in_hopids);
65 		kunit_ida_init(test, &sw->ports[i].out_hopids);
/*
 * Build a mock host router (route 0, upstream port 7, 14 ports) modeled
 * on an Intel controller (vendor 0x8086, device 0x9a1b). Port layout:
 * lane adapters 1-4 (two dual-link pairs), DP IN adapters 5-6, NHI 7,
 * PCIe down 8-9, disabled 10-11, USB3 down 12-13.
 */
72 static struct tb_switch *alloc_host(struct kunit *test)
76 	sw = alloc_switch(test, 0, 7, 13);
80 	sw->config.vendor_id = 0x8086;
81 	sw->config.device_id = 0x9a1b;
83 	sw->ports[0].config.type = TB_TYPE_PORT;
84 	sw->ports[0].config.max_in_hop_id = 7;
85 	sw->ports[0].config.max_out_hop_id = 7;
	/* Lane adapters 1/2 form a dual-link pair (port 2 is lane 1) */
87 	sw->ports[1].config.type = TB_TYPE_PORT;
88 	sw->ports[1].config.max_in_hop_id = 19;
89 	sw->ports[1].config.max_out_hop_id = 19;
90 	sw->ports[1].total_credits = 60;
91 	sw->ports[1].ctl_credits = 2;
92 	sw->ports[1].dual_link_port = &sw->ports[2];
94 	sw->ports[2].config.type = TB_TYPE_PORT;
95 	sw->ports[2].config.max_in_hop_id = 19;
96 	sw->ports[2].config.max_out_hop_id = 19;
97 	sw->ports[2].total_credits = 60;
98 	sw->ports[2].ctl_credits = 2;
99 	sw->ports[2].dual_link_port = &sw->ports[1];
100 	sw->ports[2].link_nr = 1;
	/* Lane adapters 3/4 form the second dual-link pair */
102 	sw->ports[3].config.type = TB_TYPE_PORT;
103 	sw->ports[3].config.max_in_hop_id = 19;
104 	sw->ports[3].config.max_out_hop_id = 19;
105 	sw->ports[3].total_credits = 60;
106 	sw->ports[3].ctl_credits = 2;
107 	sw->ports[3].dual_link_port = &sw->ports[4];
109 	sw->ports[4].config.type = TB_TYPE_PORT;
110 	sw->ports[4].config.max_in_hop_id = 19;
111 	sw->ports[4].config.max_out_hop_id = 19;
112 	sw->ports[4].total_credits = 60;
113 	sw->ports[4].ctl_credits = 2;
114 	sw->ports[4].dual_link_port = &sw->ports[3];
115 	sw->ports[4].link_nr = 1;
	/* Two DP IN adapters; cap_adap = -1 marks no adapter capability */
117 	sw->ports[5].config.type = TB_TYPE_DP_HDMI_IN;
118 	sw->ports[5].config.max_in_hop_id = 9;
119 	sw->ports[5].config.max_out_hop_id = 9;
120 	sw->ports[5].cap_adap = -1;
122 	sw->ports[6].config.type = TB_TYPE_DP_HDMI_IN;
123 	sw->ports[6].config.max_in_hop_id = 9;
124 	sw->ports[6].config.max_out_hop_id = 9;
125 	sw->ports[6].cap_adap = -1;
	/* NHI (host interface) adapter */
127 	sw->ports[7].config.type = TB_TYPE_NHI;
128 	sw->ports[7].config.max_in_hop_id = 11;
129 	sw->ports[7].config.max_out_hop_id = 11;
130 	sw->ports[7].config.nfc_credits = 0x41800000;
132 	sw->ports[8].config.type = TB_TYPE_PCIE_DOWN;
133 	sw->ports[8].config.max_in_hop_id = 8;
134 	sw->ports[8].config.max_out_hop_id = 8;
136 	sw->ports[9].config.type = TB_TYPE_PCIE_DOWN;
137 	sw->ports[9].config.max_in_hop_id = 8;
138 	sw->ports[9].config.max_out_hop_id = 8;
140 	sw->ports[10].disabled = true;
141 	sw->ports[11].disabled = true;
143 	sw->ports[12].config.type = TB_TYPE_USB3_DOWN;
144 	sw->ports[12].config.max_in_hop_id = 8;
145 	sw->ports[12].config.max_out_hop_id = 8;
147 	sw->ports[13].config.type = TB_TYPE_USB3_DOWN;
148 	sw->ports[13].config.max_in_hop_id = 8;
149 	sw->ports[13].config.max_out_hop_id = 8;
/*
 * Like alloc_host() but the router advertises USB4 buffer (credit)
 * allocation support with the per-protocol limits below, so the credit
 * allocation tests exercise the USB4 code paths.
 */
154 static struct tb_switch *alloc_host_usb4(struct kunit *test)
156 	struct tb_switch *sw;
158 	sw = alloc_host(test);
163 	sw->credit_allocation = true;
164 	sw->max_usb3_credits = 32;
165 	sw->min_dp_aux_credits = 1;
166 	sw->min_dp_main_credits = 0;
167 	sw->max_pcie_credits = 64;
168 	sw->max_dma_credits = 14;
/*
 * Build a mock Thunderbolt device router (vendor 0x8086, device 0x15ef)
 * at @route below @parent, with 20 ports: lane adapters 1-8 (four
 * dual-link pairs), PCIe up 9 / down 10-12, DP OUT 13-14, disabled 15,
 * USB3 up 16 / down 17-19. If @parent is non-NULL the upstream lanes are
 * wired to the parent port addressed by @route; @bonded additionally
 * marks both link ends bonded and doubles the primary lanes' credits
 * (zeroing the secondary lanes').
 */
173 static struct tb_switch *alloc_dev_default(struct kunit *test,
174 					   struct tb_switch *parent,
175 					   u64 route, bool bonded)
177 	struct tb_port *port, *upstream_port;
178 	struct tb_switch *sw;
180 	sw = alloc_switch(test, route, 1, 19);
184 	sw->config.vendor_id = 0x8086;
185 	sw->config.device_id = 0x15ef;
187 	sw->ports[0].config.type = TB_TYPE_PORT;
188 	sw->ports[0].config.max_in_hop_id = 8;
189 	sw->ports[0].config.max_out_hop_id = 8;
	/* Upstream dual-link pair: ports 1 (lane 0) and 2 (lane 1) */
191 	sw->ports[1].config.type = TB_TYPE_PORT;
192 	sw->ports[1].config.max_in_hop_id = 19;
193 	sw->ports[1].config.max_out_hop_id = 19;
194 	sw->ports[1].total_credits = 60;
195 	sw->ports[1].ctl_credits = 2;
196 	sw->ports[1].dual_link_port = &sw->ports[2];
198 	sw->ports[2].config.type = TB_TYPE_PORT;
199 	sw->ports[2].config.max_in_hop_id = 19;
200 	sw->ports[2].config.max_out_hop_id = 19;
201 	sw->ports[2].total_credits = 60;
202 	sw->ports[2].ctl_credits = 2;
203 	sw->ports[2].dual_link_port = &sw->ports[1];
204 	sw->ports[2].link_nr = 1;
	/* Downstream dual-link pairs: 3/4, 5/6 and 7/8 */
206 	sw->ports[3].config.type = TB_TYPE_PORT;
207 	sw->ports[3].config.max_in_hop_id = 19;
208 	sw->ports[3].config.max_out_hop_id = 19;
209 	sw->ports[3].total_credits = 60;
210 	sw->ports[3].ctl_credits = 2;
211 	sw->ports[3].dual_link_port = &sw->ports[4];
213 	sw->ports[4].config.type = TB_TYPE_PORT;
214 	sw->ports[4].config.max_in_hop_id = 19;
215 	sw->ports[4].config.max_out_hop_id = 19;
216 	sw->ports[4].total_credits = 60;
217 	sw->ports[4].ctl_credits = 2;
218 	sw->ports[4].dual_link_port = &sw->ports[3];
219 	sw->ports[4].link_nr = 1;
221 	sw->ports[5].config.type = TB_TYPE_PORT;
222 	sw->ports[5].config.max_in_hop_id = 19;
223 	sw->ports[5].config.max_out_hop_id = 19;
224 	sw->ports[5].total_credits = 60;
225 	sw->ports[5].ctl_credits = 2;
226 	sw->ports[5].dual_link_port = &sw->ports[6];
228 	sw->ports[6].config.type = TB_TYPE_PORT;
229 	sw->ports[6].config.max_in_hop_id = 19;
230 	sw->ports[6].config.max_out_hop_id = 19;
231 	sw->ports[6].total_credits = 60;
232 	sw->ports[6].ctl_credits = 2;
233 	sw->ports[6].dual_link_port = &sw->ports[5];
234 	sw->ports[6].link_nr = 1;
236 	sw->ports[7].config.type = TB_TYPE_PORT;
237 	sw->ports[7].config.max_in_hop_id = 19;
238 	sw->ports[7].config.max_out_hop_id = 19;
239 	sw->ports[7].total_credits = 60;
240 	sw->ports[7].ctl_credits = 2;
241 	sw->ports[7].dual_link_port = &sw->ports[8];
243 	sw->ports[8].config.type = TB_TYPE_PORT;
244 	sw->ports[8].config.max_in_hop_id = 19;
245 	sw->ports[8].config.max_out_hop_id = 19;
246 	sw->ports[8].total_credits = 60;
247 	sw->ports[8].ctl_credits = 2;
248 	sw->ports[8].dual_link_port = &sw->ports[7];
249 	sw->ports[8].link_nr = 1;
251 	sw->ports[9].config.type = TB_TYPE_PCIE_UP;
252 	sw->ports[9].config.max_in_hop_id = 8;
253 	sw->ports[9].config.max_out_hop_id = 8;
255 	sw->ports[10].config.type = TB_TYPE_PCIE_DOWN;
256 	sw->ports[10].config.max_in_hop_id = 8;
257 	sw->ports[10].config.max_out_hop_id = 8;
259 	sw->ports[11].config.type = TB_TYPE_PCIE_DOWN;
260 	sw->ports[11].config.max_in_hop_id = 8;
261 	sw->ports[11].config.max_out_hop_id = 8;
263 	sw->ports[12].config.type = TB_TYPE_PCIE_DOWN;
264 	sw->ports[12].config.max_in_hop_id = 8;
265 	sw->ports[12].config.max_out_hop_id = 8;
267 	sw->ports[13].config.type = TB_TYPE_DP_HDMI_OUT;
268 	sw->ports[13].config.max_in_hop_id = 9;
269 	sw->ports[13].config.max_out_hop_id = 9;
270 	sw->ports[13].cap_adap = -1;
272 	sw->ports[14].config.type = TB_TYPE_DP_HDMI_OUT;
273 	sw->ports[14].config.max_in_hop_id = 9;
274 	sw->ports[14].config.max_out_hop_id = 9;
275 	sw->ports[14].cap_adap = -1;
277 	sw->ports[15].disabled = true;
279 	sw->ports[16].config.type = TB_TYPE_USB3_UP;
280 	sw->ports[16].config.max_in_hop_id = 8;
281 	sw->ports[16].config.max_out_hop_id = 8;
283 	sw->ports[17].config.type = TB_TYPE_USB3_DOWN;
284 	sw->ports[17].config.max_in_hop_id = 8;
285 	sw->ports[17].config.max_out_hop_id = 8;
287 	sw->ports[18].config.type = TB_TYPE_USB3_DOWN;
288 	sw->ports[18].config.max_in_hop_id = 8;
289 	sw->ports[18].config.max_out_hop_id = 8;
291 	sw->ports[19].config.type = TB_TYPE_USB3_DOWN;
292 	sw->ports[19].config.max_in_hop_id = 8;
293 	sw->ports[19].config.max_out_hop_id = 8;
	/* Wire both lane directions to the parent port addressed by @route */
299 	upstream_port = tb_upstream_port(sw);
300 	port = tb_port_at(route, parent);
301 	port->remote = upstream_port;
302 	upstream_port->remote = port;
303 	if (port->dual_link_port && upstream_port->dual_link_port) {
304 		port->dual_link_port->remote = upstream_port->dual_link_port;
305 		upstream_port->dual_link_port->remote = port->dual_link_port;
	/* Bonding is used */
	/* Lane 0 carries all credits; lane 1 ends are marked bonded/empty */
310 			port->total_credits *= 2;
311 			port->dual_link_port->bonded = true;
312 			port->dual_link_port->total_credits = 0;
313 			upstream_port->bonded = true;
314 			upstream_port->total_credits *= 2;
315 			upstream_port->dual_link_port->bonded = true;
316 			upstream_port->dual_link_port->total_credits = 0;
/*
 * Variant of alloc_dev_default() whose DP adapters 13 and 14 are DP IN
 * instead of DP OUT, for testing paths that originate at a device router.
 */
323 static struct tb_switch *alloc_dev_with_dpin(struct kunit *test,
324 					     struct tb_switch *parent,
325 					     u64 route, bool bonded)
327 	struct tb_switch *sw;
329 	sw = alloc_dev_default(test, parent, route, bonded);
	/* Override the default DP OUT adapters to be DP IN */
333 	sw->ports[13].config.type = TB_TYPE_DP_HDMI_IN;
334 	sw->ports[13].config.max_in_hop_id = 9;
335 	sw->ports[13].config.max_out_hop_id = 9;
337 	sw->ports[14].config.type = TB_TYPE_DP_HDMI_IN;
338 	sw->ports[14].config.max_in_hop_id = 9;
339 	sw->ports[14].config.max_out_hop_id = 9;
/*
 * Variant of alloc_dev_default() with no usable DP adapters: extra lane
 * adapters 5-8, PCIe down 11-12 and DP 13-14, plus USB3 down 18-19 are
 * all disabled, leaving only the adapter set listed in the comment
 * below. Credit allocation is enabled with USB3-heavy limits.
 */
344 static struct tb_switch *alloc_dev_without_dp(struct kunit *test,
345 					      struct tb_switch *parent,
346 					      u64 route, bool bonded)
348 	struct tb_switch *sw;
351 	sw = alloc_dev_default(test, parent, route, bonded);
	 * 2x USB4 Adapters (adapters 1,2 and 3,4),
	 * 1x PCIe Upstream (adapter 9),
	 * 1x PCIe Downstream (adapter 10),
	 * 1x USB3 Upstream (adapter 16),
	 * 1x USB3 Downstream (adapter 17)
362 	for (i = 5; i <= 8; i++)
363 		sw->ports[i].disabled = true;
365 	for (i = 11; i <= 14; i++)
366 		sw->ports[i].disabled = true;
	/* Clear the DP adapter capability markers as well */
368 	sw->ports[13].cap_adap = 0;
369 	sw->ports[14].cap_adap = 0;
371 	for (i = 18; i <= 19; i++)
372 		sw->ports[i].disabled = true;
375 	sw->credit_allocation = true;
376 	sw->max_usb3_credits = 109;
377 	sw->min_dp_aux_credits = 0;
378 	sw->min_dp_main_credits = 0;
379 	sw->max_pcie_credits = 30;
380 	sw->max_dma_credits = 1;
/*
 * Like alloc_dev_default() but the device advertises USB4 buffer
 * (credit) allocation support with typical device-router limits.
 */
385 static struct tb_switch *alloc_dev_usb4(struct kunit *test,
386 					struct tb_switch *parent,
387 					u64 route, bool bonded)
389 	struct tb_switch *sw;
391 	sw = alloc_dev_default(test, parent, route, bonded);
396 	sw->credit_allocation = true;
397 	sw->max_usb3_credits = 14;
398 	sw->min_dp_aux_credits = 1;
399 	sw->min_dp_main_credits = 18;
400 	sw->max_pcie_credits = 32;
401 	sw->max_dma_credits = 14;
/*
 * Degenerate walk: source and destination are the same port on the host,
 * so tb_next_port_on_path() must yield exactly that one port and then
 * terminate (return NULL).
 */
406 static void tb_test_path_basic(struct kunit *test)
408 	struct tb_port *src_port, *dst_port, *p;
409 	struct tb_switch *host;
411 	host = alloc_host(test);
413 	src_port = &host->ports[5];
416 	p = tb_next_port_on_path(src_port, dst_port, NULL);
417 	KUNIT_EXPECT_PTR_EQ(test, p, dst_port);
419 	p = tb_next_port_on_path(src_port, dst_port, p);
	/* Walk is exhausted after the single port */
420 	KUNIT_EXPECT_TRUE(test, !p);
/*
 * Walk between ports on two routers that are NOT cabled together: the
 * walk must stop at the last reachable lane adapter on each side instead
 * of crossing the (non-existent) link.
 */
423 static void tb_test_path_not_connected_walk(struct kunit *test)
425 	struct tb_port *src_port, *dst_port, *p;
426 	struct tb_switch *host, *dev;
428 	host = alloc_host(test);
	/* No connection between host and dev */
430 	dev = alloc_dev_default(test, NULL, 3, true);
432 	src_port = &host->ports[12];
433 	dst_port = &dev->ports[16];
435 	p = tb_next_port_on_path(src_port, dst_port, NULL);
436 	KUNIT_EXPECT_PTR_EQ(test, p, src_port);
438 	p = tb_next_port_on_path(src_port, dst_port, p);
	/* Walk ends at the host's downstream lane adapter */
439 	KUNIT_EXPECT_PTR_EQ(test, p, &host->ports[3]);
441 	p = tb_next_port_on_path(src_port, dst_port, p);
442 	KUNIT_EXPECT_TRUE(test, !p);
	/* Other direction */
446 	p = tb_next_port_on_path(dst_port, src_port, NULL);
447 	KUNIT_EXPECT_PTR_EQ(test, p, dst_port);
449 	p = tb_next_port_on_path(dst_port, src_port, p);
	/* Ends at the device's upstream lane adapter */
450 	KUNIT_EXPECT_PTR_EQ(test, p, &dev->ports[1]);
452 	p = tb_next_port_on_path(dst_port, src_port, p);
453 	KUNIT_EXPECT_TRUE(test, !p);
/*
 * Expected attributes of one port visited during a path walk.
 * NOTE(review): the route and port fields are elided in this view;
 * .type is the expected adapter type of the visited port.
 */
456 struct port_expectation {
459 	enum tb_port_type type;
/*
 * Single-hop walk: host PCIe down adapter to the directly attached
 * device's PCIe up adapter. Verifies the visited port sequence matches
 * test_data[] in both the forward and the reverse direction (the reverse
 * walk indexes test_data[] from the end down to -1).
 */
462 static void tb_test_path_single_hop_walk(struct kunit *test)
	 * Walks from Host PCIe downstream port to Device #1 PCIe
473 	static const struct port_expectation test_data[] = {
474 		{ .route = 0x0, .port = 8, .type = TB_TYPE_PCIE_DOWN },
475 		{ .route = 0x0, .port = 1, .type = TB_TYPE_PORT },
476 		{ .route = 0x1, .port = 1, .type = TB_TYPE_PORT },
477 		{ .route = 0x1, .port = 9, .type = TB_TYPE_PCIE_UP },
479 	struct tb_port *src_port, *dst_port, *p;
480 	struct tb_switch *host, *dev;
483 	host = alloc_host(test);
484 	dev = alloc_dev_default(test, host, 1, true);
486 	src_port = &host->ports[8];
487 	dst_port = &dev->ports[9];
	/* Walk both directions */
492 	tb_for_each_port_on_path(src_port, dst_port, p) {
493 		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
494 		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
495 		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
496 		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
	/* Forward walk must have consumed every expectation */
501 	KUNIT_EXPECT_EQ(test, i, ARRAY_SIZE(test_data));
503 	i = ARRAY_SIZE(test_data) - 1;
504 	tb_for_each_port_on_path(dst_port, src_port, p) {
505 		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
506 		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
507 		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
508 		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
	/* Reverse walk counts i down past the first entry */
513 	KUNIT_EXPECT_EQ(test, i, -1);
/*
 * Daisy-chain walk: host DP IN through device #1 (route 0x1) to device
 * #2 (route 0x301) DP OUT. Verifies the visited port sequence matches
 * test_data[] in both directions.
 */
516 static void tb_test_path_daisy_chain_walk(struct kunit *test)
	 * Walks from Host DP IN to Device #2 DP OUT.
529 	static const struct port_expectation test_data[] = {
530 		{ .route = 0x0, .port = 5, .type = TB_TYPE_DP_HDMI_IN },
531 		{ .route = 0x0, .port = 1, .type = TB_TYPE_PORT },
532 		{ .route = 0x1, .port = 1, .type = TB_TYPE_PORT },
533 		{ .route = 0x1, .port = 3, .type = TB_TYPE_PORT },
534 		{ .route = 0x301, .port = 1, .type = TB_TYPE_PORT },
535 		{ .route = 0x301, .port = 13, .type = TB_TYPE_DP_HDMI_OUT },
537 	struct tb_port *src_port, *dst_port, *p;
538 	struct tb_switch *host, *dev1, *dev2;
541 	host = alloc_host(test);
542 	dev1 = alloc_dev_default(test, host, 0x1, true);
543 	dev2 = alloc_dev_default(test, dev1, 0x301, true);
545 	src_port = &host->ports[5];
546 	dst_port = &dev2->ports[13];
	/* Walk both directions */
551 	tb_for_each_port_on_path(src_port, dst_port, p) {
552 		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
553 		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
554 		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
555 		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
560 	KUNIT_EXPECT_EQ(test, i, ARRAY_SIZE(test_data));
562 	i = ARRAY_SIZE(test_data) - 1;
563 	tb_for_each_port_on_path(dst_port, src_port, p) {
564 		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
565 		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
566 		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
567 		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
	/* Reverse walk counts i down past the first entry */
572 	KUNIT_EXPECT_EQ(test, i, -1);
/*
 * Tree-topology walk: host DP IN to device #3 (route 0x501) DP OUT,
 * where device #1 has three children (0x301, 0x501, 0x701). The walk
 * must pick only the branch towards 0x501; sequence checked against
 * test_data[] in both directions.
 */
575 static void tb_test_path_simple_tree_walk(struct kunit *test)
	 * Walks from Host DP IN to Device #3 DP OUT.
	 * [Device #2] | [Device #4]
590 	static const struct port_expectation test_data[] = {
591 		{ .route = 0x0, .port = 5, .type = TB_TYPE_DP_HDMI_IN },
592 		{ .route = 0x0, .port = 1, .type = TB_TYPE_PORT },
593 		{ .route = 0x1, .port = 1, .type = TB_TYPE_PORT },
594 		{ .route = 0x1, .port = 5, .type = TB_TYPE_PORT },
595 		{ .route = 0x501, .port = 1, .type = TB_TYPE_PORT },
596 		{ .route = 0x501, .port = 13, .type = TB_TYPE_DP_HDMI_OUT },
598 	struct tb_port *src_port, *dst_port, *p;
599 	struct tb_switch *host, *dev1, *dev3;
602 	host = alloc_host(test);
603 	dev1 = alloc_dev_default(test, host, 0x1, true);
	/* Sibling devices off the path; return values intentionally unused */
604 	alloc_dev_default(test, dev1, 0x301, true);
605 	dev3 = alloc_dev_default(test, dev1, 0x501, true);
606 	alloc_dev_default(test, dev1, 0x701, true);
608 	src_port = &host->ports[5];
609 	dst_port = &dev3->ports[13];
	/* Walk both directions */
614 	tb_for_each_port_on_path(src_port, dst_port, p) {
615 		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
616 		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
617 		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
618 		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
623 	KUNIT_EXPECT_EQ(test, i, ARRAY_SIZE(test_data));
625 	i = ARRAY_SIZE(test_data) - 1;
626 	tb_for_each_port_on_path(dst_port, src_port, p) {
627 		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
628 		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
629 		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
630 		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
635 	KUNIT_EXPECT_EQ(test, i, -1);
/*
 * Complex tree walk: from a DP IN on a leaf device (#3, route 0x50301,
 * built with alloc_dev_with_dpin()) up through the common ancestor
 * (device #1) and back down a different branch to device #9's DP OUT
 * (route 0x503070701). Sequence checked against test_data[] both ways.
 */
638 static void tb_test_path_complex_tree_walk(struct kunit *test)
	 * Walks from Device #3 DP IN to Device #9 DP OUT.
	 * [Device #2] | [Device #5]
	 * 1 | [Device #4] \ 1
	 * [Device #3] [Device #6]
661 	static const struct port_expectation test_data[] = {
662 		{ .route = 0x50301, .port = 13, .type = TB_TYPE_DP_HDMI_IN },
663 		{ .route = 0x50301, .port = 1, .type = TB_TYPE_PORT },
664 		{ .route = 0x301, .port = 5, .type = TB_TYPE_PORT },
665 		{ .route = 0x301, .port = 1, .type = TB_TYPE_PORT },
666 		{ .route = 0x1, .port = 3, .type = TB_TYPE_PORT },
667 		{ .route = 0x1, .port = 7, .type = TB_TYPE_PORT },
668 		{ .route = 0x701, .port = 1, .type = TB_TYPE_PORT },
669 		{ .route = 0x701, .port = 7, .type = TB_TYPE_PORT },
670 		{ .route = 0x70701, .port = 1, .type = TB_TYPE_PORT },
671 		{ .route = 0x70701, .port = 3, .type = TB_TYPE_PORT },
672 		{ .route = 0x3070701, .port = 1, .type = TB_TYPE_PORT },
673 		{ .route = 0x3070701, .port = 5, .type = TB_TYPE_PORT },
674 		{ .route = 0x503070701, .port = 1, .type = TB_TYPE_PORT },
675 		{ .route = 0x503070701, .port = 14, .type = TB_TYPE_DP_HDMI_OUT },
677 	struct tb_switch *host, *dev1, *dev2, *dev3, *dev5, *dev6, *dev7, *dev9;
678 	struct tb_port *src_port, *dst_port, *p;
681 	host = alloc_host(test);
682 	dev1 = alloc_dev_default(test, host, 0x1, true);
683 	dev2 = alloc_dev_default(test, dev1, 0x301, true);
	/* Leaf with DP IN adapters so the path can start at a device */
684 	dev3 = alloc_dev_with_dpin(test, dev2, 0x50301, true);
685 	alloc_dev_default(test, dev1, 0x501, true);
686 	dev5 = alloc_dev_default(test, dev1, 0x701, true);
687 	dev6 = alloc_dev_default(test, dev5, 0x70701, true);
688 	dev7 = alloc_dev_default(test, dev6, 0x3070701, true);
689 	alloc_dev_default(test, dev7, 0x303070701, true);
690 	dev9 = alloc_dev_default(test, dev7, 0x503070701, true);
692 	src_port = &dev3->ports[13];
693 	dst_port = &dev9->ports[14];
	/* Walk both directions */
698 	tb_for_each_port_on_path(src_port, dst_port, p) {
699 		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
700 		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
701 		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
702 		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
707 	KUNIT_EXPECT_EQ(test, i, ARRAY_SIZE(test_data));
709 	i = ARRAY_SIZE(test_data) - 1;
710 	tb_for_each_port_on_path(dst_port, src_port, p) {
711 		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
712 		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
713 		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
714 		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
719 	KUNIT_EXPECT_EQ(test, i, -1);
/*
 * Maximum-depth walk: two six-deep daisy chains hang off the host; the
 * path goes from device #6's DP IN (bottom of chain one, route
 * 0x30303030301) all the way up through the host and down to device
 * #12's DP OUT (bottom of chain two, route 0x30303030303). Sequence
 * checked against test_data[] in both directions.
 */
722 static void tb_test_path_max_length_walk(struct kunit *test)
724 	struct tb_switch *host, *dev1, *dev2, *dev3, *dev4, *dev5, *dev6;
725 	struct tb_switch *dev7, *dev8, *dev9, *dev10, *dev11, *dev12;
726 	struct tb_port *src_port, *dst_port, *p;
	 * Walks from Device #6 DP IN to Device #12 DP OUT.
	 * [Device #1] [Device #7]
	 * [Device #2] [Device #8]
	 * [Device #3] [Device #9]
	 * [Device #4] [Device #10]
	 * [Device #5] [Device #11]
	 * [Device #6] [Device #12]
752 	static const struct port_expectation test_data[] = {
753 		{ .route = 0x30303030301, .port = 13, .type = TB_TYPE_DP_HDMI_IN },
754 		{ .route = 0x30303030301, .port = 1, .type = TB_TYPE_PORT },
755 		{ .route = 0x303030301, .port = 3, .type = TB_TYPE_PORT },
756 		{ .route = 0x303030301, .port = 1, .type = TB_TYPE_PORT },
757 		{ .route = 0x3030301, .port = 3, .type = TB_TYPE_PORT },
758 		{ .route = 0x3030301, .port = 1, .type = TB_TYPE_PORT },
759 		{ .route = 0x30301, .port = 3, .type = TB_TYPE_PORT },
760 		{ .route = 0x30301, .port = 1, .type = TB_TYPE_PORT },
761 		{ .route = 0x301, .port = 3, .type = TB_TYPE_PORT },
762 		{ .route = 0x301, .port = 1, .type = TB_TYPE_PORT },
763 		{ .route = 0x1, .port = 3, .type = TB_TYPE_PORT },
764 		{ .route = 0x1, .port = 1, .type = TB_TYPE_PORT },
765 		{ .route = 0x0, .port = 1, .type = TB_TYPE_PORT },
766 		{ .route = 0x0, .port = 3, .type = TB_TYPE_PORT },
767 		{ .route = 0x3, .port = 1, .type = TB_TYPE_PORT },
768 		{ .route = 0x3, .port = 3, .type = TB_TYPE_PORT },
769 		{ .route = 0x303, .port = 1, .type = TB_TYPE_PORT },
770 		{ .route = 0x303, .port = 3, .type = TB_TYPE_PORT },
771 		{ .route = 0x30303, .port = 1, .type = TB_TYPE_PORT },
772 		{ .route = 0x30303, .port = 3, .type = TB_TYPE_PORT },
773 		{ .route = 0x3030303, .port = 1, .type = TB_TYPE_PORT },
774 		{ .route = 0x3030303, .port = 3, .type = TB_TYPE_PORT },
775 		{ .route = 0x303030303, .port = 1, .type = TB_TYPE_PORT },
776 		{ .route = 0x303030303, .port = 3, .type = TB_TYPE_PORT },
777 		{ .route = 0x30303030303, .port = 1, .type = TB_TYPE_PORT },
778 		{ .route = 0x30303030303, .port = 13, .type = TB_TYPE_DP_HDMI_OUT },
781 	host = alloc_host(test);
	/* First chain: host port 1 downwards, DP IN at the bottom */
782 	dev1 = alloc_dev_default(test, host, 0x1, true);
783 	dev2 = alloc_dev_default(test, dev1, 0x301, true);
784 	dev3 = alloc_dev_default(test, dev2, 0x30301, true);
785 	dev4 = alloc_dev_default(test, dev3, 0x3030301, true);
786 	dev5 = alloc_dev_default(test, dev4, 0x303030301, true);
787 	dev6 = alloc_dev_with_dpin(test, dev5, 0x30303030301, true);
	/* Second chain: host port 3 downwards, DP OUT at the bottom */
788 	dev7 = alloc_dev_default(test, host, 0x3, true);
789 	dev8 = alloc_dev_default(test, dev7, 0x303, true);
790 	dev9 = alloc_dev_default(test, dev8, 0x30303, true);
791 	dev10 = alloc_dev_default(test, dev9, 0x3030303, true);
792 	dev11 = alloc_dev_default(test, dev10, 0x303030303, true);
793 	dev12 = alloc_dev_default(test, dev11, 0x30303030303, true);
795 	src_port = &dev6->ports[13];
796 	dst_port = &dev12->ports[13];
	/* Walk both directions */
801 	tb_for_each_port_on_path(src_port, dst_port, p) {
802 		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
803 		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
804 		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
805 		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
810 	KUNIT_EXPECT_EQ(test, i, ARRAY_SIZE(test_data));
812 	i = ARRAY_SIZE(test_data) - 1;
813 	tb_for_each_port_on_path(dst_port, src_port, p) {
814 		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
815 		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
816 		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
817 		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
822 	KUNIT_EXPECT_EQ(test, i, -1);
/*
 * tb_path_alloc() must fail (return NULL) when the destination router is
 * not reachable from the source — dev2 is allocated with no parent.
 * Checked for paths on both lane 0 and lane 1.
 */
825 static void tb_test_path_not_connected(struct kunit *test)
827 	struct tb_switch *host, *dev1, *dev2;
828 	struct tb_port *down, *up;
829 	struct tb_path *path;
831 	host = alloc_host(test);
832 	dev1 = alloc_dev_default(test, host, 0x3, false);
	/* Not connected to anything */
834 	dev2 = alloc_dev_default(test, NULL, 0x303, false);
836 	down = &dev1->ports[10];
837 	up = &dev2->ports[9];
	/* Last two ints before the name are hop IDs and link_nr (0/1) */
839 	path = tb_path_alloc(NULL, down, 8, up, 8, 0, "PCIe Down");
840 	KUNIT_ASSERT_NULL(test, path);
841 	path = tb_path_alloc(NULL, down, 8, up, 8, 1, "PCIe Down");
842 	KUNIT_ASSERT_NULL(test, path);
/*
 * Expected attributes of one hop of an allocated tb_path: adapter types
 * of the in and out ports.
 * NOTE(review): route and in/out port-number fields are elided in this
 * view of the struct.
 */
845 struct hop_expectation {
848 	enum tb_port_type in_type;
850 	enum tb_port_type out_type;
/*
 * Allocate a PCIe path over an unbonded link using lane 0 (dev created
 * with bonded=false) and verify each hop's route, port numbers and
 * adapter types against test_data[].
 */
853 static void tb_test_path_not_bonded_lane0(struct kunit *test)
	 * PCIe path from host to device using lane 0.
863 	static const struct hop_expectation test_data[] = {
867 			.in_type = TB_TYPE_PCIE_DOWN,
869 			.out_type = TB_TYPE_PORT,
874 			.in_type = TB_TYPE_PORT,
876 			.out_type = TB_TYPE_PCIE_UP,
879 	struct tb_switch *host, *dev;
880 	struct tb_port *down, *up;
881 	struct tb_path *path;
884 	host = alloc_host(test);
	/* bonded=false so the link stays on individual lanes */
885 	dev = alloc_dev_default(test, host, 0x3, false);
887 	down = &host->ports[9];
890 	path = tb_path_alloc(NULL, down, 8, up, 8, 0, "PCIe Down");
891 	KUNIT_ASSERT_NOT_NULL(test, path);
892 	KUNIT_ASSERT_EQ(test, path->path_length, ARRAY_SIZE(test_data));
893 	for (i = 0; i < ARRAY_SIZE(test_data); i++) {
894 		const struct tb_port *in_port, *out_port;
896 		in_port = path->hops[i].in_port;
897 		out_port = path->hops[i].out_port;
	/* Each hop stays within one router, so in/out share a route */
899 		KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
900 		KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
901 		KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
902 				test_data[i].in_type);
903 		KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
904 		KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
905 		KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
906 				test_data[i].out_type);
/*
 * Allocate a DP video path over an unbonded link using lane 1 (legacy
 * Thunderbolt 1 style) and verify every hop against test_data[].
 */
911 static void tb_test_path_not_bonded_lane1(struct kunit *test)
	 * DP Video path from host to device using lane 1. Paths like
	 * these are only used with Thunderbolt 1 devices where lane
	 * bonding is not possible. USB4 specifically does not allow
	 * paths like this (you either use lane 0 where lane 1 is
	 * disabled or both lanes are bonded).
925 	static const struct hop_expectation test_data[] = {
929 			.in_type = TB_TYPE_DP_HDMI_IN,
931 			.out_type = TB_TYPE_PORT,
936 			.in_type = TB_TYPE_PORT,
938 			.out_type = TB_TYPE_DP_HDMI_OUT,
941 	struct tb_switch *host, *dev;
942 	struct tb_port *in, *out;
943 	struct tb_path *path;
946 	host = alloc_host(test);
947 	dev = alloc_dev_default(test, host, 0x1, false);
949 	in = &host->ports[5];
950 	out = &dev->ports[13];
	/* link_nr 1 selects lane 1 */
952 	path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video");
953 	KUNIT_ASSERT_NOT_NULL(test, path);
954 	KUNIT_ASSERT_EQ(test, path->path_length, ARRAY_SIZE(test_data));
955 	for (i = 0; i < ARRAY_SIZE(test_data); i++) {
956 		const struct tb_port *in_port, *out_port;
958 		in_port = path->hops[i].in_port;
959 		out_port = path->hops[i].out_port;
961 		KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
962 		KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
963 		KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
964 				test_data[i].in_type);
965 		KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
966 		KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
967 		KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
968 				test_data[i].out_type);
/*
 * Lane-1 DP video path across a three-device unbonded chain (host ->
 * 0x1 -> 0x701 -> 0x50701); each of the four hops is verified against
 * test_data[].
 */
973 static void tb_test_path_not_bonded_lane1_chain(struct kunit *test)
	 * DP Video path from host to device 3 using lane 1.
989 	static const struct hop_expectation test_data[] = {
993 			.in_type = TB_TYPE_DP_HDMI_IN,
995 			.out_type = TB_TYPE_PORT,
1000 			.in_type = TB_TYPE_PORT,
1002 			.out_type = TB_TYPE_PORT,
1007 			.in_type = TB_TYPE_PORT,
1009 			.out_type = TB_TYPE_PORT,
1014 			.in_type = TB_TYPE_PORT,
1016 			.out_type = TB_TYPE_DP_HDMI_OUT,
1019 	struct tb_switch *host, *dev1, *dev2, *dev3;
1020 	struct tb_port *in, *out;
1021 	struct tb_path *path;
1024 	host = alloc_host(test);
	/* All links unbonded (bonded=false) so lane 1 is usable */
1025 	dev1 = alloc_dev_default(test, host, 0x1, false);
1026 	dev2 = alloc_dev_default(test, dev1, 0x701, false);
1027 	dev3 = alloc_dev_default(test, dev2, 0x50701, false);
1029 	in = &host->ports[5];
1030 	out = &dev3->ports[13];
1032 	path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video");
1033 	KUNIT_ASSERT_NOT_NULL(test, path);
1034 	KUNIT_ASSERT_EQ(test, path->path_length, ARRAY_SIZE(test_data));
1035 	for (i = 0; i < ARRAY_SIZE(test_data); i++) {
1036 		const struct tb_port *in_port, *out_port;
1038 		in_port = path->hops[i].in_port;
1039 		out_port = path->hops[i].out_port;
1041 		KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
1042 		KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
1043 		KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
1044 				test_data[i].in_type);
1045 		KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
1046 		KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
1047 		KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
1048 				test_data[i].out_type);
/*
 * Same unbonded three-device chain as the previous test but the path
 * runs device-to-host: dev3 is built with alloc_dev_with_dpin() so its
 * port 13 is a DP IN, and the destination is the host's DP IN adapter.
 */
1053 static void tb_test_path_not_bonded_lane1_chain_reverse(struct kunit *test)
	 * DP Video path from device 3 to host using lane 1.
1069 	static const struct hop_expectation test_data[] = {
1073 			.in_type = TB_TYPE_DP_HDMI_IN,
1075 			.out_type = TB_TYPE_PORT,
1080 			.in_type = TB_TYPE_PORT,
1082 			.out_type = TB_TYPE_PORT,
1087 			.in_type = TB_TYPE_PORT,
1089 			.out_type = TB_TYPE_PORT,
1094 			.in_type = TB_TYPE_PORT,
1096 			.out_type = TB_TYPE_DP_HDMI_IN,
1099 	struct tb_switch *host, *dev1, *dev2, *dev3;
1100 	struct tb_port *in, *out;
1101 	struct tb_path *path;
1104 	host = alloc_host(test);
1105 	dev1 = alloc_dev_default(test, host, 0x1, false);
1106 	dev2 = alloc_dev_default(test, dev1, 0x701, false);
	/* dev3 has DP IN adapters so the path can originate there */
1107 	dev3 = alloc_dev_with_dpin(test, dev2, 0x50701, false);
1109 	in = &dev3->ports[13];
1110 	out = &host->ports[5];
1112 	path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video");
1113 	KUNIT_ASSERT_NOT_NULL(test, path);
1114 	KUNIT_ASSERT_EQ(test, path->path_length, ARRAY_SIZE(test_data));
1115 	for (i = 0; i < ARRAY_SIZE(test_data); i++) {
1116 		const struct tb_port *in_port, *out_port;
1118 		in_port = path->hops[i].in_port;
1119 		out_port = path->hops[i].out_port;
1121 		KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
1122 		KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
1123 		KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
1124 				test_data[i].in_type);
1125 		KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
1126 		KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
1127 		KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
1128 				test_data[i].out_type);
/*
 * DP video path host -> dev4 over a chain that mixes bonded (first and
 * last) and unbonded (middle) links, allocated on lane 1; every hop is
 * verified against test_data[].
 */
1133 static void tb_test_path_mixed_chain(struct kunit *test)
	 * DP Video path from host to device 4 where first and last link
1153 	static const struct hop_expectation test_data[] = {
1157 			.in_type = TB_TYPE_DP_HDMI_IN,
1159 			.out_type = TB_TYPE_PORT,
1164 			.in_type = TB_TYPE_PORT,
1166 			.out_type = TB_TYPE_PORT,
1171 			.in_type = TB_TYPE_PORT,
1173 			.out_type = TB_TYPE_PORT,
1178 			.in_type = TB_TYPE_PORT,
1180 			.out_type = TB_TYPE_PORT,
1185 			.in_type = TB_TYPE_PORT,
1187 			.out_type = TB_TYPE_DP_HDMI_OUT,
1190 	struct tb_switch *host, *dev1, *dev2, *dev3, *dev4;
1191 	struct tb_port *in, *out;
1192 	struct tb_path *path;
1195 	host = alloc_host(test);
	/* bonded, unbonded, unbonded, bonded link sequence */
1196 	dev1 = alloc_dev_default(test, host, 0x1, true);
1197 	dev2 = alloc_dev_default(test, dev1, 0x701, false);
1198 	dev3 = alloc_dev_default(test, dev2, 0x50701, false);
1199 	dev4 = alloc_dev_default(test, dev3, 0x3050701, true);
1201 	in = &host->ports[5];
1202 	out = &dev4->ports[13];
1204 	path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video");
1205 	KUNIT_ASSERT_NOT_NULL(test, path);
1206 	KUNIT_ASSERT_EQ(test, path->path_length, ARRAY_SIZE(test_data));
1207 	for (i = 0; i < ARRAY_SIZE(test_data); i++) {
1208 		const struct tb_port *in_port, *out_port;
1210 		in_port = path->hops[i].in_port;
1211 		out_port = path->hops[i].out_port;
1213 		KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
1214 		KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
1215 		KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
1216 				test_data[i].in_type);
1217 		KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
1218 		KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
1219 		KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
1220 				test_data[i].out_type);
/*
 * Mirror of tb_test_path_mixed_chain(): same mixed bonded/unbonded
 * chain but the path runs dev4 -> host (note the first hop starts at a
 * DP OUT and the last ends at the host's DP IN).
 */
1225 static void tb_test_path_mixed_chain_reverse(struct kunit *test)
	 * DP Video path from device 4 to host where first and last link
1245 	static const struct hop_expectation test_data[] = {
1249 			.in_type = TB_TYPE_DP_HDMI_OUT,
1251 			.out_type = TB_TYPE_PORT,
1256 			.in_type = TB_TYPE_PORT,
1258 			.out_type = TB_TYPE_PORT,
1263 			.in_type = TB_TYPE_PORT,
1265 			.out_type = TB_TYPE_PORT,
1270 			.in_type = TB_TYPE_PORT,
1272 			.out_type = TB_TYPE_PORT,
1277 			.in_type = TB_TYPE_PORT,
1279 			.out_type = TB_TYPE_DP_HDMI_IN,
1282 	struct tb_switch *host, *dev1, *dev2, *dev3, *dev4;
1283 	struct tb_port *in, *out;
1284 	struct tb_path *path;
1287 	host = alloc_host(test);
	/* bonded, unbonded, unbonded, bonded link sequence */
1288 	dev1 = alloc_dev_default(test, host, 0x1, true);
1289 	dev2 = alloc_dev_default(test, dev1, 0x701, false);
1290 	dev3 = alloc_dev_default(test, dev2, 0x50701, false);
1291 	dev4 = alloc_dev_default(test, dev3, 0x3050701, true);
1293 	in = &dev4->ports[13];
1294 	out = &host->ports[5];
1296 	path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video");
1297 	KUNIT_ASSERT_NOT_NULL(test, path);
1298 	KUNIT_ASSERT_EQ(test, path->path_length, ARRAY_SIZE(test_data));
1299 	for (i = 0; i < ARRAY_SIZE(test_data); i++) {
1300 		const struct tb_port *in_port, *out_port;
1302 		in_port = path->hops[i].in_port;
1303 		out_port = path->hops[i].out_port;
1305 		KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
1306 		KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
1307 		KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
1308 				test_data[i].in_type);
1309 		KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
1310 		KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
1311 		KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
1312 				test_data[i].out_type);
/*
 * Allocates two chained PCIe tunnels (host -> dev1, then dev1 -> dev2) and
 * verifies tunnel type, endpoint ports, path count (2: downstream and
 * upstream) and the first/last hop of each path.
 */
1317 static void tb_test_tunnel_pcie(struct kunit *test)
1319 struct tb_switch *host, *dev1, *dev2;
1320 struct tb_tunnel *tunnel1, *tunnel2;
1321 struct tb_port *down, *up;
1324 * Create PCIe tunnel between host and two devices.
1334 host = alloc_host(test);
1335 dev1 = alloc_dev_default(test, host, 0x1, true);
1336 dev2 = alloc_dev_default(test, dev1, 0x501, true);
/* First tunnel: host PCIe down adapter to dev1 PCIe up adapter. */
1338 down = &host->ports[8];
1339 up = &dev1->ports[9];
1340 tunnel1 = tb_tunnel_alloc_pci(NULL, up, down);
1341 KUNIT_ASSERT_NOT_NULL(test, tunnel1);
1342 KUNIT_EXPECT_EQ(test, tunnel1->type, TB_TUNNEL_PCI);
1343 KUNIT_EXPECT_PTR_EQ(test, tunnel1->src_port, down);
1344 KUNIT_EXPECT_PTR_EQ(test, tunnel1->dst_port, up);
1345 KUNIT_ASSERT_EQ(test, tunnel1->npaths, 2);
1346 KUNIT_ASSERT_EQ(test, tunnel1->paths[0]->path_length, 2);
1347 KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[0]->hops[0].in_port, down);
1348 KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[0]->hops[1].out_port, up);
1349 KUNIT_ASSERT_EQ(test, tunnel1->paths[1]->path_length, 2);
1350 KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[1]->hops[0].in_port, up);
1351 KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[1]->hops[1].out_port, down);
/* Second tunnel: dev1 PCIe down adapter to dev2 PCIe up adapter. */
1353 down = &dev1->ports[10];
1354 up = &dev2->ports[9];
1355 tunnel2 = tb_tunnel_alloc_pci(NULL, up, down);
1356 KUNIT_ASSERT_NOT_NULL(test, tunnel2);
1357 KUNIT_EXPECT_EQ(test, tunnel2->type, TB_TUNNEL_PCI);
1358 KUNIT_EXPECT_PTR_EQ(test, tunnel2->src_port, down);
1359 KUNIT_EXPECT_PTR_EQ(test, tunnel2->dst_port, up);
1360 KUNIT_ASSERT_EQ(test, tunnel2->npaths, 2);
1361 KUNIT_ASSERT_EQ(test, tunnel2->paths[0]->path_length, 2);
1362 KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[0]->hops[0].in_port, down);
1363 KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[0]->hops[1].out_port, up);
1364 KUNIT_ASSERT_EQ(test, tunnel2->paths[1]->path_length, 2);
1365 KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[0].in_port, up);
1366 KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[1].out_port, down);
/* Free in reverse order of allocation. */
1368 tb_tunnel_free(tunnel2);
1369 tb_tunnel_free(tunnel1);
/*
 * Allocates a DP tunnel between the host DP IN and a directly attached
 * device's DP OUT and checks the three expected paths (video, AUX TX,
 * AUX RX) each have two hops with the right endpoints. Note path[2]
 * (AUX RX) runs in the opposite direction: out -> in.
 */
1372 static void tb_test_tunnel_dp(struct kunit *test)
1374 struct tb_switch *host, *dev;
1375 struct tb_port *in, *out;
1376 struct tb_tunnel *tunnel;
1379 * Create DP tunnel between Host and Device
1386 host = alloc_host(test);
1387 dev = alloc_dev_default(test, host, 0x3, true);
1389 in = &host->ports[5];
1390 out = &dev->ports[13];
1392 tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
1393 KUNIT_ASSERT_NOT_NULL(test, tunnel);
1394 KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP);
1395 KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
1396 KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, out);
1397 KUNIT_ASSERT_EQ(test, tunnel->npaths, 3);
1398 KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 2);
1399 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, in);
1400 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[1].out_port, out);
1401 KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 2);
1402 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, in);
1403 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[1].out_port, out);
1404 KUNIT_ASSERT_EQ(test, tunnel->paths[2]->path_length, 2);
1405 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[0].in_port, out);
1406 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[1].out_port, in);
1407 tb_tunnel_free(tunnel);
/*
 * Allocates a DP tunnel across a fan-out: host -> dev1, with three devices
 * below dev1; the tunnel terminates at dev4's DP OUT. All three paths are
 * expected to be three hops long (host, dev1, dev4).
 */
1410 static void tb_test_tunnel_dp_chain(struct kunit *test)
1412 struct tb_switch *host, *dev1, *dev4;
1413 struct tb_port *in, *out;
1414 struct tb_tunnel *tunnel;
1417 * Create DP tunnel from Host DP IN to Device #4 DP OUT.
1425 * [Device #2] | [Device #4]
1429 host = alloc_host(test);
1430 dev1 = alloc_dev_default(test, host, 0x1, true);
/* Devices #2 and #3 are only topology filler; no pointer kept. */
1431 alloc_dev_default(test, dev1, 0x301, true);
1432 alloc_dev_default(test, dev1, 0x501, true);
1433 dev4 = alloc_dev_default(test, dev1, 0x701, true);
1435 in = &host->ports[5];
1436 out = &dev4->ports[14];
1438 tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
1439 KUNIT_ASSERT_NOT_NULL(test, tunnel);
1440 KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP);
1441 KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
1442 KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, out);
1443 KUNIT_ASSERT_EQ(test, tunnel->npaths, 3);
1444 KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 3);
1445 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, in);
1446 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[2].out_port, out);
1447 KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 3);
1448 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, in);
1449 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[2].out_port, out);
1450 KUNIT_ASSERT_EQ(test, tunnel->paths[2]->path_length, 3);
1451 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[0].in_port, out);
1452 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[2].out_port, in);
1453 tb_tunnel_free(tunnel);
/*
 * Allocates a DP tunnel between two leaves of a tree topology (DP IN on
 * device #2, DP OUT on device #5), so the path crosses their common
 * ancestor dev1. All three paths are expected to be four hops long.
 */
1456 static void tb_test_tunnel_dp_tree(struct kunit *test)
1458 struct tb_switch *host, *dev1, *dev2, *dev3, *dev5;
1459 struct tb_port *in, *out;
1460 struct tb_tunnel *tunnel;
1463 * Create DP tunnel from Device #2 DP IN to Device #5 DP OUT.
1471 * [Device #2] | [Device #4]
1478 host = alloc_host(test);
1479 dev1 = alloc_dev_default(test, host, 0x3, true);
/* dev2 provides the DP IN adapter; dev5 hangs below dev3. */
1480 dev2 = alloc_dev_with_dpin(test, dev1, 0x303, true);
1481 dev3 = alloc_dev_default(test, dev1, 0x503, true);
1482 alloc_dev_default(test, dev1, 0x703, true);
1483 dev5 = alloc_dev_default(test, dev3, 0x50503, true);
1485 in = &dev2->ports[13];
1486 out = &dev5->ports[13];
1488 tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
1489 KUNIT_ASSERT_NOT_NULL(test, tunnel);
1490 KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP);
1491 KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
1492 KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, out);
1493 KUNIT_ASSERT_EQ(test, tunnel->npaths, 3);
1494 KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 4);
1495 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, in);
1496 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[3].out_port, out);
1497 KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 4);
1498 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, in);
1499 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[3].out_port, out);
1500 KUNIT_ASSERT_EQ(test, tunnel->paths[2]->path_length, 4);
1501 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[0].in_port, out);
1502 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[3].out_port, in);
1503 tb_tunnel_free(tunnel);
/*
 * Builds two six-deep daisy chains below the host and allocates a DP
 * tunnel between their deepest leaves (dev6 DP IN -> dev12 DP OUT),
 * giving the maximum path length of 13 hops. Verifies first, middle
 * (hop 6, which crosses the host) and last hop of all three paths.
 */
1506 static void tb_test_tunnel_dp_max_length(struct kunit *test)
1508 struct tb_switch *host, *dev1, *dev2, *dev3, *dev4, *dev5, *dev6;
1509 struct tb_switch *dev7, *dev8, *dev9, *dev10, *dev11, *dev12;
1510 struct tb_port *in, *out;
1511 struct tb_tunnel *tunnel;
1514 * Creates DP tunnel from Device #6 to Device #12.
1519 * [Device #1] [Device #7]
1522 * [Device #2] [Device #8]
1525 * [Device #3] [Device #9]
1528 * [Device #4] [Device #10]
1531 * [Device #5] [Device #11]
1534 * [Device #6] [Device #12]
1536 host = alloc_host(test);
/* Left chain, ending in a device with a DP IN adapter. */
1537 dev1 = alloc_dev_default(test, host, 0x1, true);
1538 dev2 = alloc_dev_default(test, dev1, 0x301, true);
1539 dev3 = alloc_dev_default(test, dev2, 0x30301, true);
1540 dev4 = alloc_dev_default(test, dev3, 0x3030301, true);
1541 dev5 = alloc_dev_default(test, dev4, 0x303030301, true);
1542 dev6 = alloc_dev_with_dpin(test, dev5, 0x30303030301, true);
/* Right chain. */
1543 dev7 = alloc_dev_default(test, host, 0x3, true);
1544 dev8 = alloc_dev_default(test, dev7, 0x303, true);
1545 dev9 = alloc_dev_default(test, dev8, 0x30303, true);
1546 dev10 = alloc_dev_default(test, dev9, 0x3030303, true);
1547 dev11 = alloc_dev_default(test, dev10, 0x303030303, true);
1548 dev12 = alloc_dev_default(test, dev11, 0x30303030303, true);
1550 in = &dev6->ports[13];
1551 out = &dev12->ports[13];
1553 tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
1554 KUNIT_ASSERT_NOT_NULL(test, tunnel);
1555 KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP);
1556 KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
1557 KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, out);
1558 KUNIT_ASSERT_EQ(test, tunnel->npaths, 3);
1559 KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 13);
1561 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, in);
1563 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[6].in_port,
1565 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[6].out_port,
1568 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[12].out_port, out);
1569 KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 13);
1570 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, in);
1571 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[6].in_port,
1573 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[6].out_port,
1575 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[12].out_port, out);
1576 KUNIT_ASSERT_EQ(test, tunnel->paths[2]->path_length, 13);
1577 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[0].in_port, out);
1578 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[6].in_port,
1580 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[6].out_port,
1582 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[12].out_port, in);
1583 tb_tunnel_free(tunnel);
/*
 * Allocates two chained USB3 tunnels (host -> dev1, then dev1 -> dev2)
 * and verifies tunnel type, endpoints, and the two 2-hop paths of each
 * (downstream and upstream). Mirrors tb_test_tunnel_pcie but for the
 * USB3 adapters.
 */
1586 static void tb_test_tunnel_usb3(struct kunit *test)
1588 struct tb_switch *host, *dev1, *dev2;
1589 struct tb_tunnel *tunnel1, *tunnel2;
1590 struct tb_port *down, *up;
1593 * Create USB3 tunnel between host and two devices.
1603 host = alloc_host(test);
1604 dev1 = alloc_dev_default(test, host, 0x1, true);
1605 dev2 = alloc_dev_default(test, dev1, 0x701, true);
/* First tunnel: host USB3 down adapter to dev1 USB3 up adapter. */
1607 down = &host->ports[12];
1608 up = &dev1->ports[16];
1609 tunnel1 = tb_tunnel_alloc_usb3(NULL, up, down, 0, 0);
1610 KUNIT_ASSERT_NOT_NULL(test, tunnel1);
1611 KUNIT_EXPECT_EQ(test, tunnel1->type, TB_TUNNEL_USB3);
1612 KUNIT_EXPECT_PTR_EQ(test, tunnel1->src_port, down);
1613 KUNIT_EXPECT_PTR_EQ(test, tunnel1->dst_port, up);
1614 KUNIT_ASSERT_EQ(test, tunnel1->npaths, 2);
1615 KUNIT_ASSERT_EQ(test, tunnel1->paths[0]->path_length, 2);
1616 KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[0]->hops[0].in_port, down);
1617 KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[0]->hops[1].out_port, up);
1618 KUNIT_ASSERT_EQ(test, tunnel1->paths[1]->path_length, 2);
1619 KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[1]->hops[0].in_port, up);
1620 KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[1]->hops[1].out_port, down);
/* Second tunnel: dev1 USB3 down adapter to dev2 USB3 up adapter. */
1622 down = &dev1->ports[17];
1623 up = &dev2->ports[16];
1624 tunnel2 = tb_tunnel_alloc_usb3(NULL, up, down, 0, 0);
1625 KUNIT_ASSERT_NOT_NULL(test, tunnel2);
1626 KUNIT_EXPECT_EQ(test, tunnel2->type, TB_TUNNEL_USB3);
1627 KUNIT_EXPECT_PTR_EQ(test, tunnel2->src_port, down);
1628 KUNIT_EXPECT_PTR_EQ(test, tunnel2->dst_port, up);
1629 KUNIT_ASSERT_EQ(test, tunnel2->npaths, 2);
1630 KUNIT_ASSERT_EQ(test, tunnel2->paths[0]->path_length, 2);
1631 KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[0]->hops[0].in_port, down);
1632 KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[0]->hops[1].out_port, up);
1633 KUNIT_ASSERT_EQ(test, tunnel2->paths[1]->path_length, 2);
1634 KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[0].in_port, up);
1635 KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[1].out_port, down);
/* Free in reverse order of allocation. */
1637 tb_tunnel_free(tunnel2);
1638 tb_tunnel_free(tunnel1);
/*
 * Allocates a DP tunnel between dev2 (DP IN) and dev5 (DP OUT) in a tree
 * topology and probes tb_tunnel_port_on_path(): ports on the tunnel route
 * (endpoints, dev1's ports 3/5, upstream ports of dev3/dev5) must report
 * true; unrelated ports (host ports, dev1 ports 1/7, dev4) must not.
 */
1641 static void tb_test_tunnel_port_on_path(struct kunit *test)
1643 struct tb_switch *host, *dev1, *dev2, *dev3, *dev4, *dev5;
1644 struct tb_port *in, *out, *port;
1645 struct tb_tunnel *dp_tunnel;
1654 * [Device #2] | [Device #4]
1661 host = alloc_host(test);
1662 dev1 = alloc_dev_default(test, host, 0x3, true);
1663 dev2 = alloc_dev_with_dpin(test, dev1, 0x303, true);
1664 dev3 = alloc_dev_default(test, dev1, 0x503, true);
1665 dev4 = alloc_dev_default(test, dev1, 0x703, true);
1666 dev5 = alloc_dev_default(test, dev3, 0x50503, true);
1668 in = &dev2->ports[13];
1669 out = &dev5->ports[13];
1671 dp_tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
1672 KUNIT_ASSERT_NOT_NULL(test, dp_tunnel);
/* The tunnel endpoints themselves are on the path. */
1674 KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, in));
1675 KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, out));
/* Host ports are not involved: the path stays below dev1. */
1677 port = &host->ports[8];
1678 KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port));
1680 port = &host->ports[3];
1681 KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port));
1683 port = &dev1->ports[1];
1684 KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port));
/* dev1 ports 3 and 5 connect dev2 and dev3 branches — on the path. */
1686 port = &dev1->ports[3];
1687 KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, port));
1689 port = &dev1->ports[5];
1690 KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, port));
1692 port = &dev1->ports[7];
1693 KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port));
1695 port = &dev3->ports[1];
1696 KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, port));
1698 port = &dev5->ports[1];
1699 KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, port));
/* dev4 sits on an unrelated branch. */
1701 port = &dev4->ports[1];
1702 KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port));
1704 tb_tunnel_free(dp_tunnel);
/*
 * Allocates a bidirectional DMA tunnel between the host NHI and host
 * port 1 using HopIDs 8 (port side) and 1 (NHI side), then checks both
 * single-hop paths carry the expected in/next hop indices.
 */
1707 static void tb_test_tunnel_dma(struct kunit *test)
1709 struct tb_port *nhi, *port;
1710 struct tb_tunnel *tunnel;
1711 struct tb_switch *host;
1714 * Create DMA tunnel from NHI to port 1 and back.
1717 * 1 ^ In HopID 1 -> Out HopID 8
1719 * v In HopID 8 -> Out HopID 1
1720 * ............ Domain border
1724 host = alloc_host(test);
1725 nhi = &host->ports[7];
1726 port = &host->ports[1];
1728 tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 8, 1, 8, 1);
1729 KUNIT_ASSERT_NOT_NULL(test, tunnel);
1730 KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DMA);
1731 KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, nhi);
1732 KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, port);
1733 KUNIT_ASSERT_EQ(test, tunnel->npaths, 2);
/* RX path: lane port HopID 8 into the NHI at HopID 1. */
1735 KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 1);
1736 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, port);
1737 KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].in_hop_index, 8);
1738 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].out_port, nhi);
1739 KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].next_hop_index, 1);
/* TX path: NHI HopID 1 out of the lane port at HopID 8. */
1741 KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 1);
1742 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, nhi);
1743 KUNIT_EXPECT_EQ(test, tunnel->paths[1]->hops[0].in_hop_index, 1);
1744 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].out_port, port);
1745 KUNIT_EXPECT_EQ(test, tunnel->paths[1]->hops[0].next_hop_index, 8);
1747 tb_tunnel_free(tunnel);
/*
 * Allocates an RX-only DMA tunnel (transmit HopIDs passed as -1) and
 * verifies only one path exists: port HopID 15 into the NHI at HopID 2.
 */
1750 static void tb_test_tunnel_dma_rx(struct kunit *test)
1752 struct tb_port *nhi, *port;
1753 struct tb_tunnel *tunnel;
1754 struct tb_switch *host;
1757 * Create DMA RX tunnel from port 1 to NHI.
1762 * | In HopID 15 -> Out HopID 2
1763 * ............ Domain border
1767 host = alloc_host(test);
1768 nhi = &host->ports[7];
1769 port = &host->ports[1];
/* -1, -1 for the TX pair means "no transmit path". */
1771 tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, -1, -1, 15, 2);
1772 KUNIT_ASSERT_NOT_NULL(test, tunnel);
1773 KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DMA);
1774 KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, nhi);
1775 KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, port);
1776 KUNIT_ASSERT_EQ(test, tunnel->npaths, 1);
1778 KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 1);
1779 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, port);
1780 KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].in_hop_index, 15);
1781 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].out_port, nhi);
1782 KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].next_hop_index, 2);
1784 tb_tunnel_free(tunnel);
/*
 * Allocates a TX-only DMA tunnel (receive HopIDs passed as -1) and
 * verifies only one path exists: NHI HopID 2 out of port 1 at HopID 15.
 * Counterpart of tb_test_tunnel_dma_rx.
 */
1787 static void tb_test_tunnel_dma_tx(struct kunit *test)
1789 struct tb_port *nhi, *port;
1790 struct tb_tunnel *tunnel;
1791 struct tb_switch *host;
1794 * Create DMA TX tunnel from NHI to port 1.
1797 * 1 | In HopID 2 -> Out HopID 15
1800 * ............ Domain border
1804 host = alloc_host(test);
1805 nhi = &host->ports[7];
1806 port = &host->ports[1];
/* -1, -1 for the RX pair means "no receive path". */
1808 tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 15, 2, -1, -1);
1809 KUNIT_ASSERT_NOT_NULL(test, tunnel);
1810 KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DMA);
1811 KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, nhi);
1812 KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, port);
1813 KUNIT_ASSERT_EQ(test, tunnel->npaths, 1);
1815 KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 1);
1816 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, nhi);
1817 KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].in_hop_index, 2);
1818 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].out_port, port);
1819 KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].next_hop_index, 15);
1821 tb_tunnel_free(tunnel);
/*
 * Allocates a bidirectional DMA tunnel from the host NHI across two
 * devices to dev2 port 3 and verifies both 3-hop paths: endpoint hop
 * indices (8 on the remote port, 1 on the NHI) and the intermediate
 * in/out ports along the chain.
 */
1824 static void tb_test_tunnel_dma_chain(struct kunit *test)
1826 struct tb_switch *host, *dev1, *dev2;
1827 struct tb_port *nhi, *port;
1828 struct tb_tunnel *tunnel;
1831 * Create DMA tunnel from NHI to Device #2 port 3 and back.
1834 * 1 ^ In HopID 1 -> Out HopID x
1836 * 1 | In HopID x -> Out HopID 1
1841 * 3 | In HopID x -> Out HopID 8
1843 * v In HopID 8 -> Out HopID x
1844 * ............ Domain border
1848 host = alloc_host(test);
1849 dev1 = alloc_dev_default(test, host, 0x1, true);
1850 dev2 = alloc_dev_default(test, dev1, 0x701, true);
1852 nhi = &host->ports[7];
1853 port = &dev2->ports[3];
1854 tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 8, 1, 8, 1);
1855 KUNIT_ASSERT_NOT_NULL(test, tunnel);
1856 KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DMA);
1857 KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, nhi);
1858 KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, port);
1859 KUNIT_ASSERT_EQ(test, tunnel->npaths, 2);
/* RX path: dev2 port 3 (HopID 8) -> dev1 -> host NHI (HopID 1). */
1861 KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 3);
1862 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, port);
1863 KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].in_hop_index, 8);
1864 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].out_port,
1866 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[1].in_port,
1868 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[1].out_port,
1870 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[2].in_port,
1872 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[2].out_port, nhi)
1873 KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[2].next_hop_index, 1);
/* TX path: host NHI (HopID 1) -> dev1 -> dev2 port 3 (HopID 8). */
1875 KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 3);
1876 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, nhi);
1877 KUNIT_EXPECT_EQ(test, tunnel->paths[1]->hops[0].in_hop_index, 1);
1878 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[1].in_port,
1880 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[1].out_port,
1882 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[2].in_port,
1884 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[2].out_port, port);
1885 KUNIT_EXPECT_EQ(test, tunnel->paths[1]->hops[2].next_hop_index, 8);
1887 tb_tunnel_free(tunnel);
/*
 * Exercises tb_tunnel_match_dma() against three DMA tunnel shapes
 * (bidirectional, TX-only, RX-only). A -1 argument acts as a wildcard
 * for that HopID; exact values must match the tunnel's allocated HopIDs,
 * and a direction the tunnel lacks must not match concrete values.
 */
1890 static void tb_test_tunnel_dma_match(struct kunit *test)
1892 struct tb_port *nhi, *port;
1893 struct tb_tunnel *tunnel;
1894 struct tb_switch *host;
1896 host = alloc_host(test);
1897 nhi = &host->ports[7];
1898 port = &host->ports[1];
/* Bidirectional tunnel: TX 15/1, RX 15/1. */
1900 tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 15, 1, 15, 1);
1901 KUNIT_ASSERT_NOT_NULL(test, tunnel);
1903 KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, 15, 1, 15, 1));
1904 KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 8, 1, 15, 1));
1905 KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, 1));
1906 KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, 15, 1, -1, -1));
1907 KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, 15, -1, -1, -1));
1908 KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, 1, -1, -1));
1909 KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, -1));
1910 KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, -1, 1));
1911 KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, -1, -1));
1912 KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 8, -1, 8, -1));
1914 tb_tunnel_free(tunnel);
/* TX-only tunnel: matching any RX value must fail. */
1916 tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 15, 1, -1, -1);
1917 KUNIT_ASSERT_NOT_NULL(test, tunnel);
1918 KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, 15, 1, -1, -1));
1919 KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, 15, -1, -1, -1));
1920 KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, 1, -1, -1));
1921 KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, -1, -1));
1922 KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 15, 1, 15, 1));
1923 KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, 1));
1924 KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 15, 11, -1, -1));
1926 tb_tunnel_free(tunnel);
/* RX-only tunnel: matching any TX value must fail. */
1928 tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, -1, -1, 15, 11);
1929 KUNIT_ASSERT_NOT_NULL(test, tunnel);
1930 KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, 11));
1931 KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, -1));
1932 KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, -1, 11));
1933 KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, -1, -1));
1934 KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, 1));
1935 KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, -1, -1, 10, 11));
1936 KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 15, 11, -1, -1));
1938 tb_tunnel_free(tunnel);
/*
 * PCIe tunnel credit allocation on a legacy (pre-USB4) link that is NOT
 * bonded: both paths get 7 initial credits on the first hop and 16 on
 * the second, with no non-flow-control credits.
 */
1941 static void tb_test_credit_alloc_legacy_not_bonded(struct kunit *test)
1943 struct tb_switch *host, *dev;
1944 struct tb_port *up, *down;
1945 struct tb_tunnel *tunnel;
1946 struct tb_path *path;
1948 host = alloc_host(test);
/* "false" -> link not bonded, halving available link credits. */
1949 dev = alloc_dev_default(test, host, 0x1, false);
1951 down = &host->ports[8];
1952 up = &dev->ports[9];
1953 tunnel = tb_tunnel_alloc_pci(NULL, up, down);
1954 KUNIT_ASSERT_NOT_NULL(test, tunnel);
1955 KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2);
1957 path = tunnel->paths[0];
1958 KUNIT_ASSERT_EQ(test, path->path_length, 2);
1959 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
1960 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
1961 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
1962 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 16U);
1964 path = tunnel->paths[1];
1965 KUNIT_ASSERT_EQ(test, path->path_length, 2);
1966 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
1967 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
1968 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
1969 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 16U);
1971 tb_tunnel_free(tunnel);
/*
 * PCIe tunnel credit allocation on a legacy link that IS bonded: the
 * second hop of each path now gets 32 initial credits (double the
 * not-bonded case above).
 */
1974 static void tb_test_credit_alloc_legacy_bonded(struct kunit *test)
1976 struct tb_switch *host, *dev;
1977 struct tb_port *up, *down;
1978 struct tb_tunnel *tunnel;
1979 struct tb_path *path;
1981 host = alloc_host(test);
/* "true" -> bonded link with the full credit budget. */
1982 dev = alloc_dev_default(test, host, 0x1, true);
1984 down = &host->ports[8];
1985 up = &dev->ports[9];
1986 tunnel = tb_tunnel_alloc_pci(NULL, up, down);
1987 KUNIT_ASSERT_NOT_NULL(test, tunnel);
1988 KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2);
1990 path = tunnel->paths[0];
1991 KUNIT_ASSERT_EQ(test, path->path_length, 2);
1992 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
1993 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
1994 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
1995 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 32U);
1997 path = tunnel->paths[1];
1998 KUNIT_ASSERT_EQ(test, path->path_length, 2);
1999 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2000 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
2001 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2002 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 32U);
2004 tb_tunnel_free(tunnel);
/*
 * PCIe tunnel credit allocation on a USB4 host/device pair: downstream
 * path's device-side hop gets 32 initial credits while the upstream
 * path's host-side hop gets 64 — asymmetric, unlike the legacy cases.
 */
2007 static void tb_test_credit_alloc_pcie(struct kunit *test)
2009 struct tb_switch *host, *dev;
2010 struct tb_port *up, *down;
2011 struct tb_tunnel *tunnel;
2012 struct tb_path *path;
/* USB4 variants of the fixture switches. */
2014 host = alloc_host_usb4(test);
2015 dev = alloc_dev_usb4(test, host, 0x1, true);
2017 down = &host->ports[8];
2018 up = &dev->ports[9];
2019 tunnel = tb_tunnel_alloc_pci(NULL, up, down);
2020 KUNIT_ASSERT_NOT_NULL(test, tunnel);
2021 KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2);
2023 path = tunnel->paths[0];
2024 KUNIT_ASSERT_EQ(test, path->path_length, 2);
2025 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2026 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
2027 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2028 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 32U);
2030 path = tunnel->paths[1];
2031 KUNIT_ASSERT_EQ(test, path->path_length, 2);
2032 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2033 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
2034 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2035 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 64U);
2037 tb_tunnel_free(tunnel);
/*
 * PCIe tunnel credit allocation on a USB4 device that has no DP
 * adapters: with baMinDPmain = baMinDPaux = 0 the PCIe path's
 * device-side hop ends up with 9 initial credits (per the formula in
 * the comment below) instead of the usual 32.
 */
2040 static void tb_test_credit_alloc_without_dp(struct kunit *test)
2042 struct tb_switch *host, *dev;
2043 struct tb_port *up, *down;
2044 struct tb_tunnel *tunnel;
2045 struct tb_path *path;
2047 host = alloc_host_usb4(test);
2048 dev = alloc_dev_without_dp(test, host, 0x1, true);
2051 * The device has no DP therefore baMinDPmain = baMinDPaux = 0
2053 * Create PCIe path with buffers less than baMaxPCIe.
2055 * For a device with buffers configurations:
2061 * Remaining Buffers = Total - (CP + DP) = 120 - (2 + 0) = 118
2062 * PCIe Credits = Max(6, Min(baMaxPCIe, Remaining Buffers - baMaxUSB3)
2063 * = Max(6, Min(30, 9) = 9
2065 down = &host->ports[8];
2066 up = &dev->ports[9];
2067 tunnel = tb_tunnel_alloc_pci(NULL, up, down);
/* Use the dedicated NULL-check macro like every sibling test here. */
2068 KUNIT_ASSERT_NOT_NULL(test, tunnel);
2069 KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2);
2071 /* PCIe downstream path */
2072 path = tunnel->paths[0];
2073 KUNIT_ASSERT_EQ(test, path->path_length, 2);
2074 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2075 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
2076 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2077 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 9U);
2079 /* PCIe upstream path */
2080 path = tunnel->paths[1];
2081 KUNIT_ASSERT_EQ(test, path->path_length, 2);
2082 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2083 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
2084 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2085 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 64U);
2087 tb_tunnel_free(tunnel);
/*
 * DP tunnel credit allocation on a USB4 host/device pair: the video
 * (main) path reserves non-flow-control credits (12 and 18) with zero
 * initial credits, while both AUX paths get 1 initial credit per hop.
 */
2090 static void tb_test_credit_alloc_dp(struct kunit *test)
2092 struct tb_switch *host, *dev;
2093 struct tb_port *in, *out;
2094 struct tb_tunnel *tunnel;
2095 struct tb_path *path;
2097 host = alloc_host_usb4(test);
2098 dev = alloc_dev_usb4(test, host, 0x1, true);
2100 in = &host->ports[5];
2101 out = &dev->ports[14];
2103 tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
2104 KUNIT_ASSERT_NOT_NULL(test, tunnel);
2105 KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)3);
2107 /* Video (main) path */
2108 path = tunnel->paths[0];
2109 KUNIT_ASSERT_EQ(test, path->path_length, 2);
2110 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 12U);
2111 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
2112 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 18U);
2113 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 0U);
/* AUX TX path */
2116 path = tunnel->paths[1];
2117 KUNIT_ASSERT_EQ(test, path->path_length, 2);
2118 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2119 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 1U);
2120 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2121 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);
/* AUX RX path */
2124 path = tunnel->paths[2];
2125 KUNIT_ASSERT_EQ(test, path->path_length, 2);
2126 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2127 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 1U);
2128 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2129 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);
2131 tb_tunnel_free(tunnel);
/*
 * USB3 tunnel credit allocation on a USB4 host/device pair: downstream
 * path's far hop gets 14 initial credits, upstream path's far hop 32;
 * both near hops get the usual 7.
 */
2134 static void tb_test_credit_alloc_usb3(struct kunit *test)
2136 struct tb_switch *host, *dev;
2137 struct tb_port *up, *down;
2138 struct tb_tunnel *tunnel;
2139 struct tb_path *path;
2141 host = alloc_host_usb4(test);
2142 dev = alloc_dev_usb4(test, host, 0x1, true);
2144 down = &host->ports[12];
2145 up = &dev->ports[16];
2146 tunnel = tb_tunnel_alloc_usb3(NULL, up, down, 0, 0);
2147 KUNIT_ASSERT_NOT_NULL(test, tunnel);
2148 KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2);
2150 path = tunnel->paths[0];
2151 KUNIT_ASSERT_EQ(test, path->path_length, 2);
2152 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2153 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
2154 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2155 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);
2157 path = tunnel->paths[1];
2158 KUNIT_ASSERT_EQ(test, path->path_length, 2);
2159 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2160 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
2161 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2162 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 32U);
2164 tb_tunnel_free(tunnel);
/*
 * DMA tunnel credit allocation on a USB4 host/device pair: the RX path
 * gets 14 initial credits on both hops; the TX path gets 0 on the NHI
 * hop and 14 on the device hop.
 */
2167 static void tb_test_credit_alloc_dma(struct kunit *test)
2169 struct tb_switch *host, *dev;
2170 struct tb_port *nhi, *port;
2171 struct tb_tunnel *tunnel;
2172 struct tb_path *path;
2174 host = alloc_host_usb4(test);
2175 dev = alloc_dev_usb4(test, host, 0x1, true);
2177 nhi = &host->ports[7];
2178 port = &dev->ports[3];
2180 tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 8, 1, 8, 1);
2181 KUNIT_ASSERT_NOT_NULL(test, tunnel);
2182 KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2);
/* RX path */
2185 path = tunnel->paths[0];
2186 KUNIT_ASSERT_EQ(test, path->path_length, 2);
2187 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2188 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 14U);
2189 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2190 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);
/* TX path */
2193 path = tunnel->paths[1];
2194 KUNIT_ASSERT_EQ(test, path->path_length, 2);
2195 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2196 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
2197 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2198 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);
2200 tb_tunnel_free(tunnel);
/*
 * Verify credit (buffer) allocation when several DMA tunnels share the
 * same NHI/adapter port pair: with the default buffers two tunnels fit,
 * the third must fail until an earlier tunnel is released.
 */
2203 static void tb_test_credit_alloc_dma_multiple(struct kunit *test)
2205 struct tb_tunnel *tunnel1, *tunnel2, *tunnel3;
2206 struct tb_switch *host, *dev;
2207 struct tb_port *nhi, *port;
2208 struct tb_path *path;
2210 host = alloc_host_usb4(test);
2211 dev = alloc_dev_usb4(test, host, 0x1, true);
2213 nhi = &host->ports[7];
2214 port = &dev->ports[3];
/*
2217 * Create three DMA tunnels through the same ports. With the
2218 * default buffers we should be able to create two and the last
 * one should fail.
 *
2221 * For default host we have following buffers for DMA:
 *
2223 * 120 - (2 + 2 * (1 + 0) + 32 + 64 + spare) = 20
 *
2225 * For device we have following:
 *
2227 * 120 - (2 + 2 * (1 + 18) + 14 + 32 + spare) = 34
 *
2229 * spare = 14 + 1 = 15
 *
2231 * So on host the first tunnel gets 14 and the second gets the
2232 * remaining 1 and then we run out of buffers.
 */
2234 tunnel1 = tb_tunnel_alloc_dma(NULL, nhi, port, 8, 1, 8, 1);
2235 KUNIT_ASSERT_NOT_NULL(test, tunnel1);
2236 KUNIT_ASSERT_EQ(test, tunnel1->npaths, (size_t)2);
/* First tunnel, first path: full 14 initial credits on both hops. */
2238 path = tunnel1->paths[0];
2239 KUNIT_ASSERT_EQ(test, path->path_length, 2);
2240 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2241 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 14U);
2242 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2243 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);
/* First tunnel, second path: 0 on the first hop, 14 on the second. */
2245 path = tunnel1->paths[1];
2246 KUNIT_ASSERT_EQ(test, path->path_length, 2);
2247 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2248 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
2249 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2250 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);
/* Second tunnel only gets the 1 remaining buffer on the shared hop. */
2252 tunnel2 = tb_tunnel_alloc_dma(NULL, nhi, port, 9, 2, 9, 2);
2253 KUNIT_ASSERT_NOT_NULL(test, tunnel2);
2254 KUNIT_ASSERT_EQ(test, tunnel2->npaths, (size_t)2);
2256 path = tunnel2->paths[0];
2257 KUNIT_ASSERT_EQ(test, path->path_length, 2);
2258 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2259 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 14U);
2260 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2261 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);
2263 path = tunnel2->paths[1];
2264 KUNIT_ASSERT_EQ(test, path->path_length, 2);
2265 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2266 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
2267 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2268 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);
/* No buffers left, so the third allocation must fail. */
2270 tunnel3 = tb_tunnel_alloc_dma(NULL, nhi, port, 10, 3, 10, 3);
2271 KUNIT_ASSERT_NULL(test, tunnel3);
/*
2274 * Release the first DMA tunnel. That should make 14 buffers
2275 * available for the next tunnel.
 */
2277 tb_tunnel_free(tunnel1);
/* Retry: now the allocation succeeds with the freed 14 buffers. */
2279 tunnel3 = tb_tunnel_alloc_dma(NULL, nhi, port, 10, 3, 10, 3);
2280 KUNIT_ASSERT_NOT_NULL(test, tunnel3);
2282 path = tunnel3->paths[0];
2283 KUNIT_ASSERT_EQ(test, path->path_length, 2);
2284 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2285 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 14U);
2286 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2287 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);
2289 path = tunnel3->paths[1];
2290 KUNIT_ASSERT_EQ(test, path->path_length, 2);
2291 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2292 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
2293 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2294 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);
2296 tb_tunnel_free(tunnel3);
2297 tb_tunnel_free(tunnel2);
/*
 * Helper for tb_test_credit_alloc_all(): allocates a PCIe tunnel from
 * host port 8 (down) to dev port 9 (up) and verifies the credit
 * allocation on both of its paths. The tunnel is handed back to the
 * caller, which is responsible for freeing it with tb_tunnel_free().
 */
2300 static struct tb_tunnel *TB_TEST_PCIE_TUNNEL(struct kunit *test,
2301 struct tb_switch *host, struct tb_switch *dev)
2303 struct tb_port *up, *down;
2304 struct tb_tunnel *pcie_tunnel;
2305 struct tb_path *path;
2307 down = &host->ports[8];
2308 up = &dev->ports[9];
2309 pcie_tunnel = tb_tunnel_alloc_pci(NULL, up, down);
2310 KUNIT_ASSERT_NOT_NULL(test, pcie_tunnel);
2311 KUNIT_ASSERT_EQ(test, pcie_tunnel->npaths, (size_t)2);
/* First path: 7 credits on the device side, 32 on the host side. */
2313 path = pcie_tunnel->paths[0];
2314 KUNIT_ASSERT_EQ(test, path->path_length, 2);
2315 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2316 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
2317 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2318 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 32U);
/* Second path: 7 credits on the first hop, 64 on the second. */
2320 path = pcie_tunnel->paths[1];
2321 KUNIT_ASSERT_EQ(test, path->path_length, 2);
2322 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2323 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
2324 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2325 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 64U);
/*
 * Helper for tb_test_credit_alloc_all(): allocates the first DP tunnel
 * (host port 5 -> dev port 13) and verifies credit allocation on all
 * three of its paths. Returns the tunnel; the caller frees it.
 */
2330 static struct tb_tunnel *TB_TEST_DP_TUNNEL1(struct kunit *test,
2331 struct tb_switch *host, struct tb_switch *dev)
2333 struct tb_port *in, *out;
2334 struct tb_tunnel *dp_tunnel1;
2335 struct tb_path *path;
2337 in = &host->ports[5];
2338 out = &dev->ports[13];
2339 dp_tunnel1 = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
2340 KUNIT_ASSERT_NOT_NULL(test, dp_tunnel1);
2341 KUNIT_ASSERT_EQ(test, dp_tunnel1->npaths, (size_t)3);
/* First path uses non-flow-controlled credits (12 and 18). */
2343 path = dp_tunnel1->paths[0];
2344 KUNIT_ASSERT_EQ(test, path->path_length, 2);
2345 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 12U);
2346 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
2347 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 18U);
2348 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 0U);
/* Remaining two paths each get a single initial credit per hop. */
2350 path = dp_tunnel1->paths[1];
2351 KUNIT_ASSERT_EQ(test, path->path_length, 2);
2352 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2353 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 1U);
2354 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2355 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);
2357 path = dp_tunnel1->paths[2];
2358 KUNIT_ASSERT_EQ(test, path->path_length, 2);
2359 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2360 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 1U);
2361 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2362 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);
/*
 * Helper for tb_test_credit_alloc_all(): allocates the second DP tunnel
 * (host port 6 -> dev port 14) concurrently with the first one and
 * expects the exact same per-path credit allocation. Returns the
 * tunnel; the caller frees it.
 */
2367 static struct tb_tunnel *TB_TEST_DP_TUNNEL2(struct kunit *test,
2368 struct tb_switch *host, struct tb_switch *dev)
2370 struct tb_port *in, *out;
2371 struct tb_tunnel *dp_tunnel2;
2372 struct tb_path *path;
2374 in = &host->ports[6];
2375 out = &dev->ports[14];
2376 dp_tunnel2 = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
2377 KUNIT_ASSERT_NOT_NULL(test, dp_tunnel2);
2378 KUNIT_ASSERT_EQ(test, dp_tunnel2->npaths, (size_t)3);
/* Same expectations as TB_TEST_DP_TUNNEL1(): NFC credits 12/18 here. */
2380 path = dp_tunnel2->paths[0];
2381 KUNIT_ASSERT_EQ(test, path->path_length, 2);
2382 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 12U);
2383 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
2384 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 18U);
2385 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 0U);
/* The other two paths get one initial credit per hop. */
2387 path = dp_tunnel2->paths[1];
2388 KUNIT_ASSERT_EQ(test, path->path_length, 2);
2389 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2390 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 1U);
2391 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2392 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);
2394 path = dp_tunnel2->paths[2];
2395 KUNIT_ASSERT_EQ(test, path->path_length, 2);
2396 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2397 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 1U);
2398 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2399 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);
/*
 * Helper for tb_test_credit_alloc_all(): allocates a USB 3.x tunnel
 * from host port 12 (down) to dev port 16 (up) with no bandwidth limits
 * and verifies the credit allocation on both paths. Returns the
 * tunnel; the caller frees it.
 */
2404 static struct tb_tunnel *TB_TEST_USB3_TUNNEL(struct kunit *test,
2405 struct tb_switch *host, struct tb_switch *dev)
2407 struct tb_port *up, *down;
2408 struct tb_tunnel *usb3_tunnel;
2409 struct tb_path *path;
2411 down = &host->ports[12];
2412 up = &dev->ports[16];
2413 usb3_tunnel = tb_tunnel_alloc_usb3(NULL, up, down, 0, 0);
2414 KUNIT_ASSERT_NOT_NULL(test, usb3_tunnel);
2415 KUNIT_ASSERT_EQ(test, usb3_tunnel->npaths, (size_t)2);
/* First path: 7 credits device side, 14 host side. */
2417 path = usb3_tunnel->paths[0];
2418 KUNIT_ASSERT_EQ(test, path->path_length, 2);
2419 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2420 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
2421 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2422 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);
/* Second path: 7 credits on the first hop, 32 on the second. */
2424 path = usb3_tunnel->paths[1];
2425 KUNIT_ASSERT_EQ(test, path->path_length, 2);
2426 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2427 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
2428 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2429 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 32U);
/*
 * Helper for tb_test_credit_alloc_all(): allocates the first DMA tunnel
 * between the host NHI (port 7) and dev port 3 (parameters 8, 1, 8, 1)
 * and verifies it receives the full 14 buffers on each active hop.
 * Returns the tunnel; the caller frees it.
 */
2434 static struct tb_tunnel *TB_TEST_DMA_TUNNEL1(struct kunit *test,
2435 struct tb_switch *host, struct tb_switch *dev)
2437 struct tb_port *nhi, *port;
2438 struct tb_tunnel *dma_tunnel1;
2439 struct tb_path *path;
2441 nhi = &host->ports[7];
2442 port = &dev->ports[3];
2443 dma_tunnel1 = tb_tunnel_alloc_dma(NULL, nhi, port, 8, 1, 8, 1);
2444 KUNIT_ASSERT_NOT_NULL(test, dma_tunnel1);
2445 KUNIT_ASSERT_EQ(test, dma_tunnel1->npaths, (size_t)2);
2447 path = dma_tunnel1->paths[0];
2448 KUNIT_ASSERT_EQ(test, path->path_length, 2);
2449 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2450 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 14U);
2451 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2452 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);
2454 path = dma_tunnel1->paths[1];
2455 KUNIT_ASSERT_EQ(test, path->path_length, 2);
2456 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2457 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
2458 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2459 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);
/*
 * Helper for tb_test_credit_alloc_all(): allocates a second DMA tunnel
 * over the same NHI/adapter pair (parameters 9, 2, 9, 2) while
 * TB_TEST_DMA_TUNNEL1()'s tunnel is still alive, so only 1 buffer is
 * left for it on the shared hop. Returns the tunnel; the caller frees it.
 */
2464 static struct tb_tunnel *TB_TEST_DMA_TUNNEL2(struct kunit *test,
2465 struct tb_switch *host, struct tb_switch *dev)
2467 struct tb_port *nhi, *port;
2468 struct tb_tunnel *dma_tunnel2;
2469 struct tb_path *path;
2471 nhi = &host->ports[7];
2472 port = &dev->ports[3];
2473 dma_tunnel2 = tb_tunnel_alloc_dma(NULL, nhi, port, 9, 2, 9, 2);
2474 KUNIT_ASSERT_NOT_NULL(test, dma_tunnel2);
2475 KUNIT_ASSERT_EQ(test, dma_tunnel2->npaths, (size_t)2);
/* Only the single remaining buffer is available on the constrained hop. */
2477 path = dma_tunnel2->paths[0];
2478 KUNIT_ASSERT_EQ(test, path->path_length, 2);
2479 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2480 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 14U);
2481 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2482 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);
2484 path = dma_tunnel2->paths[1];
2485 KUNIT_ASSERT_EQ(test, path->path_length, 2);
2486 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2487 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
2488 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2489 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);
/*
 * End-to-end credit allocation test: brings up every tunnel type at the
 * same time via the TB_TEST_*_TUNNEL() helpers and then tears them
 * down in reverse order of creation.
 */
2494 static void tb_test_credit_alloc_all(struct kunit *test)
2496 struct tb_tunnel *pcie_tunnel, *dp_tunnel1, *dp_tunnel2, *usb3_tunnel;
2497 struct tb_tunnel *dma_tunnel1, *dma_tunnel2;
2498 struct tb_switch *host, *dev;
/*
2501 * Create PCIe, 2 x DP, USB 3.x and two DMA tunnels from host to
2502 * device. Expectation is that all these can be established with
2503 * the default credit allocation found in Intel hardware.
 */
2506 host = alloc_host_usb4(test);
2507 dev = alloc_dev_usb4(test, host, 0x1, true);
2509 pcie_tunnel = TB_TEST_PCIE_TUNNEL(test, host, dev);
2510 dp_tunnel1 = TB_TEST_DP_TUNNEL1(test, host, dev);
2511 dp_tunnel2 = TB_TEST_DP_TUNNEL2(test, host, dev);
2512 usb3_tunnel = TB_TEST_USB3_TUNNEL(test, host, dev);
2513 dma_tunnel1 = TB_TEST_DMA_TUNNEL1(test, host, dev);
2514 dma_tunnel2 = TB_TEST_DMA_TUNNEL2(test, host, dev);
/* Free in reverse order of creation. */
2516 tb_tunnel_free(dma_tunnel2);
2517 tb_tunnel_free(dma_tunnel1);
2518 tb_tunnel_free(usb3_tunnel);
2519 tb_tunnel_free(dp_tunnel2);
2520 tb_tunnel_free(dp_tunnel1);
2521 tb_tunnel_free(pcie_tunnel);
/*
 * Canned XDomain property block (root directory) used by the property
 * parse/format/copy tests below. It contains vendor/device properties
 * plus a "network" sub-directory with its own UUID and prtc* values.
 */
2524 static const u32 root_directory[] = {
2525 0x55584401, /* "UXD" v1 */
2526 0x00000018, /* Root directory length */
2527 0x76656e64, /* "vend" */
2528 0x6f726964, /* "orid" */
2529 0x76000001, /* "v" R 1 */
2530 0x00000a27, /* Immediate value, ! Vendor ID */
2531 0x76656e64, /* "vend" */
2532 0x6f726964, /* "orid" */
2533 0x74000003, /* "t" R 3 */
2534 0x0000001a, /* Text leaf offset, ("Apple Inc.") */
2535 0x64657669, /* "devi" */
2536 0x63656964, /* "ceid" */
2537 0x76000001, /* "v" R 1 */
2538 0x0000000a, /* Immediate value, ! Device ID */
2539 0x64657669, /* "devi" */
2540 0x63656964, /* "ceid" */
2541 0x74000003, /* "t" R 3 */
2542 0x0000001d, /* Text leaf offset, ("Macintosh") */
2543 0x64657669, /* "devi" */
2544 0x63657276, /* "cerv" */
2545 0x76000001, /* "v" R 1 */
2546 0x80000100, /* Immediate value, Device Revision */
2547 0x6e657477, /* "netw" */
2548 0x6f726b00, /* "ork" */
2549 0x44000014, /* "D" R 20 */
2550 0x00000021, /* Directory data offset, (Network Directory) */
2551 0x4170706c, /* "Appl" */
2552 0x6520496e, /* "e In" */
2553 0x632e0000, /* "c." ! */
2554 0x4d616369, /* "Maci" */
2555 0x6e746f73, /* "ntos" */
2556 0x68000000, /* "h" */
2557 0x00000000, /* padding */
2558 0xca8961c6, /* Directory UUID, Network Directory */
2559 0x9541ce1c, /* Directory UUID, Network Directory */
2560 0x5949b8bd, /* Directory UUID, Network Directory */
2561 0x4f5a5f2e, /* Directory UUID, Network Directory */
2562 0x70727463, /* "prtc" */
2563 0x69640000, /* "id" */
2564 0x76000001, /* "v" R 1 */
2565 0x00000001, /* Immediate value, Network Protocol ID */
2566 0x70727463, /* "prtc" */
2567 0x76657273, /* "vers" */
2568 0x76000001, /* "v" R 1 */
2569 0x00000001, /* Immediate value, Network Protocol Version */
2570 0x70727463, /* "prtc" */
2571 0x72657673, /* "revs" */
2572 0x76000001, /* "v" R 1 */
2573 0x00000001, /* Immediate value, Network Protocol Revision */
2574 0x70727463, /* "prtc" */
2575 0x73746e73, /* "stns" */
2576 0x76000001, /* "v" R 1 */
2577 0x00000000, /* Immediate value, Network Protocol Settings */
/*
 * UUID of the "network" sub-directory embedded in root_directory[]
 * above (same bytes as the four "Directory UUID" words, expressed in
 * UUID field order); tb_test_property_parse() compares against it.
 */
2580 static const uuid_t network_dir_uuid =
2581 UUID_INIT(0xc66189ca, 0x1cce, 0x4195,
2582 0xbd, 0xb8, 0x49, 0x59, 0x2e, 0x5f, 0x5a, 0x4f);
/*
 * Parses the canned root_directory[] block and verifies that the
 * expected properties can be found (and that lookups for absent keys
 * or wrong types return NULL), including the "network" sub-directory
 * and its UUID.
 */
2584 static void tb_test_property_parse(struct kunit *test)
2586 struct tb_property_dir *dir, *network_dir;
2587 struct tb_property *p;
2589 dir = tb_property_parse_dir(root_directory, ARRAY_SIZE(root_directory));
2590 KUNIT_ASSERT_NOT_NULL(test, dir);
/* A key that does not exist must not be found. */
2592 p = tb_property_find(dir, "foo", TB_PROPERTY_TYPE_TEXT);
2593 KUNIT_ASSERT_NULL(test, p);
/* vendorid exists both as a text property and as an immediate value. */
2595 p = tb_property_find(dir, "vendorid", TB_PROPERTY_TYPE_TEXT);
2596 KUNIT_ASSERT_NOT_NULL(test, p);
2597 KUNIT_EXPECT_STREQ(test, p->value.text, "Apple Inc.");
2599 p = tb_property_find(dir, "vendorid", TB_PROPERTY_TYPE_VALUE);
2600 KUNIT_ASSERT_NOT_NULL(test, p);
2601 KUNIT_EXPECT_EQ(test, p->value.immediate, 0xa27);
/* Likewise deviceid has both text and immediate forms. */
2603 p = tb_property_find(dir, "deviceid", TB_PROPERTY_TYPE_TEXT);
2604 KUNIT_ASSERT_NOT_NULL(test, p);
2605 KUNIT_EXPECT_STREQ(test, p->value.text, "Macintosh");
2607 p = tb_property_find(dir, "deviceid", TB_PROPERTY_TYPE_VALUE);
2608 KUNIT_ASSERT_NOT_NULL(test, p);
2609 KUNIT_EXPECT_EQ(test, p->value.immediate, 0xa);
2611 p = tb_property_find(dir, "missing", TB_PROPERTY_TYPE_DIRECTORY);
2612 KUNIT_ASSERT_NULL(test, p);
/* The "network" sub-directory must be present with the expected UUID. */
2614 p = tb_property_find(dir, "network", TB_PROPERTY_TYPE_DIRECTORY);
2615 KUNIT_ASSERT_NOT_NULL(test, p);
2617 network_dir = p->value.dir;
2618 KUNIT_EXPECT_TRUE(test, uuid_equal(network_dir->uuid, &network_dir_uuid));
2620 p = tb_property_find(network_dir, "prtcid", TB_PROPERTY_TYPE_VALUE);
2621 KUNIT_ASSERT_NOT_NULL(test, p);
2622 KUNIT_EXPECT_EQ(test, p->value.immediate, 0x1);
2624 p = tb_property_find(network_dir, "prtcvers", TB_PROPERTY_TYPE_VALUE);
2625 KUNIT_ASSERT_NOT_NULL(test, p);
2626 KUNIT_EXPECT_EQ(test, p->value.immediate, 0x1);
2628 p = tb_property_find(network_dir, "prtcrevs", TB_PROPERTY_TYPE_VALUE);
2629 KUNIT_ASSERT_NOT_NULL(test, p);
2630 KUNIT_EXPECT_EQ(test, p->value.immediate, 0x1);
2632 p = tb_property_find(network_dir, "prtcstns", TB_PROPERTY_TYPE_VALUE);
2633 KUNIT_ASSERT_NOT_NULL(test, p);
2634 KUNIT_EXPECT_EQ(test, p->value.immediate, 0x0);
/* Root-level keys must not be visible inside the sub-directory. */
2636 p = tb_property_find(network_dir, "deviceid", TB_PROPERTY_TYPE_VALUE);
2637 KUNIT_EXPECT_TRUE(test, !p);
2638 p = tb_property_find(network_dir, "deviceid", TB_PROPERTY_TYPE_TEXT);
2639 KUNIT_EXPECT_TRUE(test, !p);
2641 tb_property_free_dir(dir);
/*
 * Parses root_directory[] and formats it back into a property block,
 * verifying both the reported size (via the NULL/0 sizing call) and
 * that the formatted words match the original block exactly.
 */
2644 static void tb_test_property_format(struct kunit *test)
2646 struct tb_property_dir *dir;
2651 dir = tb_property_parse_dir(root_directory, ARRAY_SIZE(root_directory));
2652 KUNIT_ASSERT_NOT_NULL(test, dir);
/* A NULL buffer asks tb_property_format_dir() for the required size. */
2654 ret = tb_property_format_dir(dir, NULL, 0);
2655 KUNIT_ASSERT_EQ(test, ret, ARRAY_SIZE(root_directory));
2659 block = kunit_kzalloc(test, block_len * sizeof(u32), GFP_KERNEL);
2660 KUNIT_ASSERT_NOT_NULL(test, block);
2662 ret = tb_property_format_dir(dir, block, block_len);
2663 KUNIT_EXPECT_EQ(test, ret, 0);
/* Round trip must reproduce the original block word for word. */
2665 for (i = 0; i < ARRAY_SIZE(root_directory); i++)
2666 KUNIT_EXPECT_EQ(test, root_directory[i], block[i]);
2668 tb_property_free_dir(dir);
/*
 * Recursively asserts two property directories are equal: matching (or
 * equally absent) UUIDs, the same non-zero property count, and pairwise
 * identical key/type/length/value for every property, descending into
 * sub-directories via recursion.
 */
2671 static void compare_dirs(struct kunit *test, struct tb_property_dir *d1,
2672 struct tb_property_dir *d2)
2674 struct tb_property *p1, *p2, *tmp;
/* Directory UUIDs must match, or be missing from both. */
2678 KUNIT_ASSERT_NOT_NULL(test, d2->uuid);
2679 KUNIT_ASSERT_TRUE(test, uuid_equal(d1->uuid, d2->uuid));
2681 KUNIT_ASSERT_NULL(test, d2->uuid);
/* Count properties in both directories; each must be non-empty. */
2685 tb_property_for_each(d1, tmp)
2687 KUNIT_ASSERT_NE(test, n1, 0);
2690 tb_property_for_each(d2, tmp)
2692 KUNIT_ASSERT_NE(test, n2, 0);
2694 KUNIT_ASSERT_EQ(test, n1, n2);
/* Walk both property lists in lockstep and compare each pair. */
2698 for (i = 0; i < n1; i++) {
2699 p1 = tb_property_get_next(d1, p1);
2700 KUNIT_ASSERT_NOT_NULL(test, p1);
2701 p2 = tb_property_get_next(d2, p2);
2702 KUNIT_ASSERT_NOT_NULL(test, p2);
2704 KUNIT_ASSERT_STREQ(test, &p1->key[0], &p2->key[0]);
2705 KUNIT_ASSERT_EQ(test, p1->type, p2->type);
2706 KUNIT_ASSERT_EQ(test, p1->length, p2->length);
/* Sub-directories are compared recursively. */
2709 case TB_PROPERTY_TYPE_DIRECTORY:
2710 KUNIT_ASSERT_NOT_NULL(test, p1->value.dir);
2711 KUNIT_ASSERT_NOT_NULL(test, p2->value.dir);
2712 compare_dirs(test, p1->value.dir, p2->value.dir);
/* Raw data payloads must be byte-identical. */
2715 case TB_PROPERTY_TYPE_DATA:
2716 KUNIT_ASSERT_NOT_NULL(test, p1->value.data);
2717 KUNIT_ASSERT_NOT_NULL(test, p2->value.data);
2718 KUNIT_ASSERT_TRUE(test,
2719 !memcmp(p1->value.data, p2->value.data,
/* Text values compare as strings, immediates as integers. */
2724 case TB_PROPERTY_TYPE_TEXT:
2725 KUNIT_ASSERT_NOT_NULL(test, p1->value.text);
2726 KUNIT_ASSERT_NOT_NULL(test, p2->value.text);
2727 KUNIT_ASSERT_STREQ(test, p1->value.text, p2->value.text);
2730 case TB_PROPERTY_TYPE_VALUE:
2731 KUNIT_ASSERT_EQ(test, p1->value.immediate,
2732 p2->value.immediate);
/* Any other property type is a test failure. */
2735 KUNIT_FAIL(test, "unexpected property type");
/*
 * Duplicates a parsed directory with tb_property_copy_dir() and checks
 * the copy both structurally (via compare_dirs()) and by formatting it
 * back to a block that must match root_directory[] exactly.
 */
2741 static void tb_test_property_copy(struct kunit *test)
2743 struct tb_property_dir *src, *dst;
2747 src = tb_property_parse_dir(root_directory, ARRAY_SIZE(root_directory));
2748 KUNIT_ASSERT_NOT_NULL(test, src);
2750 dst = tb_property_copy_dir(src);
2751 KUNIT_ASSERT_NOT_NULL(test, dst);
2753 /* Compare the structures */
2754 compare_dirs(test, src, dst);
2756 /* Compare the resulting property block */
2757 ret = tb_property_format_dir(dst, NULL, 0);
2758 KUNIT_ASSERT_EQ(test, ret, ARRAY_SIZE(root_directory));
2760 block = kunit_kzalloc(test, sizeof(root_directory), GFP_KERNEL);
2761 KUNIT_ASSERT_NOT_NULL(test, block);
2763 ret = tb_property_format_dir(dst, block, ARRAY_SIZE(root_directory));
2764 KUNIT_EXPECT_TRUE(test, !ret);
/* The formatted copy must reproduce the original block word for word. */
2766 for (i = 0; i < ARRAY_SIZE(root_directory); i++)
2767 KUNIT_EXPECT_EQ(test, root_directory[i], block[i]);
2769 tb_property_free_dir(dst);
2770 tb_property_free_dir(src);
/* All test cases in the thunderbolt KUnit suite, run in this order. */
2773 static struct kunit_case tb_test_cases[] = {
2774 KUNIT_CASE(tb_test_path_basic),
2775 KUNIT_CASE(tb_test_path_not_connected_walk),
2776 KUNIT_CASE(tb_test_path_single_hop_walk),
2777 KUNIT_CASE(tb_test_path_daisy_chain_walk),
2778 KUNIT_CASE(tb_test_path_simple_tree_walk),
2779 KUNIT_CASE(tb_test_path_complex_tree_walk),
2780 KUNIT_CASE(tb_test_path_max_length_walk),
2781 KUNIT_CASE(tb_test_path_not_connected),
2782 KUNIT_CASE(tb_test_path_not_bonded_lane0),
2783 KUNIT_CASE(tb_test_path_not_bonded_lane1),
2784 KUNIT_CASE(tb_test_path_not_bonded_lane1_chain),
2785 KUNIT_CASE(tb_test_path_not_bonded_lane1_chain_reverse),
2786 KUNIT_CASE(tb_test_path_mixed_chain),
2787 KUNIT_CASE(tb_test_path_mixed_chain_reverse),
2788 KUNIT_CASE(tb_test_tunnel_pcie),
2789 KUNIT_CASE(tb_test_tunnel_dp),
2790 KUNIT_CASE(tb_test_tunnel_dp_chain),
2791 KUNIT_CASE(tb_test_tunnel_dp_tree),
2792 KUNIT_CASE(tb_test_tunnel_dp_max_length),
2793 KUNIT_CASE(tb_test_tunnel_port_on_path),
2794 KUNIT_CASE(tb_test_tunnel_usb3),
2795 KUNIT_CASE(tb_test_tunnel_dma),
2796 KUNIT_CASE(tb_test_tunnel_dma_rx),
2797 KUNIT_CASE(tb_test_tunnel_dma_tx),
2798 KUNIT_CASE(tb_test_tunnel_dma_chain),
2799 KUNIT_CASE(tb_test_tunnel_dma_match),
2800 KUNIT_CASE(tb_test_credit_alloc_legacy_not_bonded),
2801 KUNIT_CASE(tb_test_credit_alloc_legacy_bonded),
2802 KUNIT_CASE(tb_test_credit_alloc_pcie),
2803 KUNIT_CASE(tb_test_credit_alloc_without_dp),
2804 KUNIT_CASE(tb_test_credit_alloc_dp),
2805 KUNIT_CASE(tb_test_credit_alloc_usb3),
2806 KUNIT_CASE(tb_test_credit_alloc_dma),
2807 KUNIT_CASE(tb_test_credit_alloc_dma_multiple),
2808 KUNIT_CASE(tb_test_credit_alloc_all),
2809 KUNIT_CASE(tb_test_property_parse),
2810 KUNIT_CASE(tb_test_property_format),
2811 KUNIT_CASE(tb_test_property_copy),
/* Register the "thunderbolt" suite with the KUnit framework. */
2815 static struct kunit_suite tb_test_suite = {
2816 .name = "thunderbolt",
2817 .test_cases = tb_test_cases,
2820 kunit_test_suite(tb_test_suite);