1 // SPDX-License-Identifier: GPL-2.0
3 * Thunderbolt driver - switch/port utility functions
5 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
6 * Copyright (C) 2018, Intel Corporation
9 #include <linux/delay.h>
10 #include <linux/idr.h>
11 #include <linux/nvmem-provider.h>
12 #include <linux/pm_runtime.h>
13 #include <linux/sched/signal.h>
14 #include <linux/sizes.h>
15 #include <linux/slab.h>
16 #include <linux/module.h>
20 /* Switch NVM support */
24 struct nvm_auth_status {
25 struct list_head list;
30 static bool clx_enabled = true;
31 module_param_named(clx, clx_enabled, bool, 0444);
32 MODULE_PARM_DESC(clx, "allow low power states on the high-speed lanes (default: true)");
35 * Hold NVM authentication failure status per switch. This information
36 * needs to stay around even when the switch gets power cycled so we
39 static LIST_HEAD(nvm_auth_status_cache);
40 static DEFINE_MUTEX(nvm_auth_status_lock);
42 static struct nvm_auth_status *__nvm_get_auth_status(const struct tb_switch *sw)
44 struct nvm_auth_status *st;
46 list_for_each_entry(st, &nvm_auth_status_cache, list) {
47 if (uuid_equal(&st->uuid, sw->uuid))
54 static void nvm_get_auth_status(const struct tb_switch *sw, u32 *status)
56 struct nvm_auth_status *st;
58 mutex_lock(&nvm_auth_status_lock);
59 st = __nvm_get_auth_status(sw);
60 mutex_unlock(&nvm_auth_status_lock);
62 *status = st ? st->status : 0;
65 static void nvm_set_auth_status(const struct tb_switch *sw, u32 status)
67 struct nvm_auth_status *st;
69 if (WARN_ON(!sw->uuid))
72 mutex_lock(&nvm_auth_status_lock);
73 st = __nvm_get_auth_status(sw);
76 st = kzalloc(sizeof(*st), GFP_KERNEL);
80 memcpy(&st->uuid, sw->uuid, sizeof(st->uuid));
81 INIT_LIST_HEAD(&st->list);
82 list_add_tail(&st->list, &nvm_auth_status_cache);
87 mutex_unlock(&nvm_auth_status_lock);
90 static void nvm_clear_auth_status(const struct tb_switch *sw)
92 struct nvm_auth_status *st;
94 mutex_lock(&nvm_auth_status_lock);
95 st = __nvm_get_auth_status(sw);
100 mutex_unlock(&nvm_auth_status_lock);
103 static int nvm_validate_and_write(struct tb_switch *sw)
105 unsigned int image_size, hdr_size;
106 const u8 *buf = sw->nvm->buf;
113 image_size = sw->nvm->buf_data_size;
114 if (image_size < NVM_MIN_SIZE || image_size > NVM_MAX_SIZE)
118 * FARB pointer must point inside the image and must at least
119 * contain parts of the digital section we will be reading here.
121 hdr_size = (*(u32 *)buf) & 0xffffff;
122 if (hdr_size + NVM_DEVID + 2 >= image_size)
125 /* Digital section start should be aligned to 4k page */
126 if (!IS_ALIGNED(hdr_size, SZ_4K))
130 * Read digital section size and check that it also fits inside
133 ds_size = *(u16 *)(buf + hdr_size);
134 if (ds_size >= image_size)
137 if (!sw->safe_mode) {
141 * Make sure the device ID in the image matches the one
142 * we read from the switch config space.
144 device_id = *(u16 *)(buf + hdr_size + NVM_DEVID);
145 if (device_id != sw->config.device_id)
148 if (sw->generation < 3) {
149 /* Write CSS headers first */
150 ret = dma_port_flash_write(sw->dma_port,
151 DMA_PORT_CSS_ADDRESS, buf + NVM_CSS,
152 DMA_PORT_CSS_MAX_SIZE);
157 /* Skip headers in the image */
159 image_size -= hdr_size;
162 if (tb_switch_is_usb4(sw))
163 ret = usb4_switch_nvm_write(sw, 0, buf, image_size);
165 ret = dma_port_flash_write(sw->dma_port, 0, buf, image_size);
167 sw->nvm->flushed = true;
171 static int nvm_authenticate_host_dma_port(struct tb_switch *sw)
176 * Root switch NVM upgrade requires that we disconnect the
177 * existing paths first (in case it is not in safe mode
180 if (!sw->safe_mode) {
183 ret = tb_domain_disconnect_all_paths(sw->tb);
187 * The host controller goes away pretty soon after this if
188 * everything goes well so getting a timeout is expected.
190 ret = dma_port_flash_update_auth(sw->dma_port);
191 if (!ret || ret == -ETIMEDOUT)
195 * Any error from update auth operation requires power
196 * cycling of the host router.
198 tb_sw_warn(sw, "failed to authenticate NVM, power cycling\n");
199 if (dma_port_flash_update_auth_status(sw->dma_port, &status) > 0)
200 nvm_set_auth_status(sw, status);
204 * We can get out of safe mode by just power cycling the
207 dma_port_power_cycle(sw->dma_port);
211 static int nvm_authenticate_device_dma_port(struct tb_switch *sw)
213 int ret, retries = 10;
215 ret = dma_port_flash_update_auth(sw->dma_port);
221 /* Power cycle is required */
228 * Poll here for the authentication status. It takes some time
229 * for the device to respond (we get a timeout for a while). Once
230 * we get a response the device needs to be power cycled in order
231 * for the new NVM to be taken into use.
236 ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
237 if (ret < 0 && ret != -ETIMEDOUT)
241 tb_sw_warn(sw, "failed to authenticate NVM\n");
242 nvm_set_auth_status(sw, status);
245 tb_sw_info(sw, "power cycling the switch now\n");
246 dma_port_power_cycle(sw->dma_port);
256 static void nvm_authenticate_start_dma_port(struct tb_switch *sw)
258 struct pci_dev *root_port;
261 * During host router NVM upgrade we should not allow root port to
262 * go into D3cold because some root ports cannot trigger PME
263 * themselves. To be on the safe side keep the root port in D0 during
264 * the whole upgrade process.
266 root_port = pcie_find_root_port(sw->tb->nhi->pdev);
268 pm_runtime_get_noresume(&root_port->dev);
271 static void nvm_authenticate_complete_dma_port(struct tb_switch *sw)
273 struct pci_dev *root_port;
275 root_port = pcie_find_root_port(sw->tb->nhi->pdev);
277 pm_runtime_put(&root_port->dev);
280 static inline bool nvm_readable(struct tb_switch *sw)
282 if (tb_switch_is_usb4(sw)) {
284 * USB4 devices must support NVM operations but it is
285 * optional for hosts. Therefore we query the NVM sector
286 * size here and if the query succeeds assume NVM
287 * operations are implemented.
289 return usb4_switch_nvm_sector_size(sw) > 0;
292 /* Thunderbolt 2 and 3 devices support NVM through DMA port */
293 return !!sw->dma_port;
296 static inline bool nvm_upgradeable(struct tb_switch *sw)
298 if (sw->no_nvm_upgrade)
300 return nvm_readable(sw);
303 static inline int nvm_read(struct tb_switch *sw, unsigned int address,
304 void *buf, size_t size)
306 if (tb_switch_is_usb4(sw))
307 return usb4_switch_nvm_read(sw, address, buf, size);
308 return dma_port_flash_read(sw->dma_port, address, buf, size);
311 static int nvm_authenticate(struct tb_switch *sw, bool auth_only)
315 if (tb_switch_is_usb4(sw)) {
317 ret = usb4_switch_nvm_set_offset(sw, 0);
321 sw->nvm->authenticating = true;
322 return usb4_switch_nvm_authenticate(sw);
323 } else if (auth_only) {
327 sw->nvm->authenticating = true;
329 nvm_authenticate_start_dma_port(sw);
330 ret = nvm_authenticate_host_dma_port(sw);
332 ret = nvm_authenticate_device_dma_port(sw);
338 static int tb_switch_nvm_read(void *priv, unsigned int offset, void *val,
341 struct tb_nvm *nvm = priv;
342 struct tb_switch *sw = tb_to_switch(nvm->dev);
345 pm_runtime_get_sync(&sw->dev);
347 if (!mutex_trylock(&sw->tb->lock)) {
348 ret = restart_syscall();
352 ret = nvm_read(sw, offset, val, bytes);
353 mutex_unlock(&sw->tb->lock);
356 pm_runtime_mark_last_busy(&sw->dev);
357 pm_runtime_put_autosuspend(&sw->dev);
362 static int tb_switch_nvm_write(void *priv, unsigned int offset, void *val,
365 struct tb_nvm *nvm = priv;
366 struct tb_switch *sw = tb_to_switch(nvm->dev);
369 if (!mutex_trylock(&sw->tb->lock))
370 return restart_syscall();
373 * Since writing the NVM image might require some special steps,
374 * for example when CSS headers are written, we cache the image
375 * locally here and handle the special cases when the user asks
376 * us to authenticate the image.
378 ret = tb_nvm_write_buf(nvm, offset, val, bytes);
379 mutex_unlock(&sw->tb->lock);
384 static int tb_switch_nvm_add(struct tb_switch *sw)
390 if (!nvm_readable(sw))
394 * The NVM format of non-Intel hardware is not known so we
395 * currently restrict NVM upgrade to Intel hardware. We may
396 * relax this in the future when we learn other NVM formats.
398 if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL &&
399 sw->config.vendor_id != 0x8087) {
401 "NVM format of vendor %#x is not known, disabling NVM upgrade\n",
402 sw->config.vendor_id);
406 nvm = tb_nvm_alloc(&sw->dev);
411 * If the switch is in safe-mode the only accessible portion of
412 * the NVM is the non-active one where userspace is expected to
413 * write a new functional NVM image.
415 if (!sw->safe_mode) {
416 u32 nvm_size, hdr_size;
418 ret = nvm_read(sw, NVM_FLASH_SIZE, &val, sizeof(val));
422 hdr_size = sw->generation < 3 ? SZ_8K : SZ_16K;
423 nvm_size = (SZ_1M << (val & 7)) / 8;
424 nvm_size = (nvm_size - hdr_size) / 2;
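/*
 * Worked example (illustrative): if the size field reads 6, the flash
 * is SZ_1M << 6 = 64 Mbit = 8 MB. On a generation 3+ router the
 * active and non-active regions are then (8 MB - SZ_16K) / 2 each.
 */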
426 ret = nvm_read(sw, NVM_VERSION, &val, sizeof(val));
430 nvm->major = val >> 16;
431 nvm->minor = val >> 8;
433 ret = tb_nvm_add_active(nvm, nvm_size, tb_switch_nvm_read);
438 if (!sw->no_nvm_upgrade) {
439 ret = tb_nvm_add_non_active(nvm, NVM_MAX_SIZE,
440 tb_switch_nvm_write);
453 static void tb_switch_nvm_remove(struct tb_switch *sw)
463 /* Remove authentication status in case the switch is unplugged */
464 if (!nvm->authenticating)
465 nvm_clear_auth_status(sw);
470 /* port utility functions */
472 static const char *tb_port_type(const struct tb_regs_port_header *port)
474 switch (port->type >> 16) {
476 switch ((u8) port->type) {
501 static void tb_dump_port(struct tb *tb, const struct tb_port *port)
503 const struct tb_regs_port_header *regs = &port->config;
506 " Port %d: %x:%x (Revision: %d, TB Version: %d, Type: %s (%#x))\n",
507 regs->port_number, regs->vendor_id, regs->device_id,
508 regs->revision, regs->thunderbolt_version, tb_port_type(regs),
510 tb_dbg(tb, " Max hop id (in/out): %d/%d\n",
511 regs->max_in_hop_id, regs->max_out_hop_id);
512 tb_dbg(tb, " Max counters: %d\n", regs->max_counters);
513 tb_dbg(tb, " NFC Credits: %#x\n", regs->nfc_credits);
514 tb_dbg(tb, " Credits (total/control): %u/%u\n", port->total_credits,
519 * tb_port_state() - get connectedness state of a port
520 * @port: the port to check
522 * The port must have a TB_CAP_PHY (i.e. it should be a real port).
524 * Return: Returns an enum tb_port_state on success or an error code on failure.
526 int tb_port_state(struct tb_port *port)
528 struct tb_cap_phy phy;
530 if (port->cap_phy == 0) {
531 tb_port_WARN(port, "does not have a PHY\n");
534 res = tb_port_read(port, &phy, TB_CFG_PORT, port->cap_phy, 2);
541 * tb_wait_for_port() - wait for a port to become ready
542 * @port: Port to wait
543 * @wait_if_unplugged: Wait also when port is unplugged
545 * Wait up to 1 second for a port to reach state TB_PORT_UP. If
546 * wait_if_unplugged is set then we also wait if the port is in state
547 * TB_PORT_UNPLUGGED (it takes a while for the device to be registered after
548 * switch resume). Otherwise we only wait if a device is registered but the link
549 * has not yet been established.
551 * Return: Returns an error code on failure. Returns 0 if the port is not
552 * connected or failed to reach state TB_PORT_UP within one second. Returns 1
553 * if the port is connected and in state TB_PORT_UP.
555 int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged)
559 if (!port->cap_phy) {
560 tb_port_WARN(port, "does not have PHY\n");
563 if (tb_is_upstream_port(port)) {
564 tb_port_WARN(port, "is the upstream port\n");
569 state = tb_port_state(port);
572 if (state == TB_PORT_DISABLED) {
573 tb_port_dbg(port, "is disabled (state: 0)\n");
576 if (state == TB_PORT_UNPLUGGED) {
577 if (wait_if_unplugged) {
578 /* used during resume */
580 "is unplugged (state: 7), retrying...\n");
584 tb_port_dbg(port, "is unplugged (state: 7)\n");
587 if (state == TB_PORT_UP) {
588 tb_port_dbg(port, "is connected, link is up (state: 2)\n");
593 * After plug-in the state is TB_PORT_CONNECTING. Give it some
597 "is connected, link is not up (state: %d), retrying...\n",
602 "failed to reach state TB_PORT_UP. Ignoring port...\n");
607 * tb_port_add_nfc_credits() - add/remove non flow controlled credits to port
608 * @port: Port to add/remove NFC credits
609 * @credits: Credits to add/remove
611 * Change the number of NFC credits allocated to @port by @credits. To remove
612 * NFC credits pass a negative amount of credits.
614 * Return: Returns 0 on success or an error code on failure.
616 int tb_port_add_nfc_credits(struct tb_port *port, int credits)
620 if (credits == 0 || port->sw->is_unplugged)
624 * USB4 restricts programming NFC buffers to lane adapters only
625 * so skip other ports.
627 if (tb_switch_is_usb4(port->sw) && !tb_port_is_null(port))
630 nfc_credits = port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK;
632 credits = max_t(int, -nfc_credits, credits);
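/*
 * E.g. with 2 NFC credits currently allocated, a request to remove 5
 * (credits == -5) is clamped to -2 so the counter cannot underflow.
 */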
634 nfc_credits += credits;
636 tb_port_dbg(port, "adding %d NFC credits to %lu\n", credits,
637 port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK);
639 port->config.nfc_credits &= ~ADP_CS_4_NFC_BUFFERS_MASK;
640 port->config.nfc_credits |= nfc_credits;
642 return tb_port_write(port, &port->config.nfc_credits,
643 TB_CFG_PORT, ADP_CS_4, 1);
647 * tb_port_clear_counter() - clear a counter in TB_CFG_COUNTER
648 * @port: Port whose counters to clear
649 * @counter: Counter index to clear
651 * Return: Returns 0 on success or an error code on failure.
653 int tb_port_clear_counter(struct tb_port *port, int counter)
655 u32 zero[3] = { 0, 0, 0 };
656 tb_port_dbg(port, "clearing counter %d\n", counter);
657 return tb_port_write(port, zero, TB_CFG_COUNTERS, 3 * counter, 3);
661 * tb_port_unlock() - Unlock downstream port
662 * @port: Port to unlock
664 * Needed for USB4 but can be called for any CIO/USB4 ports. Makes the
665 * downstream router accessible for CM.
667 int tb_port_unlock(struct tb_port *port)
669 if (tb_switch_is_icm(port->sw))
671 if (!tb_port_is_null(port))
673 if (tb_switch_is_usb4(port->sw))
674 return usb4_port_unlock(port);
678 static int __tb_port_enable(struct tb_port *port, bool enable)
683 if (!tb_port_is_null(port))
686 ret = tb_port_read(port, &phy, TB_CFG_PORT,
687 port->cap_phy + LANE_ADP_CS_1, 1);
692 phy &= ~LANE_ADP_CS_1_LD;
694 phy |= LANE_ADP_CS_1_LD;
697 ret = tb_port_write(port, &phy, TB_CFG_PORT,
698 port->cap_phy + LANE_ADP_CS_1, 1);
702 tb_port_dbg(port, "lane %sabled\n", enable ? "en" : "dis");
707 * tb_port_enable() - Enable lane adapter
708 * @port: Port to enable (can be %NULL)
710 * This is used to enable lane 0 and 1 adapters.
712 int tb_port_enable(struct tb_port *port)
714 return __tb_port_enable(port, true);
718 * tb_port_disable() - Disable lane adapter
719 * @port: Port to disable (can be %NULL)
721 * This is used to disable lane 0 and 1 adapters.
723 int tb_port_disable(struct tb_port *port)
725 return __tb_port_enable(port, false);
729 * tb_init_port() - initialize a port
731 * This is a helper method for tb_switch_alloc. Does not check or initialize
732 * any downstream switches.
734 * Return: Returns 0 on success or an error code on failure.
736 static int tb_init_port(struct tb_port *port)
741 INIT_LIST_HEAD(&port->list);
743 /* Control adapter does not have configuration space */
747 res = tb_port_read(port, &port->config, TB_CFG_PORT, 0, 8);
749 if (res == -ENODEV) {
750 tb_dbg(port->sw->tb, " Port %d: not implemented\n",
752 port->disabled = true;
758 /* Port 0 is the switch itself and has no PHY. */
759 if (port->config.type == TB_TYPE_PORT) {
760 cap = tb_port_find_cap(port, TB_PORT_CAP_PHY);
765 tb_port_WARN(port, "non-switch port without a PHY\n");
767 cap = tb_port_find_cap(port, TB_PORT_CAP_USB4);
769 port->cap_usb4 = cap;
772 * For USB4 ports the buffers allocated for the control path
773 * can be read from the path config space. For legacy
774 * devices we use a hard-coded value.
776 if (tb_switch_is_usb4(port->sw)) {
777 struct tb_regs_hop hop;
779 if (!tb_port_read(port, &hop, TB_CFG_HOPS, 0, 2))
780 port->ctl_credits = hop.initial_credits;
782 if (!port->ctl_credits)
783 port->ctl_credits = 2;
786 cap = tb_port_find_cap(port, TB_PORT_CAP_ADAP);
788 port->cap_adap = cap;
791 port->total_credits =
792 (port->config.nfc_credits & ADP_CS_4_TOTAL_BUFFERS_MASK) >>
793 ADP_CS_4_TOTAL_BUFFERS_SHIFT;
795 tb_dump_port(port->sw->tb, port);
799 static int tb_port_alloc_hopid(struct tb_port *port, bool in, int min_hopid,
806 port_max_hopid = port->config.max_in_hop_id;
807 ida = &port->in_hopids;
809 port_max_hopid = port->config.max_out_hop_id;
810 ida = &port->out_hopids;
814 * NHI can use HopIDs 1-max; for other adapters HopIDs 0-7 are
817 if (!tb_port_is_nhi(port) && min_hopid < TB_PATH_MIN_HOPID)
818 min_hopid = TB_PATH_MIN_HOPID;
820 if (max_hopid < 0 || max_hopid > port_max_hopid)
821 max_hopid = port_max_hopid;
823 return ida_simple_get(ida, min_hopid, max_hopid + 1, GFP_KERNEL);
827 * tb_port_alloc_in_hopid() - Allocate input HopID from port
828 * @port: Port to allocate HopID for
829 * @min_hopid: Minimum acceptable input HopID
830 * @max_hopid: Maximum acceptable input HopID
832 * Return: HopID between @min_hopid and @max_hopid or negative errno in
835 int tb_port_alloc_in_hopid(struct tb_port *port, int min_hopid, int max_hopid)
837 return tb_port_alloc_hopid(port, true, min_hopid, max_hopid);
841 * tb_port_alloc_out_hopid() - Allocate output HopID from port
842 * @port: Port to allocate HopID for
843 * @min_hopid: Minimum acceptable output HopID
844 * @max_hopid: Maximum acceptable output HopID
846 * Return: HopID between @min_hopid and @max_hopid or negative errno in
849 int tb_port_alloc_out_hopid(struct tb_port *port, int min_hopid, int max_hopid)
851 return tb_port_alloc_hopid(port, false, min_hopid, max_hopid);
855 * tb_port_release_in_hopid() - Release allocated input HopID from port
856 * @port: Port whose HopID to release
857 * @hopid: HopID to release
859 void tb_port_release_in_hopid(struct tb_port *port, int hopid)
861 ida_simple_remove(&port->in_hopids, hopid);
865 * tb_port_release_out_hopid() - Release allocated output HopID from port
866 * @port: Port whose HopID to release
867 * @hopid: HopID to release
869 void tb_port_release_out_hopid(struct tb_port *port, int hopid)
871 ida_simple_remove(&port->out_hopids, hopid);
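/*
 * Illustrative sketch (not part of the driver): pairing a HopID
 * allocation with its release. Passing -1 as @max_hopid means "up to
 * the port maximum"; the function name is hypothetical.
 */
static int __maybe_unused example_use_in_hopid(struct tb_port *port)
{
	int hopid;

	hopid = tb_port_alloc_in_hopid(port, TB_PATH_MIN_HOPID, -1);
	if (hopid < 0)
		return hopid;

	/* ... program a path hop using @hopid here ... */

	tb_port_release_in_hopid(port, hopid);
	return 0;
}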
874 static inline bool tb_switch_is_reachable(const struct tb_switch *parent,
875 const struct tb_switch *sw)
877 u64 mask = (1ULL << parent->config.depth * 8) - 1;
878 return (tb_route(parent) & mask) == (tb_route(sw) & mask);
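/*
 * Example (illustrative): a parent at depth 1 with route 0x03 gives
 * mask = (1ULL << 8) - 1 = 0xff. A router with route 0x0503 matches
 * because 0x0503 & 0xff == 0x03 & 0xff, so it is reachable through
 * that parent.
 */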
882 * tb_next_port_on_path() - Return next port for given port on a path
883 * @start: Start port of the walk
884 * @end: End port of the walk
885 * @prev: Previous port (%NULL if this is the first)
887 * This function can be used to walk from one port to another if they
888 * are connected through zero or more switches. If @prev is a dual
889 * link port, the function follows that link and returns the other end on
892 * If the @end port has been reached, return %NULL.
894 * Domain tb->lock must be held when this function is called.
896 struct tb_port *tb_next_port_on_path(struct tb_port *start, struct tb_port *end,
897 struct tb_port *prev)
899 struct tb_port *next;
904 if (prev->sw == end->sw) {
910 if (tb_switch_is_reachable(prev->sw, end->sw)) {
911 next = tb_port_at(tb_route(end->sw), prev->sw);
912 /* Walk down the topology if next == prev */
914 (next == prev || next->dual_link_port == prev))
917 if (tb_is_upstream_port(prev)) {
920 next = tb_upstream_port(prev->sw);
922 * Keep the same link if prev and next are both
925 if (next->dual_link_port &&
926 next->link_nr != prev->link_nr) {
927 next = next->dual_link_port;
932 return next != prev ? next : NULL;
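/*
 * Illustrative sketch (not part of the driver): walking every port on
 * a path. Passing %NULL as @prev yields @start first and the walk ends
 * when the function returns %NULL. The function name is hypothetical.
 */
static void __maybe_unused example_walk_path(struct tb_port *src,
					     struct tb_port *dst)
{
	struct tb_port *p = NULL;

	while ((p = tb_next_port_on_path(src, dst, p)))
		tb_port_dbg(p, "is on the path\n");
}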
936 * tb_port_get_link_speed() - Get current link speed
937 * @port: Port to check (USB4 or CIO)
939 * Returns link speed in Gb/s or negative errno in case of failure.
941 int tb_port_get_link_speed(struct tb_port *port)
949 ret = tb_port_read(port, &val, TB_CFG_PORT,
950 port->cap_phy + LANE_ADP_CS_1, 1);
954 speed = (val & LANE_ADP_CS_1_CURRENT_SPEED_MASK) >>
955 LANE_ADP_CS_1_CURRENT_SPEED_SHIFT;
956 return speed == LANE_ADP_CS_1_CURRENT_SPEED_GEN3 ? 20 : 10;
960 * tb_port_get_link_width() - Get current link width
961 * @port: Port to check (USB4 or CIO)
963 * Returns link width. Return values can be 1 (Single-Lane), 2 (Dual-Lane)
964 * or negative errno in case of failure.
966 int tb_port_get_link_width(struct tb_port *port)
974 ret = tb_port_read(port, &val, TB_CFG_PORT,
975 port->cap_phy + LANE_ADP_CS_1, 1);
979 return (val & LANE_ADP_CS_1_CURRENT_WIDTH_MASK) >>
980 LANE_ADP_CS_1_CURRENT_WIDTH_SHIFT;
983 static bool tb_port_is_width_supported(struct tb_port *port, int width)
991 ret = tb_port_read(port, &phy, TB_CFG_PORT,
992 port->cap_phy + LANE_ADP_CS_0, 1);
996 widths = (phy & LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK) >>
997 LANE_ADP_CS_0_SUPPORTED_WIDTH_SHIFT;
999 return !!(widths & width);
1003 * tb_port_set_link_width() - Set target link width of the lane adapter
1004 * @port: Lane adapter
1005 * @width: Target link width (%1 or %2)
1007 * Sets the target link width of the lane adapter to @width. Does not
1008 * enable/disable lane bonding. For that call tb_port_set_lane_bonding().
1010 * Return: %0 in case of success and negative errno in case of error
1012 int tb_port_set_link_width(struct tb_port *port, unsigned int width)
1020 ret = tb_port_read(port, &val, TB_CFG_PORT,
1021 port->cap_phy + LANE_ADP_CS_1, 1);
1025 val &= ~LANE_ADP_CS_1_TARGET_WIDTH_MASK;
1028 val |= LANE_ADP_CS_1_TARGET_WIDTH_SINGLE <<
1029 LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;
1032 val |= LANE_ADP_CS_1_TARGET_WIDTH_DUAL <<
1033 LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;
1039 return tb_port_write(port, &val, TB_CFG_PORT,
1040 port->cap_phy + LANE_ADP_CS_1, 1);
1044 * tb_port_set_lane_bonding() - Enable/disable lane bonding
1045 * @port: Lane adapter
1046 * @bonding: enable/disable bonding
1048 * Enables or disables lane bonding. This should be called after target
1049 * link width has been set (tb_port_set_link_width()). Note that in most
1050 * cases one should use tb_port_lane_bonding_enable() instead to enable
1053 * As a side effect sets @port->bonding accordingly (and does the same
1056 * Return: %0 in case of success and negative errno in case of error
1058 int tb_port_set_lane_bonding(struct tb_port *port, bool bonding)
1066 ret = tb_port_read(port, &val, TB_CFG_PORT,
1067 port->cap_phy + LANE_ADP_CS_1, 1);
1072 val |= LANE_ADP_CS_1_LB;
1074 val &= ~LANE_ADP_CS_1_LB;
1076 ret = tb_port_write(port, &val, TB_CFG_PORT,
1077 port->cap_phy + LANE_ADP_CS_1, 1);
1082 * When lane 0 bonding is set it will affect lane 1 too so
1085 port->bonded = bonding;
1086 port->dual_link_port->bonded = bonding;
1092 * tb_port_lane_bonding_enable() - Enable bonding on port
1093 * @port: port to enable
1095 * Enable bonding by setting the link width of the port and the other
1096 * port in case of dual link port. Does not wait for the link to
1097 * actually reach the bonded state so caller needs to call
1098 * tb_port_wait_for_link_width() before enabling any paths through the
1099 * link to make sure the link is in expected state.
1101 * Return: %0 in case of success and negative errno in case of error
1103 int tb_port_lane_bonding_enable(struct tb_port *port)
1108 * Enable lane bonding for both links if not already enabled by,
1109 * for example, the boot firmware.
1111 ret = tb_port_get_link_width(port);
1113 ret = tb_port_set_link_width(port, 2);
1118 ret = tb_port_get_link_width(port->dual_link_port);
1120 ret = tb_port_set_link_width(port->dual_link_port, 2);
1125 ret = tb_port_set_lane_bonding(port, true);
1132 tb_port_set_link_width(port->dual_link_port, 1);
1134 tb_port_set_link_width(port, 1);
1139 * tb_port_lane_bonding_disable() - Disable bonding on port
1140 * @port: port to disable
1142 * Disable bonding by setting the link width of the port and the
1143 * other port in case of dual link port.
1145 void tb_port_lane_bonding_disable(struct tb_port *port)
1147 tb_port_set_lane_bonding(port, false);
1148 tb_port_set_link_width(port->dual_link_port, 1);
1149 tb_port_set_link_width(port, 1);
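/*
 * Illustrative sketch (not part of the driver): the bonding contract
 * described above - enable bonding, wait for the width, then re-read
 * the credits. The 100 ms timeout is an assumption and error
 * unwinding is trimmed for brevity.
 */
static int __maybe_unused example_bond_lanes(struct tb_port *up)
{
	int ret;

	ret = tb_port_lane_bonding_enable(up);
	if (ret)
		return ret;

	ret = tb_port_wait_for_link_width(up, 2, 100);
	if (ret) {
		tb_port_lane_bonding_disable(up);
		return ret;
	}

	return tb_port_update_credits(up);
}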
1153 * tb_port_wait_for_link_width() - Wait until link reaches specific width
1154 * @port: Port to wait for
1155 * @width: Expected link width (%1 or %2)
1156 * @timeout_msec: Timeout in ms how long to wait
1158 * Should be used after both ends of the link have been bonded (or
1159 * bonding has been disabled) to wait until the link actually reaches
1160 * the expected state. Returns %-ETIMEDOUT if the @width was not reached
1161 * within the given timeout, %0 if it did.
1163 int tb_port_wait_for_link_width(struct tb_port *port, int width,
1166 ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
1170 ret = tb_port_get_link_width(port);
1173 * Sometimes we get port locked error when
1174 * polling the lanes so we can ignore it and
1179 } else if (ret == width) {
1183 usleep_range(1000, 2000);
1184 } while (ktime_before(ktime_get(), timeout));
1189 static int tb_port_do_update_credits(struct tb_port *port)
1194 ret = tb_port_read(port, &nfc_credits, TB_CFG_PORT, ADP_CS_4, 1);
1198 if (nfc_credits != port->config.nfc_credits) {
1201 total = (nfc_credits & ADP_CS_4_TOTAL_BUFFERS_MASK) >>
1202 ADP_CS_4_TOTAL_BUFFERS_SHIFT;
1204 tb_port_dbg(port, "total credits changed %u -> %u\n",
1205 port->total_credits, total);
1207 port->config.nfc_credits = nfc_credits;
1208 port->total_credits = total;
1215 * tb_port_update_credits() - Re-read port total credits
1216 * @port: Port to update
1218 * After the link is bonded (or bonding was disabled) the port total
1219 * credits may change, so this function needs to be called to re-read
1220 * the credits. It also updates the second lane adapter.
1222 int tb_port_update_credits(struct tb_port *port)
1226 ret = tb_port_do_update_credits(port);
1229 return tb_port_do_update_credits(port->dual_link_port);
1232 static int __tb_port_pm_secondary_set(struct tb_port *port, bool secondary)
1237 ret = tb_port_read(port, &phy, TB_CFG_PORT,
1238 port->cap_phy + LANE_ADP_CS_1, 1);
1243 phy |= LANE_ADP_CS_1_PMS;
1245 phy &= ~LANE_ADP_CS_1_PMS;
1247 return tb_port_write(port, &phy, TB_CFG_PORT,
1248 port->cap_phy + LANE_ADP_CS_1, 1);
1251 static int tb_port_pm_secondary_enable(struct tb_port *port)
1253 return __tb_port_pm_secondary_set(port, true);
1256 static int tb_port_pm_secondary_disable(struct tb_port *port)
1258 return __tb_port_pm_secondary_set(port, false);
1261 /* Called for USB4 or Titan Ridge routers only */
1262 static bool tb_port_clx_supported(struct tb_port *port, enum tb_clx clx)
1267 /* Don't enable CLx in case of two single-lane links */
1268 if (!port->bonded && port->dual_link_port)
1271 /* Don't enable CLx in case of inter-domain link */
1275 if (tb_switch_is_usb4(port->sw)) {
1276 if (!usb4_port_clx_supported(port))
1278 } else if (!tb_lc_is_clx_supported(port)) {
1284 /* CL0s and CL1 are enabled and supported together */
1285 mask = LANE_ADP_CS_0_CL0S_SUPPORT | LANE_ADP_CS_0_CL1_SUPPORT;
1288 /* For now we support only CL0s and CL1, not CL2 */
1294 ret = tb_port_read(port, &val, TB_CFG_PORT,
1295 port->cap_phy + LANE_ADP_CS_0, 1);
1299 return !!(val & mask);
1302 static int __tb_port_clx_set(struct tb_port *port, enum tb_clx clx, bool enable)
1307 /* CL0s and CL1 are enabled and supported together */
1309 mask = LANE_ADP_CS_1_CL0S_ENABLE | LANE_ADP_CS_1_CL1_ENABLE;
1311 /* For now we support only CL0s and CL1, not CL2 */
1314 ret = tb_port_read(port, &phy, TB_CFG_PORT,
1315 port->cap_phy + LANE_ADP_CS_1, 1);
1324 return tb_port_write(port, &phy, TB_CFG_PORT,
1325 port->cap_phy + LANE_ADP_CS_1, 1);
1328 static int tb_port_clx_disable(struct tb_port *port, enum tb_clx clx)
1330 return __tb_port_clx_set(port, clx, false);
1333 static int tb_port_clx_enable(struct tb_port *port, enum tb_clx clx)
1335 return __tb_port_clx_set(port, clx, true);
1338 static int tb_port_start_lane_initialization(struct tb_port *port)
1342 if (tb_switch_is_usb4(port->sw))
1345 ret = tb_lc_start_lane_initialization(port);
1346 return ret == -EINVAL ? 0 : ret;
1350 * Returns true if the port had something (router, XDomain) connected
1353 static bool tb_port_resume(struct tb_port *port)
1355 bool has_remote = tb_port_has_remote(port);
1358 usb4_port_device_resume(port->usb4);
1359 } else if (!has_remote) {
1361 * For disconnected downstream lane adapters start lane
1362 * initialization now so we detect future connects.
1364 * For XDomain start the lane initialization now so the
1365 * link gets re-established.
1367 * This is only needed for non-USB4 ports.
1369 if (!tb_is_upstream_port(port) || port->xdomain)
1370 tb_port_start_lane_initialization(port);
1373 return has_remote || port->xdomain;
1377 * tb_port_is_enabled() - Is the adapter port enabled
1378 * @port: Port to check
1380 bool tb_port_is_enabled(struct tb_port *port)
1382 switch (port->config.type) {
1383 case TB_TYPE_PCIE_UP:
1384 case TB_TYPE_PCIE_DOWN:
1385 return tb_pci_port_is_enabled(port);
1387 case TB_TYPE_DP_HDMI_IN:
1388 case TB_TYPE_DP_HDMI_OUT:
1389 return tb_dp_port_is_enabled(port);
1391 case TB_TYPE_USB3_UP:
1392 case TB_TYPE_USB3_DOWN:
1393 return tb_usb3_port_is_enabled(port);
1401 * tb_usb3_port_is_enabled() - Is the USB3 adapter port enabled
1402 * @port: USB3 adapter port to check
1404 bool tb_usb3_port_is_enabled(struct tb_port *port)
1408 if (tb_port_read(port, &data, TB_CFG_PORT,
1409 port->cap_adap + ADP_USB3_CS_0, 1))
1412 return !!(data & ADP_USB3_CS_0_PE);
1416 * tb_usb3_port_enable() - Enable USB3 adapter port
1417 * @port: USB3 adapter port to enable
1418 * @enable: Enable/disable the USB3 adapter
1420 int tb_usb3_port_enable(struct tb_port *port, bool enable)
1422 u32 word = enable ? (ADP_USB3_CS_0_PE | ADP_USB3_CS_0_V)
1425 if (!port->cap_adap)
1427 return tb_port_write(port, &word, TB_CFG_PORT,
1428 port->cap_adap + ADP_USB3_CS_0, 1);
1432 * tb_pci_port_is_enabled() - Is the PCIe adapter port enabled
1433 * @port: PCIe port to check
1435 bool tb_pci_port_is_enabled(struct tb_port *port)
1439 if (tb_port_read(port, &data, TB_CFG_PORT,
1440 port->cap_adap + ADP_PCIE_CS_0, 1))
1443 return !!(data & ADP_PCIE_CS_0_PE);
1447 * tb_pci_port_enable() - Enable PCIe adapter port
1448 * @port: PCIe port to enable
1449 * @enable: Enable/disable the PCIe adapter
1451 int tb_pci_port_enable(struct tb_port *port, bool enable)
1453 u32 word = enable ? ADP_PCIE_CS_0_PE : 0x0;
1454 if (!port->cap_adap)
1456 return tb_port_write(port, &word, TB_CFG_PORT,
1457 port->cap_adap + ADP_PCIE_CS_0, 1);
1461 * tb_dp_port_hpd_is_active() - Is HPD already active
1462 * @port: DP out port to check
1464 * Checks if the DP OUT adapter port has the HDP bit already set.
1466 int tb_dp_port_hpd_is_active(struct tb_port *port)
1471 ret = tb_port_read(port, &data, TB_CFG_PORT,
1472 port->cap_adap + ADP_DP_CS_2, 1);
1476 return !!(data & ADP_DP_CS_2_HDP);
1480 * tb_dp_port_hpd_clear() - Clear HPD from DP IN port
1481 * @port: Port to clear HPD
1483 * If the DP IN port has the HDP bit set, this function can be used to clear it.
1485 int tb_dp_port_hpd_clear(struct tb_port *port)
1490 ret = tb_port_read(port, &data, TB_CFG_PORT,
1491 port->cap_adap + ADP_DP_CS_3, 1);
1495 data |= ADP_DP_CS_3_HDPC;
1496 return tb_port_write(port, &data, TB_CFG_PORT,
1497 port->cap_adap + ADP_DP_CS_3, 1);
1501 * tb_dp_port_set_hops() - Set video/aux Hop IDs for DP port
1502 * @port: DP IN/OUT port to set hops
1503 * @video: Video Hop ID
1504 * @aux_tx: AUX TX Hop ID
1505 * @aux_rx: AUX RX Hop ID
1507 * Programs specified Hop IDs for DP IN/OUT port. Can be called for USB4
1508 * router DP adapters too but does not program the values as the fields
1511 int tb_dp_port_set_hops(struct tb_port *port, unsigned int video,
1512 unsigned int aux_tx, unsigned int aux_rx)
1517 if (tb_switch_is_usb4(port->sw))
1520 ret = tb_port_read(port, data, TB_CFG_PORT,
1521 port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
1525 data[0] &= ~ADP_DP_CS_0_VIDEO_HOPID_MASK;
1526 data[1] &= ~ADP_DP_CS_1_AUX_RX_HOPID_MASK;
1527 data[1] &= ~ADP_DP_CS_1_AUX_TX_HOPID_MASK;
1529 data[0] |= (video << ADP_DP_CS_0_VIDEO_HOPID_SHIFT) &
1530 ADP_DP_CS_0_VIDEO_HOPID_MASK;
1531 data[1] |= aux_tx & ADP_DP_CS_1_AUX_TX_HOPID_MASK;
1532 data[1] |= (aux_rx << ADP_DP_CS_1_AUX_RX_HOPID_SHIFT) &
1533 ADP_DP_CS_1_AUX_RX_HOPID_MASK;
1535 return tb_port_write(port, data, TB_CFG_PORT,
1536 port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
1540 * tb_dp_port_is_enabled() - Is DP adapter port enabled
1541 * @port: DP adapter port to check
1543 bool tb_dp_port_is_enabled(struct tb_port *port)
1547 if (tb_port_read(port, data, TB_CFG_PORT, port->cap_adap + ADP_DP_CS_0,
1551 return !!(data[0] & (ADP_DP_CS_0_VE | ADP_DP_CS_0_AE));
1555 * tb_dp_port_enable() - Enables/disables DP paths of a port
1556 * @port: DP IN/OUT port
1557 * @enable: Enable/disable DP path
1559 * Once Hop IDs are programmed DP paths can be enabled or disabled by
1560 * calling this function.
1562 int tb_dp_port_enable(struct tb_port *port, bool enable)
1567 ret = tb_port_read(port, data, TB_CFG_PORT,
1568 port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
1573 data[0] |= ADP_DP_CS_0_VE | ADP_DP_CS_0_AE;
1575 data[0] &= ~(ADP_DP_CS_0_VE | ADP_DP_CS_0_AE);
1577 return tb_port_write(port, data, TB_CFG_PORT,
1578 port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
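/*
 * Illustrative sketch (not part of the driver): programming a DP IN
 * adapter as described above - HopIDs first, then the paths. The HopID
 * values and the function name are hypothetical.
 */
static int __maybe_unused example_enable_dp_paths(struct tb_port *in)
{
	int ret;

	ret = tb_dp_port_set_hops(in, 9, 8, 8);
	if (ret)
		return ret;

	return tb_dp_port_enable(in, true);
}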
1581 /* switch utility functions */
1583 static const char *tb_switch_generation_name(const struct tb_switch *sw)
1585 switch (sw->generation) {
1587 return "Thunderbolt 1";
1589 return "Thunderbolt 2";
1591 return "Thunderbolt 3";
1599 static void tb_dump_switch(const struct tb *tb, const struct tb_switch *sw)
1601 const struct tb_regs_switch_header *regs = &sw->config;
1603 tb_dbg(tb, " %s Switch: %x:%x (Revision: %d, TB Version: %d)\n",
1604 tb_switch_generation_name(sw), regs->vendor_id, regs->device_id,
1605 regs->revision, regs->thunderbolt_version);
1606 tb_dbg(tb, " Max Port Number: %d\n", regs->max_port_number);
1607 tb_dbg(tb, " Config:\n");
1609 " Upstream Port Number: %d Depth: %d Route String: %#llx Enabled: %d, PlugEventsDelay: %dms\n",
1610 regs->upstream_port_number, regs->depth,
1611 (((u64) regs->route_hi) << 32) | regs->route_lo,
1612 regs->enabled, regs->plug_events_delay);
1613 tb_dbg(tb, " unknown1: %#x unknown4: %#x\n",
1614 regs->__unknown1, regs->__unknown4);
1618 * tb_switch_reset() - reconfigure route, enable and send TB_CFG_PKG_RESET
1619 * @sw: Switch to reset
1621 * Return: Returns 0 on success or an error code on failure.
1623 int tb_switch_reset(struct tb_switch *sw)
1625 struct tb_cfg_result res;
1627 if (sw->generation > 1)
1630 tb_sw_dbg(sw, "resetting switch\n");
1632 res.err = tb_sw_write(sw, ((u32 *) &sw->config) + 2,
1633 TB_CFG_SWITCH, 2, 2);
1636 res = tb_cfg_reset(sw->tb->ctl, tb_route(sw));
1643 * tb_switch_wait_for_bit() - Wait for specified value of bits in offset
1644 * @sw: Router to read the offset value from
1645 * @offset: Offset in the router config space to read from
1646 * @bit: Bit mask in the offset to wait for
1647 * @value: Value of the bits to wait for
1648 * @timeout_msec: Timeout in ms how long to wait
1650 * Wait until the specified bits at the specified offset reach the specified value.
1651 * Returns %0 in case of success, %-ETIMEDOUT if the @value was not reached
1652 * within the given timeout or a negative errno in case of failure.
1654 int tb_switch_wait_for_bit(struct tb_switch *sw, u32 offset, u32 bit,
1655 u32 value, int timeout_msec)
1657 ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
1663 ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1);
1667 if ((val & bit) == value)
1670 usleep_range(50, 100);
1671 } while (ktime_before(ktime_get(), timeout));
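/*
 * Illustrative usage (hypothetical offset and bit names): poll an
 * example status bit for up to 250 ms:
 *
 *	ret = tb_switch_wait_for_bit(sw, EXAMPLE_OFFSET, EXAMPLE_READY,
 *				     EXAMPLE_READY, 250);
 */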
1677 * tb_plug_events_active() - enable/disable plug events on a switch
1679 * Also configures a sane plug_events_delay of 255ms.
1681 * Return: Returns 0 on success or an error code on failure.
1683 static int tb_plug_events_active(struct tb_switch *sw, bool active)
1688 if (tb_switch_is_icm(sw) || tb_switch_is_usb4(sw))
1691 sw->config.plug_events_delay = 0xff;
1692 res = tb_sw_write(sw, ((u32 *) &sw->config) + 4, TB_CFG_SWITCH, 4, 1);
1696 res = tb_sw_read(sw, &data, TB_CFG_SWITCH, sw->cap_plug_events + 1, 1);
1701 data = data & 0xFFFFFF83;
1702 switch (sw->config.device_id) {
1703 case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
1704 case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE:
1705 case PCI_DEVICE_ID_INTEL_PORT_RIDGE:
1709 * Skip Alpine Ridge, it needs to have vendor
1710 * specific USB hotplug event enabled for the
1711 * internal xHCI to work.
1713 if (!tb_switch_is_alpine_ridge(sw))
1714 data |= TB_PLUG_EVENTS_USB_DISABLE;
1719 return tb_sw_write(sw, &data, TB_CFG_SWITCH,
1720 sw->cap_plug_events + 1, 1);
1723 static ssize_t authorized_show(struct device *dev,
1724 struct device_attribute *attr,
1727 struct tb_switch *sw = tb_to_switch(dev);
1729 return sprintf(buf, "%u\n", sw->authorized);
1732 static int disapprove_switch(struct device *dev, void *not_used)
1734 char *envp[] = { "AUTHORIZED=0", NULL };
1735 struct tb_switch *sw;
1737 sw = tb_to_switch(dev);
1738 if (sw && sw->authorized) {
1741 /* First children */
1742 ret = device_for_each_child_reverse(&sw->dev, NULL, disapprove_switch);
1746 ret = tb_domain_disapprove_switch(sw->tb, sw);
1751 kobject_uevent_env(&sw->dev.kobj, KOBJ_CHANGE, envp);
1757 static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val)
1759 char envp_string[13];
1761 char *envp[] = { envp_string, NULL };
1763 if (!mutex_trylock(&sw->tb->lock))
1764 return restart_syscall();
1766 if (!!sw->authorized == !!val)
1770 /* Disapprove switch */
1773 ret = disapprove_switch(&sw->dev, NULL);
1778 /* Approve switch */
1781 ret = tb_domain_approve_switch_key(sw->tb, sw);
1783 ret = tb_domain_approve_switch(sw->tb, sw);
1786 /* Challenge switch */
1789 ret = tb_domain_challenge_switch_key(sw->tb, sw);
1797 sw->authorized = val;
1799 * Notify userspace of the status change and the new value of
1800 * /sys/bus/thunderbolt/devices/.../authorized.
1802 sprintf(envp_string, "AUTHORIZED=%u", sw->authorized);
1803 kobject_uevent_env(&sw->dev.kobj, KOBJ_CHANGE, envp);
1807 mutex_unlock(&sw->tb->lock);
1811 static ssize_t authorized_store(struct device *dev,
1812 struct device_attribute *attr,
1813 const char *buf, size_t count)
1815 struct tb_switch *sw = tb_to_switch(dev);
1819 ret = kstrtouint(buf, 0, &val);
1825 pm_runtime_get_sync(&sw->dev);
1826 ret = tb_switch_set_authorized(sw, val);
1827 pm_runtime_mark_last_busy(&sw->dev);
1828 pm_runtime_put_autosuspend(&sw->dev);
1830 return ret ? ret : count;
1832 static DEVICE_ATTR_RW(authorized);
1834 static ssize_t boot_show(struct device *dev, struct device_attribute *attr,
1837 struct tb_switch *sw = tb_to_switch(dev);
1839 return sprintf(buf, "%u\n", sw->boot);
1841 static DEVICE_ATTR_RO(boot);
1843 static ssize_t device_show(struct device *dev, struct device_attribute *attr,
1846 struct tb_switch *sw = tb_to_switch(dev);
1848 return sprintf(buf, "%#x\n", sw->device);
1850 static DEVICE_ATTR_RO(device);
1853 device_name_show(struct device *dev, struct device_attribute *attr, char *buf)
1855 struct tb_switch *sw = tb_to_switch(dev);
1857 return sprintf(buf, "%s\n", sw->device_name ? sw->device_name : "");
1859 static DEVICE_ATTR_RO(device_name);
1862 generation_show(struct device *dev, struct device_attribute *attr, char *buf)
1864 struct tb_switch *sw = tb_to_switch(dev);
1866 return sprintf(buf, "%u\n", sw->generation);
1868 static DEVICE_ATTR_RO(generation);
1870 static ssize_t key_show(struct device *dev, struct device_attribute *attr,
1873 struct tb_switch *sw = tb_to_switch(dev);
1876 if (!mutex_trylock(&sw->tb->lock))
1877 return restart_syscall();
1880 ret = sprintf(buf, "%*phN\n", TB_SWITCH_KEY_SIZE, sw->key);
1882 ret = sprintf(buf, "\n");
1884 mutex_unlock(&sw->tb->lock);
1888 static ssize_t key_store(struct device *dev, struct device_attribute *attr,
1889 const char *buf, size_t count)
1891 struct tb_switch *sw = tb_to_switch(dev);
1892 u8 key[TB_SWITCH_KEY_SIZE];
1893 ssize_t ret = count;
1896 if (!strcmp(buf, "\n"))
1898 else if (hex2bin(key, buf, sizeof(key)))
1901 if (!mutex_trylock(&sw->tb->lock))
1902 return restart_syscall();
1904 if (sw->authorized) {
1911 sw->key = kmemdup(key, sizeof(key), GFP_KERNEL);
1917 mutex_unlock(&sw->tb->lock);
1920 static DEVICE_ATTR(key, 0600, key_show, key_store);
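/*
 * Userspace sketch (illustrative, device path hypothetical): the key
 * is 32 bytes written as hex, and writing an empty line clears a
 * previously stored key:
 *
 *	# openssl rand -hex 32 > /sys/bus/thunderbolt/devices/0-1/key
 */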
1922 static ssize_t speed_show(struct device *dev, struct device_attribute *attr,
1925 struct tb_switch *sw = tb_to_switch(dev);
1927 return sprintf(buf, "%u.0 Gb/s\n", sw->link_speed);
1931 * Currently all lanes must run at the same speed but we expose here
1932 * both directions to allow possible asymmetric links in the future.
1934 static DEVICE_ATTR(rx_speed, 0444, speed_show, NULL);
1935 static DEVICE_ATTR(tx_speed, 0444, speed_show, NULL);
1937 static ssize_t lanes_show(struct device *dev, struct device_attribute *attr,
1940 struct tb_switch *sw = tb_to_switch(dev);
1942 return sprintf(buf, "%u\n", sw->link_width);
1946 * Currently the link has the same number of lanes in both directions (1 or 2)
1947 * but we expose them separately to allow possible asymmetric links in the future.
1949 static DEVICE_ATTR(rx_lanes, 0444, lanes_show, NULL);
1950 static DEVICE_ATTR(tx_lanes, 0444, lanes_show, NULL);
1952 static ssize_t nvm_authenticate_show(struct device *dev,
1953 struct device_attribute *attr, char *buf)
1955 struct tb_switch *sw = tb_to_switch(dev);
1958 nvm_get_auth_status(sw, &status);
1959 return sprintf(buf, "%#x\n", status);
1962 static ssize_t nvm_authenticate_sysfs(struct device *dev, const char *buf,
1965 struct tb_switch *sw = tb_to_switch(dev);
1968 pm_runtime_get_sync(&sw->dev);
1970 if (!mutex_trylock(&sw->tb->lock)) {
1971 ret = restart_syscall();
1975 /* If NVMem devices are not yet added */
1981 ret = kstrtoint(buf, 10, &val);
1985 /* Always clear the authentication status */
1986 nvm_clear_auth_status(sw);
1989 if (val == AUTHENTICATE_ONLY) {
1993 ret = nvm_authenticate(sw, true);
1995 if (!sw->nvm->flushed) {
1996 if (!sw->nvm->buf) {
2001 ret = nvm_validate_and_write(sw);
2002 if (ret || val == WRITE_ONLY)
2005 if (val == WRITE_AND_AUTHENTICATE) {
2007 ret = tb_lc_force_power(sw);
2009 ret = nvm_authenticate(sw, false);
2015 mutex_unlock(&sw->tb->lock);
2017 pm_runtime_mark_last_busy(&sw->dev);
2018 pm_runtime_put_autosuspend(&sw->dev);
2023 static ssize_t nvm_authenticate_store(struct device *dev,
2024 struct device_attribute *attr, const char *buf, size_t count)
2026 int ret = nvm_authenticate_sysfs(dev, buf, false);
2031 static DEVICE_ATTR_RW(nvm_authenticate);
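/*
 * Userspace sketch of the NVM upgrade flow (illustrative, device path
 * hypothetical): write the image to the non-active NVMem device and
 * then trigger the authentication; reading the attribute back reports
 * the status of the last operation (0x0 on success):
 *
 *	# dd if=image.nvm of=/sys/bus/thunderbolt/devices/0-1/nvm_non_active0/nvmem
 *	# echo 1 > /sys/bus/thunderbolt/devices/0-1/nvm_authenticate
 */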
2033 static ssize_t nvm_authenticate_on_disconnect_show(struct device *dev,
2034 struct device_attribute *attr, char *buf)
2036 return nvm_authenticate_show(dev, attr, buf);
2039 static ssize_t nvm_authenticate_on_disconnect_store(struct device *dev,
2040 struct device_attribute *attr, const char *buf, size_t count)
2044 ret = nvm_authenticate_sysfs(dev, buf, true);
2045 return ret ? ret : count;
2047 static DEVICE_ATTR_RW(nvm_authenticate_on_disconnect);
2049 static ssize_t nvm_version_show(struct device *dev,
2050 struct device_attribute *attr, char *buf)
2052 struct tb_switch *sw = tb_to_switch(dev);
2055 if (!mutex_trylock(&sw->tb->lock))
2056 return restart_syscall();
2063 ret = sprintf(buf, "%x.%x\n", sw->nvm->major, sw->nvm->minor);
2065 mutex_unlock(&sw->tb->lock);
2069 static DEVICE_ATTR_RO(nvm_version);
2071 static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
2074 struct tb_switch *sw = tb_to_switch(dev);
2076 return sprintf(buf, "%#x\n", sw->vendor);
2078 static DEVICE_ATTR_RO(vendor);
2081 vendor_name_show(struct device *dev, struct device_attribute *attr, char *buf)
2083 struct tb_switch *sw = tb_to_switch(dev);
2085 return sprintf(buf, "%s\n", sw->vendor_name ? sw->vendor_name : "");
2087 static DEVICE_ATTR_RO(vendor_name);
2089 static ssize_t unique_id_show(struct device *dev, struct device_attribute *attr,
2092 struct tb_switch *sw = tb_to_switch(dev);
2094 return sprintf(buf, "%pUb\n", sw->uuid);
2096 static DEVICE_ATTR_RO(unique_id);
2098 static struct attribute *switch_attrs[] = {
2099 &dev_attr_authorized.attr,
2100 &dev_attr_boot.attr,
2101 &dev_attr_device.attr,
2102 &dev_attr_device_name.attr,
2103 &dev_attr_generation.attr,
2105 &dev_attr_nvm_authenticate.attr,
2106 &dev_attr_nvm_authenticate_on_disconnect.attr,
2107 &dev_attr_nvm_version.attr,
2108 &dev_attr_rx_speed.attr,
2109 &dev_attr_rx_lanes.attr,
2110 &dev_attr_tx_speed.attr,
2111 &dev_attr_tx_lanes.attr,
2112 &dev_attr_vendor.attr,
2113 &dev_attr_vendor_name.attr,
2114 &dev_attr_unique_id.attr,
2118 static umode_t switch_attr_is_visible(struct kobject *kobj,
2119 struct attribute *attr, int n)
2121 struct device *dev = kobj_to_dev(kobj);
2122 struct tb_switch *sw = tb_to_switch(dev);
2124 if (attr == &dev_attr_authorized.attr) {
2125 if (sw->tb->security_level == TB_SECURITY_NOPCIE ||
2126 sw->tb->security_level == TB_SECURITY_DPONLY)
2128 } else if (attr == &dev_attr_device.attr) {
2131 } else if (attr == &dev_attr_device_name.attr) {
2132 if (!sw->device_name)
2134 } else if (attr == &dev_attr_vendor.attr) {
2137 } else if (attr == &dev_attr_vendor_name.attr) {
2138 if (!sw->vendor_name)
2140 } else if (attr == &dev_attr_key.attr) {
2142 sw->tb->security_level == TB_SECURITY_SECURE &&
2143 sw->security_level == TB_SECURITY_SECURE)
2146 } else if (attr == &dev_attr_rx_speed.attr ||
2147 attr == &dev_attr_rx_lanes.attr ||
2148 attr == &dev_attr_tx_speed.attr ||
2149 attr == &dev_attr_tx_lanes.attr) {
2153 } else if (attr == &dev_attr_nvm_authenticate.attr) {
2154 if (nvm_upgradeable(sw))
2157 } else if (attr == &dev_attr_nvm_version.attr) {
2158 if (nvm_readable(sw))
2161 } else if (attr == &dev_attr_boot.attr) {
2165 } else if (attr == &dev_attr_nvm_authenticate_on_disconnect.attr) {
2166 if (sw->quirks & QUIRK_FORCE_POWER_LINK_CONTROLLER)
2171 return sw->safe_mode ? 0 : attr->mode;
2174 static const struct attribute_group switch_group = {
2175 .is_visible = switch_attr_is_visible,
2176 .attrs = switch_attrs,
2179 static const struct attribute_group *switch_groups[] = {
2184 static void tb_switch_release(struct device *dev)
2186 struct tb_switch *sw = tb_to_switch(dev);
2187 struct tb_port *port;
2189 dma_port_free(sw->dma_port);
2191 tb_switch_for_each_port(sw, port) {
2192 ida_destroy(&port->in_hopids);
2193 ida_destroy(&port->out_hopids);
2197 kfree(sw->device_name);
2198 kfree(sw->vendor_name);
2205 static int tb_switch_uevent(struct device *dev, struct kobj_uevent_env *env)
2207 struct tb_switch *sw = tb_to_switch(dev);
2210 if (sw->config.thunderbolt_version == USB4_VERSION_1_0) {
2211 if (add_uevent_var(env, "USB4_VERSION=1.0"))
2215 if (!tb_route(sw)) {
2218 const struct tb_port *port;
2221 /* The device is a hub if it has any downstream ports */
2222 tb_switch_for_each_port(sw, port) {
2223 if (!port->disabled && !tb_is_upstream_port(port) &&
2224 tb_port_is_null(port)) {
2230 type = hub ? "hub" : "device";
2233 if (add_uevent_var(env, "USB4_TYPE=%s", type))
2239 * Currently we only need to provide the callbacks. Everything else is handled
2240 * in the connection manager.
2242 static int __maybe_unused tb_switch_runtime_suspend(struct device *dev)
2244 struct tb_switch *sw = tb_to_switch(dev);
2245 const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;
2247 if (cm_ops->runtime_suspend_switch)
2248 return cm_ops->runtime_suspend_switch(sw);
2253 static int __maybe_unused tb_switch_runtime_resume(struct device *dev)
2255 struct tb_switch *sw = tb_to_switch(dev);
2256 const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;
2258 if (cm_ops->runtime_resume_switch)
2259 return cm_ops->runtime_resume_switch(sw);
2263 static const struct dev_pm_ops tb_switch_pm_ops = {
2264 SET_RUNTIME_PM_OPS(tb_switch_runtime_suspend, tb_switch_runtime_resume,
2268 struct device_type tb_switch_type = {
2269 .name = "thunderbolt_device",
2270 .release = tb_switch_release,
2271 .uevent = tb_switch_uevent,
2272 .pm = &tb_switch_pm_ops,
2275 static int tb_switch_get_generation(struct tb_switch *sw)
2277 switch (sw->config.device_id) {
2278 case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
2279 case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE:
2280 case PCI_DEVICE_ID_INTEL_LIGHT_PEAK:
2281 case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_2C:
2282 case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
2283 case PCI_DEVICE_ID_INTEL_PORT_RIDGE:
2284 case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_2C_BRIDGE:
2285 case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_4C_BRIDGE:
2288 case PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_BRIDGE:
2289 case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE:
2290 case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE:
2293 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE:
2294 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE:
2295 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE:
2296 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE:
2297 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE:
2298 case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE:
2299 case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE:
2300 case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE:
2301 case PCI_DEVICE_ID_INTEL_ICL_NHI0:
2302 case PCI_DEVICE_ID_INTEL_ICL_NHI1:
2306 if (tb_switch_is_usb4(sw))
2310 * For unknown switches assume generation to be 1 to be
2313 tb_sw_warn(sw, "unsupported switch device id %#x\n",
2314 sw->config.device_id);
2319 static bool tb_switch_exceeds_max_depth(const struct tb_switch *sw, int depth)
2323 if (tb_switch_is_usb4(sw) ||
2324 (sw->tb->root_switch && tb_switch_is_usb4(sw->tb->root_switch)))
2325 max_depth = USB4_SWITCH_MAX_DEPTH;
2327 max_depth = TB_SWITCH_MAX_DEPTH;
2329 return depth > max_depth;
2333 * tb_switch_alloc() - allocate a switch
2334 * @tb: Pointer to the owning domain
2335 * @parent: Parent device for this switch
2336 * @route: Route string for this switch
2338 * Allocates and initializes a switch. Will not upload configuration to
2339 * the switch. For that you need to call tb_switch_configure()
2340 * separately. The returned switch should be released by calling
2343 * Return: Pointer to the allocated switch or ERR_PTR() in case of
2346 struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
2349 struct tb_switch *sw;
2353 /* Unlock the downstream port so we can access the switch below */
2355 struct tb_switch *parent_sw = tb_to_switch(parent);
2356 struct tb_port *down;
2358 down = tb_port_at(route, parent_sw);
2359 tb_port_unlock(down);
2362 depth = tb_route_length(route);
2364 upstream_port = tb_cfg_get_upstream_port(tb->ctl, route);
2365 if (upstream_port < 0)
2366 return ERR_PTR(upstream_port);
2368 sw = kzalloc(sizeof(*sw), GFP_KERNEL);
2370 return ERR_PTR(-ENOMEM);
2373 ret = tb_cfg_read(tb->ctl, &sw->config, route, 0, TB_CFG_SWITCH, 0, 5);
2375 goto err_free_sw_ports;
2377 sw->generation = tb_switch_get_generation(sw);
2379 tb_dbg(tb, "current switch config:\n");
2380 tb_dump_switch(tb, sw);
2382 /* configure switch */
2383 sw->config.upstream_port_number = upstream_port;
2384 sw->config.depth = depth;
2385 sw->config.route_hi = upper_32_bits(route);
2386 sw->config.route_lo = lower_32_bits(route);
2387 sw->config.enabled = 0;
2389 /* Make sure we do not exceed the maximum topology limit */
2390 if (tb_switch_exceeds_max_depth(sw, depth)) {
2391 ret = -EADDRNOTAVAIL;
2392 goto err_free_sw_ports;
2395 /* initialize ports */
2396 sw->ports = kcalloc(sw->config.max_port_number + 1, sizeof(*sw->ports),
2400 goto err_free_sw_ports;
2403 for (i = 0; i <= sw->config.max_port_number; i++) {
2404 /* minimum setup for tb_find_cap and tb_drom_read to work */
2405 sw->ports[i].sw = sw;
2406 sw->ports[i].port = i;
2408 /* Control port does not need HopID allocation */
2410 ida_init(&sw->ports[i].in_hopids);
2411 ida_init(&sw->ports[i].out_hopids);
2415 ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_PLUG_EVENTS);
2417 sw->cap_plug_events = ret;
2419 ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_TIME2);
2421 sw->cap_vsec_tmu = ret;
2423 ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_LINK_CONTROLLER);
2427 ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_CP_LP);
2431 /* Root switch is always authorized */
2433 sw->authorized = true;
2435 device_initialize(&sw->dev);
2436 sw->dev.parent = parent;
2437 sw->dev.bus = &tb_bus_type;
2438 sw->dev.type = &tb_switch_type;
2439 sw->dev.groups = switch_groups;
2440 dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));
2448 return ERR_PTR(ret);
2452 * tb_switch_alloc_safe_mode() - allocate a switch that is in safe mode
2453 * @tb: Pointer to the owning domain
2454 * @parent: Parent device for this switch
2455 * @route: Route string for this switch
2457 * This creates a switch in safe mode. This means the switch pretty much
2458 * lacks all capabilities except the DMA configuration port until it is
2459 * flashed with valid NVM firmware.
2461 * The returned switch must be released by calling tb_switch_put().
2463 * Return: Pointer to the allocated switch or ERR_PTR() in case of failure
2466 tb_switch_alloc_safe_mode(struct tb *tb, struct device *parent, u64 route)
2468 struct tb_switch *sw;
2470 sw = kzalloc(sizeof(*sw), GFP_KERNEL);
2472 return ERR_PTR(-ENOMEM);
2475 sw->config.depth = tb_route_length(route);
2476 sw->config.route_hi = upper_32_bits(route);
2477 sw->config.route_lo = lower_32_bits(route);
2478 sw->safe_mode = true;
2480 device_initialize(&sw->dev);
2481 sw->dev.parent = parent;
2482 sw->dev.bus = &tb_bus_type;
2483 sw->dev.type = &tb_switch_type;
2484 sw->dev.groups = switch_groups;
2485 dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));
2491 * tb_switch_configure() - Uploads configuration to the switch
2492 * @sw: Switch to configure
2494 * Call this function before the switch is added to the system. It
2495 * uploads the configuration to the switch and makes it available for the
2496 * connection manager to use. Can be called for the switch again after
2497 * resume from low power states to re-initialize it.
2499 * Return: %0 in case of success and negative errno in case of failure
2501 int tb_switch_configure(struct tb_switch *sw)
2503 struct tb *tb = sw->tb;
2507 route = tb_route(sw);
2509 tb_dbg(tb, "%s Switch at %#llx (depth: %d, up port: %d)\n",
2510 sw->config.enabled ? "restoring" : "initializing", route,
2511 tb_route_length(route), sw->config.upstream_port_number);
2513 sw->config.enabled = 1;
2515 if (tb_switch_is_usb4(sw)) {
2517 * For USB4 devices, we need to program the CM version
2518 * accordingly so that it knows to expose all the
2519 * additional capabilities.
2521 sw->config.cmuv = USB4_VERSION_1_0;
2523 /* Enumerate the switch */
2524 ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH,
2529 ret = usb4_switch_setup(sw);
2531 if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL)
2532 tb_sw_warn(sw, "unknown switch vendor id %#x\n",
2533 sw->config.vendor_id);
2535 if (!sw->cap_plug_events) {
2536 tb_sw_warn(sw, "cannot find TB_VSE_CAP_PLUG_EVENTS, aborting\n");
2540 /* Enumerate the switch */
2541 ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH,
2547 return tb_plug_events_active(sw, true);
2550 static int tb_switch_set_uuid(struct tb_switch *sw)
2559 if (tb_switch_is_usb4(sw)) {
2560 ret = usb4_switch_read_uid(sw, &sw->uid);
2566 * The newer controllers include a fused UUID as part of
2567 * the link controller specific registers
2569 ret = tb_lc_read_uuid(sw, uuid);
2579 * ICM generates UUID based on UID and fills the upper
2580 * two words with ones. This is not strictly following
2581 * UUID format but we want to be compatible with it so
2582 * we do the same here.
2584 uuid[0] = sw->uid & 0xffffffff;
2585 uuid[1] = (sw->uid >> 32) & 0xffffffff;
2586 uuid[2] = 0xffffffff;
2587 uuid[3] = 0xffffffff;
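/*
 * E.g. UID 0x0011223344556677 yields the words
 * { 0x44556677, 0x00112233, 0xffffffff, 0xffffffff }.
 */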
2590 sw->uuid = kmemdup(uuid, sizeof(uuid), GFP_KERNEL);
2596 static int tb_switch_add_dma_port(struct tb_switch *sw)
2601 switch (sw->generation) {
2603 /* Only root switch can be upgraded */
2610 ret = tb_switch_set_uuid(sw);
2617 * DMA port is the only thing available when the switch
2625 if (sw->no_nvm_upgrade)
2628 if (tb_switch_is_usb4(sw)) {
2629 ret = usb4_switch_nvm_authenticate_status(sw, &status);
2634 tb_sw_info(sw, "switch flash authentication failed\n");
2635 nvm_set_auth_status(sw, status);
2641 /* Root switch DMA port requires running firmware */
2642 if (!tb_route(sw) && !tb_switch_is_icm(sw))
2645 sw->dma_port = dma_port_alloc(sw);
2650 * If there is a status already set then authentication failed
2651 * when dma_port_flash_update_auth() returned. Power cycling
2652 * is not needed (it was done already) so the only thing we do here
2653 * is unblock runtime PM of the root port.
2655 nvm_get_auth_status(sw, &status);
2658 nvm_authenticate_complete_dma_port(sw);
2663 * Check status of the previous flash authentication. If there
2664 * is one we need to power cycle the switch in any case to make
2665 * it functional again.
2667 ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
2671 /* Now we can allow root port to suspend again */
2673 nvm_authenticate_complete_dma_port(sw);
2676 tb_sw_info(sw, "switch flash authentication failed\n");
2677 nvm_set_auth_status(sw, status);
2680 tb_sw_info(sw, "power cycling the switch now\n");
2681 dma_port_power_cycle(sw->dma_port);
2684 * We return error here which causes the switch adding failure.
2685 * It should appear back after power cycle is complete.

static void tb_switch_default_link_ports(struct tb_switch *sw)
{
	int i;

	for (i = 1; i <= sw->config.max_port_number; i++) {
		struct tb_port *port = &sw->ports[i];
		struct tb_port *subordinate;

		if (!tb_port_is_null(port))
			continue;

		/* Check for the subordinate port */
		if (i == sw->config.max_port_number ||
		    !tb_port_is_null(&sw->ports[i + 1]))
			continue;

		/* Link them if not already done so (by DROM) */
		subordinate = &sw->ports[i + 1];
		if (!port->dual_link_port && !subordinate->dual_link_port) {
			port->link_nr = 0;
			port->dual_link_port = subordinate;
			subordinate->link_nr = 1;
			subordinate->dual_link_port = port;

			tb_sw_dbg(sw, "linked ports %d <-> %d\n",
				  port->port, subordinate->port);
		}
	}
}

static bool tb_switch_lane_bonding_possible(struct tb_switch *sw)
{
	const struct tb_port *up = tb_upstream_port(sw);

	if (!up->dual_link_port || !up->dual_link_port->remote)
		return false;

	if (tb_switch_is_usb4(sw))
		return usb4_switch_lane_bonding_possible(sw);
	return tb_lc_lane_bonding_possible(sw);
}

static int tb_switch_update_link_attributes(struct tb_switch *sw)
{
	struct tb_port *up;
	bool change = false;
	int ret;

	if (!tb_route(sw) || tb_switch_is_icm(sw))
		return 0;

	up = tb_upstream_port(sw);

	ret = tb_port_get_link_speed(up);
	if (ret < 0)
		return ret;
	if (sw->link_speed != ret)
		change = true;
	sw->link_speed = ret;

	ret = tb_port_get_link_width(up);
	if (ret < 0)
		return ret;
	if (sw->link_width != ret)
		change = true;
	sw->link_width = ret;

	/* Notify userspace that there is possible link attribute change */
	if (device_is_registered(&sw->dev) && change)
		kobject_uevent(&sw->dev.kobj, KOBJ_CHANGE);

	return 0;
}

/**
 * tb_switch_lane_bonding_enable() - Enable lane bonding
 * @sw: Switch to enable lane bonding
 *
 * Connection manager can call this function to enable lane bonding of a
 * switch. If conditions are correct and both switches support the feature,
 * lanes are bonded. It is safe to call this for any switch.
 */
int tb_switch_lane_bonding_enable(struct tb_switch *sw)
{
	struct tb_switch *parent = tb_to_switch(sw->dev.parent);
	struct tb_port *up, *down;
	u64 route = tb_route(sw);
	int ret;

	if (!route)
		return 0;

	if (!tb_switch_lane_bonding_possible(sw))
		return 0;

	up = tb_upstream_port(sw);
	down = tb_port_at(route, parent);

	if (!tb_port_is_width_supported(up, 2) ||
	    !tb_port_is_width_supported(down, 2))
		return 0;

	ret = tb_port_lane_bonding_enable(up);
	if (ret) {
		tb_port_warn(up, "failed to enable lane bonding\n");
		return ret;
	}

	ret = tb_port_lane_bonding_enable(down);
	if (ret) {
		tb_port_warn(down, "failed to enable lane bonding\n");
		tb_port_lane_bonding_disable(up);
		return ret;
	}

	ret = tb_port_wait_for_link_width(down, 2, 100);
	if (ret) {
		tb_port_warn(down, "timeout enabling lane bonding\n");
		return ret;
	}

	tb_port_update_credits(down);
	tb_port_update_credits(up);
	tb_switch_update_link_attributes(sw);

	tb_sw_dbg(sw, "lane bonding enabled\n");
	return ret;
}
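
/*
 * Usage sketch (illustrative only): after enumerating a device router,
 * a connection manager typically tries bonding and then marks the link
 * configured, for example:
 *
 *	if (tb_switch_lane_bonding_enable(sw))
 *		tb_sw_warn(sw, "failed to enable lane bonding\n");
 *	tb_switch_configure_link(sw);
 *
 * Beyond "bond before configure", the exact ordering is up to the
 * caller.
 */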

/**
 * tb_switch_lane_bonding_disable() - Disable lane bonding
 * @sw: Switch whose lane bonding to disable
 *
 * Disables lane bonding between @sw and parent. This can be called even
 * if lanes were not bonded originally.
 */
void tb_switch_lane_bonding_disable(struct tb_switch *sw)
{
	struct tb_switch *parent = tb_to_switch(sw->dev.parent);
	struct tb_port *up, *down;

	if (!tb_route(sw))
		return;

	up = tb_upstream_port(sw);
	if (!up->bonded)
		return;

	down = tb_port_at(tb_route(sw), parent);

	tb_port_lane_bonding_disable(up);
	tb_port_lane_bonding_disable(down);

	/*
	 * It is fine if we get other errors as the router might have
	 * been unplugged.
	 */
	if (tb_port_wait_for_link_width(down, 1, 100) == -ETIMEDOUT)
		tb_sw_warn(sw, "timeout disabling lane bonding\n");

	tb_port_update_credits(down);
	tb_port_update_credits(up);
	tb_switch_update_link_attributes(sw);

	tb_sw_dbg(sw, "lane bonding disabled\n");
}

/**
 * tb_switch_configure_link() - Set link configured
 * @sw: Switch whose link is configured
 *
 * Sets the link upstream from @sw configured (from both ends) so that
 * it will not be disconnected when the domain exits sleep. Can be
 * called for any switch.
 *
 * It is recommended that this is called after lane bonding is enabled.
 *
 * Returns %0 on success and negative errno in case of error.
 */
int tb_switch_configure_link(struct tb_switch *sw)
{
	struct tb_port *up, *down;
	int ret;

	if (!tb_route(sw) || tb_switch_is_icm(sw))
		return 0;

	up = tb_upstream_port(sw);
	if (tb_switch_is_usb4(up->sw))
		ret = usb4_port_configure(up);
	else
		ret = tb_lc_configure_port(up);
	if (ret)
		return ret;

	down = up->remote;
	if (tb_switch_is_usb4(down->sw))
		return usb4_port_configure(down);
	return tb_lc_configure_port(down);
}
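
/*
 * Usage sketch (illustrative only): this pairs with
 * tb_switch_unconfigure_link() below. A hypothetical caller bringing a
 * router up and later tearing it down would do, in order:
 *
 *	tb_switch_lane_bonding_enable(sw);
 *	tb_switch_configure_link(sw);
 *	...
 *	tb_switch_lane_bonding_disable(sw);
 *	tb_switch_unconfigure_link(sw);
 */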

/**
 * tb_switch_unconfigure_link() - Unconfigure link
 * @sw: Switch whose link is unconfigured
 *
 * Sets the link unconfigured so the @sw will be disconnected if the
 * domain exits sleep.
 */
void tb_switch_unconfigure_link(struct tb_switch *sw)
{
	struct tb_port *up, *down;

	if (sw->is_unplugged)
		return;
	if (!tb_route(sw) || tb_switch_is_icm(sw))
		return;

	up = tb_upstream_port(sw);
	if (tb_switch_is_usb4(up->sw))
		usb4_port_unconfigure(up);
	else
		tb_lc_unconfigure_port(up);

	down = up->remote;
	if (tb_switch_is_usb4(down->sw))
		usb4_port_unconfigure(down);
	else
		tb_lc_unconfigure_port(down);
}

static void tb_switch_credits_init(struct tb_switch *sw)
{
	if (tb_switch_is_icm(sw))
		return;
	if (!tb_switch_is_usb4(sw))
		return;
	if (usb4_switch_credits_init(sw))
		tb_sw_info(sw, "failed to determine preferred buffer allocation, using defaults\n");
}

/**
 * tb_switch_add() - Add a switch to the domain
 * @sw: Switch to add
 *
 * This is the last step in adding a switch to the domain. It will read
 * identification information from DROM and initialize ports so that
 * they can be used to connect other switches. The switch will be
 * exposed to the userspace when this function successfully returns. To
 * remove and release the switch, call tb_switch_remove().
 *
 * Return: %0 in case of success and negative errno in case of failure
 */
int tb_switch_add(struct tb_switch *sw)
{
	int i, ret;

	/*
	 * Initialize DMA control port now before we read DROM. Recent
	 * host controllers have more complete DROM on NVM that includes
	 * vendor and model identification strings which we then expose
	 * to the userspace. NVM can be accessed through DMA
	 * configuration based mailbox.
	 */
	ret = tb_switch_add_dma_port(sw);
	if (ret) {
		dev_err(&sw->dev, "failed to add DMA port\n");
		return ret;
	}

	if (!sw->safe_mode) {
		tb_switch_credits_init(sw);

		/* read drom */
		ret = tb_drom_read(sw);
		if (ret)
			dev_warn(&sw->dev, "reading DROM failed: %d\n", ret);
		tb_sw_dbg(sw, "uid: %#llx\n", sw->uid);

		tb_check_quirks(sw);

		ret = tb_switch_set_uuid(sw);
		if (ret) {
			dev_err(&sw->dev, "failed to set UUID\n");
			return ret;
		}

		for (i = 0; i <= sw->config.max_port_number; i++) {
			if (sw->ports[i].disabled) {
				tb_port_dbg(&sw->ports[i], "disabled by eeprom\n");
				continue;
			}
			ret = tb_init_port(&sw->ports[i]);
			if (ret) {
				dev_err(&sw->dev, "failed to initialize port %d\n", i);
				return ret;
			}
		}

		tb_switch_default_link_ports(sw);

		ret = tb_switch_update_link_attributes(sw);
		if (ret)
			return ret;

		ret = tb_switch_tmu_init(sw);
		if (ret)
			return ret;
	}

	ret = device_add(&sw->dev);
	if (ret) {
		dev_err(&sw->dev, "failed to add device: %d\n", ret);
		return ret;
	}

	if (tb_route(sw)) {
		dev_info(&sw->dev, "new device found, vendor=%#x device=%#x\n",
			 sw->vendor, sw->device);
		if (sw->vendor_name && sw->device_name)
			dev_info(&sw->dev, "%s %s\n", sw->vendor_name,
				 sw->device_name);
	}

	ret = usb4_switch_add_ports(sw);
	if (ret) {
		dev_err(&sw->dev, "failed to add USB4 ports\n");
		goto err_del;
	}

	ret = tb_switch_nvm_add(sw);
	if (ret) {
		dev_err(&sw->dev, "failed to add NVM devices\n");
		goto err_ports;
	}

	/*
	 * Thunderbolt routers do not generate wakeups themselves but
	 * they forward wakeups from tunneled protocols, so enable it
	 * here.
	 */
	device_init_wakeup(&sw->dev, true);

	pm_runtime_set_active(&sw->dev);
	if (sw->rpm) {
		pm_runtime_set_autosuspend_delay(&sw->dev, TB_AUTOSUSPEND_DELAY);
		pm_runtime_use_autosuspend(&sw->dev);
		pm_runtime_mark_last_busy(&sw->dev);
		pm_runtime_enable(&sw->dev);
		pm_request_autosuspend(&sw->dev);
	}

	tb_switch_debugfs_init(sw);
	return 0;

err_ports:
	usb4_switch_remove_ports(sw);
err_del:
	device_del(&sw->dev);

	return ret;
}
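
/*
 * Usage sketch (illustrative only): a new router is brought up with
 * alloc -> configure -> add; parent_dev and route here stand in for
 * the real caller's values:
 *
 *	sw = tb_switch_alloc(tb, parent_dev, route);
 *	if (IS_ERR(sw))
 *		return;
 *	if (tb_switch_configure(sw) || tb_switch_add(sw))
 *		tb_switch_put(sw);
 */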

/**
 * tb_switch_remove() - Remove and release a switch
 * @sw: Switch to remove
 *
 * This will remove the switch from the domain and release it after the
 * last reference count drops to zero. If there are switches connected
 * below this switch, they will be removed as well.
 */
void tb_switch_remove(struct tb_switch *sw)
{
	struct tb_port *port;

	tb_switch_debugfs_remove(sw);

	if (sw->rpm) {
		pm_runtime_get_sync(&sw->dev);
		pm_runtime_disable(&sw->dev);
	}

	/* port 0 is the switch itself and never has a remote */
	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port)) {
			tb_switch_remove(port->remote->sw);
			port->remote = NULL;
		} else if (port->xdomain) {
			tb_xdomain_remove(port->xdomain);
			port->xdomain = NULL;
		}

		/* Remove any downstream retimers */
		tb_retimer_remove_all(port);
	}

	if (!sw->is_unplugged)
		tb_plug_events_active(sw, false);

	tb_switch_nvm_remove(sw);
	usb4_switch_remove_ports(sw);

	if (tb_route(sw))
		dev_info(&sw->dev, "device disconnected\n");
	device_unregister(&sw->dev);
}

/**
 * tb_sw_set_unplugged() - set is_unplugged on switch and downstream switches
 * @sw: Router to mark unplugged
 */
void tb_sw_set_unplugged(struct tb_switch *sw)
{
	struct tb_port *port;

	if (sw == sw->tb->root_switch) {
		tb_sw_WARN(sw, "cannot unplug root switch\n");
		return;
	}
	if (sw->is_unplugged) {
		tb_sw_WARN(sw, "is_unplugged already set\n");
		return;
	}
	sw->is_unplugged = true;
	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port))
			tb_sw_set_unplugged(port->remote->sw);
		else if (port->xdomain)
			port->xdomain->is_unplugged = true;
	}
}

static int tb_switch_set_wake(struct tb_switch *sw, unsigned int flags)
{
	if (flags)
		tb_sw_dbg(sw, "enabling wakeup: %#x\n", flags);
	else
		tb_sw_dbg(sw, "disabling wakeup\n");

	if (tb_switch_is_usb4(sw))
		return usb4_switch_set_wake(sw, flags);
	return tb_lc_set_wake(sw, flags);
}

int tb_switch_resume(struct tb_switch *sw)
{
	struct tb_port *port;
	int err;

	tb_sw_dbg(sw, "resuming switch\n");

	/*
	 * Check for UID of the connected switches except for root
	 * switch which we assume cannot be removed.
	 */
	if (tb_route(sw)) {
		u64 uid;

		/*
		 * Check first that we can still read the switch config
		 * space. It may be that there is now another domain
		 * connected.
		 */
		err = tb_cfg_get_upstream_port(sw->tb->ctl, tb_route(sw));
		if (err < 0) {
			tb_sw_info(sw, "switch not present anymore\n");
			return err;
		}

		/* We don't have any way to confirm this was the same device */
		if (!sw->uid)
			return -ENODEV;

		if (tb_switch_is_usb4(sw))
			err = usb4_switch_read_uid(sw, &uid);
		else
			err = tb_drom_read_uid_only(sw, &uid);
		if (err) {
			tb_sw_warn(sw, "uid read failed\n");
			return err;
		}
		if (sw->uid != uid) {
			tb_sw_info(sw,
				"changed while suspended (uid %#llx -> %#llx)\n",
				sw->uid, uid);
			return -ENODEV;
		}
	}

	err = tb_switch_configure(sw);
	if (err)
		return err;

	/* Disable wakes */
	tb_switch_set_wake(sw, 0);

	err = tb_switch_tmu_init(sw);
	if (err)
		return err;

	/* check for surviving downstream switches */
	tb_switch_for_each_port(sw, port) {
		if (!tb_port_is_null(port))
			continue;

		if (!tb_port_resume(port))
			continue;

		if (tb_wait_for_port(port, true) <= 0) {
			tb_port_warn(port,
				     "lost during suspend, disconnecting\n");
			if (tb_port_has_remote(port))
				tb_sw_set_unplugged(port->remote->sw);
			else if (port->xdomain)
				port->xdomain->is_unplugged = true;
		} else {
			/*
			 * Always unlock the port so the downstream
			 * switch/domain is accessible.
			 */
			if (tb_port_unlock(port))
				tb_port_warn(port, "failed to unlock port\n");
			if (port->remote && tb_switch_resume(port->remote->sw)) {
				tb_port_warn(port,
					     "lost during suspend, disconnecting\n");
				tb_sw_set_unplugged(port->remote->sw);
			}
		}
	}
	return 0;
}
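
/*
 * Usage sketch (illustrative only): the domain resume path calls this
 * for the root router and then drops whatever did not survive sleep;
 * the cleanup step shown here stands in for the caller's own helper:
 *
 *	tb_switch_resume(tb->root_switch);
 *	(free tunnels whose ports went away)
 */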

/**
 * tb_switch_suspend() - Put a switch to sleep
 * @sw: Switch to suspend
 * @runtime: Is this runtime suspend or system sleep
 *
 * Suspends router and all its children. Enables wakes according to
 * value of @runtime and then sets sleep bit for the router. If @sw is
 * the host router the domain is ready to go to sleep once this
 * function returns.
 */
void tb_switch_suspend(struct tb_switch *sw, bool runtime)
{
	unsigned int flags = 0;
	struct tb_port *port;
	int err;

	tb_sw_dbg(sw, "suspending switch\n");

	/*
	 * Actually only needed for Titan Ridge but for simplicity can be
	 * done for USB4 device too as CLx is re-enabled at resume.
	 * CL0s and CL1 are enabled and supported together.
	 */
	if (tb_switch_is_clx_enabled(sw, TB_CL1)) {
		if (tb_switch_disable_clx(sw, TB_CL1))
			tb_sw_warn(sw, "failed to disable %s on upstream port\n",
				   tb_switch_clx_name(TB_CL1));
	}

	err = tb_plug_events_active(sw, false);
	if (err)
		return;

	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port))
			tb_switch_suspend(port->remote->sw, runtime);
	}

	if (runtime) {
		/* Trigger wake when something is plugged in/out */
		flags |= TB_WAKE_ON_CONNECT | TB_WAKE_ON_DISCONNECT;
		flags |= TB_WAKE_ON_USB4;
		flags |= TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE | TB_WAKE_ON_DP;
	} else if (device_may_wakeup(&sw->dev)) {
		flags |= TB_WAKE_ON_USB4 | TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE;
	}

	tb_switch_set_wake(sw, flags);

	if (tb_switch_is_usb4(sw))
		usb4_switch_set_sleep(sw);
	else
		tb_lc_set_sleep(sw);
}
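
/*
 * Usage sketch (illustrative only): system sleep and runtime PM differ
 * only in the @runtime argument; recursion over children happens
 * inside the function itself:
 *
 *	tb_switch_suspend(tb->root_switch, false);	(system sleep)
 *	tb_switch_suspend(tb->root_switch, true);	(runtime suspend)
 */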

/**
 * tb_switch_query_dp_resource() - Query availability of DP resource
 * @sw: Switch whose DP resource is queried
 * @in: DP IN port
 *
 * Queries availability of DP resource for DP tunneling using switch
 * specific means. Returns %true if resource is available.
 */
bool tb_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in)
{
	if (tb_switch_is_usb4(sw))
		return usb4_switch_query_dp_resource(sw, in);
	return tb_lc_dp_sink_query(sw, in);
}

/**
 * tb_switch_alloc_dp_resource() - Allocate available DP resource
 * @sw: Switch whose DP resource is allocated
 * @in: DP IN port
 *
 * Allocates DP resource for DP tunneling. The resource must be
 * available for this to succeed (see tb_switch_query_dp_resource()).
 * Returns %0 in case of success and negative errno otherwise.
 */
int tb_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
{
	int ret;

	if (tb_switch_is_usb4(sw))
		ret = usb4_switch_alloc_dp_resource(sw, in);
	else
		ret = tb_lc_dp_sink_alloc(sw, in);

	if (ret)
		tb_sw_warn(sw, "failed to allocate DP resource for port %d\n",
			   in->port);
	else
		tb_sw_dbg(sw, "allocated DP resource for port %d\n", in->port);

	return ret;
}

/**
 * tb_switch_dealloc_dp_resource() - De-allocate DP resource
 * @sw: Switch whose DP resource is de-allocated
 * @in: DP IN port
 *
 * De-allocates DP resource that was previously allocated for DP
 * tunneling.
 */
void tb_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
{
	int ret;

	if (tb_switch_is_usb4(sw))
		ret = usb4_switch_dealloc_dp_resource(sw, in);
	else
		ret = tb_lc_dp_sink_dealloc(sw, in);

	if (ret)
		tb_sw_warn(sw,
			   "failed to de-allocate DP resource for port %d\n",
			   in->port);
	else
		tb_sw_dbg(sw, "released DP resource for port %d\n", in->port);
}
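
/*
 * Usage sketch (illustrative only): DP tunneling pairs these helpers
 * as query -> alloc -> (tunnel lifetime) -> dealloc:
 *
 *	if (!tb_switch_query_dp_resource(sw, in))
 *		return -ENODEV;
 *	ret = tb_switch_alloc_dp_resource(sw, in);
 *	if (ret)
 *		return ret;
 *	...
 *	tb_switch_dealloc_dp_resource(sw, in);
 */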

struct tb_sw_lookup {
	struct tb *tb;
	u8 link;
	u8 depth;
	const uuid_t *uuid;
	u64 route;
};

static int tb_switch_match(struct device *dev, const void *data)
{
	struct tb_switch *sw = tb_to_switch(dev);
	const struct tb_sw_lookup *lookup = data;

	if (!sw)
		return 0;
	if (sw->tb != lookup->tb)
		return 0;

	if (lookup->uuid)
		return !memcmp(sw->uuid, lookup->uuid, sizeof(*lookup->uuid));

	if (lookup->route) {
		return sw->config.route_lo == lower_32_bits(lookup->route) &&
		       sw->config.route_hi == upper_32_bits(lookup->route);
	}

	/* Root switch is matched only by depth */
	if (!lookup->depth)
		return !sw->depth;

	return sw->link == lookup->link && sw->depth == lookup->depth;
}

/**
 * tb_switch_find_by_link_depth() - Find switch by link and depth
 * @tb: Domain the switch belongs to
 * @link: Link number the switch is connected to
 * @depth: Depth of the switch in the link
 *
 * Returned switch has reference count increased so the caller needs to
 * call tb_switch_put() when done with the switch.
 */
struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link, u8 depth)
{
	struct tb_sw_lookup lookup;
	struct device *dev;

	memset(&lookup, 0, sizeof(lookup));
	lookup.tb = tb;
	lookup.link = link;
	lookup.depth = depth;

	dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
	if (dev)
		return tb_to_switch(dev);

	return NULL;
}

/**
 * tb_switch_find_by_uuid() - Find switch by UUID
 * @tb: Domain the switch belongs to
 * @uuid: UUID to look for
 *
 * Returned switch has reference count increased so the caller needs to
 * call tb_switch_put() when done with the switch.
 */
struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid)
{
	struct tb_sw_lookup lookup;
	struct device *dev;

	memset(&lookup, 0, sizeof(lookup));
	lookup.tb = tb;
	lookup.uuid = uuid;

	dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
	if (dev)
		return tb_to_switch(dev);

	return NULL;
}

/**
 * tb_switch_find_by_route() - Find switch by route string
 * @tb: Domain the switch belongs to
 * @route: Route string to look for
 *
 * Returned switch has reference count increased so the caller needs to
 * call tb_switch_put() when done with the switch.
 */
struct tb_switch *tb_switch_find_by_route(struct tb *tb, u64 route)
{
	struct tb_sw_lookup lookup;
	struct device *dev;

	if (!route)
		return tb_switch_get(tb->root_switch);

	memset(&lookup, 0, sizeof(lookup));
	lookup.tb = tb;
	lookup.route = route;

	dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
	if (dev)
		return tb_to_switch(dev);

	return NULL;
}
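
/*
 * Usage sketch (illustrative only): each lookup above returns a
 * reference-counted router, so a successful find must be balanced with
 * tb_switch_put():
 *
 *	sw = tb_switch_find_by_route(tb, route);
 *	if (sw) {
 *		... use sw ...
 *		tb_switch_put(sw);
 *	}
 */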

/**
 * tb_switch_find_port() - return the first port of @type on @sw or NULL
 * @sw: Switch to find the port from
 * @type: Port type to look for
 */
struct tb_port *tb_switch_find_port(struct tb_switch *sw,
				    enum tb_port_type type)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (port->config.type == type)
			return port;
	}

	return NULL;
}

static int tb_switch_pm_secondary_resolve(struct tb_switch *sw)
{
	struct tb_switch *parent = tb_switch_parent(sw);
	struct tb_port *up, *down;
	int ret;

	if (!tb_route(sw))
		return 0;

	up = tb_upstream_port(sw);
	down = tb_port_at(tb_route(sw), parent);
	ret = tb_port_pm_secondary_enable(up);
	if (ret)
		return ret;

	return tb_port_pm_secondary_disable(down);
}

static int __tb_switch_enable_clx(struct tb_switch *sw, enum tb_clx clx)
{
	struct tb_switch *parent = tb_switch_parent(sw);
	bool up_clx_support, down_clx_support;
	struct tb_port *up, *down;
	int ret;

	if (!tb_switch_is_clx_supported(sw))
		return 0;

	/*
	 * Enable CLx for host router's downstream port as part of the
	 * downstream router enabling procedure.
	 */
	if (!tb_route(sw))
		return 0;

	/* Enable CLx only for first hop router (depth = 1) */
	if (tb_route(parent))
		return 0;

	ret = tb_switch_pm_secondary_resolve(sw);
	if (ret)
		return ret;

	up = tb_upstream_port(sw);
	down = tb_port_at(tb_route(sw), parent);

	up_clx_support = tb_port_clx_supported(up, clx);
	down_clx_support = tb_port_clx_supported(down, clx);

	tb_port_dbg(up, "%s %ssupported\n", tb_switch_clx_name(clx),
		    up_clx_support ? "" : "not ");
	tb_port_dbg(down, "%s %ssupported\n", tb_switch_clx_name(clx),
		    down_clx_support ? "" : "not ");

	if (!up_clx_support || !down_clx_support)
		return -EOPNOTSUPP;

	ret = tb_port_clx_enable(up, clx);
	if (ret)
		return ret;

	ret = tb_port_clx_enable(down, clx);
	if (ret) {
		tb_port_clx_disable(up, clx);
		return ret;
	}

	ret = tb_switch_mask_clx_objections(sw);
	if (ret) {
		tb_port_clx_disable(up, clx);
		tb_port_clx_disable(down, clx);
		return ret;
	}

	sw->clx = clx;

	tb_port_dbg(up, "%s enabled\n", tb_switch_clx_name(clx));
	return 0;
}

/**
 * tb_switch_enable_clx() - Enable CLx on upstream port of specified router
 * @sw: Router to enable CLx for
 * @clx: The CLx state to enable
 *
 * Enable CLx state only for the first hop router. That is the most
 * common use case, is intended for better thermal management, and
 * helps to improve performance. CLx is enabled only if both sides of
 * the link support CLx, neither side of the link is configured as two
 * single-lane links, and the link is not an inter-domain link. The
 * complete set of conditions is described in CM Guide 1.0 section 8.1.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
int tb_switch_enable_clx(struct tb_switch *sw, enum tb_clx clx)
{
	struct tb_switch *root_sw = sw->tb->root_switch;

	if (!clx_enabled)
		return 0;

	/*
	 * CLx is not enabled and validated on Intel USB4 platforms before
	 * Alder Lake.
	 */
	if (root_sw->generation < 4 || tb_switch_is_tiger_lake(root_sw))
		return 0;

	switch (clx) {
	case TB_CL1:
		/* CL0s and CL1 are enabled and supported together */
		return __tb_switch_enable_clx(sw, clx);

	default:
		return -EOPNOTSUPP;
	}
}
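
/*
 * Usage sketch (illustrative only): a connection manager asks for CL1
 * on the first depth router and warns if the link cannot enter it:
 *
 *	if (tb_switch_enable_clx(sw, TB_CL1))
 *		tb_sw_warn(sw, "failed to enable %s on upstream port\n",
 *			   tb_switch_clx_name(TB_CL1));
 */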

static int __tb_switch_disable_clx(struct tb_switch *sw, enum tb_clx clx)
{
	struct tb_switch *parent = tb_switch_parent(sw);
	struct tb_port *up, *down;
	int ret;

	if (!tb_switch_is_clx_supported(sw))
		return 0;

	/*
	 * Disable CLx for host router's downstream port as part of the
	 * downstream router enabling procedure.
	 */
	if (!tb_route(sw))
		return 0;

	/* Disable CLx only for first hop router (depth = 1) */
	if (tb_route(parent))
		return 0;

	up = tb_upstream_port(sw);
	down = tb_port_at(tb_route(sw), parent);
	ret = tb_port_clx_disable(up, clx);
	if (ret)
		return ret;

	ret = tb_port_clx_disable(down, clx);
	if (ret)
		return ret;

	sw->clx = TB_CLX_DISABLE;

	tb_port_dbg(up, "%s disabled\n", tb_switch_clx_name(clx));
	return 0;
}

/**
 * tb_switch_disable_clx() - Disable CLx on upstream port of specified router
 * @sw: Router to disable CLx for
 * @clx: The CLx state to disable
 *
 * Return: Returns 0 on success or an error code on failure.
 */
int tb_switch_disable_clx(struct tb_switch *sw, enum tb_clx clx)
{
	if (!clx_enabled)
		return 0;

	switch (clx) {
	case TB_CL1:
		/* CL0s and CL1 are enabled and supported together */
		return __tb_switch_disable_clx(sw, clx);

	default:
		return -EOPNOTSUPP;
	}
}

/**
 * tb_switch_mask_clx_objections() - Mask CLx objections for a router
 * @sw: Router to mask objections for
 *
 * Mask the objections coming from the second depth routers in order to
 * stop these objections from interfering with the CLx states of the
 * first depth link.
 */
int tb_switch_mask_clx_objections(struct tb_switch *sw)
{
	int up_port = sw->config.upstream_port_number;
	u32 offset, val[2], mask_obj, unmask_obj;
	int ret, i;

	/* Only Titan Ridge of pre-USB4 devices supports CLx states */
	if (!tb_switch_is_titan_ridge(sw))
		return 0;

	if (!sw->cap_lp)
		return 0;

	/*
	 * In Titan Ridge there are only 2 dual-lane Thunderbolt ports:
	 * Port A consists of lane adapters 1,2 and
	 * Port B consists of lane adapters 3,4
	 * If upstream port is A, (lanes are 1,2), we mask objections from
	 * port B (lanes 3,4) and unmask objections from Port A and vice-versa.
	 */
	if (up_port == 1) {
		mask_obj = TB_LOW_PWR_C0_PORT_B_MASK;
		unmask_obj = TB_LOW_PWR_C1_PORT_A_MASK;
		offset = TB_LOW_PWR_C1_CL1;
	} else {
		mask_obj = TB_LOW_PWR_C1_PORT_A_MASK;
		unmask_obj = TB_LOW_PWR_C0_PORT_B_MASK;
		offset = TB_LOW_PWR_C3_CL1;
	}

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
			 sw->cap_lp + offset, ARRAY_SIZE(val));
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(val); i++) {
		val[i] |= mask_obj;
		val[i] &= ~unmask_obj;
	}

	return tb_sw_write(sw, &val, TB_CFG_SWITCH,
			   sw->cap_lp + offset, ARRAY_SIZE(val));
}

/*
 * Can be used to read/write a specified PCIe bridge for any Thunderbolt 3
 * device. For now used only for Titan Ridge.
 */
static int tb_switch_pcie_bridge_write(struct tb_switch *sw, unsigned int bridge,
				       unsigned int pcie_offset, u32 value)
{
	u32 offset, command, val;
	int ret;

	if (sw->generation != 3)
		return -EOPNOTSUPP;

	offset = sw->cap_plug_events + TB_PLUG_EVENTS_PCIE_WR_DATA;
	ret = tb_sw_write(sw, &value, TB_CFG_SWITCH, offset, 1);
	if (ret)
		return ret;

	command = pcie_offset & TB_PLUG_EVENTS_PCIE_CMD_DW_OFFSET_MASK;
	command |= BIT(bridge + TB_PLUG_EVENTS_PCIE_CMD_BR_SHIFT);
	command |= TB_PLUG_EVENTS_PCIE_CMD_RD_WR_MASK;
	command |= TB_PLUG_EVENTS_PCIE_CMD_COMMAND_VAL
			<< TB_PLUG_EVENTS_PCIE_CMD_COMMAND_SHIFT;
	command |= TB_PLUG_EVENTS_PCIE_CMD_REQ_ACK_MASK;

	offset = sw->cap_plug_events + TB_PLUG_EVENTS_PCIE_CMD;

	ret = tb_sw_write(sw, &command, TB_CFG_SWITCH, offset, 1);
	if (ret)
		return ret;

	ret = tb_switch_wait_for_bit(sw, offset,
				     TB_PLUG_EVENTS_PCIE_CMD_REQ_ACK_MASK, 0, 100);
	if (ret)
		return ret;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1);
	if (ret)
		return ret;

	if (val & TB_PLUG_EVENTS_PCIE_CMD_TIMEOUT_MASK)
		return -ETIMEDOUT;

	return 0;
}

/**
 * tb_switch_pcie_l1_enable() - Enable PCIe link to enter L1 state
 * @sw: Router to enable PCIe L1
 *
 * For the Titan Ridge switch to enter CLx state, its PCIe bridges shall
 * enable entry to PCIe L1 state. Shall be called after the upstream
 * PCIe tunnel is configured. Due to an Intel platform limitation, shall
 * be called only for the first hop switch.
 */
int tb_switch_pcie_l1_enable(struct tb_switch *sw)
{
	struct tb_switch *parent = tb_switch_parent(sw);
	int ret;

	if (!tb_route(sw))
		return 0;

	if (!tb_switch_is_titan_ridge(sw))
		return 0;

	/* Enable PCIe L1 enable only for first hop router (depth = 1) */
	if (tb_route(parent))
		return 0;

	/* Write to downstream PCIe bridge #5 aka Dn4 */
	ret = tb_switch_pcie_bridge_write(sw, 5, 0x143, 0x0c7806b1);
	if (ret)
		return ret;

	/* Write to Upstream PCIe bridge #0 aka Up0 */
	return tb_switch_pcie_bridge_write(sw, 0, 0x143, 0x0c5806b1);
}
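
/*
 * Usage sketch (illustrative only): expected to run once the PCIe
 * tunnel to the first depth router is up:
 *
 *	if (tb_switch_pcie_l1_enable(sw))
 *		tb_sw_warn(sw, "failed to enable PCIe L1\n");
 */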

/**
 * tb_switch_xhci_connect() - Connect internal xHCI
 * @sw: Router whose xHCI to connect
 *
 * Can be called for any router. For Alpine Ridge and Titan Ridge this
 * performs special flows that bring the xHCI functional for any device
 * connected to the Type-C port. Call only after the PCIe tunnel has
 * been established. The function only does the connect if not done
 * already, so it can be called several times for the same router.
 */
int tb_switch_xhci_connect(struct tb_switch *sw)
{
	bool usb_port1, usb_port3, xhci_port1, xhci_port3;
	struct tb_port *port1, *port3;
	int ret;

	port1 = &sw->ports[1];
	port3 = &sw->ports[3];

	if (tb_switch_is_alpine_ridge(sw)) {
		usb_port1 = tb_lc_is_usb_plugged(port1);
		usb_port3 = tb_lc_is_usb_plugged(port3);
		xhci_port1 = tb_lc_is_xhci_connected(port1);
		xhci_port3 = tb_lc_is_xhci_connected(port3);

		/* Figure out correct USB port to connect */
		if (usb_port1 && !xhci_port1) {
			ret = tb_lc_xhci_connect(port1);
			if (ret)
				return ret;
		}
		if (usb_port3 && !xhci_port3)
			return tb_lc_xhci_connect(port3);
	} else if (tb_switch_is_titan_ridge(sw)) {
		ret = tb_lc_xhci_connect(port1);
		if (ret)
			return ret;
		return tb_lc_xhci_connect(port3);
	}

	return 0;
}
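
/*
 * Usage sketch (illustrative only): called once the PCIe tunnel is
 * established, with the matching disconnect below when the tunnel goes
 * away:
 *
 *	if (tb_switch_xhci_connect(sw))
 *		tb_sw_info(sw, "failed to connect internal xHCI\n");
 */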

/**
 * tb_switch_xhci_disconnect() - Disconnect internal xHCI
 * @sw: Router whose xHCI to disconnect
 *
 * The opposite of tb_switch_xhci_connect(). Disconnects xHCI on both
 * ports of the router.
 */
void tb_switch_xhci_disconnect(struct tb_switch *sw)
{
	if (sw->generation == 3) {
		struct tb_port *port1 = &sw->ports[1];
		struct tb_port *port3 = &sw->ports[3];

		tb_lc_xhci_disconnect(port1);
		tb_port_dbg(port1, "disconnected xHCI\n");
		tb_lc_xhci_disconnect(port3);
		tb_port_dbg(port3, "disconnected xHCI\n");
	}
}